diff --git a/.gitattributes b/.gitattributes
index 5bad330e114d64a3d3446e1b88e05534e919a9e8..ada033c5dd41ec9d02f1ff3d0bddb307080ef16d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -74,3 +74,5 @@ testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/3.0 filter=lfs
testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/0.0 filter=lfs diff=lfs merge=lfs -text
testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/4.0 filter=lfs diff=lfs merge=lfs -text
testbed/scverse__scanpy/scanpy/datasets/10x_pbmc68k_reduced.h5ad filter=lfs diff=lfs merge=lfs -text
+testbed/scikit-learn__scikit-learn/doc/logos/identity.pdf filter=lfs diff=lfs merge=lfs -text
+testbed/pyvista__pyvista/tests/plotting/fonts/Mplus2-Regular.ttf filter=lfs diff=lfs merge=lfs -text
diff --git a/testbed/googleapis__python-aiplatform/.coveragerc b/testbed/googleapis__python-aiplatform/.coveragerc
new file mode 100644
index 0000000000000000000000000000000000000000..be254c6b13ca80c682b5834a14d48130c3d63348
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/.coveragerc
@@ -0,0 +1,16 @@
+[run]
+branch = True
+
+[report]
+show_missing = True
+omit =
+ google/cloud/aiplatform/vizier/pyvizier/*
+ google/cloud/aiplatform_v1/*
+ google/cloud/aiplatform_v1beta1/*
+ google/cloud/aiplatform/v1/schema/*
+ google/cloud/aiplatform/v1beta1/schema/*
+exclude_lines =
+ # Re-enable the standard pragma
+ pragma: NO COVER
+ # Ignore debug-only repr
+ def __repr__
diff --git a/testbed/googleapis__python-aiplatform/.flake8 b/testbed/googleapis__python-aiplatform/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..87f6e408c47ddccb5e073f872a56b8e1eb310b16
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/.flake8
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+[flake8]
+ignore = E203, E231, E266, E501, W503
+exclude =
+ # Exclude generated code.
+ **/proto/**
+ **/gapic/**
+ **/services/**
+ **/types/**
+ *_pb2.py
+
+ # Standard linting exemptions.
+ **/.nox/**
+ __pycache__,
+ .git,
+ *.pyc,
+ conf.py
diff --git a/testbed/googleapis__python-aiplatform/.gitignore b/testbed/googleapis__python-aiplatform/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..d083ea1ddc3e65e9417f08ec80cec3f4d2be540f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/.gitignore
@@ -0,0 +1,64 @@
+*.py[cod]
+*.sw[op]
+
+# C extensions
+*.so
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+.eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+__pycache__
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.nox
+.cache
+.pytest_cache
+
+
+# Mac
+.DS_Store
+
+# JetBrains
+.idea
+
+# VS Code
+.vscode
+
+# emacs
+*~
+
+# Built documentation
+docs/_build
+bigquery/docs/generated
+docs.metadata
+
+# Virtual environment
+env/
+venv/
+
+# Test logs
+coverage.xml
+*sponge_log.xml
+
+# System test environment variables.
+system_tests/local_test_setup
+
+# Make sure a generated file isn't accidentally committed.
+pylintrc
+pylintrc.test
diff --git a/testbed/googleapis__python-aiplatform/.pre-commit-config.yaml b/testbed/googleapis__python-aiplatform/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..12ad9fb7c3665c542ed945c92323b3c33c478792
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/.pre-commit-config.yaml
@@ -0,0 +1,31 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-yaml
+- repo: https://github.com/psf/black
+ rev: 22.3.0
+ hooks:
+ - id: black
+- repo: https://github.com/pycqa/flake8
+ rev: 6.1.0
+ hooks:
+ - id: flake8
diff --git a/testbed/googleapis__python-aiplatform/.release-please-manifest.json b/testbed/googleapis__python-aiplatform/.release-please-manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..eadb1b22c18bcc2d5483454918120a9c31f40a65
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ ".": "1.75.0"
+}
diff --git a/testbed/googleapis__python-aiplatform/.repo-metadata.json b/testbed/googleapis__python-aiplatform/.repo-metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..d207a3589621516595ce36a75e394fd210987049
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/.repo-metadata.json
@@ -0,0 +1,14 @@
+{
+ "name": "aiplatform",
+ "name_pretty": "AI Platform",
+ "product_documentation": "https://cloud.google.com/ai-platform",
+ "client_documentation": "https://cloud.google.com/python/docs/reference/aiplatform/latest",
+ "issue_tracker": "https://issuetracker.google.com/savedsearches/559744",
+ "release_level": "stable",
+ "language": "python",
+ "library_type": "GAPIC_COMBO",
+ "repo": "googleapis/python-aiplatform",
+ "distribution_name": "google-cloud-aiplatform",
+ "api_id": "aiplatform.googleapis.com",
+ "api_shortname": "aiplatform"
+}
diff --git a/testbed/googleapis__python-aiplatform/.trampolinerc b/testbed/googleapis__python-aiplatform/.trampolinerc
new file mode 100644
index 0000000000000000000000000000000000000000..a7dfeb42c6d0791bdccd62318976265f9348c331
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/.trampolinerc
@@ -0,0 +1,61 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Add required env vars here.
+required_envvars+=(
+)
+
+# Add env vars which are passed down into the container here.
+pass_down_envvars+=(
+ "NOX_SESSION"
+ ###############
+ # Docs builds
+ ###############
+ "STAGING_BUCKET"
+ "V2_STAGING_BUCKET"
+ ##################
+ # Samples builds
+ ##################
+ "INSTALL_LIBRARY_FROM_SOURCE"
+ "RUN_TESTS_SESSION"
+ "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ # Target directories.
+ "RUN_TESTS_DIRS"
+ # The nox session to run.
+ "RUN_TESTS_SESSION"
+)
+
+# Prevent unintentional override on the default image.
+if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \
+ [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image."
+ exit 1
+fi
+
+# Define the default value if it makes sense.
+if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then
+ TRAMPOLINE_IMAGE_UPLOAD=""
+fi
+
+if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ TRAMPOLINE_IMAGE=""
+fi
+
+if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then
+ TRAMPOLINE_DOCKERFILE=""
+fi
+
+if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then
+ TRAMPOLINE_BUILD_FILE=""
+fi
diff --git a/testbed/googleapis__python-aiplatform/CHANGELOG.md b/testbed/googleapis__python-aiplatform/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..6fc2c86f79d2fea1d931d9a17dca764cd8e5887d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/CHANGELOG.md
@@ -0,0 +1,2803 @@
+# Changelog
+
+## [1.75.0](https://github.com/googleapis/python-aiplatform/compare/v1.74.0...v1.75.0) (2024-12-17)
+
+
+### Features
+
+* A new field `list_all_versions` to `ListPublisherModelsRequest` ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* A new value `NVIDIA_H100_MEGA_80GB` is added to enum `AcceleratorType` ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* Add new `RequiredReplicaCount` field to DedicatedResources in MachineResources ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* Add new `Status` field to DeployedModel in Endpoint ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* Add new `Status` field to DeployedModel in Endpoint ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* Add properties feature_stats_and_anomalies to `FeatureMonitorJob` ([92feb60](https://github.com/googleapis/python-aiplatform/commit/92feb600b72efa384f6bbc075061403dc2663ca7))
+* Add service renaming to GoSettings ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* Add support for opt-in debug logging ([#4734](https://github.com/googleapis/python-aiplatform/issues/4734)) ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* Add workbench_runtime and kernel_name to NotebookExecutionJob ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* Add workbench_runtime and kernel_name to NotebookExecutionJob ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* Feature Store - Support returning stats in get feature ([9a12097](https://github.com/googleapis/python-aiplatform/commit/9a12097aef6fc0aa2a4685688e366fca93dfbae5))
+* GenAI - Added support for `GenerationConfig.response_modalities` ([78898fc](https://github.com/googleapis/python-aiplatform/commit/78898fc70fc43a5fddf1d3b5af54c789f50f4540))
+* Support `stream_query` in LangChain Agent Templates in the Python Reasoning Engine Client ([99f613b](https://github.com/googleapis/python-aiplatform/commit/99f613b1c029f4ba7379fdf7b1c5ea653f5021b0))
+* Support streaming in the Reasoning Engine Python client. ([a604a2e](https://github.com/googleapis/python-aiplatform/commit/a604a2e5d037836691c30625dc57311baefc74d3))
+
+
+### Bug Fixes
+
+* RAG - Fix the uri used for upload_file ([05f27b6](https://github.com/googleapis/python-aiplatform/commit/05f27b60e142433889824f3bbd6caa93ec56083b))
+* Rag - fix upload file environment mismatch ([b9fecea](https://github.com/googleapis/python-aiplatform/commit/b9fecea66c95abf1ecc8dce4e72374daa7220ec3))
+
+
+### Documentation
+
+* A comment for field `encryption_spec` in message `.google.cloud.aiplatform.v1.NotebookExecutionJob` is changed ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+* A comment for field `encryption_spec` in message `.google.cloud.aiplatform.v1beta1.NotebookExecutionJob` is changed ([4b7799b](https://github.com/googleapis/python-aiplatform/commit/4b7799bf1cb64edf2a3485e1ac593015122a7631))
+
+## [1.74.0](https://github.com/googleapis/python-aiplatform/compare/v1.73.0...v1.74.0) (2024-12-04)
+
+
+### Features
+
+* Add browse pre-built metrics button for Ipython environments when listing example metrics for Gen AI Evaluation ([58ba55e](https://github.com/googleapis/python-aiplatform/commit/58ba55e2cf19d9c27e5d766072a982411d78880e))
+* Add compatibility for RagRetrievalConfig in rag_store and rag_retrieval ([c52e3e4](https://github.com/googleapis/python-aiplatform/commit/c52e3e4ea63e43346b439c3eaf6b264c83bf1c25))
+* Add deprecation warnings for use of similarity_top_k, vector_search_alpha, and vector_distance_threshold in retrieval_query, use RagRetrievalConfig instead. ([c52e3e4](https://github.com/googleapis/python-aiplatform/commit/c52e3e4ea63e43346b439c3eaf6b264c83bf1c25))
+* Add FeatureMonitorJob Create,Get,List in Vertex AI SDK ([03eb735](https://github.com/googleapis/python-aiplatform/commit/03eb7352cae92579a53064f0354ab84686229b86))
+* Add List FeatureMonitor function to FeatureGroup in Vertex AI SDK ([21bc83b](https://github.com/googleapis/python-aiplatform/commit/21bc83bed512532be8295fec211987af9635dd7b))
+* Add sdk support to inference timeout on cloud-based endpoints (dedicated or PSC). ([f917269](https://github.com/googleapis/python-aiplatform/commit/f917269b35b6582aecabd7a75610b2225407ae1f))
+* Add support for Document AI Layout Parser as a RAG import option ([565c800](https://github.com/googleapis/python-aiplatform/commit/565c80053e32fa4c71b0560806689906eec7a4aa))
+* Add support for ranking field in rag_retrieval_config for rag_store creation. ([6faa1d0](https://github.com/googleapis/python-aiplatform/commit/6faa1d0b3d55c3096489050d91a604b6fb590ae7))
+* Add vector_similarity_threshold support within RagRetrievalConfig in rag_store and rag_retrieval GA and preview versions ([9402b3d](https://github.com/googleapis/python-aiplatform/commit/9402b3de7623497e33fa4add9a4c0adeb76b75c0))
+* Add Vertex RAG service proto to v1 ([a56e4dd](https://github.com/googleapis/python-aiplatform/commit/a56e4ddc4568a178d2f45e6567e5131de5c6b90d))
+* Adding Vertex AI Search Config for RAG corpuses to SDK ([d3d69d6](https://github.com/googleapis/python-aiplatform/commit/d3d69d6d43470f009abd67dd8b6d88c23f7df25a))
+* Fix file type mismatch in uploading eval results to GCS, supported types: CSV, JSON. ([905c766](https://github.com/googleapis/python-aiplatform/commit/905c7667d130482e7412110e9307d84caa9a252e))
+* GenAI - Grounding - Released `VertexAiSearch` and `Retrieval` to GA ([0537fec](https://github.com/googleapis/python-aiplatform/commit/0537fec6cdde03afa82c93324b04eba3e3464aa6))
+* GenAI - Release the Prompt Management feature to Public Preview ([7432c2c](https://github.com/googleapis/python-aiplatform/commit/7432c2ce2e484432dbee047cc13fcfe1f8f21044))
+* GenAI - Support `FunctionDeclaration.response` schema ([4288fec](https://github.com/googleapis/python-aiplatform/commit/4288fec2b92272286d47b16a6a49bfc1f0103fca))
+* GenAI - Tuning - Added support for BYOSA ([7cbda03](https://github.com/googleapis/python-aiplatform/commit/7cbda0311b538807160040a0927a6c79ed85053e))
+* Refactor rag_store and rag_retrieval to use v1 protos ([dfe6d6c](https://github.com/googleapis/python-aiplatform/commit/dfe6d6c35179e67c91c937960af1febcec921524))
+* Support multi-methods in SDK. ([f02692d](https://github.com/googleapis/python-aiplatform/commit/f02692d211e8ae2b692cbdc41fbf4c4296d6e5a7))
+* Support NFS for Ray cluster creation ([1ca9a05](https://github.com/googleapis/python-aiplatform/commit/1ca9a056209a9caef45b1aa324c38c269e47537a))
+* Support RagFileTransformationConfig in upload_file function in V1 sdk ([88ac48c](https://github.com/googleapis/python-aiplatform/commit/88ac48c5d42b14b97561705524cec9afe63b5bac))
+* Update v1 sdk to use new RagCorpus, RagVectorDbConfig, & RagEmbeddingModelConfig proto ([47a5a6d](https://github.com/googleapis/python-aiplatform/commit/47a5a6d4731dac827e4c43e09f51d8c71b43edaa))
+* Update v1 sdk to use new RagFileTransformationConfig proto ([c23c62d](https://github.com/googleapis/python-aiplatform/commit/c23c62dbd4bbe26fddfcbbbe8b7ded502f40305f))
+* Update v1beta1 sdk for RagFileTransformationConfig and Ranking protos ([ffe3230](https://github.com/googleapis/python-aiplatform/commit/ffe3230dcea908e8eba0c727c5f03bf92f731ff9))
+* Update v1beta1 sdk for RagVectorDbConfig & RagCorpus protos. Backward compatible with older fields. ([216a30f](https://github.com/googleapis/python-aiplatform/commit/216a30ff5a687e270e840a8f564baff3d79d48e1))
+
+
+### Bug Fixes
+
+* `get_default_run` method in Experiment class ([9388fc9](https://github.com/googleapis/python-aiplatform/commit/9388fc91b2f51ef558e6376a74dc7eb2165e1f80))
+* Add MatchingEngineIndexConfig enums to proto value converters ([e6d3df8](https://github.com/googleapis/python-aiplatform/commit/e6d3df88d4361db2c661e6a0d0eb574c7136155b))
+* RAG Fix v1 rag_store compatibility with generative_models Tool by changing back to v1beta1 ([e220312](https://github.com/googleapis/python-aiplatform/commit/e22031269f249b3cc3063366e558c78888148498))
+* Remove redundant progress bar update commands within runnable inference ([598c931](https://github.com/googleapis/python-aiplatform/commit/598c931a147b5679327dd19c06c97ffefa729180))
+* Update upperbounds on pydantic version to < 2.10 to mitigate version comparison checks ([c13b6a8](https://github.com/googleapis/python-aiplatform/commit/c13b6a80a84d060821182df132f2d239a2b6c677))
+
+
+### Documentation
+
+* A comment for field `api_key_config` in message `.google.cloud.aiplatform.v1beta1.JiraSource` is changed ([d7dff72](https://github.com/googleapis/python-aiplatform/commit/d7dff722d445f3befe0029f0135a3fbd1b49f8dd))
+* A comment for field `class_method` in message `.google.cloud.aiplatform.v1beta1.StreamQueryReasoningEngineRequest` is changed (from steam_query to stream_query) ([b7f9492](https://github.com/googleapis/python-aiplatform/commit/b7f94921990954e6254020dfc85d01bb0c5c545d))
+* Updated example usage in docstring for rag_retrieval preview and GA ([a1f8bc2](https://github.com/googleapis/python-aiplatform/commit/a1f8bc2793f65cfc65ea3c8a22a14ff4e6dfcd8d))
+* Updated example usage in docstring for rag_store preview and GA ([a1f8bc2](https://github.com/googleapis/python-aiplatform/commit/a1f8bc2793f65cfc65ea3c8a22a14ff4e6dfcd8d))
+
+## [1.73.0](https://github.com/googleapis/python-aiplatform/compare/v1.72.0...v1.73.0) (2024-11-19)
+
+
+### Features
+
+* Add a `nfs_mounts` to RaySpec in PersistentResource API ([6a22bef](https://github.com/googleapis/python-aiplatform/commit/6a22bef757eb975772def918cba7a416f6fd3f70))
+* Add a v1 UpdateEndpointLongRunning API ([6a22bef](https://github.com/googleapis/python-aiplatform/commit/6a22bef757eb975772def918cba7a416f6fd3f70))
+* Add BatchCreateFeatures rpc to feature_registry_service.proto ([68fc5f2](https://github.com/googleapis/python-aiplatform/commit/68fc5f28b15fb5ac6b2440848b52376acbf09ecf))
+* Add BYOSA field to tuning_job ([68fc5f2](https://github.com/googleapis/python-aiplatform/commit/68fc5f28b15fb5ac6b2440848b52376acbf09ecf))
+* Add BYOSA field to tuning_job ([68fc5f2](https://github.com/googleapis/python-aiplatform/commit/68fc5f28b15fb5ac6b2440848b52376acbf09ecf))
+* Add COMET and MetricX to the evaluation SDK ([4135810](https://github.com/googleapis/python-aiplatform/commit/4135810abbd3d134a6f0818e65581c74df0241ee))
+* Add Create FeatureMonitor function to FeatureGroup in Vertex AI SDK ([285ec2c](https://github.com/googleapis/python-aiplatform/commit/285ec2cee01a7b135b82e160a1b27ebb9c60d7a9))
+* Add CustomEnvironmentSpec to NotebookExecutionJob ([6a22bef](https://github.com/googleapis/python-aiplatform/commit/6a22bef757eb975772def918cba7a416f6fd3f70))
+* Add CustomEnvironmentSpec to NotebookExecutionJob ([6a22bef](https://github.com/googleapis/python-aiplatform/commit/6a22bef757eb975772def918cba7a416f6fd3f70))
+* Add default_runtime in Vertex Pipeline SDK preview. ([44587ec](https://github.com/googleapis/python-aiplatform/commit/44587ecc6377cc23adc5fb5a792944a2e15276ed))
+* Add fast_tryout_enabled to FasterDeploymentConfig v1 proto ([68fc5f2](https://github.com/googleapis/python-aiplatform/commit/68fc5f28b15fb5ac6b2440848b52376acbf09ecf))
+* Add FeatureMonitor to FeatureGroup in Vertex AI SDK ([f2233ce](https://github.com/googleapis/python-aiplatform/commit/f2233ceebef523fe7ed9e3a93a3c94a109e8e448))
+* Add PSC automation support to matching engine index endpoint `deploy_index()`, `find_neighbors()`, `match()`, and `read_index_datapoints()`. ([3ab39a4](https://github.com/googleapis/python-aiplatform/commit/3ab39a4536dc72b8a93d30c89bff04f25c724ef5))
+* Add vertex_rag_source to create_feature_view method ([ca61c5a](https://github.com/googleapis/python-aiplatform/commit/ca61c5a4ed5138f01496ee1a80c4f7a221f671d3))
+* Adding fast_tryout_enabled option to Vertex SDK ([fde1b96](https://github.com/googleapis/python-aiplatform/commit/fde1b96db0060cc73dc3174636b16cf30368d61e))
+* Allow setting Vertex Model Garden source model name during model upload ([3af9cc8](https://github.com/googleapis/python-aiplatform/commit/3af9cc8756491dc90fd5b6f4fb960f006d68ca09))
+* Allow user control which class methods to export via `register_operations`. The `class methods` spec will be changed according to user's `register_operations`. ([74077b5](https://github.com/googleapis/python-aiplatform/commit/74077b597527ab220431ebac7ff21f6ab2f7ae85))
+* COMET added to evaluation service proto ([68fc5f2](https://github.com/googleapis/python-aiplatform/commit/68fc5f28b15fb5ac6b2440848b52376acbf09ecf))
+* Expose system_labels field to model deployment APIs in Vertex public preview SDK ([13cede4](https://github.com/googleapis/python-aiplatform/commit/13cede4cff4073225cc374122f80c6f2e85e8f89))
+* Expose system_labels field to model deployment APIs in Vertex Python SDK ([75655af](https://github.com/googleapis/python-aiplatform/commit/75655afd0e3b1affa6558aac97941cdf8642d6f1))
+* GenAI - Grounding - Added grounding dynamic_retrieval config to Vertex SDK ([c39334a](https://github.com/googleapis/python-aiplatform/commit/c39334a63e83d4ab8e4d5cc82b22b9096940fe50))
+* GenAI Evaluation: Release GenAI Evaluation SDK Agent Evaluation features to vertexai.preview module. ([41cd5a8](https://github.com/googleapis/python-aiplatform/commit/41cd5a8a502d7c28a1e68500931fcbc21bbbd243))
+* MetricX added to evaluation service proto ([68fc5f2](https://github.com/googleapis/python-aiplatform/commit/68fc5f28b15fb5ac6b2440848b52376acbf09ecf))
+* Trajectory eval metrics added to evaluation service proto ([6a22bef](https://github.com/googleapis/python-aiplatform/commit/6a22bef757eb975772def918cba7a416f6fd3f70))
+* Vision Models - Add support for Imagen 3 Capabilities to Vertex SDK ([9a30c31](https://github.com/googleapis/python-aiplatform/commit/9a30c3170a7294077681dfd6a16fc46cf7e71cc6))
+
+
+### Bug Fixes
+
+* GenAI - Fixed GAPIC-renamed keys (keys with trailing underscore) in structures returned by the `to_dict` methods. ([9d00424](https://github.com/googleapis/python-aiplatform/commit/9d004246aaa94d6b61881f4fa1cc25ed1cb97db5))
+* Implementation of proto conversion in reasoning engine utils when message is not specified ([99fafe9](https://github.com/googleapis/python-aiplatform/commit/99fafe9044258895a2ff5d04867516a682a08528))
+* Remove check on serving_container_image_uri when artifact_uri and local_model is given ([ccd89b4](https://github.com/googleapis/python-aiplatform/commit/ccd89b41229516c561aee686497ad5af95c844ed))
+
+
+### Documentation
+
+* Automatic Function calling comment update ([91f85ac](https://github.com/googleapis/python-aiplatform/commit/91f85ac2ff91a95eae4ced37a17ec5fc15cd1d28))
+* Clarity and typo fixes for documentation ([6a22bef](https://github.com/googleapis/python-aiplatform/commit/6a22bef757eb975772def918cba7a416f6fd3f70))
+* Fix example rpc naming ([6a22bef](https://github.com/googleapis/python-aiplatform/commit/6a22bef757eb975772def918cba7a416f6fd3f70))
+* GenAI - Grounding - Added sample for grounding with Google Search to the Vertex GenAI SDK README. ([4bf9816](https://github.com/googleapis/python-aiplatform/commit/4bf9816f49c02bcdbf83173b531e3a0b4f9ee04d))
+
+## [1.72.0](https://github.com/googleapis/python-aiplatform/compare/v1.71.1...v1.72.0) (2024-11-12)
+
+
+### Features
+
+* Add AdvancedCompleteQuery API ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add BatchCreateFeatures rpc to feature_registry_service.proto ([acf3113](https://github.com/googleapis/python-aiplatform/commit/acf31138177fbcc29e469c863615cfa70b4ccc19))
+* Add BillingEstimation in data store ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add code execution tool API ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add fast_tryout_enabled to FasterDeploymentConfig message in aiplatform v1beta1 endpoint.proto ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add Feature Monitoring API to Feature Store ([acf3113](https://github.com/googleapis/python-aiplatform/commit/acf31138177fbcc29e469c863615cfa70b4ccc19))
+* Add field `protobuf_pythonic_types_enabled` to message `ExperimentalFeatures` ([acf3113](https://github.com/googleapis/python-aiplatform/commit/acf31138177fbcc29e469c863615cfa70b4ccc19))
+* Add GroundedGenerationService API ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add lite search API to allow public website search with API key ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add new PscInterfaceConfig field to custom_job.proto ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add one_box_page_size on search ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add Sitemap APIs to preview channel ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add StopNotebookRuntime method ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add StopNotebookRuntime method ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Add system labels field to model garden deployments ([acf3113](https://github.com/googleapis/python-aiplatform/commit/acf31138177fbcc29e469c863615cfa70b4ccc19))
+* Add UpdateEndpointLongRunning API in v1beta1 version ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Adding fast_tryout_enabled option to Vertex public preview SDK ([98288b8](https://github.com/googleapis/python-aiplatform/commit/98288b8b2d6ab5592e67cca1bd8914df3a1f19e7))
+* Increase the upperbounds on langchain dependencies to support v0.3+ ([1fca9c0](https://github.com/googleapis/python-aiplatform/commit/1fca9c058d7d8993f6d0c28580f6e0b899e4e83a))
+* Support advanced boost search ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Support Google Workspace search ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Support natural language understanding search ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Support query regex in control match rules ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Support search personalization to preview channel ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+
+
+### Bug Fixes
+
+* Add timeout to prediction rawPredict/streamRawPredict ([b7de16a](https://github.com/googleapis/python-aiplatform/commit/b7de16ae21b2934bc9195bf4bfc14d56adaf7701))
+* Convert float values with no decimals to integers in FunctionCall. ([a1857ed](https://github.com/googleapis/python-aiplatform/commit/a1857ed9bc97b9c6e367a8fa06b62883540a6638))
+* Deepcopy error from baseline_model in pairwiseMetric ([ccc5c85](https://github.com/googleapis/python-aiplatform/commit/ccc5c85a8ba39728f710fe64e8afbe98f84bf50e))
+
+
+### Documentation
+
+* A comment for field `feature_group_id` in message `.google.cloud.aiplatform.v1.CreateFeatureGroupRequest` is changed ([acf3113](https://github.com/googleapis/python-aiplatform/commit/acf31138177fbcc29e469c863615cfa70b4ccc19))
+* A comment for field `unit` in message `.google.api.QuotaLimit` is changed ([acf3113](https://github.com/googleapis/python-aiplatform/commit/acf31138177fbcc29e469c863615cfa70b4ccc19))
+* A comment for message `BatchCreateFeaturesRequest` is modified to call out BatchCreateFeatures ([acf3113](https://github.com/googleapis/python-aiplatform/commit/acf31138177fbcc29e469c863615cfa70b4ccc19))
+* Deprecate asynchronous mode in answer generation ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Deprecate extractive_answers in answer generation ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Keep the API doc up-to-date with recent changes ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* **samples:** Fixed Vertex AI Vector Search Create Index `index_update_method` ([7dff586](https://github.com/googleapis/python-aiplatform/commit/7dff586f38ef53887dca733741f874686d16563d))
+* Update documentation ([169dd44](https://github.com/googleapis/python-aiplatform/commit/169dd447703d7734a0b979cb77a0ab84ef8df68f))
+* Update feature creation message commentary ([acf3113](https://github.com/googleapis/python-aiplatform/commit/acf31138177fbcc29e469c863615cfa70b4ccc19))
+
+## [1.71.1](https://github.com/googleapis/python-aiplatform/compare/v1.71.0...v1.71.1) (2024-10-31)
+
+
+### Bug Fixes
+
+* Audio_timestamp is supported only for some of the models ([df7269e](https://github.com/googleapis/python-aiplatform/commit/df7269eebfab8fa41b18e718b9e70097bdec1201))
+* Ignore AttributeError exception when importing google.auth.aio ([832fe60](https://github.com/googleapis/python-aiplatform/commit/832fe60264177afa78257893b28f4f66d3c736a7))
+* System test case media input file non existent ([c0718e1](https://github.com/googleapis/python-aiplatform/commit/c0718e1126cf6ebc9eeb75c21421d62c2088a340))
+
+## [1.71.0](https://github.com/googleapis/python-aiplatform/compare/v1.70.0...v1.71.0) (2024-10-29)
+
+
+### Features
+
+* Add `text` field for Grounding metadata support chunk output ([8a65b1d](https://github.com/googleapis/python-aiplatform/commit/8a65b1d6457c3335d96f448e6d20b210c9a6acd3))
+* Add audio_timestamp to GenerationConfig ([8a65b1d](https://github.com/googleapis/python-aiplatform/commit/8a65b1d6457c3335d96f448e6d20b210c9a6acd3))
+* Add private async REST support for transport override ([c0b31e2](https://github.com/googleapis/python-aiplatform/commit/c0b31e267b15b3ff9c0a2abaad2462958713cc41))
+* Add support for self-signed JWT for queries on private endpoints ([5025d03](https://github.com/googleapis/python-aiplatform/commit/5025d03d808971102995113a871c2ef78c2fb013))
+* Add support for task type (CODE_RETRIEVAL_QUERY) through get_embeddings. ([7246497](https://github.com/googleapis/python-aiplatform/commit/72464977dfe0068b5406653f65c0c4d99e887cc4))
+* From vertexai.batch_prediction import BatchPredictionJob ([fe53922](https://github.com/googleapis/python-aiplatform/commit/fe53922b40ae7f22d5612e649c6cdb0c0a4ae0b6))
+* GenAI - add labels ([92c13dd](https://github.com/googleapis/python-aiplatform/commit/92c13dd76e3e27f1386ac8b6a5625682c796ab16))
+* GenAI - Added `audio_timestamp` to `GenerationConfig`. ([91c2120](https://github.com/googleapis/python-aiplatform/commit/91c2120eb962a09c9b24a7f7058e00737a271eab))
+* GenAI - Added Anthropic models support in GenAI batch prediction ([0866009](https://github.com/googleapis/python-aiplatform/commit/0866009719e35f615950ee30a23fb82b9b8668f5))
+* GenAI - Tuning - Added support for tuned model rebasing. Added `rebase_tuned_model` to `vertexai.preview.tuning.sft`. ([2cef97f](https://github.com/googleapis/python-aiplatform/commit/2cef97f31bf4d0410c76b73da03805120605ef0c))
+* Introduce DefaultRuntime to PipelineJob ([8a65b1d](https://github.com/googleapis/python-aiplatform/commit/8a65b1d6457c3335d96f448e6d20b210c9a6acd3))
+* Release API key support for GenerateContent to Public Preview ([a4d4e46](https://github.com/googleapis/python-aiplatform/commit/a4d4e4691a173960f4f28f408ee26297151223c4))
+* Support customizing bring-your-own-response eval use case to use any columns ([3e7bf81](https://github.com/googleapis/python-aiplatform/commit/3e7bf819d95167502be6b9f04c7b92e8415e5a68))
+* Support PSC-I ingress in Ray Client ([da76253](https://github.com/googleapis/python-aiplatform/commit/da76253f3b53e81386e0c6e4e2b647cfc4feda2e))
+* Support tuning new text & code embedding model in LLM SDK. ([4268f28](https://github.com/googleapis/python-aiplatform/commit/4268f282823f806418a8f0dc0bf65854381bd444))
+* Update Experiment run names to be compatible with One Platform API ([c4b9ec2](https://github.com/googleapis/python-aiplatform/commit/c4b9ec279ce55842373da1734d2bebc5c6d66513))
+* Vision Models - onboard Image Segmentation. ([ae63a43](https://github.com/googleapis/python-aiplatform/commit/ae63a434d9833a5788348216c4a0b64313f653a8))
+
+
+### Bug Fixes
+
+* Add deprecation warning to Ray version 2.9.3 ([0ce106b](https://github.com/googleapis/python-aiplatform/commit/0ce106b944ce63dfbfa86e4e9efc5ebe317c8a53))
+
+
+### Documentation
+
+* A comment for field `partner_model_tuning_spec` in message `.google.cloud.aiplatform.v1beta1.TuningJob` is changed ([#4538](https://github.com/googleapis/python-aiplatform/issues/4538)) ([914c63c](https://github.com/googleapis/python-aiplatform/commit/914c63c91c8162350d792df4cbc70f3394134c3f))
+* Comment update ([b04196b](https://github.com/googleapis/python-aiplatform/commit/b04196b257236b10ca168dffbcc5039889073246))
+* Update the documentation for the `tabular_dataset` class ([82bb938](https://github.com/googleapis/python-aiplatform/commit/82bb9389deeadb428943ed957cfa460c8f807839))
+
+## [1.70.0](https://github.com/googleapis/python-aiplatform/compare/v1.69.0...v1.70.0) (2024-10-08)
+
+
+### Features
+
+* Add a dynamic retrieval API ([44df243](https://github.com/googleapis/python-aiplatform/commit/44df2430f1a4ba69a24d29a0be88a67670e3d742))
+* Add enable_secure_private_service_connect in service attachment ([44df243](https://github.com/googleapis/python-aiplatform/commit/44df2430f1a4ba69a24d29a0be88a67670e3d742))
+* Add new `PscInterfaceConfig` field to `pipeline_job.proto` ([44df243](https://github.com/googleapis/python-aiplatform/commit/44df2430f1a4ba69a24d29a0be88a67670e3d742))
+* Add psc_automation_configs to DeployIndex v1 ([44df243](https://github.com/googleapis/python-aiplatform/commit/44df2430f1a4ba69a24d29a0be88a67670e3d742))
+* Add TunedModelRef and RebaseTunedModel Api for Vertex GenAiTuningService ([44df243](https://github.com/googleapis/python-aiplatform/commit/44df2430f1a4ba69a24d29a0be88a67670e3d742))
+* Add TunedModelRef and RebaseTunedModel Api for Vertex GenAiTuningService ([44df243](https://github.com/googleapis/python-aiplatform/commit/44df2430f1a4ba69a24d29a0be88a67670e3d742))
+* Add update_corpus method for vertex rag ([09353cf](https://github.com/googleapis/python-aiplatform/commit/09353cfa030325471524be62cd0d87d59b885c38))
+* Automatically end Experiment runs when Tensorboard CustomJob is complete ([30cf221](https://github.com/googleapis/python-aiplatform/commit/30cf22191539ddb41ae2907ef9b2a266f6c2a668))
+* Vision Models - Add new safety filter levels to Vertex AI Image Generation API. ([b1d5007](https://github.com/googleapis/python-aiplatform/commit/b1d5007f07d50838ed596b35db00233343d14faf))
+
+
+### Bug Fixes
+
+* Annotate PipelineJob and PipelineTaskRerunConfig fields as optional ([44df243](https://github.com/googleapis/python-aiplatform/commit/44df2430f1a4ba69a24d29a0be88a67670e3d742))
+* Update the docstring for LangchainAgent ([f7fe2b0](https://github.com/googleapis/python-aiplatform/commit/f7fe2b0f98c2906a6c9ad2dc0cfca26f3df025f9))
+
+
+### Documentation
+
+* Add sample to retrieve experiment backing tensorboard resource name ([138dc1a](https://github.com/googleapis/python-aiplatform/commit/138dc1a8085e3260f1b12f68e973f08d72069a9b))
+* GenAI - update comments in GenerationConfig property ([c670eeb](https://github.com/googleapis/python-aiplatform/commit/c670eeb36a244bc1eca45b5c2c6e32815f16bf62))
+* Limit comment `SupervisedTuningSpec` for 1p tuning ([44df243](https://github.com/googleapis/python-aiplatform/commit/44df2430f1a4ba69a24d29a0be88a67670e3d742))
+
+## [1.69.0](https://github.com/googleapis/python-aiplatform/compare/v1.68.0...v1.69.0) (2024-10-01)
+
+
+### Features
+
+* Add rerun method to pipeline job preview client. ([29dec74](https://github.com/googleapis/python-aiplatform/commit/29dec74c4f828a266829efbdc99b20d8dba9d8f8))
+
+
+### Bug Fixes
+
+* GenAI - Fixed from_dict methods ([3090812](https://github.com/googleapis/python-aiplatform/commit/3090812b1e2d189acf532506c7a654a698bd192c))
+
+## [1.68.0](https://github.com/googleapis/python-aiplatform/compare/v1.67.1...v1.68.0) (2024-09-24)
+
+
+### Features
+
+* A new field `response_logprbs` is added to message `.google.cloud.aiplatform.v1.GenerationConfig` ([#4410](https://github.com/googleapis/python-aiplatform/issues/4410)) ([470933f](https://github.com/googleapis/python-aiplatform/commit/470933f65c4a6d08ca6fa099b09bec31e959b026))
+* Add support for partial failures sink in import rag files. ([8070411](https://github.com/googleapis/python-aiplatform/commit/80704112095a39ea755ac25b3f5e3a76faaa03f1))
+* Add support for SharePoint as a ImportRagFiles source. ([f89df1f](https://github.com/googleapis/python-aiplatform/commit/f89df1f30822d260176487f74c3743cab88a38fd))
+* Adding Vertex Vector Search Vector DB option for RAG corpuses to SDK ([d4193f2](https://github.com/googleapis/python-aiplatform/commit/d4193f227e554f49845d87e98efa06be8dae74b8))
+* Batch_predict method generally-available at TextEmbeddingModel. ([73c0dae](https://github.com/googleapis/python-aiplatform/commit/73c0dae6b084a42c04dcaf53bfe9ba43761c667f))
+* GenAI - Add model routing config to sdk ([c0626fe](https://github.com/googleapis/python-aiplatform/commit/c0626fe015e4befe50f50688159a53ed9b7e583b))
+* GenAI - Add support for logprobs and response_logprobs. ([7acf0f7](https://github.com/googleapis/python-aiplatform/commit/7acf0f718f6b930ec5f1a1873872f92d55742012))
+
+
+### Bug Fixes
+
+* Fix rag corpus creation error ([d25edce](https://github.com/googleapis/python-aiplatform/commit/d25edce9024750f12293b9f3767c8636f7f58c3e))
+* Fix typo in eval_task docstring ([86fc215](https://github.com/googleapis/python-aiplatform/commit/86fc215cb854c006a0836a6e8f98e6d40c191fee))
+* Fix typos in evaluation example metric prompt templates. ([5f4d586](https://github.com/googleapis/python-aiplatform/commit/5f4d586d4f2d20f48f0c3bdd3ea00ec8e5bcdd77))
+* GenAI - Fixed `GenerativeModel.compute_tokens` for v1 API ([4637b4c](https://github.com/googleapis/python-aiplatform/commit/4637b4cd601357b5d15967ff652338667e2832d1))
+* Tensorboard - Fix error in tensorboard batch upload of nested dirs ([86be328](https://github.com/googleapis/python-aiplatform/commit/86be3282334347d29220d849d72dca5238c02920))
+
+
+### Documentation
+
+* Update Gemini docs ([c561aa6](https://github.com/googleapis/python-aiplatform/commit/c561aa6bbc25021642399c0d4bc0859ac1693d18))
+* Update Gemini docs ([2b84142](https://github.com/googleapis/python-aiplatform/commit/2b84142abc47f4be1324a653441ab148ecf8e28b))
+
+## [1.67.1](https://github.com/googleapis/python-aiplatform/compare/v1.67.0...v1.67.1) (2024-09-18)
+
+
+### Bug Fixes
+
+* Fix rag corpus creation error ([6083b93](https://github.com/googleapis/python-aiplatform/commit/6083b93848be6ec975c95cd072c8d86357339d02))
+
+## [1.67.0](https://github.com/googleapis/python-aiplatform/compare/v1.66.0...v1.67.0) (2024-09-17)
+
+
+### Features
+
+* Add support for partial failures sink in import rag files. ([07e471e](https://github.com/googleapis/python-aiplatform/commit/07e471e0a069551f2c855e167e549fa92ac6af95))
+* Adding Feature Store Vector DB option for RAG corpuses to SDK ([cfc3421](https://github.com/googleapis/python-aiplatform/commit/cfc3421fe8a883d459b66ed8c9f39697ded23f20))
+* Adding Pinecone Vector DB option for RAG corpuses to SDK ([f78b953](https://github.com/googleapis/python-aiplatform/commit/f78b953f561b8697d07a530e89c7e727db1161ed))
+* Adding Vertex Vector Search Vector DB option for RAG corpuses to SDK ([f882657](https://github.com/googleapis/python-aiplatform/commit/f882657183e34c8e07baa4b8dc9f45ed8bca9db7))
+* Allow customizing pipeline caching options for model evaluation jobs. ([73490b2](https://github.com/googleapis/python-aiplatform/commit/73490b22a239cb1a3c31349f8db6cfbc5232e231))
+* GenAI - Switched the GA version of the `generative_models` classes to use the v1 service APIs instead of v1beta1 ([66d84af](https://github.com/googleapis/python-aiplatform/commit/66d84afdd5b20f70b3ff62f25cc32ac0b324d5d5))
+
+
+### Bug Fixes
+
+* GenAI - Fixed `GenerativeModel.compute_tokens` for v1 API ([0de2987](https://github.com/googleapis/python-aiplatform/commit/0de298786c43427cb1a20b91cbabd1ce921c16da))
+* Tensorboard - Fix error in tensorboard batch upload of nested dirs ([ab157c8](https://github.com/googleapis/python-aiplatform/commit/ab157c8ead718b2a1a1d13306c1256c1cb2561f1))
+
+
+### Documentation
+
+* Manually add summary overview page. ([0bc608a](https://github.com/googleapis/python-aiplatform/commit/0bc608a9c045007f12325231ed7f0069a40f469b))
+
+## [1.66.0](https://github.com/googleapis/python-aiplatform/compare/v1.65.0...v1.66.0) (2024-09-11)
+
+
+### Features
+
+* Add max_wait_duration option to custom jobs. ([ee65917](https://github.com/googleapis/python-aiplatform/commit/ee65917fbc1edc58a7e57c3601f9329ffa1304b0))
+* Add Ray 2.33 support to SDK Client Builder, remove deprecated protocol_version from ray client context. ([708a67b](https://github.com/googleapis/python-aiplatform/commit/708a67b9b781d41564058904d60af7db9beafbe0))
+* Add support for version 2.33 for RoV Bigquery read/write, remove dead code from version 2.4 ([a4b6c60](https://github.com/googleapis/python-aiplatform/commit/a4b6c60051efe61ad590ede8682bd36b79cafc94))
+* Update Ray system tests to be compatible with new RoV 2.33 changes ([8c7bf27](https://github.com/googleapis/python-aiplatform/commit/8c7bf2724037f1ad1081d29e9534f8d26b18d95f))
+* Update setup.py for including Ray v2.33, restrict RoV prediction to 2.9.3 for now ([71c6f3c](https://github.com/googleapis/python-aiplatform/commit/71c6f3c6b48a001bfdbe5f5b47d787d4797ae9bf))
+
+
+### Bug Fixes
+
+* Identify nested Vertex Tensorboard profile runs for uploading when standard event data is not present ([0a08027](https://github.com/googleapis/python-aiplatform/commit/0a08027df7a7442af63522039445500ce1fdb7f1))
+* Move region tag to include imports ([6d1f7fd](https://github.com/googleapis/python-aiplatform/commit/6d1f7fdaadade0f9f6a77c136490fac58d054ca8))
+* Move region tag to include imports ([#4357](https://github.com/googleapis/python-aiplatform/issues/4357)) ([6d1f7fd](https://github.com/googleapis/python-aiplatform/commit/6d1f7fdaadade0f9f6a77c136490fac58d054ca8))
+* Rollback change to tensorboard uploader causing increased latency ([879dbcd](https://github.com/googleapis/python-aiplatform/commit/879dbcd1ed49bee573df65e764914b708382404c))
+
+
+### Documentation
+
+* Update SDK Job Submission with Ray v2.33 updated comments ([7fda11f](https://github.com/googleapis/python-aiplatform/commit/7fda11fd25b5a78fcca69e48f9c734f2a8f86eb0))
+
+## [1.65.0](https://github.com/googleapis/python-aiplatform/compare/v1.64.0...v1.65.0) (2024-09-04)
+
+
+### ⚠ BREAKING CHANGES
+
+* Tokenization - Deprecated `ComputeTokenResult.token_info_list` in favor of `ComputeTokenResult.tokens_info`
+
+### Features
+
+* Add support for system instruction and tools in tokenization. ([72fcc06](https://github.com/googleapis/python-aiplatform/commit/72fcc063ed4a086da0ad37ec2ac58860d4e79051))
+* Add vector search alpha to rag retrieval for hybrid search ranking ([6624ebe](https://github.com/googleapis/python-aiplatform/commit/6624ebe22726942dd70781122352f47268ee2dee))
+* Adding Weaviate Vector DB option for RAG corpuses to SDK ([9b28202](https://github.com/googleapis/python-aiplatform/commit/9b28202a1bb17f54e042301d6cdac3b6aa826797))
+* GenAI - Added `system_instruction` and `tools` support to `GenerativeModel.count_tokens` ([50fca69](https://github.com/googleapis/python-aiplatform/commit/50fca693b2f3b1a0b61867dc136be5a468fb2b2f))
+* GenAI - Added Llama3 support in GenAI batch prediction ([6166152](https://github.com/googleapis/python-aiplatform/commit/6166152844dc0078f7a5a02355ef3555cc428cfa))
+* PrivateEndpoint.stream_raw_predict ([197f333](https://github.com/googleapis/python-aiplatform/commit/197f333be5a075d41f98b762cd933cd2e89cecae))
+* Support reserved_ip_ranges for VPC network in Ray on Vertex cluster ([36a56b9](https://github.com/googleapis/python-aiplatform/commit/36a56b99f9e53d19d80c2bff3bf55c208988c518))
+* Tokenization - Deprecated `ComputeTokenResult.token_info_list` in favor of `ComputeTokenResult.tokens_info` ([efbcb54](https://github.com/googleapis/python-aiplatform/commit/efbcb54e0d5df4d65a79e60afdbc5c328538aef6))
+
+
+### Bug Fixes
+
+* Tensorboard - Fixed bug in tensorboard uploader where profile logs are not uploaded from nested run directories. ([37627de](https://github.com/googleapis/python-aiplatform/commit/37627de4ec12b8c51abf41524b9e7e3adf2dab54))
+* Tokenizers - Fixed `Tokenizer.compute_tokens` ([c29fa5d](https://github.com/googleapis/python-aiplatform/commit/c29fa5d98fc1c1550c039e84ff2f5725818c2231))
+
+## [1.64.0](https://github.com/googleapis/python-aiplatform/compare/v1.63.0...v1.64.0) (2024-08-27)
+
+
+### Features
+
+* Endpoint - Add support for Prediction dedicated endpoint. predict/rawPredict/streamRawPredict can use dedicated DNS to access the dedicated endpoint. ([3d68777](https://github.com/googleapis/python-aiplatform/commit/3d687777d39b00280c22d2a14ddde3ba644febf7))
+* GenAI - Added the model Distillation feature (private preview) ([a0d4ff2](https://github.com/googleapis/python-aiplatform/commit/a0d4ff20ceb1c48806d1711fdb2691dc34f9f1db))
+* Grounding - Allow initialization of `grounding.VertexAISearch` with full resource name or data store ID, project ID, and location. ([f334321](https://github.com/googleapis/python-aiplatform/commit/f334321694bb3be1a421ee19a33fc973e5455da9))
+* Evaluation - Make Rouge class available in base level init ([aed82a1](https://github.com/googleapis/python-aiplatform/commit/aed82a1bd5d8942ded4dd325a4eb1a5f73bc50c7))
+* Feature Store - Read for online store w/private service connect ([7af80c6](https://github.com/googleapis/python-aiplatform/commit/7af80c624e026cfe8dda2d1644467a517f49b78f))
+* Ray - Support autoscaling in Ray on Vertex ([961da42](https://github.com/googleapis/python-aiplatform/commit/961da429683db113a822300342484aaf1128cfc0))
+
+
+### Bug Fixes
+
+* Fix error in tensorboard uploader thrown when time_series_id is None ([d59a052](https://github.com/googleapis/python-aiplatform/commit/d59a0522ddc2131ab39b052e742a6472f84e0a5a))
+* Evaluation - Fix typo in prompt templates: ([c8fa7a8](https://github.com/googleapis/python-aiplatform/commit/c8fa7a8cf53165354fa89e38fffc0ef4a821e211))
+
+
+### Documentation
+
+* **samples:** Adding code sample for vector search create streaming index ([71464e7](https://github.com/googleapis/python-aiplatform/commit/71464e7d2a57fa6770d2fcb7c5c0e669055c4cdb))
+
+## [1.63.0](https://github.com/googleapis/python-aiplatform/compare/v1.62.0...v1.63.0) (2024-08-20)
+
+
+### Features
+
+* A new field `satisfies_pzs` is added to message `.google.cloud.aiplatform.v1.BatchPredictionJob` ([#4192](https://github.com/googleapis/python-aiplatform/issues/4192)) ([6919037](https://github.com/googleapis/python-aiplatform/commit/6919037e9513e922e9ffe197e68a99fb343c4fff))
+* Add advanced PDF parsing option for RAG file import ([6e1dc06](https://github.com/googleapis/python-aiplatform/commit/6e1dc0658ffd875f4a3bbcab62976e15e997102e))
+* Add multithreading for custom metric computation. ([2c93fc1](https://github.com/googleapis/python-aiplatform/commit/2c93fc17b4a76623209b7699a73d4e6c9c27cc81))
+* Add progress bar for generating inference. ([b78714f](https://github.com/googleapis/python-aiplatform/commit/b78714f3cf8fc22f7caa193a7398efe3626c2c5f))
+* Add progress bar to custom metrics. ([3974aec](https://github.com/googleapis/python-aiplatform/commit/3974aec92595870b6f33ecd016763f59d6630898))
+* Add Prompt class support for configs and Prompt.generate_content wrapper ([7f1e031](https://github.com/googleapis/python-aiplatform/commit/7f1e0313842546b7e911ee3ef06d7193deb64a91))
+* GenAI - Added seed parameter to the GenerationConfig class ([9f1e073](https://github.com/googleapis/python-aiplatform/commit/9f1e0739d837b3110b40b8806514ca3e49e2b1da))
+* GenAI - Added the `Candidate.avg_logprobs` property ([de80695](https://github.com/googleapis/python-aiplatform/commit/de80695ad2359361a698cffbde2336417297ef35))
+* GenAI - Released the `Prompt` feature to Public Preview ([64eeab8](https://github.com/googleapis/python-aiplatform/commit/64eeab8b3404e87c0cc19fb6862ff51ec8b95954))
+* GenAI Evaluation: Add generic model-based `PointwiseMetric` and `PairwiseMetric` classes that allow customizing metric prompt templates. Add `PointwiseMetricPromptTemplate`, `PairwiseMetricPromptTemplate` classes to help formulate and customize metric prompt templates. Add `metric_column_mapping` parameter to `EvalTask` for metric prompt template input variable name mapping. ([fd38b49](https://github.com/googleapis/python-aiplatform/commit/fd38b49231bd1b35af57056b9e69a5427ddb114c))
+* GenAI Evaluation: Open source model-based metric prompt template examples for Gemini 1.5 Pro autorater. Add `MetricPromptTemplateExamples` class to help retrieve model-based metric prompt templates. ([fd38b49](https://github.com/googleapis/python-aiplatform/commit/fd38b49231bd1b35af57056b9e69a5427ddb114c))
+* GenAI Evaluation: Release GenAI Evaluation SDK GA features to `vertexai.preview` module. ([fd38b49](https://github.com/googleapis/python-aiplatform/commit/fd38b49231bd1b35af57056b9e69a5427ddb114c))
+* Publish GenAI Evaluation SDK GA features to `vertexai.evaluation` module. Switch GenAI Evaluation Service client to v1 version. ([45e4251](https://github.com/googleapis/python-aiplatform/commit/45e42516fbc47db1c44a7669f2730a1590a992c2))
+
+
+### Bug Fixes
+
+* Add support of display_name to create_cached_content in python SDK ([ecc2d54](https://github.com/googleapis/python-aiplatform/commit/ecc2d54a84c03f7d06e987fba5f5c67fba109ce0))
+* Always upload the pickled object and dependencies tarball when creating ReasoningEngine ([34ef5a3](https://github.com/googleapis/python-aiplatform/commit/34ef5a35bd91aea53c89650c20962dae29b3a535))
+* Remove grouding attribution ([f6ece65](https://github.com/googleapis/python-aiplatform/commit/f6ece65d8f2933ddcb4ec1a08784f8d2c365f2de))
+
+
+### Documentation
+
+* Update Prompt docstring for batch prompting ([e96b6e6](https://github.com/googleapis/python-aiplatform/commit/e96b6e6f0574b96fc4b61f99c671ef6646fc4956))
+
+## [1.62.0](https://github.com/googleapis/python-aiplatform/compare/v1.61.0...v1.62.0) (2024-08-13)
+
+
+### Features
+
+* Add metadata to evaluation result. ([375095e](https://github.com/googleapis/python-aiplatform/commit/375095e72cc4f43611710372a1e36753a891a710))
+* Add Prompt class for multimodal prompt templating ([1bdc235](https://github.com/googleapis/python-aiplatform/commit/1bdc235ea64f8d63ce9d60d88cb873ee341d3ff9))
+* Add support for query method in Vertex AI Extension SDK ([0008735](https://github.com/googleapis/python-aiplatform/commit/0008735968606a716add88072cff76f2fc552d7b))
+* Add support for reservation affinity in custom training jobs. ([802609b](https://github.com/googleapis/python-aiplatform/commit/802609b1f5e5d8d41a77dafb5b1a2dbf01f2bd30))
+* Add support for strategy in custom training jobs. ([a076191](https://github.com/googleapis/python-aiplatform/commit/a076191b8726363e1f7c47ef8343eb86cebf9918))
+* Adding spot, reservation_affinity to Vertex SDK ([3e785bd](https://github.com/googleapis/python-aiplatform/commit/3e785bd9c9d3d11197ef930f563ee96231a67d84))
+* Support api keys in initializer and create_client ([7404f67](https://github.com/googleapis/python-aiplatform/commit/7404f679246e41e0009ec2d49f05d669eb357f71))
+* Support creating optimized online store with private service connect ([659ba3f](https://github.com/googleapis/python-aiplatform/commit/659ba3f287f9aa78840d4b9b9ca216002d5f1e6a))
+* Support disable Cloud logging in Ray on Vertex ([accaa97](https://github.com/googleapis/python-aiplatform/commit/accaa9750d98b7a37b08da3bd2058d9cdd03bd5c))
+* Support PSC-Interface in Ray on Vertex ([accaa97](https://github.com/googleapis/python-aiplatform/commit/accaa9750d98b7a37b08da3bd2058d9cdd03bd5c))
+
+
+### Bug Fixes
+
+* Added credentials, project, and location on PipelineJobSchedule init ([281c171](https://github.com/googleapis/python-aiplatform/commit/281c1710afc6cac49c02d926bee7a6c43b6ef851))
+* Avoid breakage of langchain from orjson 3.10.7 ([c990f73](https://github.com/googleapis/python-aiplatform/commit/c990f73845f38e58ba2dddb372ad2f84d4a05479))
+* Deprecate disable_attribution in GoogleSearchRetrieval. ([c68d559](https://github.com/googleapis/python-aiplatform/commit/c68d559b9d0fd7288b6775f57d05f474f5f7920a))
+
+
+### Documentation
+
+* Update the docstring for compute_tokens method. ([849e8d4](https://github.com/googleapis/python-aiplatform/commit/849e8d409e4838cad0a020231b806b0c9ef587ce))
+
+## [1.61.0](https://github.com/googleapis/python-aiplatform/compare/v1.60.0...v1.61.0) (2024-08-05)
+
+
+### Features
+
+* Add a warning message for scheduled deprecation of Coherence metric class ([7f238fb](https://github.com/googleapis/python-aiplatform/commit/7f238fb3cebc44893b4e6959a77743cc4d96138e))
+* Add deprecation messages for all model-based metric classes ([71c0fd3](https://github.com/googleapis/python-aiplatform/commit/71c0fd397139a95b6045f898e906ce11b2e7e8ce))
+* Add support for task type (CODE_RETRIEVAL_QUERY) through get_embeddings. ([f2ce1e4](https://github.com/googleapis/python-aiplatform/commit/f2ce1e4caea9f344e39fc3232f697b1a6ea4f99a))
+* Add system_instruction to LangchainAgent template. ([c71c3dd](https://github.com/googleapis/python-aiplatform/commit/c71c3ddbfeaa577dfce683b3299d94e77d1c4895))
+* Adding Slack and Jira data connector for RAG to SDK ([d92e7c9](https://github.com/googleapis/python-aiplatform/commit/d92e7c91d280dd417d2c2a2cf5abc36592888593))
+* Allow protobuf 5.x ([ce9cd5d](https://github.com/googleapis/python-aiplatform/commit/ce9cd5def14597822c1d071e438cf63b6d4ba3ca))
+* LVM - Release `ImageGenerationModel` to GA ([718c199](https://github.com/googleapis/python-aiplatform/commit/718c1997778310b6898344b2e5a34513e7a82e5f))
+* Support "update" for reasoning engine. ([b73ef3e](https://github.com/googleapis/python-aiplatform/commit/b73ef3eaa2d88dbc8071e3a4f0c7da934683fc2a))
+* Update Rapid Evaluation Service QPS. Add a customizable evaluation service QPS parameter. ([9ee9289](https://github.com/googleapis/python-aiplatform/commit/9ee9289fbe5face719515e453d4f81648b44e7b1))
+
+
+### Documentation
+
+* Change init sample to use vertexai ([829e0f6](https://github.com/googleapis/python-aiplatform/commit/829e0f6fd286cf2de2ac307a836305766473faef))
+* Make small fixes to file import documentation ([f7d65c3](https://github.com/googleapis/python-aiplatform/commit/f7d65c32948c54bcf3a6927639f2173b556bb310))
+
+## [1.60.0](https://github.com/googleapis/python-aiplatform/compare/v1.59.0...v1.60.0) (2024-07-24)
+
+
+### Features
+
+* Add preflight validations to PipelineJob submit and run methods. ([c5a3535](https://github.com/googleapis/python-aiplatform/commit/c5a35354485a577dd5477449bc4bdcd7866a8df4))
+* Add support for langchain v0.2+ package versions in default installation ([259b638](https://github.com/googleapis/python-aiplatform/commit/259b638300054e13b2dfe1d3f32d7126bbb18b15))
+* GenAI - Added tokenization support via `GenerativeModel.compute_tokens` ([cfe0cc6](https://github.com/googleapis/python-aiplatform/commit/cfe0cc62cbf8dc12b4b021377ddd613d1072fe95))
+* GenAI - ContextCaching - allow from_cached_content to take the cached_content resource name ([8f53902](https://github.com/googleapis/python-aiplatform/commit/8f53902b933f3abf0c9a222f45c2fa7ada727505))
+* Make count_tokens generally-available at TextEmbeddingModel. ([efb8413](https://github.com/googleapis/python-aiplatform/commit/efb84134094ab87d6a2fac48a1f2f1b2199c1818))
+
+
+### Bug Fixes
+
+* Avoid throw error when Part.text is empty in modality content checks ([bbd4a49](https://github.com/googleapis/python-aiplatform/commit/bbd4a49d398052ba2c20e09f8f052e6f766d8fca))
+* Correct logit_bias type annotation to accept keys as strings ([2676d25](https://github.com/googleapis/python-aiplatform/commit/2676d25e62d1bda68b0fbef274d0e669a6670415))
+* Create FV embedding dimensions sample - dimensions should be an int ([2aa221e](https://github.com/googleapis/python-aiplatform/commit/2aa221ec994fea63bd73e2cfe760a314b990e4b0))
+* Fix the sync option for Model Monitor job creation ([22151e2](https://github.com/googleapis/python-aiplatform/commit/22151e29e752dd8f27188046f8c8866c004ca196))
+* Include DeploymentResourcePool class in aiplatform top-level sdk module ([ecc4f09](https://github.com/googleapis/python-aiplatform/commit/ecc4f09054b3e314b51ebf622b7241a5ab4ff072))
+* Overriding the current TracerProvider when enabling tracing ([1476c10](https://github.com/googleapis/python-aiplatform/commit/1476c10f2fd91c02cd98799564a33ede742bc6e0))
+* Pass the project ID from vertexai.init to CloudTraceSpanExporter when enable_tracing=True for LangchainAgent ([3ec043e](https://github.com/googleapis/python-aiplatform/commit/3ec043eefb053739f767d5199b1941bbc3c49120))
+
+
+### Documentation
+
+* GenAI - Update README.md for Vertex Generative AI SDK for Python to add subsections to the right nav. ([42af742](https://github.com/googleapis/python-aiplatform/commit/42af742d808abdca56b84b3381388a36c8454f1b))
+
+## [1.59.0](https://github.com/googleapis/python-aiplatform/compare/v1.58.0...v1.59.0) (2024-07-09)
+
+
+### Features
+
+* Add model and contents fields to ComputeTokensRequest v1 ([f6e7b9c](https://github.com/googleapis/python-aiplatform/commit/f6e7b9c0f1656edba0c69d02475c2a7337fefb99))
+* Add model and contents fields to ComputeTokensRequest v1beta1 ([f6e7b9c](https://github.com/googleapis/python-aiplatform/commit/f6e7b9c0f1656edba0c69d02475c2a7337fefb99))
+* Add role field to TokensInfo v1 ([f6e7b9c](https://github.com/googleapis/python-aiplatform/commit/f6e7b9c0f1656edba0c69d02475c2a7337fefb99))
+* Add role field to TokensInfo v1beta1 ([f6e7b9c](https://github.com/googleapis/python-aiplatform/commit/f6e7b9c0f1656edba0c69d02475c2a7337fefb99))
+* GenAI - Tuning - Released the Supervised Fine Tuning feature o GA ([ae47639](https://github.com/googleapis/python-aiplatform/commit/ae47639c1dc03a89d83c8de1609aaa25af9a1368))
+
+
+### Bug Fixes
+
+* **deps:** Require proto-plus 1.22.3 ([4131e65](https://github.com/googleapis/python-aiplatform/commit/4131e6583799d16b5032fecd73d4539fb05f0cd1))
+* Offline store - set application name + remove session param ([7395665](https://github.com/googleapis/python-aiplatform/commit/7395665bcd847a62b25392d98848a6fb130f5286))
+
+## [1.58.0](https://github.com/googleapis/python-aiplatform/compare/v1.57.0...v1.58.0) (2024-07-03)
+
+
+### Features
+
+* Add deploy_metadata to PublisherModel.Deploy v1 ([71e41c8](https://github.com/googleapis/python-aiplatform/commit/71e41c8eeb0e081d67660161a31f6a228d7b0502))
+* Add deploy_metadata to PublisherModel.Deploy v1beta1 ([b5c3cdd](https://github.com/googleapis/python-aiplatform/commit/b5c3cdd737acd695301c9a564d8f91371288f9f1))
+* Add display tuning job button for Ipython environments when getting an existing job ([872b455](https://github.com/googleapis/python-aiplatform/commit/872b455bcdda59d73d7060aaaa20a0b0e86e8cbb))
+* Add private_service_connect_config and service_attachment fields to DedicatedServingEndpoint v1 ([71e41c8](https://github.com/googleapis/python-aiplatform/commit/71e41c8eeb0e081d67660161a31f6a228d7b0502))
+* Add satisfies_pzs and satisfies_pzi fields to Model v1 ([71e41c8](https://github.com/googleapis/python-aiplatform/commit/71e41c8eeb0e081d67660161a31f6a228d7b0502))
+* Add satisfies_pzs and satisfies_pzi fields to Model v1beta1 ([b5c3cdd](https://github.com/googleapis/python-aiplatform/commit/b5c3cdd737acd695301c9a564d8f91371288f9f1))
+* Add satisfies_pzs and satisfies_pzi fields to Tensorboard v1 ([71e41c8](https://github.com/googleapis/python-aiplatform/commit/71e41c8eeb0e081d67660161a31f6a228d7b0502))
+* Add satisfies_pzs and satisfies_pzi fields to Tensorboard v1beta1 ([b5c3cdd](https://github.com/googleapis/python-aiplatform/commit/b5c3cdd737acd695301c9a564d8f91371288f9f1))
+* Add UpdateDeploymentResourcePool method to DeploymentResourcePoolService v1 ([71e41c8](https://github.com/googleapis/python-aiplatform/commit/71e41c8eeb0e081d67660161a31f6a228d7b0502))
+* Add UpdateDeploymentResourcePool method to DeploymentResourcePoolService v1beta1 ([b5c3cdd](https://github.com/googleapis/python-aiplatform/commit/b5c3cdd737acd695301c9a564d8f91371288f9f1))
+* Add use_effective_order field to BleuSpec v1beta1 ([b5c3cdd](https://github.com/googleapis/python-aiplatform/commit/b5c3cdd737acd695301c9a564d8f91371288f9f1))
+* GenAI - Evaluation - Add a progress bar for evaluation service requests ([bbffb0d](https://github.com/googleapis/python-aiplatform/commit/bbffb0d5bfe0509399c801d849311a6201caa633))
+* GenAI - Evaluation - Implement rate limiter and refactor parallelization for online evaluation service requests ([bbffb0d](https://github.com/googleapis/python-aiplatform/commit/bbffb0d5bfe0509399c801d849311a6201caa633))
+* GenAI - Evaluation - Return partial evaluation results with error logging ([bbffb0d](https://github.com/googleapis/python-aiplatform/commit/bbffb0d5bfe0509399c801d849311a6201caa633))
+* Migrate DeploymentResourcePool and associated functionality to V1 namespace for GA launch of model co-hosting. ([1474d98](https://github.com/googleapis/python-aiplatform/commit/1474d988fa63cbbb3b200634719bc245cab6a448))
+
+
+### Bug Fixes
+
+* `IndexConfig` - use TreeAhConfig as default `algorithm_config`. ([341d287](https://github.com/googleapis/python-aiplatform/commit/341d287719cabdaa1041cdefe9b65b77f1e4bc3b))
+* LVM - Update `Video.load_from_file()` to support storage.googleapis.com links ([b63f960](https://github.com/googleapis/python-aiplatform/commit/b63f9600f743067ae97103dfd43f4392b9f6de56))
+
+
+### Documentation
+
+* Update comments of AutoscalingSpec v1 ([71e41c8](https://github.com/googleapis/python-aiplatform/commit/71e41c8eeb0e081d67660161a31f6a228d7b0502))
+* Update comments of AutoscalingSpec v1beta1 ([b5c3cdd](https://github.com/googleapis/python-aiplatform/commit/b5c3cdd737acd695301c9a564d8f91371288f9f1))
+* Update import paths for Gemini README ([46b3042](https://github.com/googleapis/python-aiplatform/commit/46b30425e8c86588256bf75f857078caeb9d7dee))
+
+## [1.57.0](https://github.com/googleapis/python-aiplatform/compare/v1.56.0...v1.57.0) (2024-06-26)
+
+
+### Features
+
+* Add _ModelBasedMetric base class to `vertexai.preview.evaluation.metrics` and allow metric spec customization ([536f1d5](https://github.com/googleapis/python-aiplatform/commit/536f1d5bfc0e03acaf48097446e6b3c9577961c7))
+* Add `_AutomaticMetric` type and support customizing metric spec for automatic pointwise metrics. Add `Rouge` metric to `vertexai.preview.evaluation.metrics` ([f6b6dee](https://github.com/googleapis/python-aiplatform/commit/f6b6deed2a3973ed684898d30e209af4291b8f3a))
+* Add 12 model-based pointwise metric classes to `vertexai.preview.evaluation.metrics` ([4742a87](https://github.com/googleapis/python-aiplatform/commit/4742a879c8f61c4798194a32243cf9a666477a02))
+* Add display tuning job button for Ipython environments when starting a new job ([b1e9a6c](https://github.com/googleapis/python-aiplatform/commit/b1e9a6c3bdce42278f3c868667a652e8867d393e))
+* Add encryption_spec to TuningJob ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* Add enum value MALFORMED_FUNCTION_CALL to `.google.cloud.aiplatform.v1beta1.content.Candidate.FinishReason` ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* Add fields grounding_chunks and grounding_supports to GroundingMetadata ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* Add MALFORMED_FUNCTION_CALL to FinishReason ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* Add preflight_validations to PipelineJob ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* Add streamRawPredict rpc to prediction service ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* Add support for hybrid queries for private endpoint in Matching Engine Index Endpoint. ([ce65eab](https://github.com/googleapis/python-aiplatform/commit/ce65eab2fd41aee06b378daa4ebd4dd7b53e1fca))
+* Add text field in Segment ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* Change the icon for the model evaluation link to a lightbulb for ipython environments ([7dad7f0](https://github.com/googleapis/python-aiplatform/commit/7dad7f036b6c686379771c76314cae2585e384d8))
+* Endpoint.direct_predict ([9351e1a](https://github.com/googleapis/python-aiplatform/commit/9351e1ac51a0dd5dc0739676b9790e3e202529ad))
+* Endpoint.direct_predict_async ([9351e1a](https://github.com/googleapis/python-aiplatform/commit/9351e1ac51a0dd5dc0739676b9790e3e202529ad))
+* Endpoint.direct_raw_predict ([9351e1a](https://github.com/googleapis/python-aiplatform/commit/9351e1ac51a0dd5dc0739676b9790e3e202529ad))
+* Endpoint.direct_raw_predict_async ([9351e1a](https://github.com/googleapis/python-aiplatform/commit/9351e1ac51a0dd5dc0739676b9790e3e202529ad))
+* Endpoint.stream_direct_predict ([9351e1a](https://github.com/googleapis/python-aiplatform/commit/9351e1ac51a0dd5dc0739676b9790e3e202529ad))
+* Endpoint.stream_direct_raw_predict ([9351e1a](https://github.com/googleapis/python-aiplatform/commit/9351e1ac51a0dd5dc0739676b9790e3e202529ad))
+* Endpoint.stream_raw_predict ([9351e1a](https://github.com/googleapis/python-aiplatform/commit/9351e1ac51a0dd5dc0739676b9790e3e202529ad))
+* Expose new text embedding tuning parameters in GA namespace. ([249a5fa](https://github.com/googleapis/python-aiplatform/commit/249a5fa901a2f7572698892eac7fdc7dc6ca0ad7))
+* GenAI - Batch Prediction - Added support for tuned GenAI models ([a90ee8d](https://github.com/googleapis/python-aiplatform/commit/a90ee8da161f95aa489aa4f09309a3fa34320a4c))
+* Mark default profile run as complete when calling end_upload_tb_log ([6397f30](https://github.com/googleapis/python-aiplatform/commit/6397f3069cd164f5fb1423f9c73c5e1f6ae30714))
+* Sample code for Vertex AI Feature Store ([2c28b4e](https://github.com/googleapis/python-aiplatform/commit/2c28b4eb8184d251bf612bd77f614e9b58e3ba6a))
+* Sample code for Vertex AI Feature Store ([2fbf5a5](https://github.com/googleapis/python-aiplatform/commit/2fbf5a5704612d3f87e157e9ebeb8f6583386a06))
+* Tokenization - Added count_tokens support for local tokenization ([a8a4c30](https://github.com/googleapis/python-aiplatform/commit/a8a4c30e492513708d1dd1e3e0dbd67a3918e172))
+* Update rich html Ray on Vertex Client Context widget ([380c9d9](https://github.com/googleapis/python-aiplatform/commit/380c9d973480961c82ad22b4b298ce31c965272b))
+
+
+### Bug Fixes
+
+* `FeatureGroup.create()` - remove unused entity id column param ([c46f3e9](https://github.com/googleapis/python-aiplatform/commit/c46f3e9240eb34cf3f38fe73356d893c96edabde))
+* Fix bug where retry_timeout does not work with evaluation with experiments ([a31ac4d](https://github.com/googleapis/python-aiplatform/commit/a31ac4da5de5bace03aeb531bea372953c66ac0c))
+* Use default run_name in Tensorboard uploader for direct directory upload. ([78a92a1](https://github.com/googleapis/python-aiplatform/commit/78a92a1bad2bd4d75b85debfd401e122afa0dc11))
+
+
+### Documentation
+
+* A comment for field `distibution` in message `.google.cloud.aiplatform.v1beta1.model_monitoring_stats.ModelMonitoringStatsDataPoint` is changed. ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* A comment for field `name` in message `.google.cloud.aiplatform.v1beta1.cached_content.CachedContent` is changed ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* A comment for field `source` in message `.google.cloud.aiplatform.v1beta1.tool.Retrieval` is added. ([5a6ce78](https://github.com/googleapis/python-aiplatform/commit/5a6ce7817e11c32fb138e1ba51825a18a12f8851))
+* Update the documentation for the `time_series_dataset` and `video_dataset` classes ([2cf4cb9](https://github.com/googleapis/python-aiplatform/commit/2cf4cb905236cd42d9c351aff26564504ed4b72e))
+
+## [1.56.0](https://github.com/googleapis/python-aiplatform/compare/v1.55.0...v1.56.0) (2024-06-18)
+
+
+### Features
+
+* Add `retry_timeout` to EvalTask in `vertexai.preview.evaluation` ([4d9ee9d](https://github.com/googleapis/python-aiplatform/commit/4d9ee9dc6c046fd71e2f3176981a2a108fbbaeeb))
+* Add hybrid query example to vector search sample. ([510da5e](https://github.com/googleapis/python-aiplatform/commit/510da5ef3bcaa507288571fc7e066f578fde329f))
+* Add metric classes for 2 pairwise metrics for rapid evaluation SDK. ([831c8e4](https://github.com/googleapis/python-aiplatform/commit/831c8e45ee88f70efcdaba7dfed1856837074357))
+* Add pipeline_job_name to allow PipelineJob.get(pipeline_job_name) ([32e3b22](https://github.com/googleapis/python-aiplatform/commit/32e3b22993a83414ee60e52b0c95bd8b63543787))
+* Add sample code show how to create an optimized private online store in Vertex AI Feature Store. ([e352175](https://github.com/googleapis/python-aiplatform/commit/e3521751ecb79d5f711658c39d2dd5b204c191c5))
+* GenAI - Context Caching - add get() classmethod and refresh() instance method ([6be874a](https://github.com/googleapis/python-aiplatform/commit/6be874a7c6c43b7acbf5926e38e56d2ab367f5a1))
+* GenAI - Context Caching - also print model_name and expire_time. ([d548c11](https://github.com/googleapis/python-aiplatform/commit/d548c1128d8b6abc7ed8007436bc868a688fcace))
+* GenAI - Tuning - Added support for CMEK ([eb651bc](https://github.com/googleapis/python-aiplatform/commit/eb651bc2ed60ba12d88998115470c12d858892ef))
+
+
+### Bug Fixes
+
+* Do not reset aiplatform.Experiment or aiplatform.ExperimentRun unnecessarily when running tensorboard uploader. ([28a091a](https://github.com/googleapis/python-aiplatform/commit/28a091ab609ae3c086bb35ee4901c61108a4e75e))
+
+
+### Documentation
+
+* Update the documentation for the `time_series_dataset` and `video_dataset` classes ([d5dc7b5](https://github.com/googleapis/python-aiplatform/commit/d5dc7b5697eb4c7e86ec9e108454a30c8c7028d7))
+
+## [1.55.0](https://github.com/googleapis/python-aiplatform/compare/v1.54.1...v1.55.0) (2024-06-12)
+
+
+### Features
+
+* view model evaluations when AustoSxS pipeline completes ([17c59c4c2](https://github.com/googleapis/python-aiplatform/commit/17c59c4c2))
+* Add support for user-configurable 1P embedding models and quota for RAG ([8b3beb666](https://github.com/googleapis/python-aiplatform/commit/8b3beb666))
+* Add Webpage Browser Extension to Vertex AI Extension SDK ([c222f4bdb](https://github.com/googleapis/python-aiplatform/commit/c222f4bdb))
+* Add enable_tracing to LangchainAgent. ([cad035cb3](https://github.com/googleapis/python-aiplatform/commit/cad035cb3))
+* Implement multithreaded batch inference in Rapid Evaluation SDK for performance improvement ([3c6d1732b](https://github.com/googleapis/python-aiplatform/commit/3c6d1732b))
+
+### Bug Fixes
+
+* support VPC and BYOSA case in Ray on Vertex JobSubmissionClient using cluster resource name ([662d039c9](https://github.com/googleapis/python-aiplatform/commit/662d039c9))
+* Include tensorboard profiler requirements when installing google-cloud-aiplatform[tensorboard] ([4f1b5593b](https://github.com/googleapis/python-aiplatform/commit/4f1b5593b))
+* fix numerical NaN experiment run logging error in EvalTask. ([641faec0b](https://github.com/googleapis/python-aiplatform/commit/641faec0b))
+* Generate uuid run_name when run_name_prefix is not specified. ([4e2d87f9a](https://github.com/googleapis/python-aiplatform/commit/4e2d87f9a))
+
+### Documentation
+
+* Update the documentation for the `image_dataset` class ([583cb9313](https://github.com/googleapis/python-aiplatform/commit/583cb9313))
+* Include imports in experiment_tracking samples directly in cloud.google.com snippets. ([3ffb6cba7](https://github.com/googleapis/python-aiplatform/commit/3ffb6cba7))
+
+
+## [1.54.1](https://github.com/googleapis/python-aiplatform/compare/v1.54.0...v1.54.1) (2024-06-07)
+
+
+### Bug Fixes
+
+* _append_tool_name list index out of range ([#3914](https://github.com/googleapis/python-aiplatform/issues/3914)) ([323c2f5](https://github.com/googleapis/python-aiplatform/commit/323c2f5b4a63451c8bb3ef1eb5457cc674c4f56f))
+
+## [1.54.0](https://github.com/googleapis/python-aiplatform/compare/v1.53.0...v1.54.0) (2024-06-06)
+
+### BREAKING CHANGES
+* Remove Vertex SDK data science package ([ec4ec8f](https://github.com/googleapis/python-aiplatform/commit/ec4ec8f1214b3da12728c30a002b7f4632f4a90e))
+
+### Features
+
+* Add display experiment run button for Ipython environments ([ba65828](https://github.com/googleapis/python-aiplatform/commit/ba6582856b1d7f9a6ac8f90a3fa5ea6723ac64ab))
+* Add hybrid search for public find_neighbors() call. ([9d35617](https://github.com/googleapis/python-aiplatform/commit/9d3561738d577129cb222417bf208166825d8043))
+* Enable Ray Job submission without VPC peering ([37875b5](https://github.com/googleapis/python-aiplatform/commit/37875b507f25c31ac4a84e4fefe3cbba565682e3))
+* GenAI - Allowed callable functions to return values directly in Automatic Function Calling ([768af67](https://github.com/googleapis/python-aiplatform/commit/768af6772ade2b67b90a05ae3db95039a3f2786d))
+* GenAI - Release ToolConfig to GA ([bc8b14a](https://github.com/googleapis/python-aiplatform/commit/bc8b14a7c9c632721db9166dc9b63eec17d31afd))
+* Sample code for Vertex AI Feature Store ([6c14e8b](https://github.com/googleapis/python-aiplatform/commit/6c14e8b31bd950ac4f4a862b4e62ead42fe30463))
+* Support VertexTool in langchain template. ([28a3c56](https://github.com/googleapis/python-aiplatform/commit/28a3c56fdcfa4fab819e8f79d235f6576febdfce))
+
+
+### Bug Fixes
+
+* Allow non-lro delete method ([c23c0ad](https://github.com/googleapis/python-aiplatform/commit/c23c0ada07146f0e5ce6a787c8255313f7c4a06c))
+* Deep copy dataset before passing it to evaluation ([019b610](https://github.com/googleapis/python-aiplatform/commit/019b6102c2dc98550592cde0adfbb4958faddbef))
+* Ensure model starts with publishers/ when users provide resource path from models/ ([d689331](https://github.com/googleapis/python-aiplatform/commit/d689331af5172cdfe7428333536954e8339f8ab4))
+* Fix failed unit tests due to google-cloud-storage upgrade. ([945b9e4](https://github.com/googleapis/python-aiplatform/commit/945b9e4835149111cd33beaee4301f3d8f05f59d))
+* Generalize RAG files import from Google Drive ([88c6a6a](https://github.com/googleapis/python-aiplatform/commit/88c6a6a4f11285d429c3777f59101e53e4672185))
+* Set upper bound of setuptools to unbreak public Colab for using vertex_ray namespace ([6cc45bb](https://github.com/googleapis/python-aiplatform/commit/6cc45bbbea154d087c1dfe4756d4e15f21b1d844))
+
+## [1.53.0](https://github.com/googleapis/python-aiplatform/compare/v1.52.0...v1.53.0) (2024-05-30)
+
+
+### Features
+
+* Add a `cloneable` protocol for Reasoning Engine. ([8960a80](https://github.com/googleapis/python-aiplatform/commit/8960a8022dc7556413a83786fc14e25e91df8362))
+* Add labels parameter to the supervised tuning train method ([f7c5567](https://github.com/googleapis/python-aiplatform/commit/f7c5567b6e44895033cf52e6f80a1bb55fb5f647))
+* Added reboot command for PersistentResource ([7785f8c](https://github.com/googleapis/python-aiplatform/commit/7785f8c327ee17da4827840396c49063b8e6d18f))
+* Added the new `GenerationConfig.response_schema` field ([#3772](https://github.com/googleapis/python-aiplatform/issues/3772)) ([5436d88](https://github.com/googleapis/python-aiplatform/commit/5436d88bf8d6c9b6a9df5a496afdc25106463d30))
+* Enable Tensorboard profile plugin in all regions by default. ([8a4a41a](https://github.com/googleapis/python-aiplatform/commit/8a4a41afe47aaff2f69a73e5011b34bcba5cd2e9))
+* GenAI - Added the `response_schema` parameter to the `GenerationConfig` class ([b5e2c02](https://github.com/googleapis/python-aiplatform/commit/b5e2c0204070e5f7fb695d39c7e5d23f937dbffd))
+* LLM - Added the `seed` parameter to the `TextGenerationModel`'s `predict` methods ([cb2f4aa](https://github.com/googleapis/python-aiplatform/commit/cb2f4aa021af05c90e54c5e41c1c91f9d8bf13b8))
+
+
+### Bug Fixes
+
+* Create run_name when run_name_prefix is not specified for Tensorboard uploader. ([ac17d87](https://github.com/googleapis/python-aiplatform/commit/ac17d876074f3fb51ab6c04beff0d3985df54633))
+* GenAI - Tuning - Supervised - Fix `adapter_size` parameter handling to match enum values. ([1cc22c3](https://github.com/googleapis/python-aiplatform/commit/1cc22c3c3561f7c6374d32fafd45839256064958))
+* Model Monitor console uri. ([71fbc81](https://github.com/googleapis/python-aiplatform/commit/71fbc81df8fa0d7c863233abc3ed6d40666c1623))
+
+## [1.52.0](https://github.com/googleapis/python-aiplatform/compare/v1.51.0...v1.52.0) (2024-05-21)
+
+
+### Features
+
+* Add FeatureGroup delete ([f9011e0](https://github.com/googleapis/python-aiplatform/commit/f9011e0b1b4ea8470849ecdd5ba9e086c73b778b))
+* Add support for ToolConfig in the LangChain template ([9bda328](https://github.com/googleapis/python-aiplatform/commit/9bda3288b59eb52c18a13c292561cb2c720ff331))
+* Create Vertex Experiment when uploading Tensorboard logs ([339f8b6](https://github.com/googleapis/python-aiplatform/commit/339f8b667952c7302c36605842ba92fa1c7135b8))
+* GenAI - Add BatchPredictionJob for GenAI models ([df4a4f2](https://github.com/googleapis/python-aiplatform/commit/df4a4f2745178a6458bb6dc1f124c8ee60e986c1))
+* GenAI - Add cancel, delete, list methods in BatchPredictionJob ([7ff8071](https://github.com/googleapis/python-aiplatform/commit/7ff80714c2ec55330d5d6a0075366f8f700128af))
+* GenAI - Added the `BatchPredictionJob.submit` method ([4d091c6](https://github.com/googleapis/python-aiplatform/commit/4d091c68b17b3c1b4a912aa38d3a098fdc21238d))
+* Private Endpoints - Added private service connect support to prediction endpoint. ([6bdcfb3](https://github.com/googleapis/python-aiplatform/commit/6bdcfb3c0c6b121d5fbfcdad9dd218a1ddfc3e0d))
+
+
+### Bug Fixes
+
+* Add validation for evaluation dataset fields, update logging info for eval api request count ([d6ef500](https://github.com/googleapis/python-aiplatform/commit/d6ef50080f3b2b923ba0fb89eb0a8daebf8f68d4))
+* Fix feature attribution drift visualization for model monitoring SDK ([710f33d](https://github.com/googleapis/python-aiplatform/commit/710f33d87e242a283e8fbe5327ba9fa781d0d8fc))
+* Fix the default value of response_column_name in EvalTask.evaluate() ([98f9b35](https://github.com/googleapis/python-aiplatform/commit/98f9b35ccde7dad7f3e6b9e259a201ee2784d15e))
+* Update get_experiment_df to pass Experiment and allow empty metrics. ([de5d0f3](https://github.com/googleapis/python-aiplatform/commit/de5d0f3a17a77cbc70ada480768d9209c7b02828))
+
+
+### Documentation
+
+* Add Vertex Model Monitoring V2 SDK documentation ([b47e6ff](https://github.com/googleapis/python-aiplatform/commit/b47e6ff1f17278a6f1e4c31def05f3a09d981b28))
+* Update docstrings for rapid evaluation library. ([d6d371d](https://github.com/googleapis/python-aiplatform/commit/d6d371d61abd2daa2f222ca82540c5e0c4b3a602))
+
+## [1.51.0](https://github.com/googleapis/python-aiplatform/compare/v1.50.0...v1.51.0) (2024-05-10)
+
+
+### Features
+
+* Add FeatureGroup create function ([3938107](https://github.com/googleapis/python-aiplatform/commit/393810728b6b940e4cc8e1ac7f55875e3b750beb))
+* Add FeatureGroup init/get ([e47d436](https://github.com/googleapis/python-aiplatform/commit/e47d436f24cc718e378a28c4a80293778e8c183a))
+* Add support for BaseModels in LangChain templates ([5eb885e](https://github.com/googleapis/python-aiplatform/commit/5eb885ee7e01eece15679ce400f222930da1ac16))
+* Added the `vision_models.Image._mime_type` property to make `vision_models.Image` compatible with `generative_models.Image` ([6557d88](https://github.com/googleapis/python-aiplatform/commit/6557d88eb73624c8dbc7da33db129f7cbdae8a06))
+* AutoSxS Pairwise Metric in Rapid Evaluation SDK ([b0c5eda](https://github.com/googleapis/python-aiplatform/commit/b0c5eda79489d4b32972b2acea647e3c8cdc3ce9))
+* GenAI - Grounding - Released Google Web Search retriever to GA ([32b030a](https://github.com/googleapis/python-aiplatform/commit/32b030a629a20d0557dba011df2658f46c199820))
+* GenAI - Tuning - Supervised - Added support for the `adapter_size` parameter ([88188d2](https://github.com/googleapis/python-aiplatform/commit/88188d294fc2ec55ec0b05640dc791a1a3a88255))
+* LLM - Made the tuning location parameters truly optional ([bae8429](https://github.com/googleapis/python-aiplatform/commit/bae8429ae078c69574d86280ae6c784aaa9b13b5))
+* LLM - Support tuning of new text embedding models by migrating to the new v1.1.3 pipeline. ([7fea754](https://github.com/googleapis/python-aiplatform/commit/7fea7547084277dc974cbacc517ca1e95629a034))
+* LLM - Text embedding - Added the `output_dimensionality` and `learning_rate_multiplier` parameters to text embedding tuning (Preview only) ([cc8bc96](https://github.com/googleapis/python-aiplatform/commit/cc8bc965932efb68a30db9decb5a24cf597b0d8b))
+* LLM - Text Embedding - Added validation for text embedding tuning parameters. ([5a300c1](https://github.com/googleapis/python-aiplatform/commit/5a300c1071fa1492502cfde95700e1b171cdfbfc))
+* Release Ray on Vertex SDK to GA ([f7c5132](https://github.com/googleapis/python-aiplatform/commit/f7c51327c49d000cc79d56bb5333ed7fea28fa01))
+* Support custom service account for Ray cluster creation and Ray Client connection ([e0c6227](https://github.com/googleapis/python-aiplatform/commit/e0c6227d0dd92d83c98cc3c7e7607fd252e74a32))
+* Support vector_distance_threshold filtering and file-based retrieval for RAG ([cd85d8f](https://github.com/googleapis/python-aiplatform/commit/cd85d8f74d3922de3f871415bacf77c594f0c547))
+
+
+### Bug Fixes
+
+* A bug in the evaluation library where the job crashes if only custom metrics are specified. ([c528b6f](https://github.com/googleapis/python-aiplatform/commit/c528b6ff44e2347797336db800ca01240e670d32))
+* Add DeprecationWarning to vertexai.preview predictive models SDK ([3c3727b](https://github.com/googleapis/python-aiplatform/commit/3c3727b48ce4ba12bdaf36806cda4907a788d38e))
+* Add MAX_TOKENS to the list of successful finish reasons for Rapid Evaluation SDK ([195c77e](https://github.com/googleapis/python-aiplatform/commit/195c77ed7320aea3ab5899427a922d606ed78997))
+* AttributeError for TorchModelSerializer.deserialize in torch >=2.3.0 ([20b1866](https://github.com/googleapis/python-aiplatform/commit/20b18668f15c448813aad4f58f2a4d470d6da2ec))
+* GenAI - Fixed handling of multiple tools in `AutomaticFunctionCallingResponder` ([58e6ac9](https://github.com/googleapis/python-aiplatform/commit/58e6ac9b14daa42dc64d787156070c22bd7a1655))
+* Remove InternalServerError and Unknown evaluation service error from retriable exceptions ([12c147b](https://github.com/googleapis/python-aiplatform/commit/12c147b1f3e127c925b6c42b7dbbd4e949ff8e98))
+* Upload the reference model in model registry ([510c833](https://github.com/googleapis/python-aiplatform/commit/510c8334961cdb6f801863ecbd8fe49bf69b6c68))
+
+## [1.50.0](https://github.com/googleapis/python-aiplatform/compare/v1.49.0...v1.50.0) (2024-05-02)
+
+
+### Features
+
+* Add `Candidate.grounding_metadata` property ([b22a8b8](https://github.com/googleapis/python-aiplatform/commit/b22a8b847e3b299b828e37405e3678093486de28))
+* Add option to not include time_series_metrics in get_experiment_df call. This will improve execution time for Experiments with large number of runs. ([78a95c5](https://github.com/googleapis/python-aiplatform/commit/78a95c52d0e7bd9ec5b656ce67044b2f01677156))
+* Add tune_model and deploy_tuned_model for TextEmbeddingModel. ([42f5d6f](https://github.com/googleapis/python-aiplatform/commit/42f5d6f7cd13d51c4a73113c59e8b3c728cfc08b))
+* Automatically populate parents for full resource name in Vertex RAG SDK ([26657ff](https://github.com/googleapis/python-aiplatform/commit/26657ffd25ecb91882ca764e513c2e952833257f))
+* Deploy a tuned text embedding model -- it doesn't matter, if it's tuned using Node.js, or curl. ([8ca9cdf](https://github.com/googleapis/python-aiplatform/commit/8ca9cdf3576e3ce3b373ace4cd6ab0e9c54aa9f2))
+* Make get_embeddings work both for foundational & tuned models. ([b8b589c](https://github.com/googleapis/python-aiplatform/commit/b8b589ce9fff29d1721450d32b4a84a7f69413c3))
+* Python SDK for Vertex Model Monitoring V2. ([021d59f](https://github.com/googleapis/python-aiplatform/commit/021d59f1487e4e16c847d4135899d6845c0210aa))
+* Support public endpoint for Ray Client ([57a5f78](https://github.com/googleapis/python-aiplatform/commit/57a5f7815ffb8523e91d900da4ff7cfd0c344fe4))
+
+
+### Bug Fixes
+
+* Add deprecation warnings when using Ray v2.4 ([3a36784](https://github.com/googleapis/python-aiplatform/commit/3a367843840513e3257610c8ab38e9f79d3bcea0))
+* Append allowed_plugins in tb-gcp-uploader to default allowed plugins ([aab9c3e](https://github.com/googleapis/python-aiplatform/commit/aab9c3e41b92a1d60090e3d1d594390a5e9f3ff6))
+* LLM - Added missing parameters to the no-op `_TunableTextEmbeddingModelMixin.get_tuned_model` method ([eb05ac4](https://github.com/googleapis/python-aiplatform/commit/eb05ac421f186441a92c6e3b6a010d74caf14782))
+* LVM - Fixed the typo in the VisionModel aspect ratio type annotation ([2d19137](https://github.com/googleapis/python-aiplatform/commit/2d1913773cf9f4a4f8a2c8c8f45680c3ea97f68e))
+* Move torch import ([e6d34df](https://github.com/googleapis/python-aiplatform/commit/e6d34df7da7508c655eb17ee694e1ab2160fc8aa))
+* Ray - Fixed exception when using Ray 2.4 ([2661f52](https://github.com/googleapis/python-aiplatform/commit/2661f52fd08169e5d29b58f2afce9702b30101ae))
+
+## [1.49.0](https://github.com/googleapis/python-aiplatform/compare/v1.48.0...v1.49.0) (2024-04-27)
+
+
+### Features
+
+* Add additional parameters for `GenerationConfig` ([0599ca1](https://github.com/googleapis/python-aiplatform/commit/0599ca18342aece9d8b9b35534294de541593ef9))
+* Add FeatureNormType to `MatchingEngineIndexConfig`. ([c0e7acc](https://github.com/googleapis/python-aiplatform/commit/c0e7acc27e67ef1a951bb4bd97f489164dda14f9))
+* Add mappings to pipeline templates for text-embedding models. ([6279924](https://github.com/googleapis/python-aiplatform/commit/627992484ec16fbf7fdfc9c963046e10e3d7c6bf))
+* Add support for TPU v5 lite pod(v5e) for custom training jobs. Custom training jobs now accept the v5e machine types as listed in https://cloud.google.com/tpu/docs/tpus-in-gke#v5e. ([415912e](https://github.com/googleapis/python-aiplatform/commit/415912e9258d2b960c7da730902d7f15116cc474))
+* Fix typo in Vertex Feature Store SDK. ([b5404e7](https://github.com/googleapis/python-aiplatform/commit/b5404e7106059358dd7fcd21d487e5e31ed4d128))
+* GenAI - Improved the exception messages when candidates, parts or text are not available ([e82264d](https://github.com/googleapis/python-aiplatform/commit/e82264d273e35d3b305d434181badfb63a37c79c))
+* Support PreflightValidation in Preview PipelineJob submit function. ([e88dc0d](https://github.com/googleapis/python-aiplatform/commit/e88dc0d65ffaed3de9850e8eaadbfa41eb769e06))
+* Tune_model method for class TextEmbeddingModel. ([3eda55d](https://github.com/googleapis/python-aiplatform/commit/3eda55d25a8e3816eec0fb3c43f7c16c8a2d9b32))
+* Vertex RAG for enhanced generative AI ([39b5149](https://github.com/googleapis/python-aiplatform/commit/39b5149c18355a233dae7ada2f18c8072a6b59f1))
+
+
+### Bug Fixes
+
+* GCS Bucket subdirectory not being creating while using ReasoningEngine create method ([3d22a18](https://github.com/googleapis/python-aiplatform/commit/3d22a18abdacc7cb53d4b5fef941fa1a34caec08))
+* GenAI - Fixed the `Part.mime_type` property ([819a44e](https://github.com/googleapis/python-aiplatform/commit/819a44ea9b624912c31843881fd6130febb8ec57))
+* Handle missing import for RunnableConfig when generating schema for LangChain templates ([76c5d6d](https://github.com/googleapis/python-aiplatform/commit/76c5d6d6e99be5e7a8ec81e85aa383b1a39fef66))
+* Parse intermediate steps from LangChain into JSON. ([754c89d](https://github.com/googleapis/python-aiplatform/commit/754c89d8514dde707255bf0e3602dfddff49f268))
+* Register TensorFlow models from Ray checkpoints for more recent TensorFlow version, addressing the deprecation of SavedModel format in keras 3 ([1341e2c](https://github.com/googleapis/python-aiplatform/commit/1341e2c57907f7867d4237e84c5bb9b77e5dd5f5))
+* Tensorboard uploader - move remaining tensorboard uploader code to aiplatform ([f34094b](https://github.com/googleapis/python-aiplatform/commit/f34094b50ea161c2b9f0653be8b5931e922bb8f6))
+
+## [1.48.0](https://github.com/googleapis/python-aiplatform/compare/v1.47.0...v1.48.0) (2024-04-17)
+
+
+### Features
+
+* Add support for reading requirements from a file. ([80db7a0](https://github.com/googleapis/python-aiplatform/commit/80db7a0960b80ae0d78182687c1e99db696943f7))
+* Adding tpu_topology to Vertex SDK ([423c764](https://github.com/googleapis/python-aiplatform/commit/423c7646185b4df19985fb41f5776557d572dd9f))
+* Enable continuous upload for profile logs. ([f05924d](https://github.com/googleapis/python-aiplatform/commit/f05924d6bbd9e609f4ca98cdef7ab5a504672e58))
+* GenAI - Added the `GenerationResponse.prompt_feedback` property ([efd5a72](https://github.com/googleapis/python-aiplatform/commit/efd5a72c1856a6767bdbbba9ea83f366518bdac2))
+* GenAI - Added the `GenerationResponse.usage_metadata` property ([0654c35](https://github.com/googleapis/python-aiplatform/commit/0654c3504425d9f9bba6e3be919026229b616ec0))
+* Support `NOT_EQUAL` for `MatchingEngineIndexEndpoint` `numeric_restricts`. ([aa918e3](https://github.com/googleapis/python-aiplatform/commit/aa918e31fcc40878e9f29affa02a4527d90188aa))
+* Support referenced models in SDK. ([c9b6b8b](https://github.com/googleapis/python-aiplatform/commit/c9b6b8b3433854afd95a27065a052393768ceca8))
+
+
+### Bug Fixes
+
+* Add validation check for extra_packages when creating a reasoning engine. ([255dabc](https://github.com/googleapis/python-aiplatform/commit/255dabc77c647ef3ac33a10b06b3a36db122118a))
+* Add validation for langchain tools. ([a821d50](https://github.com/googleapis/python-aiplatform/commit/a821d50724da7136c90abd157a7086d6571f2c30))
+* Fixed the vertexai.init partial initialization issues ([636a654](https://github.com/googleapis/python-aiplatform/commit/636a654590919048f84baf343d291711f28eb03e))
+* GenAI - Workaround for streaming when content role is missing in service responses ([fa35b91](https://github.com/googleapis/python-aiplatform/commit/fa35b9169677c62a5f0fa746dc9db9a5296f44a3))
+
+
+### Documentation
+
+* Add Reasoning Engine reference documentation ([496fc4b](https://github.com/googleapis/python-aiplatform/commit/496fc4b96768c872c9e7312bacf9989ea6e979f5))
+* GenAI - Add Rapid Evaluation SDK reference documentation ([40b728b](https://github.com/googleapis/python-aiplatform/commit/40b728b28210f2bc57374c6c6d507cf3fa0be038))
+
+## [1.47.0](https://github.com/googleapis/python-aiplatform/compare/v1.46.0...v1.47.0) (2024-04-06)
+
+
+### Features
+
+* Add display experiment button for tuning in Ipython environments ([9bb687c](https://github.com/googleapis/python-aiplatform/commit/9bb687c20b03ea7227908e09831fb1a13ac3a970))
+* Add Persistent Resource ID parameter to Custom Job form_local_script, run, and submit methods. ([f5be0b5](https://github.com/googleapis/python-aiplatform/commit/f5be0b5652b0366eb6e823409ba1cb134e4b7b7c))
+* Add Persistent Resource Id parameter to Custom Training Job run and submit methods. ([f428006](https://github.com/googleapis/python-aiplatform/commit/f428006507e9b053a2121089e89fc54aedd3550a))
+* Added GA support for PersistentResource management ([98a07dd](https://github.com/googleapis/python-aiplatform/commit/98a07dd614063cb6a4c55c9024893874d3c95a1f))
+* Added GA support for running Custom and Hp tuning jobs on Persistent Resources ([35ecbac](https://github.com/googleapis/python-aiplatform/commit/35ecbac53df299b681e835648a9884a091f5d4d8))
+* Added the `Experiment.dashboard_url` property ([c8eec21](https://github.com/googleapis/python-aiplatform/commit/c8eec21d6f6e6f016669a18e19cebd9de1f0a7f9))
+* GenAI - Added support for `SafetySetting.method` (probability or severity) ([317ab8f](https://github.com/googleapis/python-aiplatform/commit/317ab8f7499b345c5a73365b95e9ba91c1adfecf))
+* GenAI - Added support for supervised fine-tuning ([036d2d0](https://github.com/googleapis/python-aiplatform/commit/036d2d0306e5190c972d2c4e5dd34257ea8fad6d))
+* GenAI - Added support for system instructions ([4990eb6](https://github.com/googleapis/python-aiplatform/commit/4990eb6ade736c85c08455ca0ef9f7c9515662fd))
+* GenAI - Forced function calling feature ([806ef9f](https://github.com/googleapis/python-aiplatform/commit/806ef9fe860c51ee99481cb7f209723a22a1d369))
+* Initial template for Langchain on Vertex. ([0752a29](https://github.com/googleapis/python-aiplatform/commit/0752a29ec69280373519406be8528682f1c547ec))
+* LLM - Add RLHF-tuning support for `text-bison@002` ([1f27c3e](https://github.com/googleapis/python-aiplatform/commit/1f27c3eb5bb95b04f5e1708d631309c928df932b))
+* Vertex AI Extension SDK Public Preview ([137b5e1](https://github.com/googleapis/python-aiplatform/commit/137b5e11ab40abfd4f53c61544989d99770dabeb))
+* Vertex AI Reasoning Engine SDK Public Preview ([6aaa5d0](https://github.com/googleapis/python-aiplatform/commit/6aaa5d01c7e675bb1a553fd6780b035e3513d58e))
+* Vertex Rapid Evaluation SDK and Prompt Template for Vertex Prompt Management Public Preview ([8c6ddf5](https://github.com/googleapis/python-aiplatform/commit/8c6ddf54adf91e2fbf00034fef413ccfde3769d6))
+
+
+### Bug Fixes
+
+* GenAI - Fixed response validation error during streaming ([c881998](https://github.com/googleapis/python-aiplatform/commit/c881998c7fb54289efc5ae6f5431b631b3d6c11c))
+* GenAI - Fixed the `GenerativeModel`'s handling of tuned models from different region ([bf33fb3](https://github.com/googleapis/python-aiplatform/commit/bf33fb3e0053898cf8ba919180ee246ea5ad1cdb))
+* GenAI - Fixed the TuningJob dashboard URL ([5367fbb](https://github.com/googleapis/python-aiplatform/commit/5367fbb1125debd16357e4815c704105209fca2a))
+* Reinstate persistent resource preview class and tests ([765d60d](https://github.com/googleapis/python-aiplatform/commit/765d60da57b3e3c947b121667f0a2a2cb93b88f3))
+
+
+### Documentation
+
+* Add run custom job on persistent resource sample. ([53fc845](https://github.com/googleapis/python-aiplatform/commit/53fc8455145c9fb7953a6e7dd6e85aab01055ad2))
+* Add run custom job on persistent resource sample. ([31100c6](https://github.com/googleapis/python-aiplatform/commit/31100c6dd6d7d4b0b588ed7f008e7661835b19d2))
+
+## [1.46.0](https://github.com/googleapis/python-aiplatform/compare/v1.45.0...v1.46.0) (2024-03-30)
+
+
+### Features
+
+* GenAI - Automatic Function Calling feature ([eef84c6](https://github.com/googleapis/python-aiplatform/commit/eef84c6a694ee848d19d8eab8f1a4940e50d4b0d))
+* Python SDK for Vertex Feature Store. ([5015d25](https://github.com/googleapis/python-aiplatform/commit/5015d25c5efdb9ba0a01bc60441f7eb8d5fddc52))
+
+## [1.45.0](https://github.com/googleapis/python-aiplatform/compare/v1.44.0...v1.45.0) (2024-03-28)
+
+
+### Features
+
+* Add an arg to turn off Ray metrics collection during cluster creation ([e33d11f](https://github.com/googleapis/python-aiplatform/commit/e33d11fa02eb721a6fe09bbd7c2e6a9954dbfe98))
+* Add Batch Delete and Batch Cancel Pipeline Jobs and unit tests. ([cb495e7](https://github.com/googleapis/python-aiplatform/commit/cb495e740ace7b2ab9935188ca9f7d6d3bf86d2a))
+* Add display model evaluation button for Ipython environments ([181dc7a](https://github.com/googleapis/python-aiplatform/commit/181dc7a575b0cefc1be0ea9b06cb4f30a7a0da0f))
+* Add safety filter levels, watermark support and person generation support for Imagen 2 ([0c498c5](https://github.com/googleapis/python-aiplatform/commit/0c498c5e4226b2a16adb0ff3cf7e6698a05aa5c7))
+* Add safety filter levels, watermark support and person generation support for Imagen 2 ([e2efdbe](https://github.com/googleapis/python-aiplatform/commit/e2efdbed324bd201f9793c43ca2167e8ccbbe426))
+* Add support for output_dimensionality parameter through get_embeddings. ([b1cab3f](https://github.com/googleapis/python-aiplatform/commit/b1cab3fca34e7a1969c1828348bcbd0eefe42cb8))
+* Add support for task types (TASK_QUESTION_ANSWERING, TASK_FACT_VERIFICATION) through get_embeddings. ([9ec4590](https://github.com/googleapis/python-aiplatform/commit/9ec459014612116e202c2dddda5f30716ff1a7f8))
+* Add v1beta1 service clients for reasoning engine ([13ec7e0](https://github.com/googleapis/python-aiplatform/commit/13ec7e0c63649b7c9a7a0eb4a5d36afeae1ffadb))
+* GenAI - Support generating JSON Schema from Python function ([be4922a](https://github.com/googleapis/python-aiplatform/commit/be4922adbfa42a91d67e46f0b705e62d794cb8c3))
+* Improve get_experiment_df execution speed ([2e56acc](https://github.com/googleapis/python-aiplatform/commit/2e56acc7e5627c4ca46e84febaecf5b791f67462))
+* Ray on Vertex enables XGBoost register model with custom version using pre-built container ([e45ef96](https://github.com/googleapis/python-aiplatform/commit/e45ef96de9f008a5c5556bf119a75403085d8dcb))
+* Support `accelerator_type` in RLHF tuning ([e51c977](https://github.com/googleapis/python-aiplatform/commit/e51c97738e905dda1d726cd778d62647b096a3c8))
+
+
+### Bug Fixes
+
+* Fix failing get_experiments_df test. ([767712e](https://github.com/googleapis/python-aiplatform/commit/767712e9caf4bec8260c57668654bcd257f1c831))
+* GenAI - Capture content blocked case when validating responses ([f0086df](https://github.com/googleapis/python-aiplatform/commit/f0086dfd76c138443e50bc18ae49b232905468f3))
+* LLM - Fixed the `InlineContext` grounding source ([6f59100](https://github.com/googleapis/python-aiplatform/commit/6f591001e834dfc3e49efd988b4faf2b12b1f2d8))
+* Made `Endpoint.raw_predict` thread-safe by setting `authorized_session` last ([c72c1ef](https://github.com/googleapis/python-aiplatform/commit/c72c1ef4c58c79fd7a0ee4064de7f2f4a4257f36))
+
+## [1.44.0](https://github.com/googleapis/python-aiplatform/compare/v1.43.0...v1.44.0) (2024-03-14)
+
+
+### Features
+
+* Add custom tool context manager for telemetry ([b30f5a6](https://github.com/googleapis/python-aiplatform/commit/b30f5a69226310b11a7979d19e63e3b0aa873737))
+* Add display experiment button for Ipython environments ([0b33bdd](https://github.com/googleapis/python-aiplatform/commit/0b33bdd873ab0f3195e4dc6389554a3cac2c3433))
+* Allow module_name to be specified with the base logger. ([c01df66](https://github.com/googleapis/python-aiplatform/commit/c01df6681e5f94d74dcc5fd85a9ea4af4b9dcea6))
+* Enable Ray cluster creation with custom_image for each Resource ([f90c9cc](https://github.com/googleapis/python-aiplatform/commit/f90c9cc0f96eb8108d0d55ffb7ddb51fdc60407d))
+* GenAI - Added `function_calls` shortcut property to `Candidate` class. ([d6490ff](https://github.com/googleapis/python-aiplatform/commit/d6490ffdf28d2f1916d517c223e35537611b1073))
+* LLM - Added support for the `max_context_length` tuning parameter ([e5daae9](https://github.com/googleapis/python-aiplatform/commit/e5daae92125eaada897a8de4f8de8f5da59af6db))
+* Support `shard_size` for `MatchingEngineIndex` create index. ([6dbf7d3](https://github.com/googleapis/python-aiplatform/commit/6dbf7d3f9f74d08b80a3bfb85525ec6922b370fc))
+* Use colab enterprise enviroment variables to infer project_id and region ([5baf5f8](https://github.com/googleapis/python-aiplatform/commit/5baf5f83494373049904bedc898e5b72303bccbd))
+
+
+### Bug Fixes
+
+* Batch read tensorboard time series data to allow reading more than 20 time series metrics. ([6e3eac2](https://github.com/googleapis/python-aiplatform/commit/6e3eac26ed40b1927541b71b99758834a89fa64b))
+* GenAI - Fixed `get_tuned_model` deploying the model every time ([e1c7870](https://github.com/googleapis/python-aiplatform/commit/e1c7870aabe4fac5537589f49484509b3faf3f5b))
+* GenAI - Improved `from_dict` methods for content types (`GenerationResponse`, `Candidate`, `Content`, `Part`) ([613ce69](https://github.com/googleapis/python-aiplatform/commit/613ce690d8224b85da5406507c4df75a35493b0d))
+* Improve import time by moving TensorFlow to lazy import ([f294ba8](https://github.com/googleapis/python-aiplatform/commit/f294ba8b762a88b77a623b86145302c976fdabc4))
+* LVM - Added support for GCS `storage.googleapis.com` URL import in `vision_models.Image` ([2690e72](https://github.com/googleapis/python-aiplatform/commit/2690e7223535d5758ed0e13075aebdde41a678f3))
+* Raise AttributeError for accessing non-existent fields of a proto. ([b91edf5](https://github.com/googleapis/python-aiplatform/commit/b91edf52e2b993c3301a419ad89b473c31c60cc3))
+
+
+### Documentation
+
+* Fixed the documentation generation ([63ad1bf](https://github.com/googleapis/python-aiplatform/commit/63ad1bf9e365d2f10b91e2fd036e3b7d937336c0))
+* GenAI - Added the GA classes to the documentation ([9eb5a52](https://github.com/googleapis/python-aiplatform/commit/9eb5a52c2f7f0c19a2035831181ad69f98bc3df5))
+
+## [1.43.0](https://github.com/googleapis/python-aiplatform/compare/v1.42.1...v1.43.0) (2024-02-29)
+
+
+### Features
+
+* Add `update_mask` to `MatchingEngineIndex` `upsert_datapoints()` to support dynamic metadata update. ([81f6a25](https://github.com/googleapis/python-aiplatform/commit/81f6a25ab6600b325ff4e7b0f19332f13080bee9))
+* Add Ray on Vertex BigQuery read/write support for Ray 2.9 ([e048e3a](https://github.com/googleapis/python-aiplatform/commit/e048e3a77834ce21a221dab75c890b651b9f3ce0))
+* Add read_bigquery and write_bigquery wrappers for Ray on Vertex ([4739118](https://github.com/googleapis/python-aiplatform/commit/47391182a2b7edf0c8cb2cb470e649bf5486faf4))
+* Add transport override to enable the use of REST instead of GRPC ([6ab4084](https://github.com/googleapis/python-aiplatform/commit/6ab4084aa549b1a28a4990f8b59dead510d7b296))
+* Enable Ray cluster creation and registering TensorFlow checkpoint to Vertex with Ray version 2.9 ([ff148cd](https://github.com/googleapis/python-aiplatform/commit/ff148cd1f836e599c99f2c177abb085d08827375))
+* LLM - Add support for batch prediction to `CodeGenerationModel` (`code-bison`) ([fbf2f7c](https://github.com/googleapis/python-aiplatform/commit/fbf2f7c3bf5b462e498a165017217d057a5f5bae))
+* Support custom `timeout` for `MatchingEngineIndex` and `MatchingEngineIndexEndpoint` APIs. ([09d1946](https://github.com/googleapis/python-aiplatform/commit/09d1946711fb022bf584137299ed187bf885cb23))
+* Update ray extras installation in setup.py for Ray 2.9.3 ([d947304](https://github.com/googleapis/python-aiplatform/commit/d947304314db5f683b154656551a8fd5b6e4c033))
+
+
+### Bug Fixes
+
+* Allow destination directory to exist before we copy. ([0b55762](https://github.com/googleapis/python-aiplatform/commit/0b55762a8f762b1b46a8d0dcd93381d7870e5200))
+* LVM - Fixed `VideoSegmentConfig` in Multimodal Embeddings API call ([665e78c](https://github.com/googleapis/python-aiplatform/commit/665e78c1d037fcae4c17205ffc280c4b88aea627))
+
+
+### Documentation
+
+* Update transport docstrings to include that REST is in preview ([a442866](https://github.com/googleapis/python-aiplatform/commit/a442866d2df30ea534d06da10afd496b48114f4c))
+
+## [1.42.1](https://github.com/googleapis/python-aiplatform/compare/v1.42.0...v1.42.1) (2024-02-15)
+
+
+### Bug Fixes
+
+* Handle case when no metadata is returned from model.predict ([59e2bca](https://github.com/googleapis/python-aiplatform/commit/59e2bca8fc9773f0610662d8bf426578b3015c41))
+* Remove runtime dependency of setuptools ([07c2a25](https://github.com/googleapis/python-aiplatform/commit/07c2a25225cf163584fc38616ddb6c1e560ac288))
+
+
+### Documentation
+
+* GenAI - Docs - Added quickstart link and minimum SDK version ([cf6ab21](https://github.com/googleapis/python-aiplatform/commit/cf6ab218035e4712c0202330014cb3b6c241db5b))
+* GenAI - Updated the README for GA ([cfd96d8](https://github.com/googleapis/python-aiplatform/commit/cfd96d804e66befe84c2eb6d259aa50faca2b5db))
+
+## [1.42.0](https://github.com/googleapis/python-aiplatform/compare/v1.41.0...v1.42.0) (2024-02-15)
+
+
+### Features
+
+* Add Candidate.grounding_metadata ([310ee49](https://github.com/googleapis/python-aiplatform/commit/310ee49a11767c20bed9cc4a929cbb0b8b3e6940))
+* Add GoogleSearchRetrieval ([310ee49](https://github.com/googleapis/python-aiplatform/commit/310ee49a11767c20bed9cc4a929cbb0b8b3e6940))
+* Add metadata in model.predict output for LLMs ([4661e58](https://github.com/googleapis/python-aiplatform/commit/4661e581ea957e71c643afc6bbf3b2e729b38bc8))
+* Add Retrieval ([310ee49](https://github.com/googleapis/python-aiplatform/commit/310ee49a11767c20bed9cc4a929cbb0b8b3e6940))
+* Add Tool.google_search_retrieval ([310ee49](https://github.com/googleapis/python-aiplatform/commit/310ee49a11767c20bed9cc4a929cbb0b8b3e6940))
+* Add Tool.retrieval ([310ee49](https://github.com/googleapis/python-aiplatform/commit/310ee49a11767c20bed9cc4a929cbb0b8b3e6940))
+* Add VertexAiSearch ([310ee49](https://github.com/googleapis/python-aiplatform/commit/310ee49a11767c20bed9cc4a929cbb0b8b3e6940))
+* GenAI - Added `to_dict()` methods to response and content classes ([a78748e](https://github.com/googleapis/python-aiplatform/commit/a78748e5cbe7b86ddfb1a36a4c0e5be8f66b27c3))
+* GenAI - Added support for Grounding ([0c3e294](https://github.com/googleapis/python-aiplatform/commit/0c3e29421434fe36d0f583f9860ac2e3f60e0329))
+* GenAI - Added the `GenerativeModel.start_chat(response_validation: bool = True)` parameter ([94f7cd9](https://github.com/googleapis/python-aiplatform/commit/94f7cd9e454d593e2544f8d93771d3093ffc18ae))
+* GenAI - GAPIC - Added support for Grounding ([310ee49](https://github.com/googleapis/python-aiplatform/commit/310ee49a11767c20bed9cc4a929cbb0b8b3e6940))
+* GenAI - Release the GenerativeModel to GA ([c7e3f07](https://github.com/googleapis/python-aiplatform/commit/c7e3f076f91f002ac5a889d8942a153e57e0147c))
+* LLM - Add newly supported model `text-bison@002`, update pipeline template version and prediction row limit for pipeline-based LLM Evaluation SDK ([35e57b3](https://github.com/googleapis/python-aiplatform/commit/35e57b32e70902fc90ee5a39e9001d3fa4150c50))
+* LVM - Add GCS URI support for Imagen Models (`imagetext`, `imagegeneration`) ([4109ea8](https://github.com/googleapis/python-aiplatform/commit/4109ea8d8ad456d90c0ca476e2147e9543e15b43))
+* LVM - Added support for Images from GCS uri for multimodal embeddings ([90d95d7](https://github.com/googleapis/python-aiplatform/commit/90d95d778f94e598a78a6f1c8a38e1911bffd8e2))
+* LVM - Added support for Videos from GCS uri for multimodal embeddings ([f3bd3bf](https://github.com/googleapis/python-aiplatform/commit/f3bd3bf1f74d283d7a95cd9cddf39947fcabc514))
+* Support custom image for Ray cluster creation ([d727189](https://github.com/googleapis/python-aiplatform/commit/d7271899a5383ffef69af1c46ad28d92eb4d5751))
+
+
+### Bug Fixes
+
+* Add google-auth as a direct dependency ([ecc6454](https://github.com/googleapis/python-aiplatform/commit/ecc64544d3cc718957fbe472fef1eaf22818e093))
+* Add restricts and crowding tag to `MatchingEngineIndexEndpoint` query response. ([83cb52d](https://github.com/googleapis/python-aiplatform/commit/83cb52d1ed3ec7587ea9bb877dba113abad569be))
+* Remove usage of distutils ([e35ab64](https://github.com/googleapis/python-aiplatform/commit/e35ab64d7e47f696ef9dd9c071c2d322e705b5ff))
+
+
+### Documentation
+
+* Fix incorrect field in log_time_series_metrics_sample ([f249353](https://github.com/googleapis/python-aiplatform/commit/f249353b918823b35495b295a75a90528ad652c0))
+* Update README ([14b41b5](https://github.com/googleapis/python-aiplatform/commit/14b41b50aff89e15f5f86f814df122c96231841d))
+
+## [1.41.0](https://github.com/googleapis/python-aiplatform/compare/v1.40.0...v1.41.0) (2024-02-05)
+
+
+### Features
+
+* Add `numeric_filter` to `MatchingEngineIndexEndpoint` `match()` and `find_neighbor()` private endpoint queries. ([679646a](https://github.com/googleapis/python-aiplatform/commit/679646a352ea3623d752e4685c7224db8934bd9d))
+* Added the `BatchPredictionJob.submit` method - a non-blocking version of `BatchPredictionJob.create` ([b62ddcd](https://github.com/googleapis/python-aiplatform/commit/b62ddcd751323ae96e248ab8bb90561b44f6f5e0))
+* LLM - Add support for RLHF tuning ([b2458ec](https://github.com/googleapis/python-aiplatform/commit/b2458ec51d7c3bbba787bbefac31c9aad6ffdb10))
+* Remove deletion of model upload staging bucket in system tests ([b9b373b](https://github.com/googleapis/python-aiplatform/commit/b9b373b69eab7dd303816bad94da54cad60a331e))
+* Switch Python generateContent to call Unary API endpoint ([9a19545](https://github.com/googleapis/python-aiplatform/commit/9a19545e864c6d4743156c737dd5bb8c4b86ab6f))
+
+
+### Bug Fixes
+
+* Fix crash when no target_col is specified for Bigframes tensorflow ([512b82d](https://github.com/googleapis/python-aiplatform/commit/512b82debdaf44d1b9c755c2bd06d1d24c080338))
+
+
+### Documentation
+
+* Add missing code block in docs/README.rst ([16d6b58](https://github.com/googleapis/python-aiplatform/commit/16d6b58ab27549a995919d70f78dd793feee7bd3))
+
+## [1.40.0](https://github.com/googleapis/python-aiplatform/compare/v1.39.0...v1.40.0) (2024-01-24)
+
+
+### Features
+
+* Add `return_full_datapoint` for `MatchEngineIndexEndpoint` `match()`. ([ad8d9c1](https://github.com/googleapis/python-aiplatform/commit/ad8d9c1df17578de3b893ebe46d00d457960da00))
+* Add batch delete method in preview pipeline job class and unit test. ([b0b604e](https://github.com/googleapis/python-aiplatform/commit/b0b604ec871eb99897e9b7c049c7f83abe64199e))
+* Add option to not overwrite table in Ray on Vertex BQ Write ([a99e992](https://github.com/googleapis/python-aiplatform/commit/a99e9925038f6a3e4a1538e2da315e8a01071b8b))
+* Add query by id for `MatchingEngineIndexEndpoint` `find_neighbors()` public endpoint query. ([42c7e08](https://github.com/googleapis/python-aiplatform/commit/42c7e08249f2ae758ad920e908c3f44fc72ac134))
+* Add support for `low_level_batch_size` in `match()` for `MatchingEngineIndexEndpoint` private query. ([67e593b](https://github.com/googleapis/python-aiplatform/commit/67e593b34928682491c0d00555459108261feb8e))
+* Added Public Preview support for PersistentResource management ([262a36b](https://github.com/googleapis/python-aiplatform/commit/262a36bdd942e73015a8160ddc3a01adbb99dee5))
+* Enable inline context in grounding to TextGenerationModel predict. ([a75e81c](https://github.com/googleapis/python-aiplatform/commit/a75e81c9e8bfe577468205fc0fc97366ff06f19d))
+* Fix dataset export system test ([1fbf049](https://github.com/googleapis/python-aiplatform/commit/1fbf0493dc5fa2bb05f33a4319d79a81625e07cc))
+* LVM - Added the `MultiModalEmbeddingModel.get_embeddings(dimension=...)` parameter ([1d9bd23](https://github.com/googleapis/python-aiplatform/commit/1d9bd233e636c024def55d4c867f662a4a351f9e))
+* Support empty index for `MatchingEngineIndex` create index. ([a00db07](https://github.com/googleapis/python-aiplatform/commit/a00db077a3ca77ee86117beb0b15d70d02e85e87))
+* Support private service connect for `MatchingEngineIndexEndpoint` `match()` and `read_index_datapoints()`. ([61cff4b](https://github.com/googleapis/python-aiplatform/commit/61cff4bda371e3baa61d98528d18093e5fa890b4))
+
+
+### Bug Fixes
+
+* Fix example usage of batch delete pipelinejobs. ([3b28d64](https://github.com/googleapis/python-aiplatform/commit/3b28d64c0f263fc5baa457ebeec70adc20f71df9))
+* Fix experiments failure when backing tensorboard has been deleted. ([e7a197e](https://github.com/googleapis/python-aiplatform/commit/e7a197ef5d349968994c029db4c5ce9d52e45bd7))
+* Fix message to Json parse issue in Dataset class ([066f32d](https://github.com/googleapis/python-aiplatform/commit/066f32df4e1cd09f1c3d457d6727bc731253aeac))
+* Fix typo in raise exception when tool is unsupported ([2301d79](https://github.com/googleapis/python-aiplatform/commit/2301d79ed2b08b4b76066ff0598236468ad656e7))
+* Fixed the PipelineJob bucket creation after a breaking change in Google Cloud Storage client library ([a8b01e0](https://github.com/googleapis/python-aiplatform/commit/a8b01e03cdc12de3af87ed7e67b947230a164c42)), closes [#2936](https://github.com/googleapis/python-aiplatform/issues/2936)
+* Increase memory for Ray on Vertex default machine type ([e6bcb17](https://github.com/googleapis/python-aiplatform/commit/e6bcb1757645c21cf8ccace79f9fd06531a5a1aa))
+* Use https to connect through Ray dashboard ([7af3e67](https://github.com/googleapis/python-aiplatform/commit/7af3e67654e05c20f3cd6f1226ebac3521a95200))
+
+## [1.39.0](https://github.com/googleapis/python-aiplatform/compare/v1.38.1...v1.39.0) (2024-01-05)
+
+
+### Features
+
+* Add `fraction_leaf_nodes_to_search_override`. Add support for private endpoint in `find_neighbors`. ([cd31c13](https://github.com/googleapis/python-aiplatform/commit/cd31c1306a9a00a01fbc1dda56fe99ed567a4cfb))
+* Add notification_channels field to model monitoring alert config. ([bb228ce](https://github.com/googleapis/python-aiplatform/commit/bb228ced16862a1f452352f8941d3a24d1a77090))
+* Add support of newly added fields of ExportData API to SDK ([ec3ea30](https://github.com/googleapis/python-aiplatform/commit/ec3ea305cd8a858fb770794c35481cdbc1520990))
+* Allow reuse of deleted experiment run id. ([5f6ad8d](https://github.com/googleapis/python-aiplatform/commit/5f6ad8df5a08e78a121a72a21e21d95abb072e58))
+* GenAI - Added support for "models/<model ID>" model name format ([ab21feb](https://github.com/googleapis/python-aiplatform/commit/ab21feb3528babbc619c0ea5bf4aa89e651340b3))
+* Support "reservedIpRanges" parameter in PipelineJob run() and submit() methods. ([ab99e00](https://github.com/googleapis/python-aiplatform/commit/ab99e00a42868ec09796709a5d5fb6e4f276bfb7))
+* Support custom target y column name for Bigframes Tensorflow ([1634940](https://github.com/googleapis/python-aiplatform/commit/1634940c91182fbd080556949d6c2557288216fb))
+* Verify client and cluster Ray versions match in create_ray_cluster ([17dc9b7](https://github.com/googleapis/python-aiplatform/commit/17dc9b7663c484fe6250cafd4209ef8279acdb5b))
+
+
+### Bug Fixes
+
+* Missing request parameter for v1beta explain. ([443fa9d](https://github.com/googleapis/python-aiplatform/commit/443fa9d456294c4a6aa49368cc4b5e6f01ec3d0d))
+* Pin google-cloud-aiplatform[tensorboard] dependency on tensorflow < 2.15.0 due to breaking change introduced in tensorboard 2.15.1 ([4e891f7](https://github.com/googleapis/python-aiplatform/commit/4e891f7f0c83dabde429ffaa38166af5a44eab5a))
+
+
+### Documentation
+
+* GenAI - Added GenAI to docs ([92fd7f0](https://github.com/googleapis/python-aiplatform/commit/92fd7f0a10ae35b448ac4981c072fdc5d8b05c3b))
+* Update docstring for start_upload_tb_log() ([c033c59](https://github.com/googleapis/python-aiplatform/commit/c033c5971c7692f84d0a5b58946c49348a4c4448))
+* Update tensorboard continuous uploader sample ([1220746](https://github.com/googleapis/python-aiplatform/commit/122074659871daa72aec4a6d3330a82eec77a1c5))
+
+## [1.38.1](https://github.com/googleapis/python-aiplatform/compare/v1.38.0...v1.38.1) (2023-12-13)
+
+
+### Features
+
+* Adding `serving_container_grpc_ports` parameter to Model.upload() method ([6a00ed7](https://github.com/googleapis/python-aiplatform/commit/6a00ed79252c97f3e5f5eef5492d5d7ef12c969c))
+* LLM - Added support for model distillation ([28925e9](https://github.com/googleapis/python-aiplatform/commit/28925e9464254e9768ceab845001aa0e3d46bbbf))
+* Support CMEK for scheduled pipeline jobs. ([406595d](https://github.com/googleapis/python-aiplatform/commit/406595dd78896d3c3fcec8975baccdabef468849))
+
+
+### Miscellaneous Chores
+
+* Release 1.38.1 ([537d00e](https://github.com/googleapis/python-aiplatform/commit/537d00e185df593f6c718859cbc92f8dfef67512))
+
+## [1.38.0](https://github.com/googleapis/python-aiplatform/compare/v1.37.0...v1.38.0) (2023-12-11)
+
+
+### Features
+
+* Release GenerativeModel support for Gemini ([cd233ef](https://github.com/googleapis/python-aiplatform/commit/cd233ef81d1359f3ac89fca809e337ac866d2108))
+* Add explicit constraints for update_ray_cluster ([979a4f3](https://github.com/googleapis/python-aiplatform/commit/979a4f3280cf79c672bd90b57049b1d9f20635fc))
+* Check if dataset exists before creation for Ray on Vertex BigQuery Write ([544d6fe](https://github.com/googleapis/python-aiplatform/commit/544d6fe7925d673bc0fa5236a3ae2608fce10d8e))
+* LLM - Added support for the `logprobs`, `presence_penalty`, `frequency_penalty`, and `logit_bias` generation parameters ([1449344](https://github.com/googleapis/python-aiplatform/commit/1449344490bbfd2ea9eddf0a7dfa651d89db7bc9))
+* Support `read_index_datapoints` for private network. ([c9f7119](https://github.com/googleapis/python-aiplatform/commit/c9f7119f7c13a4f2a0ef3613ef03c0247ec263da))
+* Support custom batch size for Bigframes Tensorflow ([7dc8771](https://github.com/googleapis/python-aiplatform/commit/7dc8771715387e4fb4536aa7080b795bfba8039c))
+* Update the v1 service definition to add numeric_restricts. ([d0c2ffa](https://github.com/googleapis/python-aiplatform/commit/d0c2ffa8d8d070f9ef247c3c742a67b12d1bc16d))
+* Verify client and cluster Ray versions match ([10c6ad2](https://github.com/googleapis/python-aiplatform/commit/10c6ad292f0eb79f4010ea22115b2f70abdea661))
+
+
+### Bug Fixes
+
+* `read_index_endpoint` private endpoint support. ([3d8835e](https://github.com/googleapis/python-aiplatform/commit/3d8835e1dbc48502246fc5ae141f465e0ac7ae90))
+* Fix exception message to use vertexai when project is not provided. ([0cb1a7b](https://github.com/googleapis/python-aiplatform/commit/0cb1a7b16ef49813d5c59e2b9646dc7861291b42))
+* Update test assumption for streaming endpoint of chat-bison@001 ([f9a5b69](https://github.com/googleapis/python-aiplatform/commit/f9a5b692a7b4ed7046ba5035cb1b592aa0cd9f0b))
+* When user is not logged in, throw more intuitive message ([a8b24ad](https://github.com/googleapis/python-aiplatform/commit/a8b24adaf0485f41ea6fc0d65d17d3d9500821aa))
+
+
+### Documentation
+
+* Add default value to optional field parameter_values ([0a4d772](https://github.com/googleapis/python-aiplatform/commit/0a4d772aacc8b100aac6b0c7810296dd0a3e6692))
+
+## [1.37.0](https://github.com/googleapis/python-aiplatform/compare/v1.36.4...v1.37.0) (2023-12-05)
+
+
+### Features
+
+* Add additional parameters to Model.upload(). ([7b7d7d2](https://github.com/googleapis/python-aiplatform/commit/7b7d7d2f2c7f074e62ee009a308341228fcd6582))
+* Adding support for concurrent explanations ([8e2ad75](https://github.com/googleapis/python-aiplatform/commit/8e2ad75e255210f99ab091802899a75df92b6a20))
+* Allow configuring container logging settings on models when deploying. ([beae48f](https://github.com/googleapis/python-aiplatform/commit/beae48f63e40ea171c3f1625164569e7311b8e5a))
+* Support user provided api endpoint. ([92f2b4e](https://github.com/googleapis/python-aiplatform/commit/92f2b4e32035a35f5f2a4956fee443fe3061bc32))
+* Add grpc_ports to UploadModel ModelContainerSpec, add DirectPredict, DirectRawPredict, StreamingPredict, StreamingRawPredict to PredictionService. ([6dfbad7](https://github.com/googleapis/python-aiplatform/commit/6dfbad7dcb29ef3b481bb90d989a1f6f68976996))
+
+### Bug Fixes
+
+* Clarify wording when Ray on Vertex cluster is missing head node ip ([4a71c8c](https://github.com/googleapis/python-aiplatform/commit/4a71c8c1df499aec763e7d16c2f022eb3593d3de))
+* Fix error when allowed_plugins is set to None. ([6f2860a](https://github.com/googleapis/python-aiplatform/commit/6f2860aafce9b77819a3891fb6c29bfb65ea8a2a))
+* Fixed INTEGER and BOOL parameters casting issue. Fix conversion bug where `INTEGER` and `BOOL` parameters are not cast to the correct type. ([9a204c4](https://github.com/googleapis/python-aiplatform/commit/9a204c4ffb16ba34c36f3869fbc36c52bc9986bb))
+* Make PipelineJobSchedule propagate labels to created PipelineJobs ([a34533f](https://github.com/googleapis/python-aiplatform/commit/a34533f536d1d5caa46a68a640bd507e979c161e))
+
+
+### Documentation
+
+* Add upload Tensorboard profile log to Experiment sample. ([5780513](https://github.com/googleapis/python-aiplatform/commit/57805132f82e1e21d3999e130b5c5fdbc105143a))
+* Update the documentation for the `image_dataset` class ([8562368](https://github.com/googleapis/python-aiplatform/commit/856236887a9159991b400829f5c05a307aaedd58))
+
+## [1.36.4](https://github.com/googleapis/python-aiplatform/compare/v1.36.3...v1.36.4) (2023-11-16)
+
+
+### Features
+
+* Add `numeric_restricts` to MatchingEngineIndex `find_neighbors()` for querying ([6c1f2cc](https://github.com/googleapis/python-aiplatform/commit/6c1f2cc650cfe0c6cc9896de13746d1a25d7315a))
+* Add `remove_datapoints()` to `MatchingEngineIndex`. ([b86a404](https://github.com/googleapis/python-aiplatform/commit/b86a4046c2cd0c189efc609bd6319f8da76cd6e7))
+* Add `upsert_datapoints()` to `MatchingEngineIndex` to support streaming update index. ([7ca484d](https://github.com/googleapis/python-aiplatform/commit/7ca484da0431699c460358584b9e8be102d9cc46))
+* LLM - include error code into blocked response from TextGenerationModel, ChatModel, CodeChatMode, and CodeGenerationModel. ([1f81cf2](https://github.com/googleapis/python-aiplatform/commit/1f81cf200c9394b50a43c3830ab8343ead1dc0d3))
+* Populate Ray Cluster dashboard_address from proto field ([dd4b852](https://github.com/googleapis/python-aiplatform/commit/dd4b8529a72f6d08e56e8437cdd3cd0874df8a47))
+* add CountTokens API, ComputeTokens API, and ModelContainerSpec features ([ba2fb39](https://github.com/googleapis/python-aiplatform/commit/ba2fb398b46b04fc2c9d0ea902746737ace64767))
+
+
+### Bug Fixes
+
+* Add check for empty `encryption_spec_key_name` for MatchingEngineIndexEndpoint `create`. ([7740132](https://github.com/googleapis/python-aiplatform/commit/7740132c315b2f1e55504e5c5b462eb27bf89937))
+* Fix server error due to no `encryption_spec_key_name` in MatchingEngineIndex `create_tree_ah_index` and `create_brute_force_index` ([595b580](https://github.com/googleapis/python-aiplatform/commit/595b580bfaa238b63f61cb69a7829094c747aaea))
+
+
+### Miscellaneous Chores
+
+* Release 1.36.4 ([1fd7b4e](https://github.com/googleapis/python-aiplatform/commit/1fd7b4eb4773c9c58cb4e957e176c0053169afc0))
+
+## [1.36.3](https://github.com/googleapis/python-aiplatform/compare/v1.36.2...v1.36.3) (2023-11-14)
+
+
+### Features
+
+* Add option to not use default tensorboard ([a25c669](https://github.com/googleapis/python-aiplatform/commit/a25c6697d75444e4134ac0b9ca8964458f70e275))
+* Add preview HyperparameterTuningJob which can be run on persistent resource ([0da8c53](https://github.com/googleapis/python-aiplatform/commit/0da8c5373b35d9bc7520e93934b109c3ff583dac))
+* Add Featurestore Bigtable Serving, Feature Registry v1, November bulk GAPIC release ([9f46f7c](https://www.google.com/url?sa=D&q=https%3A%2F%2Fgithub.com%2Fgoogleapis%2Fpython-aiplatform%2Fcommit%2F9f46f7c4289e78fb825837f602aecd4105f21e19))
+
+
+### Documentation
+
+* Fix documentation for obsolete link to GCS formatting ([95184de](https://github.com/googleapis/python-aiplatform/commit/95184de2570636e4ee0968dbcfb48c37f811e12f))
+
+
+### Miscellaneous Chores
+
+* Release 1.36.3 ([fdee5cb](https://github.com/googleapis/python-aiplatform/commit/fdee5cb1779344731b4cab0482079691601154d7))
+
+## [1.36.2](https://github.com/googleapis/python-aiplatform/compare/v1.36.1...v1.36.2) (2023-11-10)
+
+
+### Features
+
+* Add `encryption_spec_key_name` to `MatchingEngineIndex` `create_tree_ah_index` and ([1a9e36f](https://github.com/googleapis/python-aiplatform/commit/1a9e36f4ea1672d0fc6ce1587c86c4f132c15190))
+* Add `encryption_spec_key_name`, `enable_private_service_connect`,`project_allowlist` to MatchingEngineIndexEndpoint `create`. ([750e17b](https://github.com/googleapis/python-aiplatform/commit/750e17b4c25c9030018521545b3c21e1fb1404c2))
+* Add `index_update_method` to MatchingEngineIndex `create()` ([dcb6205](https://github.com/googleapis/python-aiplatform/commit/dcb62051a7f3aeaa009b64165569c788d8c5ec44))
+* Expose max_retry_cnt parameter for Ray on Vertex BigQuery write ([568907c](https://github.com/googleapis/python-aiplatform/commit/568907c3876b10dc104de5d19a973135b2638d62))
+* LLM - Grounding - Added support for the `disable_attribution` grounding parameter ([91e985a](https://github.com/googleapis/python-aiplatform/commit/91e985a258180226053a23a9280249079574ad16))
+* LLM - Support model evaluation when tuning chat models (`ChatModel`, `CodeChatModel`) ([755c3f9](https://github.com/googleapis/python-aiplatform/commit/755c3f99478e537ef5675ed7120a17eaf94ee5cd))
+* LVM - Added multi-language support for `ImageGenerationModel` ([791eff5](https://github.com/googleapis/python-aiplatform/commit/791eff5fac48a4395017d7f9296cfb2209a073ba))
+
+
+### Bug Fixes
+
+* Async call bug in CodeChatModel.send_message_async method ([fcf05cb](https://github.com/googleapis/python-aiplatform/commit/fcf05cb6da15c83e91e6ce5f20ab3e6649983685))
+
+
+### Documentation
+
+* Add Bigframes remote training example to vertexai README ([8b993b3](https://github.com/googleapis/python-aiplatform/commit/8b993b367758dc4f195a7a80d13923687da53e04))
+* Update the documentation for the `tabular_dataset` class ([6f40f1b](https://github.com/googleapis/python-aiplatform/commit/6f40f1baf02b279299c7ea825754fe1868d56276))
+
+
+### Miscellaneous Chores
+
+* Release 1.36.2 ([01be0c9](https://github.com/googleapis/python-aiplatform/commit/01be0c99ff86726962146bcf9601a04f7298cda1))
+
+## [1.36.1](https://github.com/googleapis/python-aiplatform/compare/v1.36.0...v1.36.1) (2023-11-07)
+
+
+### Features
+
+* Add `per_crowding_attribute_neighbor_count`, `approx_num_neighbors`, `fraction_leaf_nodes_to_search_override`, and `return_full_datapoint` to MatchingEngineIndexEndpoint `find_neighbors` ([33c551e](https://github.com/googleapis/python-aiplatform/commit/33c551efca38688c8c62ef5847dfcef0221e848c))
+* Add profiler support to tensorboard uploader sdk ([be1df7f](https://github.com/googleapis/python-aiplatform/commit/be1df7f4823f7b40022d31f529204dfe27fdb4d7))
+* Add support for `per_crowding_attribute_num_neighbors` `approx_num_neighbors`to MatchingEngineIndexEndpoint `match()` ([e5c20c3](https://github.com/googleapis/python-aiplatform/commit/e5c20c3b5c0078c9dfc70e2d1d13513a4dcefa63))
+* Add support for `per_crowding_attribute_num_neighbors` `approx_num_neighbors`to MatchingEngineIndexEndpoint `match()` ([53d31b5](https://github.com/googleapis/python-aiplatform/commit/53d31b5b6ec477e6f2b4391aaeadc8ae349800b8))
+* Add support for `per_crowding_attribute_num_neighbors` `approx_num_neighbors`to MatchingEngineIndexEndpoint `match()` ([4e357d5](https://github.com/googleapis/python-aiplatform/commit/4e357d5121d053dc313f3a3f180131e1850bebe2))
+* Enable grounding to ChatModel send_message and send_message_async methods ([d4667f2](https://github.com/googleapis/python-aiplatform/commit/d4667f25a7c95bd16511beaed85edf45307176b5))
+* Enable grounding to TextGenerationModel predict and predict_async methods ([b0b4e6b](https://github.com/googleapis/python-aiplatform/commit/b0b4e6b8243cbdb829288e3fc204d94005f1e8b4))
+* LLM - Added support for the `enable_checkpoint_selection` tuning evaluation parameter ([eaf4420](https://github.com/googleapis/python-aiplatform/commit/eaf4420479b64740cdd464afb64b8780f57c8199))
+* LLM - Added tuning support for the `*-bison-32k` models ([9eba18f](https://github.com/googleapis/python-aiplatform/commit/9eba18f70d36ac3901ba8b580cde6dde04413bc3))
+* LLM - Released `CodeChatModel` tuning to GA ([621af52](https://github.com/googleapis/python-aiplatform/commit/621af5244797a0e218195c72d9781cbd86b24fa0))
+
+
+### Bug Fixes
+
+* Correct class name in system test ([b822b57](https://github.com/googleapis/python-aiplatform/commit/b822b57fa490c8d89802ee5fbf0f3736e0811208))
+
+
+### Documentation
+
+* Clean up RoV create_ray_cluster docstring ([1473e19](https://github.com/googleapis/python-aiplatform/commit/1473e19c9b05c89ba2229f42a8d72588fa267d17))
+
+
+### Miscellaneous Chores
+
+* Release 1.36.1 ([1cde170](https://github.com/googleapis/python-aiplatform/commit/1cde1708fd26357995f3ee86194aa92aa7de5519))
+
+## [1.36.0](https://github.com/googleapis/python-aiplatform/compare/v1.35.0...v1.36.0) (2023-10-31)
+
+
+### Features
+
+* Add preview count_tokens method to CodeGenerationModel ([96e7f7d](https://github.com/googleapis/python-aiplatform/commit/96e7f7d9243c36fa991dd147fe66b3a7e545b3bb))
+* Allow the users to use extra serialization arguments for objects. ([ffbd872](https://github.com/googleapis/python-aiplatform/commit/ffbd87228ecc8d1f534e66d31301597822625b6a))
+* Also support unhashable objects to be serialized with extra args ([77a741e](https://github.com/googleapis/python-aiplatform/commit/77a741e5bec00c89a7746794d77f7ab5e93e5b2f))
+* LLM - Added `count_tokens` support to ChatModel (preview) ([01989b1](https://github.com/googleapis/python-aiplatform/commit/01989b185b7847549ccecfd4f83c9eaf3caa056d))
+* LLM - Added new regions for tuning and tuned model inference ([3d43497](https://github.com/googleapis/python-aiplatform/commit/3d434979659f8801921745600b1ee82f725937cc))
+* LLM - Added support for async streaming ([760a025](https://github.com/googleapis/python-aiplatform/commit/760a0256ef8da36ec2832b476e62626714488f3e))
+* LLM - Added support for multiple response candidates in code chat models ([598d57d](https://github.com/googleapis/python-aiplatform/commit/598d57d24ea613130a74bf7db86c757a668626b8))
+* LLM - Added support for multiple response candidates in code generation models ([0c371a4](https://github.com/googleapis/python-aiplatform/commit/0c371a4bd0adca8bec41dca6ef95e8a1011404f4))
+* LLM - Enable tuning eval TensorBoard without evaluation data ([eaf5d81](https://github.com/googleapis/python-aiplatform/commit/eaf5d816a7a752373243033ea98146d70987ce18))
+* LLM - Released `CodeGenerationModel` tuning to GA ([87dfe40](https://github.com/googleapis/python-aiplatform/commit/87dfe40d68255c62c30564a46a63a13417aa7fc4))
+* LLM - Support `accelerator_type` in tuning ([98ab2f9](https://github.com/googleapis/python-aiplatform/commit/98ab2f99d63f2ea40dead12dbffc078d9dae8336))
+* Support experiment autologging when using persistent cluster as executor ([c19b6c3](https://github.com/googleapis/python-aiplatform/commit/c19b6c3e88293f231aae0c9939e41c9e0f064ee7))
+* Upgrade BigQuery Datasource to use write() interface ([7944348](https://github.com/googleapis/python-aiplatform/commit/794434856713b4aab6aa32e8dd37b3d767136c78))
+
+
+### Bug Fixes
+
+* Adding setuptools to dependencies for Python 3.12 and above. ([afd540d](https://github.com/googleapis/python-aiplatform/commit/afd540d5210e56d5ace19f94526916a2b0c4f1fc))
+* Fix Bigframes tensorflow serializer dependencies ([b4cdb05](https://github.com/googleapis/python-aiplatform/commit/b4cdb05b12b82d3b9f605382828e3f854d985951))
+* LLM - Fixed the async streaming ([41bfcb6](https://github.com/googleapis/python-aiplatform/commit/41bfcb6677fe40afc19548b9bfb1b360b1125adc))
+* LLM - Make tuning use the global staging bucket if specified ([d9ced10](https://github.com/googleapis/python-aiplatform/commit/d9ced106b57cb21f5dcde433f1779b6500aaf7b0))
+* LVM - Fixed negative prompt in `ImageGenerationModel` ([cbe3a0d](https://github.com/googleapis/python-aiplatform/commit/cbe3a0de14a2ee189df07f445545f68fa4bdd4ae))
+* Made the Endpoint prediction client initialization lazy ([eb6071f](https://github.com/googleapis/python-aiplatform/commit/eb6071fe567ef80ba5490b9eaae0cedd6958aac5))
+* Make sure PipelineRuntimeConfigBuilder is created with the right arguments ([ad19838](https://github.com/googleapis/python-aiplatform/commit/ad19838ebc51215dc6c2fe0f373a4773877a0fb6))
+* Make sure the models list is populated before indexing ([f1659e8](https://github.com/googleapis/python-aiplatform/commit/f1659e80a7cc4f1e25d5f251c2483fa365bcdba2))
+* Raise exception for RoV BQ Write for too many rate limit exceeded ([7e09529](https://github.com/googleapis/python-aiplatform/commit/7e095298d145fe24202bacdb6be88d44789a2aa9))
+* Rollback BigQuery Datasource to use do_write() interface ([dc1b82a](https://github.com/googleapis/python-aiplatform/commit/dc1b82a9856beb3ab537b38b1ad1d2c244511c81))
+
+## [1.35.0](https://github.com/googleapis/python-aiplatform/compare/v1.34.0...v1.35.0) (2023-10-10)
+
+
+### Features
+
+* Add serializer.register_custom_command() ([639cf10](https://github.com/googleapis/python-aiplatform/commit/639cf10e444f5aee200cdf0ffaa63e768dc4119b))
+* Install Bigframes sklearn dependencies automatically ([7aaffe5](https://github.com/googleapis/python-aiplatform/commit/7aaffe505aa8e7bff86af5a3674085b8ff822107))
+* Install Bigframes tensorflow dependencies automatically ([e58689b](https://github.com/googleapis/python-aiplatform/commit/e58689b65dd7e2da3e0ece33c2e0b08f7b441089))
+* Install Bigframes torch dependencies automatically ([1d65347](https://github.com/googleapis/python-aiplatform/commit/1d65347468565e86d57e2dd003bdf87dbf37e613))
+* LLM - Added support for multiple chat response candidates ([587df74](https://github.com/googleapis/python-aiplatform/commit/587df744e2b6c4b3e1a96ff69937697fe80a97be))
+* LLM - Added support for multiple text generation response candidates ([c3ae475](https://github.com/googleapis/python-aiplatform/commit/c3ae475efaa9cbe85315b0792c0948a8f5c13bed))
+
+
+### Bug Fixes
+
+* Duplicate logs in Colab ([9b75259](https://github.com/googleapis/python-aiplatform/commit/9b7525987e05f6b63300440267fcdefea4caf35c))
+* LLM - Fixed tuning and evaluation when explicit credentials are specified ([188dffe](https://github.com/googleapis/python-aiplatform/commit/188dffeb2fc83b4c7c123624eb544b403f042c87))
+
+
+### Documentation
+
+* Add probabilistic inference to TiDE and L2L model code samples. ([efe88f9](https://github.com/googleapis/python-aiplatform/commit/efe88f98abb57ec11cdd326b87cb935a6b370257))
+
+## [1.34.0](https://github.com/googleapis/python-aiplatform/compare/v1.33.1...v1.34.0) (2023-10-02)
+
+
+### Features
+
+* Add Model Garden support to vertexai.preview.from_pretrained ([f978200](https://github.com/googleapis/python-aiplatform/commit/f9782007c58ee11fe276c373d8d7ac6c2b0cb249))
+* Enable vertexai preview persistent cluster executor ([0ae969d](https://github.com/googleapis/python-aiplatform/commit/0ae969d9e968c6b497d4f41e28f0ea1274d5253c))
+* LLM - Added the `count_tokens` method to the preview `TextGenerationModel` and `TextEmbeddingModel` classes ([6a2f2aa](https://github.com/googleapis/python-aiplatform/commit/6a2f2aa8e348c6fc1e24012291f538d2b86eae7b))
+* LLM - Improved representation for blocked responses ([222f222](https://github.com/googleapis/python-aiplatform/commit/222f222ed5f8e0c2266484b9992e45cf9827a119))
+* LLM - Released `ChatModel` tuning to GA ([7d667f9](https://github.com/googleapis/python-aiplatform/commit/7d667f9f8be264bb36e3d720caa0d8210aec5d6b))
+
+
+### Bug Fixes
+
+* Create PipelineJobSchedule in same project and location as associated PipelineJob by default ([c22220e](https://github.com/googleapis/python-aiplatform/commit/c22220ef5db440020331634f5e25199c575cc886))
+
+
+### Documentation
+
+* Add documentation for the preview namespace ([69a67f2](https://github.com/googleapis/python-aiplatform/commit/69a67f203627e4ee7d5980ce874d4db1e6af5dd8))
+
+## [1.33.1](https://github.com/googleapis/python-aiplatform/compare/v1.33.0...v1.33.1) (2023-09-20)
+
+
+### Bug Fixes
+
+* Lightning trainer fails to be unwrapped in remote training ([8271301](https://github.com/googleapis/python-aiplatform/commit/8271301454814b233a630d1c18ebe5e4833fcec2))
+
+## [1.33.0](https://github.com/googleapis/python-aiplatform/compare/v1.32.0...v1.33.0) (2023-09-18)
+
+
+### Features
+
+* Add Custom Job support to from_pretrained ([8b0add1](https://github.com/googleapis/python-aiplatform/commit/8b0add169ebd0683b56dbe3b643d533ebbd5e1ca))
+* Added async prediction and explanation support to the `Endpoint` class ([e9eb159](https://github.com/googleapis/python-aiplatform/commit/e9eb159756dfe90c9f72818204fa74d05096aec6))
+* LLM - Added support for async prediction methods ([c9c9f10](https://github.com/googleapis/python-aiplatform/commit/c9c9f10058afead91f7298ec4f09f901898718b3))
+* LLM - CodeChat - Added support for `context` ([f7feeca](https://github.com/googleapis/python-aiplatform/commit/f7feeca2161df7c12c993116364a663c4249f852))
+* Release Ray on Vertex SDK Preview ([3be36e6](https://github.com/googleapis/python-aiplatform/commit/3be36e6c17ef5e335d4f12af7a7c64702b76570e))
+
+
+### Bug Fixes
+
+* Handle Ray image parsing error ([41a3a83](https://github.com/googleapis/python-aiplatform/commit/41a3a83b2e9411a642668d9713282e87e38c64b4))
+* Vizier - Fixed field existence checks for child params in to_proto(). ([d516931](https://github.com/googleapis/python-aiplatform/commit/d51693115a32c054bf5f10ebe7fd2c52ea1ac2d7))
+
+## [1.32.0](https://github.com/googleapis/python-aiplatform/compare/v1.31.1...v1.32.0) (2023-09-05)
+
+
+### Features
+* LLM - Added `stop_sequences` parameter to streaming methods and `CodeChatModel` ([d62bb1b](https://github.com/googleapis/python-aiplatform/commit/d62bb1b2d9a41e700e3564012e70735e5148bb80))
+* LLM - Improved the handling of temperature and top_p in streaming ([6566529](https://github.com/googleapis/python-aiplatform/commit/656652964a9ab48ccd2b3c8249ecffc598da0a3a))
+* Support bigframes sharded parquet ingestion at remote deserialization (Tensorflow) ([a8f85ec](https://github.com/googleapis/python-aiplatform/commit/a8f85ec68749390058f19a40906f326f91ea3eb6))
+* Release Vertex SDK Preview ([c60b9ca](https://github.com/googleapis/python-aiplatform/commit/c60b9ca2dd8c9bbdee6d8b745ff5fa849fa38fe9))
+* Allow setting default service account ([d11b8e6](https://github.com/googleapis/python-aiplatform/commit/d11b8e67607fb9c526d04869cab47f23789bc8d7))
+
+
+### Bug Fixes
+
+* Fix feature update since no LRO is created ([468e6e7](https://github.com/googleapis/python-aiplatform/commit/468e6e7b5021ea73a5847b57564e3c090b3f8fca))
+* LLM - `CodeGenerationModel` now supports safety attributes ([c2c8a5e](https://github.com/googleapis/python-aiplatform/commit/c2c8a5e60ba11ab344cc41ce651740dc0556f6a2))
+* LLM - Fixed batch prediction on tuned models ([2a08535](https://github.com/googleapis/python-aiplatform/commit/2a085354ed921485493b482c0f4e96f3f49d06f0))
+* LLM - Fixed the handling of the `TextEmbeddingInput.task_type` parameter. ([2e3090b](https://github.com/googleapis/python-aiplatform/commit/2e3090b9f1f81b543650ae6bad31ee91395c03a4))
+* Make statistics Optional for TextEmbedding. ([7eaa1d4](https://github.com/googleapis/python-aiplatform/commit/7eaa1d4216f0494897606b1225c3b3599a235544))
+
+
+## [1.31.1](https://github.com/googleapis/python-aiplatform/compare/v1.31.0...v1.31.1) (2023-08-24)
+
+* fix: LLM - De-hardcoded the `max_output_tokens` default value for the `CodeGenerationModel` ([f5a20eb](https://github.com/googleapis/python-aiplatform/commit/f5a20eb381af0685d8b6fffad085ded87f4cf5e3))
+
+
+## [1.31.0](https://github.com/googleapis/python-aiplatform/compare/v1.30.1...v1.31.0) (2023-08-21)
+
+
+### Features
+
+* Add disable_retries option to custom jobs. ([db518b0](https://github.com/googleapis/python-aiplatform/commit/db518b0552a8900ca6a84a73ca711b775c786e92))
+* LLM - Added support for `stop_sequences` in inference ([6f7ea84](https://github.com/googleapis/python-aiplatform/commit/6f7ea84415e5d0efcc49487c93b0f1d94fd68974))
+* LLM - Exposed the `TextGenerationResponse.raw_prediction_response` ([f8f2b9c](https://github.com/googleapis/python-aiplatform/commit/f8f2b9cdf88f40fe0b7e86948515ab1cf72d92be))
+* LLM - Made tuning asynchronous when tuning becomes GA ([226ab8b](https://github.com/googleapis/python-aiplatform/commit/226ab8b64efc01d7ce20cdf924e103d7673376cf))
+* LLM - release model evaluation for TextGenerationModel to public preview ([8df5185](https://github.com/googleapis/python-aiplatform/commit/8df5185d668292d5adc11ebf9477e2fdd44599d4))
+* LLM - Released `TextGenerationModel` tuning to GA ([62ff30d](https://github.com/googleapis/python-aiplatform/commit/62ff30daa718ac7869714c68e55d6955d6355945))
+* LLM - Support streaming prediction for chat models ([ce60cf7](https://github.com/googleapis/python-aiplatform/commit/ce60cf75ec5c83db8033b553e1ad7164159fb3be))
+* LLM - Support streaming prediction for code chat models ([0359f1d](https://github.com/googleapis/python-aiplatform/commit/0359f1dd83bf86df58d1145ddf5e4634d3c8e1ff))
+* LLM - Support streaming prediction for code generation models ([3a8348b](https://github.com/googleapis/python-aiplatform/commit/3a8348bca2d9c74e5e52fb9fc131fdb766f49a5c))
+* LLM - Support streaming prediction for text generation models ([fb527f3](https://github.com/googleapis/python-aiplatform/commit/fb527f3aa59ee90fa6306196b328f513ee4b4d9c))
+* LLM - TextEmbeddingModel - Added support for structural inputs (`TextEmbeddingInput`), `auto_truncate` parameter and result `statistics` ([cbf9b6e](https://github.com/googleapis/python-aiplatform/commit/cbf9b6ee806d7eb89725f53c4509858a272b3141))
+* LVM - Added support for Image Generation models ([b3729c1](https://github.com/googleapis/python-aiplatform/commit/b3729c11a70abaf061daa56ed4c483c4118d5acf))
+* LVM - Released `ImageCaptioningModel` to GA ([7575046](https://github.com/googleapis/python-aiplatform/commit/7575046d953e83bbb8aa13769f28e1eb50e04a7d))
+* LVM - Released `ImageQnAModel` to GA ([fd5cb02](https://github.com/googleapis/python-aiplatform/commit/fd5cb0226f4cff7ee160d2005c5907b81f847a1e))
+* LVM - Released `MultiModalEmbeddingModel` to GA ([e99f366](https://github.com/googleapis/python-aiplatform/commit/e99f366fde802b8677b785613e02fc4d9f94d729))
+* LVM - Removed the `width` and `height` parameters from `ImageGenerationModel.generate_images` since the service has dropped support for image sizes and aspect ratios ([52897e6](https://github.com/googleapis/python-aiplatform/commit/52897e669ff91d3bb991fcf05ae9a18df93df05f))
+* Scheduled pipelines client GA. ([62b8b23](https://github.com/googleapis/python-aiplatform/commit/62b8b23e1144ec547b8d181240090b744dd5201a))
+
+
+### Documentation
+
+* Generate documentation for tune_model and related class ([705e1ea](https://github.com/googleapis/python-aiplatform/commit/705e1ea402684f3ff4a4cf1f80c04b88bf6cf7db))
+* LVM - Added autogenerated documentation for visual models ([18e8bb2](https://github.com/googleapis/python-aiplatform/commit/18e8bb283e80fa9efb26f5fe3f8997b0b038bb12))
+
+## [1.30.1](https://github.com/googleapis/python-aiplatform/compare/v1.30.0...v1.30.1) (2023-08-11)
+
+
+### Features
+
+* LLM - Added tuning support for `chat-bison` models ([3a97c52](https://github.com/googleapis/python-aiplatform/commit/3a97c523c8223f7f007008769ecb2efd0d44d182))
+* LLM - Added tuning support for `codechat-bison` models ([af6e455](https://github.com/googleapis/python-aiplatform/commit/af6e45556d6b093189f363a95f2be45e0008aebd))
+
+
+### Bug Fixes
+
+* LLM - Fixed the `TextGenerationModel.predict` parameters ([f3b25ab](https://github.com/googleapis/python-aiplatform/commit/f3b25ab694eaee18f5cc34f800f1b6021d291bca))
+
+
+### Miscellaneous Chores
+
+* Release 1.30.1 ([d1c79c4](https://github.com/googleapis/python-aiplatform/commit/d1c79c4ef87a8d4601ab04de050af2be4c6d6ecf))
+
+## [1.30.0](https://github.com/googleapis/python-aiplatform/compare/v1.29.0...v1.30.0) (2023-08-10)
+
+
+### Features
+
+* Add model.evaluate() method to Model class ([51df86e](https://github.com/googleapis/python-aiplatform/commit/51df86ee1390a51b82ffc015514ad1e145821a34))
+* Add support for providing only text to MultiModalEmbeddingModel.get_embeddings() ([38ec40a](https://github.com/googleapis/python-aiplatform/commit/38ec40a12cf863c9da3de8336dceba10d92f6f56))
+
+
+### Bug Fixes
+
+* LLM - Fixed filter in `list_tuned_model_names` ([57806fb](https://github.com/googleapis/python-aiplatform/commit/57806fb947e5b692cd8d4701e572eaf54585d383))
+
+## [1.29.0](https://github.com/googleapis/python-aiplatform/compare/v1.28.1...v1.29.0) (2023-08-02)
+
+
+### Features
+
+* Add preview CustomJob which can be run on persistent resource ([56906b0](https://github.com/googleapis/python-aiplatform/commit/56906b08d80bee64334f6ba0c713e30dae39cef4))
+* LLM - Support for Batch Prediction for the `textembedding` models (preview) ([a368538](https://github.com/googleapis/python-aiplatform/commit/a36853869e627aabf3dc563400d184f44c8ae876))
+* LLM - Support tuning for the code-bison model (preview) ([e4b23a2](https://github.com/googleapis/python-aiplatform/commit/e4b23a254aadfae821e326b238555cee2ecb463a))
+* LVM - Large Vision Models SDK (preview release). Support for image captioning and image QnA (`imagetext` model) and multi modal embedding (`multimodelembedding` model) (preview) ([9bbf1ea](https://github.com/googleapis/python-aiplatform/commit/9bbf1eaa02dda0723303cd39e9f6bdffab32ec21))
+
+
+### Bug Fixes
+
+* LLM - Fixed `get_tuned_model` for the future models that are not `text-bison` ([1adf72b](https://github.com/googleapis/python-aiplatform/commit/1adf72b866021b9e857166778dbddf83fd808fb7))
+
+
+### Documentation
+
+* Fix auto-generated pydoc for language_models ([7d72bd1](https://github.com/googleapis/python-aiplatform/commit/7d72bd1c3740039d7c63d1042aa6bcadbd3e4946))
+* LLM - Made it possible to provide message history to `CodeChatModel` when starting chat. ([cf46145](https://github.com/googleapis/python-aiplatform/commit/cf46145b3de8de794d4295f59d8af3ea9dd57826))
+
+## [1.28.1](https://github.com/googleapis/python-aiplatform/compare/v1.28.0...v1.28.1) (2023-07-18)
+
+
+### Features
+
+* LLM - Released the BatchPrediction to GA for TextGenerationModel ([701c3a2](https://github.com/googleapis/python-aiplatform/commit/701c3a29bb09b45b513bcf1bf332388a9b60ae2d))
+* LLM - Support tuning in the "us-central1" location ([4aa7745](https://github.com/googleapis/python-aiplatform/commit/4aa77451f5de33b90d82dba69b4c55cea5640b87))
+
+
+### Bug Fixes
+
+* Fix artifact registry link not showing in ui when creating schedules with SDK. ([203cb47](https://github.com/googleapis/python-aiplatform/commit/203cb477b6ef09a2f6d5f166b5b66a8c626848ef))
+* Fixed the installation error caused by a PyYAML issue ([4b86ce1](https://github.com/googleapis/python-aiplatform/commit/4b86ce1afacc6937fda81f6e664e22155e968c7d))
+* Require model name in ModelEvaluation.list() ([aed8c76](https://github.com/googleapis/python-aiplatform/commit/aed8c7604f5d89f52c53a599330fd502d02f7877))
+
+
+### Documentation
+
+* Fixed a docstring for train_steps ([1f55b05](https://github.com/googleapis/python-aiplatform/commit/1f55b05d4631f2b63ede8a3d5ec604d4550e22dd))
+
+
+### Miscellaneous Chores
+
+* Release 1.28.1 ([8ebf22e](https://github.com/googleapis/python-aiplatform/commit/8ebf22e3efbf47484117cbdf057cd286c8672b81))
+
+## [1.28.0](https://github.com/googleapis/python-aiplatform/compare/v1.27.1...v1.28.0) (2023-07-08)
+
+
+### Features
+
+* LLM - Released the Chat models to GA ([22aa26d](https://github.com/googleapis/python-aiplatform/commit/22aa26daabbaaa84f84d3cad524f2b6e8c53e77d))
+
+## [1.27.1](https://github.com/googleapis/python-aiplatform/compare/v1.27.0...v1.27.1) (2023-07-06)
+
+
+### Features
+
+* Add sdk support for xai example-based explanations ([f9ca1d5](https://github.com/googleapis/python-aiplatform/commit/f9ca1d52a6789f93b4eca9596c04f52a2ca75513))
+
+
+### Miscellaneous Chores
+
+* Release 1.27.1 ([2159f29](https://github.com/googleapis/python-aiplatform/commit/2159f29e8322aef33cfa27df52a57f7cc7722868))
+
+## [1.27.0](https://github.com/googleapis/python-aiplatform/compare/v1.26.1...v1.27.0) (2023-06-30)
+
+
+### Features
+
+* Add submit for CustomTrainingJob and CustomContainerTrainingJob which won't block until complete. ([d6476d0](https://github.com/googleapis/python-aiplatform/commit/d6476d0ed1440f58301d8be0b9043b13aa8d910d))
+* LLM - Added support for `learning_rate` in tuning ([c6cdd10](https://github.com/googleapis/python-aiplatform/commit/c6cdd108b5f3469340ca16bc6ef56efc55723ce3))
+* LLM - Released the Codey models to GA ([89609c9](https://github.com/googleapis/python-aiplatform/commit/89609c9007684e5cbc4b3e5c30d26c0d5539aa39))
+
+
+### Bug Fixes
+
+* Fix aiplatform.init bug by replacing experiment_name with experiment ([c60773a](https://github.com/googleapis/python-aiplatform/commit/c60773a7db8ce7a59d2cb5787dc90937776c0b8f))
+* Fix error when calling update_state() after ExperimentRun.list() ([cb255ec](https://github.com/googleapis/python-aiplatform/commit/cb255ec514995e193df7122dbf67bbd4011e6630))
+* LLM - Exported the `ChatMessage` class ([7bf7634](https://github.com/googleapis/python-aiplatform/commit/7bf7634e97dfe56c3130264eeb62a9b5d6b55cac))
+* LLM - Fixed the chat models failing due to safetyAttributes format ([459ba86](https://github.com/googleapis/python-aiplatform/commit/459ba86396ab9260fd7b28a1524c051b7ad300a5))
+* Vizier - Fixed pyvizier client study creation errors ([16299d1](https://github.com/googleapis/python-aiplatform/commit/16299d14b8f209218d6576614f773c1bcbd21d64))
+
+
+### Documentation
+
+* Fixed a docstring for _Dataset ([b68a941](https://github.com/googleapis/python-aiplatform/commit/b68a941853f9c38b0ff30f5d07cea1d7fb0700a6))
+* Fixed a docstring for TimeSeriesDataset ([a7dfce2](https://github.com/googleapis/python-aiplatform/commit/a7dfce217eebbef0877053b9c0f6f6127b556e82))
+* Populate GA LLM SDK Pydocs ([e248285](https://github.com/googleapis/python-aiplatform/commit/e248285b5da4c33a68ccd6198ce7b1d8ab20febf))
+* Update scheduled pipelines client max_run_count docstring with allowed values. ([750e161](https://github.com/googleapis/python-aiplatform/commit/750e16179e1a53bc916ae6db93cd28cfd3f911fe))
+
+## [1.26.1](https://github.com/googleapis/python-aiplatform/compare/v1.26.0...v1.26.1) (2023-06-21)
+
+
+### Features
+
+* Add additional scheduled pipelines client getters and unit tests. ([9371b4f](https://github.com/googleapis/python-aiplatform/commit/9371b4fd3f7529636fc323a7914960d0c6a70db4))
+* Add PipelineJobSchedule update method and unit tests. ([69c5f60](https://github.com/googleapis/python-aiplatform/commit/69c5f60bfea5308589a58c2b9ad9f392b3a1283e))
+* Add tunable parameters for Model Garden model training to the "AutoMLImageTrainingJob" in SDK. ([50646be](https://github.com/googleapis/python-aiplatform/commit/50646be154b2be6c4738858af2440ad207c4020a))
+* LLM - Added batch prediction ([2235305](https://github.com/googleapis/python-aiplatform/commit/2235305c7714835ff331e5294f90a6a23e31391d))
+* LLM - Exposed the chat history ([bf0e20b](https://github.com/googleapis/python-aiplatform/commit/bf0e20b497675125e0bc5abc10455b06f7a0c019))
+* LLM - Exposed the safety attributes ([01ba3ca](https://github.com/googleapis/python-aiplatform/commit/01ba3cabf522a500a29c4120a264e204a660482a))
+
+
+### Bug Fixes
+
+* Change scheduled pipelines client dashboard uri to view created schedules. Note: uri will not work until scheduler UI is GA. ([d4d8613](https://github.com/googleapis/python-aiplatform/commit/d4d86135ae1593eaffe782d1e197c2a844e6f28a))
+* Fix bug where scheduled pipeline jobs were not running. ([4e7d11a](https://github.com/googleapis/python-aiplatform/commit/4e7d11a072c2b3bdb8e6233ff879ec2c31a626ea))
+* Remove Schedule read mask because ListSchedules does not support it. ([1fda417](https://github.com/googleapis/python-aiplatform/commit/1fda4172baaf200414d95e7217bfef0e500cc16a))
+
+
+### Miscellaneous Chores
+
+* Release 1.26.1 ([42567d2](https://github.com/googleapis/python-aiplatform/commit/42567d2ba7dc14d7fbfea2a4afb71cb701582012))
+
+
+### Documentation
+
+* Update scheduled pipelines client wait() docstring. ([a7d92e5](https://github.com/googleapis/python-aiplatform/commit/a7d92e51ceab2e7c0f72aec1a19404c7c17c65a5))
+
+## [1.26.0](https://github.com/googleapis/python-aiplatform/compare/v1.25.0...v1.26.0) (2023-06-07)
+
+
+### Features
+
+* Add additional scheduled pipelines client create method unit tests. ([0463678](https://github.com/googleapis/python-aiplatform/commit/0463678723acc485da34e89e342c20a2b6809435))
+* Add pipelineJob create_schedule() method and unit test. ([635ae9c](https://github.com/googleapis/python-aiplatform/commit/635ae9c1170f536550e69fa2049e891f696c8d6f))
+* Add scheduled pipelines client create/get methods and unit tests. ([4755fc7](https://github.com/googleapis/python-aiplatform/commit/4755fc79d27da0975991d828e407df7fafe8c4d3))
+* Add scheduled pipelines client list/pause/resume methods and unit tests. ([ce5dee4](https://github.com/googleapis/python-aiplatform/commit/ce5dee454ff16e92141b8c8e46a7f855c5635843))
+* Adding `enable_access_logging` parameter to Endpoint.deploy() method, minor edit to batch_predict() docstring ([794cedd](https://github.com/googleapis/python-aiplatform/commit/794cedd0e9e34544d6c8d5bf6892ef2c28eaaf5d))
+* LLM - Added support for CMEK in tuning ([aebf74a](https://github.com/googleapis/python-aiplatform/commit/aebf74a666327210495dad12a28a0f8080d91f9b))
+* LLM - Released the LLM SDK to GA ([76465e2](https://github.com/googleapis/python-aiplatform/commit/76465e27ba233f04632688c8ce06b9d2818ff1b8))
+* Support publisher models in `BatchPredictionJob.create` ([13b11c6](https://github.com/googleapis/python-aiplatform/commit/13b11c667ac38bd3898ebca88428bea5bd923661))
+
+
+### Bug Fixes
+
+* CustomJob.from_local_script does not pass args to script for custom container images ([6ead69d](https://github.com/googleapis/python-aiplatform/commit/6ead69d069667ddc6590f2f32fac73ab0bd12b85))
+* Fix bug when checking PipelineJob failure status ([a154859](https://github.com/googleapis/python-aiplatform/commit/a154859141da22df6a9fcfdcc3898d9a94175a68))
+* Fix the bug that start_upload_tb_log() doesn't continuously upload ([66e6eae](https://github.com/googleapis/python-aiplatform/commit/66e6eaebbe2ac4e9cd370afa32242ae9117beece))
+* LLM - Fixed parameters set in `ChatModel.start_chat` being ignored ([a0d815d](https://github.com/googleapis/python-aiplatform/commit/a0d815d646d5b0885366677a533e44337530d71a))
+* LLM - Fixed the DataFrame staging on Windows ([056b0bd](https://github.com/googleapis/python-aiplatform/commit/056b0bde6f05b2a36934cc18c7f1bc34dce75dba))
+* Resource created by `_construct_sdk_resource_from_gapic` should use the project from the resource name instead of the default project. ([162b2f2](https://github.com/googleapis/python-aiplatform/commit/162b2f265ac6cd91a78f29e2110eaf87a4a6c2e3))
+* Retry for etag errors on context update. ([d3d5f9a](https://github.com/googleapis/python-aiplatform/commit/d3d5f9a2a33b4f22e53599b515f0e4d1415c5786))
+* Unbreak additional timeout for MatchingEngineIndexEndpoint deploy_index ([af199c0](https://github.com/googleapis/python-aiplatform/commit/af199c035c037478abccfa629801b843909baf99))
+
+
+### Documentation
+
+* Correct text embedding model ID docstring ([8824629](https://github.com/googleapis/python-aiplatform/commit/88246293a8a4ba94a13106bc99044129674fd15e))
+* LLM - Fixed the rendering of the example usage code blocks. ([eaaee28](https://github.com/googleapis/python-aiplatform/commit/eaaee28d1cf96eb1077f7092fb526d1162080a96))
+
+## [1.25.0](https://github.com/googleapis/python-aiplatform/compare/v1.24.1...v1.25.0) (2023-05-09)
+
+
+### Features
+
+* Add support for Large Language Models ([866c6aa](https://github.com/googleapis/python-aiplatform/commit/866c6aaf72b9a7a5f6155665f574cc11cf8075f4))
+* Add default TensorBoard support. ([fa7d3a0](https://github.com/googleapis/python-aiplatform/commit/fa7d3a0e3cd5040eb4ab1c3b0df4e494dc84bac3))
+* Add support for find_neighbors/read_index_datapoints in matching engine public endpoint ([e3a87f0](https://github.com/googleapis/python-aiplatform/commit/e3a87f04abf013341fe4f655b96405e27228ffdb))
+* Added the new root `vertexai` package ([fbd03b1](https://github.com/googleapis/python-aiplatform/commit/fbd03b15e9b71cbeeaebc868745a36c892b55c8f))
+
+
+### Bug Fixes
+
+* EntityType RPC update returns the updated EntityType - not an LRO. ([8f9c714](https://github.com/googleapis/python-aiplatform/commit/8f9c7144c152e105924d87abb30aa734af376486))
+* Fix default AutoML Forecasting transformations list. ([77b89c0](https://github.com/googleapis/python-aiplatform/commit/77b89c0151ce3647b8fac8f4e8b6a7f7c07a1192))
+* Fix type hints for `Prediction.predictions`. ([56518f1](https://github.com/googleapis/python-aiplatform/commit/56518f166215761354aba43d78301a11d198daf5))
+* Removed parameter Resume, due to causing confusion and errors. ([c82e0b5](https://github.com/googleapis/python-aiplatform/commit/c82e0b5fb74fe9ba15f9d0f14a441349499ee257))
+
+## [1.24.1](https://github.com/googleapis/python-aiplatform/compare/v1.24.0...v1.24.1) (2023-04-21)
+
+
+### Features
+
+* Add preview capability to deploy models with shared resources. ([29d4e45](https://github.com/googleapis/python-aiplatform/commit/29d4e45839180f50163de04cd0802ee88d8dd926))
+* Add support for create public index endpoint in matching engine ([7e6022b](https://github.com/googleapis/python-aiplatform/commit/7e6022bc05a8ed0e38329c92466e4ff2e924be87))
+* Add support for return public endpoint dns name in matching engine ([1b5ae44](https://github.com/googleapis/python-aiplatform/commit/1b5ae4402b74d234d0fd8c886e935b3e8919bb50))
+* Add the new model types to "AutoMLImageTrainingJob" in SDK. ([4d032d5](https://github.com/googleapis/python-aiplatform/commit/4d032d59840fdc812dbbedb45acdf45c259e50bd))
+* Adds the Time series Dense Encoder (TiDE) forecasting job. ([d8e6744](https://github.com/googleapis/python-aiplatform/commit/d8e67446dedd2c9fde58c6da1e468346391b8ab7))
+* Remove google internal annotation when export to github. ([fd5ff99](https://github.com/googleapis/python-aiplatform/commit/fd5ff998282dc5d0511f03ea6eb6d27c05183d59))
+
+
+### Bug Fixes
+
+* Support timestamp in Vertex SDK write_feature_values() ([4b0722c](https://github.com/googleapis/python-aiplatform/commit/4b0722cfcdcc5246054e0a185be9db1e23f8043b))
+
+
+### Documentation
+
+* Add Time series Dense Encoder (TiDE) model code sample. ([8e91a58](https://github.com/googleapis/python-aiplatform/commit/8e91a58b5fea6c8e4172e19f8d57f0db9a04bf92))
+* Fix docstring formatting for exceptions ([d75322c](https://github.com/googleapis/python-aiplatform/commit/d75322c24d1b802c77493550fa08bfffdf3ec8fb))
+
+
+### Miscellaneous Chores
+
+* Release 1.24.1 ([cf633a2](https://github.com/googleapis/python-aiplatform/commit/cf633a2513e215e409b9426ff751330fbc481de6))
+
+## [1.24.0](https://github.com/googleapis/python-aiplatform/compare/v1.23.0...v1.24.0) (2023-04-12)
+
+
+### Features
+
+* Add ExperimentRun.get_logged_custom_jobs method ([c116b07](https://github.com/googleapis/python-aiplatform/commit/c116b074c45387d9264fa8ee5d60723aa9bd63cd))
+* Add get method for Experiment and ExperimentRun ([41cd943](https://github.com/googleapis/python-aiplatform/commit/41cd9438f709e2ad8ed530b62d1967a15b5565e2))
+* Add incremental training to AutoMLImageTrainingJob. ([bb92380](https://github.com/googleapis/python-aiplatform/commit/bb92380f1281466db95d31218d1e898c77e596f5))
+* Add preview capability to manage DeploymentResourcePools. ([5df5da0](https://github.com/googleapis/python-aiplatform/commit/5df5da0ffa1c3a17235a061a756f9afdc261f2ac))
+* Add start_time support for BatchReadFeatureValues wrapper methods. ([91d8459](https://github.com/googleapis/python-aiplatform/commit/91d84591913044c547b09f70bf55ef8b4e35cd71))
+* Add TensorBoard log uploader ([3fad7bb](https://github.com/googleapis/python-aiplatform/commit/3fad7bba8c90f4a34259d9712728d16c59aba888))
+* Enable deployment of models that do not support deployment ([25f3f21](https://github.com/googleapis/python-aiplatform/commit/25f3f219210938c6eb0ea88d0fc3a0897d09ee4b))
+* Enable experiment tracking in CustomJob ([94a63b8](https://github.com/googleapis/python-aiplatform/commit/94a63b81a5a6cc51124d19484cefef58c6fc2cca))
+* Update the v1 service definition to add the embedding_id field in MatchRequest. ([5a1146e](https://github.com/googleapis/python-aiplatform/commit/5a1146e02ec9c021d3a2b064d30078f4e985591a))
+
+
+### Bug Fixes
+
+* Adding previously created PrivateEndpoint network parameter in Model deploy helper method ([3e1b206](https://github.com/googleapis/python-aiplatform/commit/3e1b206b1670e4e4c9cca5ffe02f770c9ffae700))
+
+
+### Documentation
+
+* Adds note to delete endpoint sample ([#2060](https://github.com/googleapis/python-aiplatform/issues/2060)) ([9922eb2](https://github.com/googleapis/python-aiplatform/commit/9922eb26ac1bdf7d99950cb5c85c02998197dc59))
+* Fix create tensorboard sample ([2c45123](https://github.com/googleapis/python-aiplatform/commit/2c45123d671ed91e1a56f1200e86485dc4f93f4f))
+* **samples:** Add sample for experiment run state update. ([111a747](https://github.com/googleapis/python-aiplatform/commit/111a747fdce1ab92e4d4673b1011c8b6a01d984e))
+* Update docstring for 3 model uploading methods ([a71e4a3](https://github.com/googleapis/python-aiplatform/commit/a71e4a389b9597ec09833cbe023ad497f9f4870d))
+* Update Vertex Forecasting weight column description. ([e0ee183](https://github.com/googleapis/python-aiplatform/commit/e0ee183d93984bd83b7d2745fec4db75b28976ae))
+
+## [1.23.0](https://github.com/googleapis/python-aiplatform/compare/v1.22.1...v1.23.0) (2023-03-15)
+
+
+### Features
+
+* Implement Model.copy functionality. ([94dd82f](https://github.com/googleapis/python-aiplatform/commit/94dd82fd2df04e50ede441145684e78b16c4e3e1))
+* Update the v1 service definition to add the fraction_leaf_nodes_to_search_override field which replaces leaf_nodes_to_search_percent_override. ([badd386](https://github.com/googleapis/python-aiplatform/commit/badd3863605f5b63ea107d6af09c71999852f846))
+
+
+### Documentation
+
+* Added missing comma in README ([8cb4377](https://github.com/googleapis/python-aiplatform/commit/8cb43770b33cd9b2070565bf409364d372f139b8))
+
+## [1.22.1](https://github.com/googleapis/python-aiplatform/compare/v1.22.0...v1.22.1) (2023-02-28)
+
+
+### Features
+
+* Add support for enable_dashboard_access field for Training jobs in SDK ([3500eab](https://github.com/googleapis/python-aiplatform/commit/3500eab379593023147c35654758daf2c0eaf02d))
+* Add the recently added new model type "cloud_1" to the "AutoMLImageTrainingJob" in SDK. ([581939b](https://github.com/googleapis/python-aiplatform/commit/581939b1879ca6b47af482883fe0dfbec089f804))
+
+
+### Documentation
+
+* Add temporal fusion transformer (TFT) model code sample. ([8ddc062](https://github.com/googleapis/python-aiplatform/commit/8ddc062669044ac0889d9f27c93a8b36c1140433))
+* **samples:** Add samples for autologging ([f8052b8](https://github.com/googleapis/python-aiplatform/commit/f8052b8f103b1bf8d7f891cf9ae537d7fa48c718))
+
+
+### Miscellaneous Chores
+
+* Release 1.22.1 ([ed4c0b1](https://github.com/googleapis/python-aiplatform/commit/ed4c0b1513c142aae06b5f8200bd922722a69a27))
+
+## [1.22.0](https://github.com/googleapis/python-aiplatform/compare/v1.21.0...v1.22.0) (2023-02-16)
+
+
+### Features
+
+* Add a return value (ClassificationMetrics) for the log_classification_metrics() ([8ebcdbd](https://github.com/googleapis/python-aiplatform/commit/8ebcdbdd47f69d537417106e44dfcb4226ee6041))
+* Add metric and parameter autologging to experiments ([96e9e12](https://github.com/googleapis/python-aiplatform/commit/96e9e1239ae13e70b9b8ae929f7343155efe3e22))
+* Add update_version to Model Registry ([8621e24](https://github.com/googleapis/python-aiplatform/commit/8621e24cd02cb545e353f54562bf111616d7a9f2))
+* Support a list of GCS URIs in CustomPythonPackageTrainingJob ([05bb71f](https://github.com/googleapis/python-aiplatform/commit/05bb71ffe437af6da4a8efc331673ff6fc5b38b3))
+* Support Model Serialization in Vertex Experiments(tensorflow) ([f38ddc2](https://github.com/googleapis/python-aiplatform/commit/f38ddc29b1f3c47cb7dd8952f8baed9103793ddf))
+
+
+### Bug Fixes
+
+* Added missing instances_format parameter to batch_prediction_job_samples ([82a2afc](https://github.com/googleapis/python-aiplatform/commit/82a2afc09d6bbfd216daed7ca0cd507c497543a0))
+* Address broken unit tests in certain environments ([d06b22d](https://github.com/googleapis/python-aiplatform/commit/d06b22d1ac6197c460092739e8572b9beb08bd63))
+* List method for MLMD schema classes ([2401a1d](https://github.com/googleapis/python-aiplatform/commit/2401a1dd642c89fd60c93266d72a6272a69ed357))
+* Unbreak additional timeout for _deploy_call() ([076308f](https://github.com/googleapis/python-aiplatform/commit/076308fa3f3bf440177ec895f42d7449cfa0d65b))
+* Unbreak additional timeout for MatchingEngine update_embeddings ([5d0bc1e](https://github.com/googleapis/python-aiplatform/commit/5d0bc1ea24cd1465f29df26f293fb9a5afb30da7))
+* Unbreak timeouts for Dataset create. ([328ebac](https://github.com/googleapis/python-aiplatform/commit/328ebac961f5771c8f544672faafbb6425375911))
+* Use Client.list_blobs instead of Bucket.list_blobs in CPR artifact downloader, to make sure that CPR works with custom service accounts on Vertex Prediction. ([bb27619](https://github.com/googleapis/python-aiplatform/commit/bb27619d71fe237690f9c14a37461f1ca839822b))
+
+
+### Documentation
+
+* Add a hint to auth Docker to the LocalModel push_image docstring. ([e97a6fb](https://github.com/googleapis/python-aiplatform/commit/e97a6fb684144962e96af84ad105df1adbe26010))
+* Fix Create and Import Tabular BQ dataset sample ([4415c10](https://github.com/googleapis/python-aiplatform/commit/4415c107609c04125a601a10f3ff579d5ca5371b))
+* Fix LocalModel push_image docstring. ([5fdb7fc](https://github.com/googleapis/python-aiplatform/commit/5fdb7fc2768240a7c8585491e5ea7afdeb725fa8))
+* Fixed a typo in docstring. ([4ee6232](https://github.com/googleapis/python-aiplatform/commit/4ee6232f771dce9d545ec62c6687c1b3e289f450))
+* New samples for model serialization ([83457ca](https://github.com/googleapis/python-aiplatform/commit/83457ca7c04fda3286bbe3f419c11ec31a043942))
+* Samples for model serialization ([7997094](https://github.com/googleapis/python-aiplatform/commit/79970947b12a097a14a82736ef3a9c5c465029b1))
+
+## [1.21.0](https://github.com/googleapis/python-aiplatform/compare/v1.20.0...v1.21.0) (2023-01-13)
+
+
+### Features
+
+* Add default skew threshold to be an optional input at _SkewDetectionConfig and also mark the target_field and data_source of skew config to optional. ([7da4164](https://github.com/googleapis/python-aiplatform/commit/7da4164697ac01ac94a45b34086facfd0d360f1b))
+* Add filter to Model Registry list_versions API. ([c1cb33f](https://github.com/googleapis/python-aiplatform/commit/c1cb33fb1488c2e935f857b9e0993b51fec67fef))
+* Add MLMD schema class ExperimentModel ([94b2f29](https://github.com/googleapis/python-aiplatform/commit/94b2f29f040829bbc97d29385f7652c377d9b36b))
+* Add Service Account support to BatchPredictionJob ([deba06b](https://github.com/googleapis/python-aiplatform/commit/deba06b938afa695b5fb2d8184647109913abd7c))
+* Add support for Predict Request Response Logging in Endpoint SDK ([372ab8d](https://github.com/googleapis/python-aiplatform/commit/372ab8dd59b3a2c5e5eae9af46141ff8e215a610))
+* Adding Feature Store: Streaming ingestion to GA ([6bc4c84](https://github.com/googleapis/python-aiplatform/commit/6bc4c848bd9104e5e76fda6e733c051e3ffd4f91))
+* Enable passing experiment_tensorboard to init without experiment ([369a0cc](https://github.com/googleapis/python-aiplatform/commit/369a0ccdea4fb869fb13c59603dfbe995c7a74b7))
+* Support Model Serialization in Vertex Experiments(sklearn) ([d4deed3](https://github.com/googleapis/python-aiplatform/commit/d4deed3d59dec90c02ea681b4b8dc33399815069))
+* Support Model Serialization in Vertex Experiments(xgboost) ([fe75eba](https://github.com/googleapis/python-aiplatform/commit/fe75ebaeeb9bbd19307100bed06cba5ca282aeed))
+
+
+### Bug Fixes
+
+* `Endpoint.undeploy_all()` doesn't undeploy all models ([9fb24d7](https://github.com/googleapis/python-aiplatform/commit/9fb24d7d647eeb8b93adf432bd20ca368c3d46f7))
+* Fix bug in associating tensorboard to an experiment ([6def0b8](https://github.com/googleapis/python-aiplatform/commit/6def0b80f587a5523754ab027e98ea8e4c7feaa9))
+* Pin shapely version to <2.0.0 ([1efd816](https://github.com/googleapis/python-aiplatform/commit/1efd81666ac63e1704322ad5771b3208a35bc479))
+* Unbreak timeouts for Dataset create, FeatureStore ingest, and MatchingEngine Index create. ([3096d1c](https://github.com/googleapis/python-aiplatform/commit/3096d1c72de7c280e56d8d58192230ba36ccfce3))
+* Updated proto message formatting logic for batch predict model monitoring ([f87fef0](https://github.com/googleapis/python-aiplatform/commit/f87fef040cbdc15b5ab547426b7d4ce44b53f2c2))
+
+## [1.20.0](https://github.com/googleapis/python-aiplatform/compare/v1.19.1...v1.20.0) (2022-12-15)
+
+
+### Features
+
+* Adds the temporal fusion transformer (TFT) forecasting job ([99313e0](https://github.com/googleapis/python-aiplatform/commit/99313e0baacd61d7d00d6576a22b151c1d8e1a49))
+* Reraise exceptions from API calls ([d72bc83](https://github.com/googleapis/python-aiplatform/commit/d72bc835184c73e61bb2879e53c71d6772d1b802))
+
+
+### Documentation
+
+* **samples:** Feature Store: Streaming ingestion code sample and test ([bc9e2cf](https://github.com/googleapis/python-aiplatform/commit/bc9e2cf55f6b4f651c79f68f426a75ed42a7d4c7))
+
+## [1.19.1](https://github.com/googleapis/python-aiplatform/compare/v1.19.0...v1.19.1) (2022-12-08)
+
+
+### Features
+
+* Add explanationSpec to TrainingPipeline-based custom jobs ([957703f](https://github.com/googleapis/python-aiplatform/commit/957703f9b9c953ee1f67740a652f68279907b104))
+
+
+### Bug Fixes
+
+* Add pre-built container(tf2-gpu-2-1) to the container URI list ([cdd557e](https://github.com/googleapis/python-aiplatform/commit/cdd557e5e86b0b4d4cf401509aba5914e0bab8b7))
+* Fix bug that broke profiler with '0-rc2' tensorflow versions. ([8779df5](https://github.com/googleapis/python-aiplatform/commit/8779df5362a6851372cf3cea005a1c6c3096b19e))
+* Fixed argument name in UnmanagedContainerModel ([d876b3a](https://github.com/googleapis/python-aiplatform/commit/d876b3ad8d0129dc98de9f86567d5bf17791058b))
+
+
+### Documentation
+
+* Add a sample for create_tensorboard. ([52656ca](https://github.com/googleapis/python-aiplatform/commit/52656cac24eedd500a3d97b3d4678857b1d51ed8))
+* Fix Experiment resource name format docstring. ([f8e5842](https://github.com/googleapis/python-aiplatform/commit/f8e5842a086bcd90c3b153ffa9dc7e788650e670))
+* Fix get Experiment data frame sample ([24e1465](https://github.com/googleapis/python-aiplatform/commit/24e146551237c494349b324ee8830154d129860c))
+* Update docstrings for "data_item_labels" in dataset ([b2f8c42](https://github.com/googleapis/python-aiplatform/commit/b2f8c42d88c29010cf78a9f44fb3cdb711a1e94c))
+* Update README fix product doc link ([43a2679](https://github.com/googleapis/python-aiplatform/commit/43a2679c0d6f5cba7dff4535a03aedd84e09a2f1))
+
+
+### Miscellaneous Chores
+
+* Release 1.19.1 ([f01867f](https://github.com/googleapis/python-aiplatform/commit/f01867f697a5d5134c993283f7cf9b22717da029))
+
+## [1.19.0](https://github.com/googleapis/python-aiplatform/compare/v1.18.3...v1.19.0) (2022-11-17)
+
+
+### Features
+
+* Add Feature Store: Streaming Ingestion (write_feature_values()) and introduce Preview namespace to Vertex SDK ([bae0315](https://github.com/googleapis/python-aiplatform/commit/bae03158c06865d1b61c06a1c8af64e876ce76dd))
+* Add bq_dataset_id parameter to batch_serve_to_df ([bb72562](https://github.com/googleapis/python-aiplatform/commit/bb72562f4515b6ace73a735477584ca0b5a30f58))
+* Add annotation_labels to ImportDataConfig in aiplatform v1 dataset.proto ([43e2805](https://github.com/googleapis/python-aiplatform/commit/43e28052d798c380de6e102edbe257a0100738cd))
+* Add support for ordery_by in Metadata SDK list methods for Artifact, Execution and Context. ([2377606](https://github.com/googleapis/python-aiplatform/commit/23776066909b5b7f77f704722d2719e1a1733ad4))
+* Support global network parameter. ([c7f57ad](https://github.com/googleapis/python-aiplatform/commit/c7f57ad505b7251b9c663538e2312998445db691))
+
+
+### Bug Fixes
+
+* Correct data file gcs path for import_data_text_sentiment_analysis_sample test ([86df4b5](https://github.com/googleapis/python-aiplatform/commit/86df4b5d79118caf8f45c3845c92afe6585c24e9))
+* Print error for schema classes ([13e2165](https://github.com/googleapis/python-aiplatform/commit/13e216518f20a32c7e18e6ea5b497a5fcb1d77a0))
+
+
+### Documentation
+
+* Update README with new link for AI Platform API ([35b83d9](https://github.com/googleapis/python-aiplatform/commit/35b83d90649ec396b736469278def4aaaf80621e))
+
+## [1.18.3](https://github.com/googleapis/python-aiplatform/compare/v1.18.2...v1.18.3) (2022-11-01)
+
+
+### Documentation
+
+* Add a sample for get_experiment_run_artifacts ([7266352](https://github.com/googleapis/python-aiplatform/commit/7266352ddddf035f68aed96d05d27bdf46559418))
+
+## [1.18.3](https://github.com/googleapis/python-aiplatform/compare/v1.18.2...v1.18.3) (2022-10-31)
+
+
+### Documentation
+
+* Add a sample for get_experiment_run_artifacts ([7266352](https://github.com/googleapis/python-aiplatform/commit/7266352ddddf035f68aed96d05d27bdf46559418))
+
+## [1.18.2](https://github.com/googleapis/python-aiplatform/compare/v1.18.0...v1.18.2) (2022-10-20)
+
+
+### Bug Fixes
+
+* Added proto message conversion to MDMJob.update fields ([#1718](https://github.com/googleapis/python-aiplatform/issues/1718)) ([9e77c61](https://github.com/googleapis/python-aiplatform/commit/9e77c61ab7349d4278274b317d316dbf2b8453c7))
+* Log_classification_metrics ([#1742](https://github.com/googleapis/python-aiplatform/issues/1742)) ([3588526](https://github.com/googleapis/python-aiplatform/commit/3588526deb5e5c5cf7467d8ef104cff3dafaa09f))
+* PipelineJob should only pass bearer tokens for AR URIs ([b43851c](https://github.com/googleapis/python-aiplatform/commit/b43851c791441e5a789ed43ffb9fe855555bb017))
+
+
+### Documentation
+
+* Fix create experiment sample ([#1716](https://github.com/googleapis/python-aiplatform/issues/1716)) ([cba7fbf](https://github.com/googleapis/python-aiplatform/commit/cba7fbfba6f06273aa3f95911c92d25720009c28))
+* Resurface googleapis.dev and prediction docs ([#1724](https://github.com/googleapis/python-aiplatform/issues/1724)) ([24f0c6f](https://github.com/googleapis/python-aiplatform/commit/24f0c6ff0ab25b2d03c1e0521f680fea8ed35d00))
+* **samples:** Improve docstring of Vertex AI Python SDK Model Registry samples ([#1705](https://github.com/googleapis/python-aiplatform/issues/1705)) ([f97e90f](https://github.com/googleapis/python-aiplatform/commit/f97e90f3d40e34c4af92dccbfe8246943e8c79d5))
+
+## [1.18.1](https://github.com/googleapis/python-aiplatform/compare/v1.18.0...v1.18.1) (2022-10-10)
+
+
+### Bug Fixes
+
+* **deps:** Allow protobuf 3.19.5 ([#1720](https://github.com/googleapis/python-aiplatform/issues/1720)) ([83e3b7c](https://github.com/googleapis/python-aiplatform/commit/83e3b7c284d3fe0b5e40b3dc25c6246e792c24c0))
+
+## [1.18.0](https://github.com/googleapis/python-aiplatform/compare/v1.17.1...v1.18.0) (2022-10-03)
+
+
+### Features
+
+* Add deleteFeatureValues in aiplatform v1beta1 featurestore_service.proto ([#1670](https://github.com/googleapis/python-aiplatform/issues/1670)) ([9a506ee](https://github.com/googleapis/python-aiplatform/commit/9a506eeae6be3fa33b262dc7782d0bb4f10bf765))
+* Add model_source_info to Model in aiplatform v1beta1 model.proto ([#1691](https://github.com/googleapis/python-aiplatform/issues/1691)) ([876fb2a](https://github.com/googleapis/python-aiplatform/commit/876fb2a832a458a4b457b0cd2481e0e783493215))
+* Add support for HTTPS URI pipeline templates ([#1683](https://github.com/googleapis/python-aiplatform/issues/1683)) ([926d0b6](https://github.com/googleapis/python-aiplatform/commit/926d0b6f0eb27a3d24dd3a6fb0a16d7c4e5b9662))
+* Add support for V1 and V2 classification models for the V1Beta2 API ([#1680](https://github.com/googleapis/python-aiplatform/issues/1680)) ([1cda4b4](https://github.com/googleapis/python-aiplatform/commit/1cda4b4833d31c1be44322850c4e0574e3e46e2b))
+* Support complex metrics in Vertex Experiments ([#1698](https://github.com/googleapis/python-aiplatform/issues/1698)) ([ed0492e](https://github.com/googleapis/python-aiplatform/commit/ed0492eb866490e85cd9a5c43fc519510e1f5917))
+
+
+### Bug Fixes
+
+* **deps:** Require protobuf >= 3.20.2 ([#1699](https://github.com/googleapis/python-aiplatform/issues/1699)) ([c5c77ad](https://github.com/googleapis/python-aiplatform/commit/c5c77ada26d1a51a55f18d4beee47ceab8987103))
+* Fix endpoint parsing in ModelDeploymentMonitoringJob.update ([#1671](https://github.com/googleapis/python-aiplatform/issues/1671)) ([186872d](https://github.com/googleapis/python-aiplatform/commit/186872d97d55f1ff8a52f5517fb3dd9b620f678d))
+* Project/location parsing for nested resources ([#1700](https://github.com/googleapis/python-aiplatform/issues/1700)) ([9e1d796](https://github.com/googleapis/python-aiplatform/commit/9e1d79671e3be73283842649705be83c3eb3de6a))
+* Show inherited SDK methods in pydoc ([#1707](https://github.com/googleapis/python-aiplatform/issues/1707)) ([2b7583b](https://github.com/googleapis/python-aiplatform/commit/2b7583b47194b049b083f4cb490d8abcb32e5b10))
+
+
+### Documentation
+
+* Fix typos ([#1709](https://github.com/googleapis/python-aiplatform/issues/1709)) ([5fe515c](https://github.com/googleapis/python-aiplatform/commit/5fe515cd17c90103808ca1f11a3899d1250562bf))
+
+## [1.17.1](https://github.com/googleapis/python-aiplatform/compare/v1.17.0...v1.17.1) (2022-09-15)
+
+
+### Features
+
+* Add enable_simple_view to PipelineJob.list() ([#1614](https://github.com/googleapis/python-aiplatform/issues/1614)) ([627fdf9](https://github.com/googleapis/python-aiplatform/commit/627fdf9542e177d9ee07849c7c7ca6857381a83d))
+* Add eval metrics types to get_experiment_df ([#1648](https://github.com/googleapis/python-aiplatform/issues/1648)) ([944b03f](https://github.com/googleapis/python-aiplatform/commit/944b03fc9bd7f1d69f6852c65286b72f49f425df))
+* Adding Python 3.10 support + updating google-vizier version ([#1644](https://github.com/googleapis/python-aiplatform/issues/1644)) ([f4766dc](https://github.com/googleapis/python-aiplatform/commit/f4766dc288904ae504e9559c535907c7497e04a9))
+
+
+### Miscellaneous Chores
+
+* Release 1.17.1 ([#1668](https://github.com/googleapis/python-aiplatform/issues/1668)) ([b54d659](https://github.com/googleapis/python-aiplatform/commit/b54d65930bc48d675679fe3fc99943c418b4a5f5))
+
+## [1.17.0](https://github.com/googleapis/python-aiplatform/compare/v1.16.1...v1.17.0) (2022-09-07)
+
+
+### Features
+
+* Add input artifact when creating a pipeline ([#1593](https://github.com/googleapis/python-aiplatform/issues/1593)) ([2cf9fe6](https://github.com/googleapis/python-aiplatform/commit/2cf9fe69e6946e21da585ad44d76c2c0c6209cca))
+* Add model_monitoring_stats_anomalies,model_monitoring_status to BatchPredictionJob in aiplatform v1beta1 batch_prediction_job.proto ([#1621](https://github.com/googleapis/python-aiplatform/issues/1621)) ([0a1f4e9](https://github.com/googleapis/python-aiplatform/commit/0a1f4e9c2568af585623543c0d9cc291bf2e1c9f))
+* Add read_mask to ListPipelineJobsRequest in aiplatform v1 pipeline_service ([#1589](https://github.com/googleapis/python-aiplatform/issues/1589)) ([9e19a40](https://github.com/googleapis/python-aiplatform/commit/9e19a40a4a81cf17759637f0310f0885166b626e))
+* Add samples for get execution input and output artifacts ([#1585](https://github.com/googleapis/python-aiplatform/issues/1585)) ([eb5a4b6](https://github.com/googleapis/python-aiplatform/commit/eb5a4b65a16ed13dd45f2c3098b23aeabd5fd565))
+* Add support for SDK Method metrics tracking via _USER_AGENT_SDK… ([#1591](https://github.com/googleapis/python-aiplatform/issues/1591)) ([28e56ef](https://github.com/googleapis/python-aiplatform/commit/28e56ef1c8c51e57a5d818b1cd45dd3ec4c5e4bf))
+* Support filters in matching engine vector matching ([#1608](https://github.com/googleapis/python-aiplatform/issues/1608)) ([d591d3e](https://github.com/googleapis/python-aiplatform/commit/d591d3e36b76e5bf2f4828041cbdeb783bdcfea5))
+* Support model monitoring for batch prediction in Vertex SDK ([#1570](https://github.com/googleapis/python-aiplatform/issues/1570)) ([bbec998](https://github.com/googleapis/python-aiplatform/commit/bbec998ea71aa342fee08d0d5fa115ab36a6f60f))
+* Support raw_predict for Endpoint ([#1620](https://github.com/googleapis/python-aiplatform/issues/1620)) ([cc7c968](https://github.com/googleapis/python-aiplatform/commit/cc7c968512db84385c053a8353a8dad8a57aef22))
+* Support ResourceName with Version. ([#1609](https://github.com/googleapis/python-aiplatform/issues/1609)) ([737dc2b](https://github.com/googleapis/python-aiplatform/commit/737dc2bacd89e3c9af06bb408abe5ed9304a1397))
+* Update the samples of hyperparameter tuning in the public doc ([#1600](https://github.com/googleapis/python-aiplatform/issues/1600)) ([653b759](https://github.com/googleapis/python-aiplatform/commit/653b759a6d5a7740511d4ff6d27762ff4056008e))
+
+
+### Bug Fixes
+
+* **deps:** Allow protobuf < 5.0.0 ([#1587](https://github.com/googleapis/python-aiplatform/issues/1587)) ([3d3e0aa](https://github.com/googleapis/python-aiplatform/commit/3d3e0aa9f0283dafe3056ed7d3067310535db48c))
+* **deps:** require proto-plus >= 1.22.0 ([3d3e0aa](https://github.com/googleapis/python-aiplatform/commit/3d3e0aa9f0283dafe3056ed7d3067310535db48c))
+* Log_metrics docstring error ([#1588](https://github.com/googleapis/python-aiplatform/issues/1588)) ([0385c4c](https://github.com/googleapis/python-aiplatform/commit/0385c4c1515b6ddb75732ab5dfdbcf0bd3fb3a00))
+* Study.list() method ([#1594](https://github.com/googleapis/python-aiplatform/issues/1594)) ([47eb0ae](https://github.com/googleapis/python-aiplatform/commit/47eb0aef224f14e3ce8f0144b0e266d7bdf5ec83))
+* Update Model.list_model_evaluations and get_model_evaluation to use the provided version ([#1616](https://github.com/googleapis/python-aiplatform/issues/1616)) ([8fb836b](https://github.com/googleapis/python-aiplatform/commit/8fb836bcf361230e47d3af9e7ca5882a17da136a))
+
+
+### Documentation
+
+* ExperimentRun docstring and end_run kwarg ([#1649](https://github.com/googleapis/python-aiplatform/issues/1649)) ([075a6c2](https://github.com/googleapis/python-aiplatform/commit/075a6c2fca09e76db37224fac0d0a2d6096e44ea))
+* Remove TODOs from docs ([#1513](https://github.com/googleapis/python-aiplatform/issues/1513)) ([406ed84](https://github.com/googleapis/python-aiplatform/commit/406ed84aca7eed06788289edb7423cfec602b012))
+* **samples:** Add AutoML image classification sample ([#923](https://github.com/googleapis/python-aiplatform/issues/923)) ([677b311](https://github.com/googleapis/python-aiplatform/commit/677b311803ecae7069f94f3a8829b93dcd42305f))
+* **samples:** Add Model Registry samples to Vertex AI Python SDK ([#1602](https://github.com/googleapis/python-aiplatform/issues/1602)) ([72fd36d](https://github.com/googleapis/python-aiplatform/commit/72fd36d703fb45ac411a0c44160b8b1d5ebb068b))
+* **samples:** Added seq2seq sample ([#1595](https://github.com/googleapis/python-aiplatform/issues/1595)) ([4e7175f](https://github.com/googleapis/python-aiplatform/commit/4e7175f6c4a2a4c8fbceae4c351578829ef88519))
+
+## [1.16.1](https://github.com/googleapis/python-aiplatform/compare/v1.16.0...v1.16.1) (2022-08-02)
+
+
+### Features
+
+* Add google.ClassificationMetrics, google.RegressionMetrics, and google.Forecasting Metrics ([#1549](https://github.com/googleapis/python-aiplatform/issues/1549)) ([3526b3e](https://github.com/googleapis/python-aiplatform/commit/3526b3e28b41488929de1a1c33dad787bee1a794))
+* added support for conditional parameters in hyperparameter tuning ([#1544](https://github.com/googleapis/python-aiplatform/issues/1544)) ([744cc38](https://github.com/googleapis/python-aiplatform/commit/744cc380fb2e7ab15b8d55be4af6ccd8ce7b98a7))
+* SDK support for model monitoring ([#1249](https://github.com/googleapis/python-aiplatform/issues/1249)) ([18c88d1](https://github.com/googleapis/python-aiplatform/commit/18c88d1d9e5e2e70fab7d435064ae4adc1077f1b))
+* support case insensitive match on search facets ([#1523](https://github.com/googleapis/python-aiplatform/issues/1523)) ([cb4d405](https://github.com/googleapis/python-aiplatform/commit/cb4d405264d639bbe5302d30e2c8cba81a37db37))
+* Vertex Vizier support in SDK. ([#1434](https://github.com/googleapis/python-aiplatform/issues/1434)) ([b63b3ba](https://github.com/googleapis/python-aiplatform/commit/b63b3ba8b9a18faeeb71eb2565a63b28d6b8d590))
+
+
+### Bug Fixes
+
+* Correct docstring in Dataset classes ([#1553](https://github.com/googleapis/python-aiplatform/issues/1553)) ([caebb47](https://github.com/googleapis/python-aiplatform/commit/caebb47da6647b7ea395668703468ef99fcef877))
+
+
+### Miscellaneous Chores
+
+* release 1.16.1 ([#1568](https://github.com/googleapis/python-aiplatform/issues/1568)) ([4437cdd](https://github.com/googleapis/python-aiplatform/commit/4437cddda827d82dadf47ade8b24f05b74d2ae0d))
+
+## [1.16.0](https://github.com/googleapis/python-aiplatform/compare/v1.15.1...v1.16.0) (2022-07-27)
+
+
+### Features
+
+* Add metadata SDK sample for delete method. ([#1530](https://github.com/googleapis/python-aiplatform/issues/1530)) ([46aa9b5](https://github.com/googleapis/python-aiplatform/commit/46aa9b5e9cf1df72f41ad997d2bd1ca0a40ecfca))
+* Add metadata SDK samples for list artifact and list execution ([#1514](https://github.com/googleapis/python-aiplatform/issues/1514)) ([c0d01f1](https://github.com/googleapis/python-aiplatform/commit/c0d01f1d7c9dbcc3df115a5a59eb23b5ce1440dc))
+* Add Metadata SDK support and samples for get method ([#1516](https://github.com/googleapis/python-aiplatform/issues/1516)) ([d442248](https://github.com/googleapis/python-aiplatform/commit/d4422483cb8864d0e7b8b23ee4e334156d8a7de6))
+* Add samples for Metadata context list, get, and create ([#1525](https://github.com/googleapis/python-aiplatform/issues/1525)) ([d913e1d](https://github.com/googleapis/python-aiplatform/commit/d913e1d777a4f0bb3315ad97283763bdf6d34589))
+* Change the Metadata SDK _Context class to an external class ([#1519](https://github.com/googleapis/python-aiplatform/issues/1519)) ([95b107c](https://github.com/googleapis/python-aiplatform/commit/95b107c8727245e1836f9cbddd3f2e331532dd62))
+* Refactor schema classes to subclass from _Resource ([#1536](https://github.com/googleapis/python-aiplatform/issues/1536)) ([93002e8](https://github.com/googleapis/python-aiplatform/commit/93002e82778b3ae8570809076f8bf55bf76dcf34))
+* Support custom containers in CustomJob.from_local_script ([#1483](https://github.com/googleapis/python-aiplatform/issues/1483)) ([be0b7e1](https://github.com/googleapis/python-aiplatform/commit/be0b7e1ff0649fe78250395950d81af0a848a244))
+* Vertex AI Prediction Custom Prediction Routine ([34bbd0a](https://github.com/googleapis/python-aiplatform/commit/34bbd0abbaf29e644ed7703b3251f1de65bf5a86))
+
+
+### Bug Fixes
+
+* Fixed getting the output GCS bucket in PipelineJob.submit ([#1542](https://github.com/googleapis/python-aiplatform/issues/1542)) ([69d6c7d](https://github.com/googleapis/python-aiplatform/commit/69d6c7dc4e87a9d1e908bd7ab3a5c6f552936c84))
+* Pass the PipelineJob credentials to `create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist` ([#1537](https://github.com/googleapis/python-aiplatform/issues/1537)) ([b53e2b5](https://github.com/googleapis/python-aiplatform/commit/b53e2b5942faad388a6c34bd4af700b666ed1f1c))
+
+## [1.15.1](https://github.com/googleapis/python-aiplatform/compare/v1.15.0...v1.15.1) (2022-07-18)
+
+
+### Features
+
+* add get_associated_experiment method to pipeline_jobs ([#1476](https://github.com/googleapis/python-aiplatform/issues/1476)) ([e9f2c3c](https://github.com/googleapis/python-aiplatform/commit/e9f2c3c54740e4735f864c5eabd3b0ac9655b6de))
+* Add sample for create artifact and execution using the Metadata SDK. ([#1462](https://github.com/googleapis/python-aiplatform/issues/1462)) ([1fc7dd9](https://github.com/googleapis/python-aiplatform/commit/1fc7dd99d618a462273d597518f007c6b0cb188e))
+* Add support for start_execution in MLMD SDK. ([#1465](https://github.com/googleapis/python-aiplatform/issues/1465)) ([298958f](https://github.com/googleapis/python-aiplatform/commit/298958f404f902b9e1d7e93bbceb97b43612f649))
+* Add support for Vertex Tables Q2 regions ([#1498](https://github.com/googleapis/python-aiplatform/issues/1498)) ([1b16f90](https://github.com/googleapis/python-aiplatform/commit/1b16f90821d13fc3fb693d60b44c2e57444c4287))
+* Added the PipelineJob.from_pipeline_func method ([#1415](https://github.com/googleapis/python-aiplatform/issues/1415)) ([6ef05de](https://github.com/googleapis/python-aiplatform/commit/6ef05de85b51a67f8df6ad8813518eb7c9158e71))
+
+
+### Bug Fixes
+
+* **deps:** require google-api-core>=1.32.0,>=2.8.0 ([#1512](https://github.com/googleapis/python-aiplatform/issues/1512)) ([6d09dee](https://github.com/googleapis/python-aiplatform/commit/6d09deec18d1152705dc55e912d4d0248ea64672))
+* Unbreak aiplatform.Experiment.create ([#1509](https://github.com/googleapis/python-aiplatform/issues/1509)) ([558c141](https://github.com/googleapis/python-aiplatform/commit/558c1410b43356da0832b3ded78977ee80be93cd))
+
+
+### Miscellaneous Chores
+
+* release 1.15.1 ([#1510](https://github.com/googleapis/python-aiplatform/issues/1510)) ([e500ff3](https://github.com/googleapis/python-aiplatform/commit/e500ff3b96384e25c08a50bd16e6e0fd6266d98b))
+
+## [1.15.0](https://github.com/googleapis/python-aiplatform/compare/v1.14.0...v1.15.0) (2022-06-29)
+
+
+### Features
+
+* add default_skew_threshold to TrainingPredictionSkewDetectionConfig in aiplatform v1beta1, v1 model_monitoring.proto ([#1411](https://github.com/googleapis/python-aiplatform/issues/1411)) ([7a8e3be](https://github.com/googleapis/python-aiplatform/commit/7a8e3bed0f1084ab6f33c032b9c436f9b57da6cd))
+* add model_monitoring_config to BatchPredictionJob in aiplatform v1beta1 batch_prediction_job.proto ([#1450](https://github.com/googleapis/python-aiplatform/issues/1450)) ([d35df58](https://github.com/googleapis/python-aiplatform/commit/d35df58f703e04c59bcdaa7b763920fca3750220))
+* add model_version_id to BatchPredictionJob in aiplatform v1 batch_prediction_job.proto ([#1453](https://github.com/googleapis/python-aiplatform/issues/1453)) ([9ef057a](https://github.com/googleapis/python-aiplatform/commit/9ef057abe35895b57dcbe3cbab619a1b785c0b95))
+* add model_version_id to UploadModelResponse in aiplatform v1 model_service.proto ([#1442](https://github.com/googleapis/python-aiplatform/issues/1442)) ([1c198f1](https://github.com/googleapis/python-aiplatform/commit/1c198f1d457d41a1bbb0ea501e8cff45bb119f8e))
+* Add PrivateEndpoint class and HTTP methods ([#1033](https://github.com/googleapis/python-aiplatform/issues/1033)) ([425a32f](https://github.com/googleapis/python-aiplatform/commit/425a32fccab2147101bec0031ca139bb311b7cff))
+* add support for accepting an Artifact Registry URL in pipeline_job ([#1405](https://github.com/googleapis/python-aiplatform/issues/1405)) ([e138cfd](https://github.com/googleapis/python-aiplatform/commit/e138cfd8b8f8033a562c1b7f2d340042f57db27e))
+* add support for failure_policy in PipelineJob ([#1452](https://github.com/googleapis/python-aiplatform/issues/1452)) ([d0968ea](https://github.com/googleapis/python-aiplatform/commit/d0968ead02d14edf209f36a18b61a9400233a4b8))
+* Improved metadata artifact and execution creation using python / SDK ([#1430](https://github.com/googleapis/python-aiplatform/issues/1430)) ([6c4374f](https://github.com/googleapis/python-aiplatform/commit/6c4374f893d34f8c2310223afb39f48f8961a0bb))
+* support dataset update ([#1416](https://github.com/googleapis/python-aiplatform/issues/1416)) ([e3eb82f](https://github.com/googleapis/python-aiplatform/commit/e3eb82f59d3f28dfedd71b9e69a0e967a01eada5))
+* Support for Model Versioning ([#1438](https://github.com/googleapis/python-aiplatform/issues/1438)) ([d890685](https://github.com/googleapis/python-aiplatform/commit/d890685c981d47992e4cdb2c9196f64dc520a5ff))
+* Vertex AI Experiments GA ([#1410](https://github.com/googleapis/python-aiplatform/issues/1410)) ([24d1bb6](https://github.com/googleapis/python-aiplatform/commit/24d1bb6d85dda76b9895dc924c7e0437864ea7a2))
+
+
+### Bug Fixes
+
+* Fixed docstrings for wildcards and matching engine type ([#1220](https://github.com/googleapis/python-aiplatform/issues/1220)) ([d778dee](https://github.com/googleapis/python-aiplatform/commit/d778dee69402bc280bf6cf0d7cf08cf165be5299))
+* Removed dirs_exist_ok parameter as it's not backwards compatible ([#1170](https://github.com/googleapis/python-aiplatform/issues/1170)) ([50d4129](https://github.com/googleapis/python-aiplatform/commit/50d41293d699643d3753693af79af95f68a2b07a))
+
+## [1.14.0](https://github.com/googleapis/python-aiplatform/compare/v1.13.1...v1.14.0) (2022-06-08)
+
+
+### Features
+
+* add a way to easily clone a PipelineJob ([#1239](https://github.com/googleapis/python-aiplatform/issues/1239)) ([efaf6ed](https://github.com/googleapis/python-aiplatform/commit/efaf6edc36262b095aa13d0b40348c20e39b3fc6))
+* add display_name and metadata to ModelEvaluation in aiplatform model_evaluation.proto ([b6bf6dc](https://github.com/googleapis/python-aiplatform/commit/b6bf6dc643274220e6eeca6479b5f9df61b11d16))
+* add Examples to Explanation related messages in aiplatform v1beta1 explanation.proto ([b6bf6dc](https://github.com/googleapis/python-aiplatform/commit/b6bf6dc643274220e6eeca6479b5f9df61b11d16))
+* Add hierarchy and window configs to Vertex Forecasting training job ([#1255](https://github.com/googleapis/python-aiplatform/issues/1255)) ([8560fa8](https://github.com/googleapis/python-aiplatform/commit/8560fa88c8e0fe51f2ae56f68be575e85db3696a))
+* add holiday regions for vertex forecasting ([#1253](https://github.com/googleapis/python-aiplatform/issues/1253)) ([0036ab0](https://github.com/googleapis/python-aiplatform/commit/0036ab07004e0c9ae7806c4c2c25f22d5af4a978))
+* add IAM policy to aiplatform_v1beta1.yaml ([b6bf6dc](https://github.com/googleapis/python-aiplatform/commit/b6bf6dc643274220e6eeca6479b5f9df61b11d16))
+* add latent_space_source to ExplanationMetadata in aiplatform v1 explanation_metadata.proto ([b6bf6dc](https://github.com/googleapis/python-aiplatform/commit/b6bf6dc643274220e6eeca6479b5f9df61b11d16))
+* add latent_space_source to ExplanationMetadata in aiplatform v1beta1 explanation_metadata.proto ([b6bf6dc](https://github.com/googleapis/python-aiplatform/commit/b6bf6dc643274220e6eeca6479b5f9df61b11d16))
+* add preset configuration for example-based explanations in aiplatform v1beta1 explanation.proto ([b6bf6dc](https://github.com/googleapis/python-aiplatform/commit/b6bf6dc643274220e6eeca6479b5f9df61b11d16))
+* add scaling to OnlineServingConfig in aiplatform v1 featurestore.proto ([b6bf6dc](https://github.com/googleapis/python-aiplatform/commit/b6bf6dc643274220e6eeca6479b5f9df61b11d16))
+* add seq2seq forecasting training job ([#1196](https://github.com/googleapis/python-aiplatform/issues/1196)) ([643d335](https://github.com/googleapis/python-aiplatform/commit/643d335693ec57848949ee173401867a1188678b))
+* add successful_forecast_point_count to CompletionStats in completion_stats.proto ([b6bf6dc](https://github.com/googleapis/python-aiplatform/commit/b6bf6dc643274220e6eeca6479b5f9df61b11d16))
+* add template_metadata to PipelineJob in aiplatform v1 pipeline_job.proto ([b6bf6dc](https://github.com/googleapis/python-aiplatform/commit/b6bf6dc643274220e6eeca6479b5f9df61b11d16))
+* Add Vertex Forecasting E2E test. ([#1248](https://github.com/googleapis/python-aiplatform/issues/1248)) ([e82c179](https://github.com/googleapis/python-aiplatform/commit/e82c1792293396045a1032df015a3700fc38609b))
+* Added forecasting snippets and fixed bugs with existing snippets ([#1210](https://github.com/googleapis/python-aiplatform/issues/1210)) ([4e4bff5](https://github.com/googleapis/python-aiplatform/commit/4e4bff5cac3a99e7f55145ab2aee83b20af67060))
+
+
+### Bug Fixes
+
+* change endpoint update method to return resource ([#1409](https://github.com/googleapis/python-aiplatform/issues/1409)) ([44e279b](https://github.com/googleapis/python-aiplatform/commit/44e279b15a1b03bf234111333517153ffdbaf696))
+* Changed system test to use list_models() correctly ([#1397](https://github.com/googleapis/python-aiplatform/issues/1397)) ([a3da19a](https://github.com/googleapis/python-aiplatform/commit/a3da19aac6bdd3fa8d218408582205f7241a4b04))
+* Pinned protobuf to prevent issues with pb files. ([#1398](https://github.com/googleapis/python-aiplatform/issues/1398)) ([7a54637](https://github.com/googleapis/python-aiplatform/commit/7a54637d9b0e7a52ec4648505a6902610c4cc5b7))
+
+
+### Documentation
+
+* fix changelog header to consistent size ([#1404](https://github.com/googleapis/python-aiplatform/issues/1404)) ([f6a7e6f](https://github.com/googleapis/python-aiplatform/commit/f6a7e6f35188d6032fc8b34a3c205b0632029e02))
+
+## [1.13.1](https://github.com/googleapis/python-aiplatform/compare/v1.13.0...v1.13.1) (2022-05-26)
+
+
+### Features
+
+* add batch_size kwarg for batch prediction jobs ([#1194](https://github.com/googleapis/python-aiplatform/issues/1194)) ([50bdb01](https://github.com/googleapis/python-aiplatform/commit/50bdb01504740ed31de788d8a160f3e2be7f55df))
+* add update endpoint ([#1162](https://github.com/googleapis/python-aiplatform/issues/1162)) ([0ecfe1e](https://github.com/googleapis/python-aiplatform/commit/0ecfe1e7ab8687c13cb4267985e8b6ebc7bd2534))
+* support autoscaling metrics when deploying models ([#1197](https://github.com/googleapis/python-aiplatform/issues/1197)) ([095717c](https://github.com/googleapis/python-aiplatform/commit/095717c8b77dc5d66e677413a437ea6ed92e0b1a))
+
+
+### Bug Fixes
+
+* check in service proto file ([#1174](https://github.com/googleapis/python-aiplatform/issues/1174)) ([5fdf151](https://github.com/googleapis/python-aiplatform/commit/5fdf151ee0d0a630c07a75dc8f19906e7ad1aa8a))
+* regenerate pb2 files using grpcio-tools ([#1394](https://github.com/googleapis/python-aiplatform/issues/1394)) ([406c868](https://github.com/googleapis/python-aiplatform/commit/406c868344280d424f4191c98bcbbdeaf947b2d1))
+
+
+### Documentation
+
+* update aiplatform SDK arrangement for Sphinx ([#1163](https://github.com/googleapis/python-aiplatform/issues/1163)) ([e9510ea](https://github.com/googleapis/python-aiplatform/commit/e9510ea6344a296e0c93ddf32280cf4c010ee4f1))
+
+
+### Miscellaneous Chores
+
+* release 1.13.1 ([#1395](https://github.com/googleapis/python-aiplatform/issues/1395)) ([df78407](https://github.com/googleapis/python-aiplatform/commit/df78407b2f14c95c9e84b4b1375a8de5bc9c7bb5))
+
+## [1.13.0](https://github.com/googleapis/python-aiplatform/compare/v1.12.1...v1.13.0) (2022-05-09)
+
+
+### Features
+
+* add ConvexAutomatedStoppingSpec to StudySpec in aiplatform v1 study.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add ConvexAutomatedStoppingSpec to StudySpec in aiplatform v1beta1 study.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add JOB_STATE_UPDATING to JobState in aiplatform v1 job_state.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add JOB_STATE_UPDATING to JobState in aiplatform v1beta1 job_state.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add LatestMonitoringPipelineMetadata to ModelDeploymentMonitoringJob in aiplatform v1beta1 model_deployment_monitoring_job.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add ListModelVersion, DeleteModelVersion, and MergeVersionAliases rpcs to aiplatform v1beta1 model_service.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add MfsMount in aiplatform v1 machine_resources.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add MfsMount in aiplatform v1beta1 machine_resources.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add model_id and parent_model to TrainingPipeline in aiplatform v1beta1 training_pipeline.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add model_version_id to DeployedModel in aiplatform v1beta1 endpoint.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add model_version_id to PredictResponse in aiplatform v1beta1 prediction_service.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add model_version_id to UploadModelRequest and UploadModelResponse in aiplatform v1beta1 model_service.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add nfs_mounts to WorkPoolSpec in aiplatform v1 custom_job.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add nfs_mounts to WorkPoolSpec in aiplatform v1beta1 custom_job.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add Pandas DataFrame support to TabularDataset ([#1185](https://github.com/googleapis/python-aiplatform/issues/1185)) ([4fe4558](https://github.com/googleapis/python-aiplatform/commit/4fe4558ea0aaf73e3c0e9715ae90cb729a4c5678))
+* add PredictRequestResponseLoggingConfig to aiplatform v1beta1 endpoint.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add reserved_ip_ranges to CustomJobSpec in aiplatform v1 custom_job.proto ([#1165](https://github.com/googleapis/python-aiplatform/issues/1165)) ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add reserved_ip_ranges to CustomJobSpec in aiplatform v1beta1 custom_job.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* add template_metadata to PipelineJob in aiplatform v1beta1 pipeline_job.proto ([#1186](https://github.com/googleapis/python-aiplatform/issues/1186)) ([99aca4a](https://github.com/googleapis/python-aiplatform/commit/99aca4a9b0deeefd294cfd64fa3e247cc41e006c))
+* add version_id to Model in aiplatform v1beta1 model.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+* allow creating featurestore without online node ([#1180](https://github.com/googleapis/python-aiplatform/issues/1180)) ([3224ae3](https://github.com/googleapis/python-aiplatform/commit/3224ae3402e9493866dd4958d011a431968b9c2c))
+* Allow users to specify timestamp split for vertex forecasting ([#1187](https://github.com/googleapis/python-aiplatform/issues/1187)) ([ee49e00](https://github.com/googleapis/python-aiplatform/commit/ee49e004c8fbd0c8c27760b525c6e7431057a45e))
+* Make matching engine API public ([#1192](https://github.com/googleapis/python-aiplatform/issues/1192)) ([469db6b](https://github.com/googleapis/python-aiplatform/commit/469db6b08a9aa7fc64d8ea27f7e2e2fb2e9f643b))
+* rename Similarity to Examples, and similarity to examples in ExplanationParameters in aiplatform v1beta1 explanation.proto ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+
+
+### Documentation
+
+* fix type in docstring for map fields ([847ad78](https://github.com/googleapis/python-aiplatform/commit/847ad789e09aec14238a7476a3fa88729ce24d6f))
+
+## [1.12.1](https://github.com/googleapis/python-aiplatform/compare/v1.12.0...v1.12.1) (2022-04-20)
+
+
+### Features
+
+* Add endpoind_id arg to Endpoint#create ([#1168](https://github.com/googleapis/python-aiplatform/issues/1168)) ([4c21993](https://github.com/googleapis/python-aiplatform/commit/4c21993642b84d7595ead7a63424260deafaf43c))
+* add ModelEvaluation support ([#1167](https://github.com/googleapis/python-aiplatform/issues/1167)) ([10f95cd](https://github.com/googleapis/python-aiplatform/commit/10f95cde5e0282a99041ff2108111970f52379f3))
+
+
+### Bug Fixes
+
+* change default for create_request_timeout arg to None ([#1175](https://github.com/googleapis/python-aiplatform/issues/1175)) ([47791f7](https://github.com/googleapis/python-aiplatform/commit/47791f79c56a67be7503b5d5d4eb72dc409b18a0))
+
+
+### Documentation
+
+* endpoint.create => aiplatform.Endpoint.create ([#1153](https://github.com/googleapis/python-aiplatform/issues/1153)) ([1122a26](https://github.com/googleapis/python-aiplatform/commit/1122a26fd01d4c964055ca85a683de0c91867b6f))
+* update changelog headers ([#1164](https://github.com/googleapis/python-aiplatform/issues/1164)) ([c1e899d](https://github.com/googleapis/python-aiplatform/commit/c1e899dba3f57e515b1f1958e962f355276460c4))
+* update model code snippet order in README ([#1154](https://github.com/googleapis/python-aiplatform/issues/1154)) ([404d7f1](https://github.com/googleapis/python-aiplatform/commit/404d7f13d8666ea673743ab54928046eb64ee542))
+
+
+### Miscellaneous Chores
+
+* release 1.12.1 ([#1176](https://github.com/googleapis/python-aiplatform/issues/1176)) ([f98d92e](https://github.com/googleapis/python-aiplatform/commit/f98d92ecf7ad42fdbb095e65f98800bc6b2d3d12))
+
+## [1.12.0](https://github.com/googleapis/python-aiplatform/compare/v1.11.0...v1.12.0) (2022-04-07)
+
+
+### Features
+
+* add categorical_threshold_config to FeaturestoreMonitoringConfig in aiplatform v1 featurestore_monitoring.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add categorical_threshold_config to FeaturestoreMonitoringConfig in aiplatform v1beta1 featurestore_monitoring.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add disable_monitoring to Feature in aiplatform v1 feature.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add disable_monitoring to Feature in aiplatform v1beta1 feature.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* Add done method for pipeline, training, and batch prediction jobs ([#1062](https://github.com/googleapis/python-aiplatform/issues/1062)) ([f3338fc](https://github.com/googleapis/python-aiplatform/commit/f3338fcd4f51072ee86b765ee580cfe3c4b222ce))
+* add import_features_analysis to FeaturestoreMonitoringConfig in aiplatform v1 featurestore_monitoring.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add import_features_analysis to FeaturestoreMonitoringConfig in aiplatform v1beta1 featurestore_monitoring.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add ImportModelEvaluation in aiplatform v1 model_service.proto ([#1105](https://github.com/googleapis/python-aiplatform/issues/1105)) ([ef5930c](https://github.com/googleapis/python-aiplatform/commit/ef5930c58838ce51f92ef1acb941f5141c83faad))
+* add monitoring_config to EntityType in aiplatform v1 entity_type.proto ([#1077](https://github.com/googleapis/python-aiplatform/issues/1077)) ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add monitoring_stats_anomalies to Feature in aiplatform v1 feature.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add monitoring_stats_anomalies to Feature in aiplatform v1beta1 feature.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add numerical_threshold_config to FeaturestoreMonitoringConfig in aiplatform v1 featurestore_monitoring.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add numerical_threshold_config to FeaturestoreMonitoringConfig in aiplatform v1beta1 featurestore_monitoring.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add objective to MonitoringStatsSpec in aiplatform v1 featurestore_service.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add objective to MonitoringStatsSpec in aiplatform v1beta1 featurestore_service.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add PredictRequestResponseLoggingConfig to Endpoint in aiplatform v1 endpoint.proto ([#1072](https://github.com/googleapis/python-aiplatform/issues/1072)) ([be0ccc4](https://github.com/googleapis/python-aiplatform/commit/be0ccc488dac22128be317ca40337d6b93af0906))
+* add staleness_days to SnapshotAnalysis in aiplatform v1 featurestore_monitoring.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* add staleness_days to SnapshotAnalysis in aiplatform v1beta1 featurestore_monitoring.proto ([38f3711](https://github.com/googleapis/python-aiplatform/commit/38f3711bd76bbcfe4ce48739bb11049e2711d47f))
+* Add support for Vertex Tables Q1 regions ([#1065](https://github.com/googleapis/python-aiplatform/issues/1065)) ([6383d4f](https://github.com/googleapis/python-aiplatform/commit/6383d4f20f1ab0a7634c1028cb9f270e91c31d2a))
+* add timeout arg across SDK ([#1099](https://github.com/googleapis/python-aiplatform/issues/1099)) ([184f7f3](https://github.com/googleapis/python-aiplatform/commit/184f7f327aa00b4c8d1acc24dcb1c4c4be6c5bcc))
+* Add timeout arguments to Endpoint.predict and Endpoint.explain ([#1094](https://github.com/googleapis/python-aiplatform/issues/1094)) ([cc59e60](https://github.com/googleapis/python-aiplatform/commit/cc59e60193a72bb57d699cabea03ab7bdd386b0e))
+* Made display_name parameter optional for most calls ([#882](https://github.com/googleapis/python-aiplatform/issues/882)) ([400b760](https://github.com/googleapis/python-aiplatform/commit/400b7608afeaca9a36936cabd402c5322eb9345b))
+* **sdk:** enable loading both JSON and YAML pipelines IR ([#1089](https://github.com/googleapis/python-aiplatform/issues/1089)) ([f2e70b1](https://github.com/googleapis/python-aiplatform/commit/f2e70b1563171b5a92a2c40edf29ae373bbeb175))
+* **v1beta1:** add `service_account` to `BatchPredictionJob` in `batch_prediction_job.proto` ([#1084](https://github.com/googleapis/python-aiplatform/issues/1084)) ([b7a5177](https://github.com/googleapis/python-aiplatform/commit/b7a517731bc8127d4186838bfb88fa883b2be853))
+
+
+### Bug Fixes
+
+* add resource manager utils to get project ID from project number ([#1068](https://github.com/googleapis/python-aiplatform/issues/1068)) ([f10a1d4](https://github.com/googleapis/python-aiplatform/commit/f10a1d4280c3e653c9f4795f0423bf07a23acdf9))
+* add self.wait() in operations after optional_sync supported resource creation ([#1083](https://github.com/googleapis/python-aiplatform/issues/1083)) ([79aeec1](https://github.com/googleapis/python-aiplatform/commit/79aeec1380068318398851b2a7b2fd6ddee7fa8b))
+* Don't throw exception when getting representation of unrun GCA objects ([#1071](https://github.com/googleapis/python-aiplatform/issues/1071)) ([c9ba060](https://github.com/googleapis/python-aiplatform/commit/c9ba0603e6a8e3d772af67367242aad7a18e03c8))
+* Fix import error string showing wrong pip install path ([#1076](https://github.com/googleapis/python-aiplatform/issues/1076)) ([74ffa19](https://github.com/googleapis/python-aiplatform/commit/74ffa19e7d540f6bb5f21d2335c2a5d23cc49ee2))
+* Fixed getting project ID when running on Vertex AI; Fixes [#852](https://github.com/googleapis/python-aiplatform/issues/852) ([#943](https://github.com/googleapis/python-aiplatform/issues/943)) ([876cb33](https://github.com/googleapis/python-aiplatform/commit/876cb33a407cfea5c965e4f348056b147b1d16c3))
+* Give aiplatform logging its own log namespace, let the user configure their own root logger ([#1081](https://github.com/googleapis/python-aiplatform/issues/1081)) ([fb78243](https://github.com/googleapis/python-aiplatform/commit/fb782434d456f41c6c6bd6664b203cebb53131b8))
+* Honoring the model's supported_deployment_resources_types ([#865](https://github.com/googleapis/python-aiplatform/issues/865)) ([db34b85](https://github.com/googleapis/python-aiplatform/commit/db34b85aaf211ca491313d2b8ae2a45253109614))
+* missing reference to logged_web_access_uris ([#1056](https://github.com/googleapis/python-aiplatform/issues/1056)) ([198a1b5](https://github.com/googleapis/python-aiplatform/commit/198a1b5753f509c9137a8d9e9b56d68e6e386563))
+* system tests failure from test_upload_and_deploy_xgboost_model ([#1149](https://github.com/googleapis/python-aiplatform/issues/1149)) ([c8422a9](https://github.com/googleapis/python-aiplatform/commit/c8422a9b807e092f2d48e7f3fa8b40c8724cc028))
+
+
+### Documentation
+
+* fix CustomContainerTrainingJob example in docstring ([#1101](https://github.com/googleapis/python-aiplatform/issues/1101)) ([d2fb9db](https://github.com/googleapis/python-aiplatform/commit/d2fb9db095d1acb15894df3d0a5e66128ce8f14e))
+* improve bigquery_destination_prefix docstring ([#1098](https://github.com/googleapis/python-aiplatform/issues/1098)) ([a46df64](https://github.com/googleapis/python-aiplatform/commit/a46df64ab99aee8d7e47b44394a234243dc2a0f8))
+* Include time dependency in documentation for weight, time, and target columns. ([#1102](https://github.com/googleapis/python-aiplatform/issues/1102)) ([52273c2](https://github.com/googleapis/python-aiplatform/commit/52273c2108c9bb24eadab214036f2ef93b847321))
+* **samples:** read, import, batch_serve, batch_create features ([#1046](https://github.com/googleapis/python-aiplatform/issues/1046)) ([80dd40d](https://github.com/googleapis/python-aiplatform/commit/80dd40dcb830ece3b5442d60834357ada6583204))
+* Update AutoML Video docstring ([#987](https://github.com/googleapis/python-aiplatform/issues/987)) ([6002d5d](https://github.com/googleapis/python-aiplatform/commit/6002d5d9bf24542f9f3f844e469bc3f8ad9636ec))
+
+## [1.11.0](https://github.com/googleapis/python-aiplatform/compare/v1.10.0...v1.11.0) (2022-03-03)
+
+
+### Features
+
+* add additional_experiement flag in the tables and forecasting training job ([#979](https://github.com/googleapis/python-aiplatform/issues/979)) ([5fe59a4](https://github.com/googleapis/python-aiplatform/commit/5fe59a4015882d56c22f9973aff888966dd53a2e))
+* add TPU_V2 & TPU_V3 values to AcceleratorType in aiplatform v1/v1beta1 accelerator_type.proto ([#1010](https://github.com/googleapis/python-aiplatform/issues/1010)) ([09c2e8a](https://github.com/googleapis/python-aiplatform/commit/09c2e8a368c6d265d99acfb12addd5ba6f1a50e6))
+* Added scheduling to CustomTrainingJob, CustomPythonPackageTrainingJob, CustomContainerTrainingJob ([#970](https://github.com/googleapis/python-aiplatform/issues/970)) ([89078e0](https://github.com/googleapis/python-aiplatform/commit/89078e0d2a719e2b0d25ae36ecd06c356a5a33c9))
+
+
+### Bug Fixes
+
+* **deps:** allow google-cloud-storage < 3.0.0dev ([#1008](https://github.com/googleapis/python-aiplatform/issues/1008)) ([1c34154](https://github.com/googleapis/python-aiplatform/commit/1c341544e9bd94c6ff0ee41177565c8c078673a3))
+* **deps:** require google-api-core>=1.31.5, >=2.3.2 ([#1050](https://github.com/googleapis/python-aiplatform/issues/1050)) ([dfbd68a](https://github.com/googleapis/python-aiplatform/commit/dfbd68a79f1c892c4380405dd900deb6ac6574a6))
+* **deps:** require proto-plus>=1.15.0 ([dfbd68a](https://github.com/googleapis/python-aiplatform/commit/dfbd68a79f1c892c4380405dd900deb6ac6574a6))
+* enforce bq SchemaField field_type and mode using feature value_type ([#1019](https://github.com/googleapis/python-aiplatform/issues/1019)) ([095bea2](https://github.com/googleapis/python-aiplatform/commit/095bea23bc15a490ddbb1a8edac7f5db626bc659))
+* Fix create_lit_model_from_endpoint not accepting models that don't return a dictionary. ([#1020](https://github.com/googleapis/python-aiplatform/issues/1020)) ([b9a057d](https://github.com/googleapis/python-aiplatform/commit/b9a057d001deb8727cb725d44bb5528dce330653))
+* loosen assertions for system test featurestore ([#1040](https://github.com/googleapis/python-aiplatform/issues/1040)) ([2ba404f](https://github.com/googleapis/python-aiplatform/commit/2ba404f8bfbccd7a18ef613417912ed94882c4bd))
+* remove empty scripts kwarg in setup.py ([#1014](https://github.com/googleapis/python-aiplatform/issues/1014)) ([ef3fcc8](https://github.com/googleapis/python-aiplatform/commit/ef3fcc86fb3808b37706470c8c49903ec3a302fb))
+* show logs when TFX pipelines are submitted ([#976](https://github.com/googleapis/python-aiplatform/issues/976)) ([c10923b](https://github.com/googleapis/python-aiplatform/commit/c10923b47b9b9941d14ae2c5398348d971a23f9d))
+* update system test_model_upload to use BUILD_SPECIFIC_GCP_PROJECT ([#1043](https://github.com/googleapis/python-aiplatform/issues/1043)) ([e7d2719](https://github.com/googleapis/python-aiplatform/commit/e7d27193f323f88f4238206ecb380d746d98df31))
+
+
+### Documentation
+
+* **samples:** add samples to create/delete featurestore ([#980](https://github.com/googleapis/python-aiplatform/issues/980)) ([5ee6354](https://github.com/googleapis/python-aiplatform/commit/5ee6354a12c6422015acb81caef32d6d2f52c838))
+* **samples:** added create feature and create entity type samples and tests ([#984](https://github.com/googleapis/python-aiplatform/issues/984)) ([d221e6b](https://github.com/googleapis/python-aiplatform/commit/d221e6bebd7fb98a8c6e3f3b8ae507f2f214128f))
+
+## [1.10.0](https://github.com/googleapis/python-aiplatform/compare/v1.9.0...v1.10.0) (2022-02-07)
+
+
+### Features
+
+* _TrainingScriptPythonPackager to support folders ([#812](https://github.com/googleapis/python-aiplatform/issues/812)) ([3aec6a7](https://github.com/googleapis/python-aiplatform/commit/3aec6a7b8f26ef2a5b378a6224d6402e3b42c917))
+* add dedicated_resources to DeployedIndex in aiplatform v1beta1 index_endpoint.proto feat: add Scaling to OnlineServingConfig in aiplatform v1beta1 featurestore.proto chore: sort imports ([#991](https://github.com/googleapis/python-aiplatform/issues/991)) ([7a7f0d4](https://github.com/googleapis/python-aiplatform/commit/7a7f0d45f3d08c93b11fcd2c5a265a8db4b0c890))
+* add dedicated_resources to DeployedIndex message in aiplatform v1 index_endpoint.proto chore: sort imports ([#990](https://github.com/googleapis/python-aiplatform/issues/990)) ([a814923](https://github.com/googleapis/python-aiplatform/commit/a8149233bcd857e75700c6ec7d29c0aabf1687c1))
+* Add XAI SDK integration to TensorFlow models with LIT integration ([#917](https://github.com/googleapis/python-aiplatform/issues/917)) ([ea2b5cf](https://github.com/googleapis/python-aiplatform/commit/ea2b5cfbcafead1c63009fda10bd44a00d560efb))
+* Added `aiplatform.Model.update` method ([#952](https://github.com/googleapis/python-aiplatform/issues/952)) ([44e208a](https://github.com/googleapis/python-aiplatform/commit/44e208a8dbf082e770373d58c31b3ad3e8b39f4f))
+* Enable europe-west6 and northamerica-northeast2 regions ([0f6b670](https://github.com/googleapis/python-aiplatform/commit/0f6b6701e96fb0ec345e81560d03059a7900160f))
+* enable feature store batch serve to BigQuery and GCS for csv and tfrecord ([#919](https://github.com/googleapis/python-aiplatform/issues/919)) ([c840728](https://github.com/googleapis/python-aiplatform/commit/c840728e503eea3300e9629405978e28c6aafec7))
+* enable feature store batch serve to Pandas DataFrame; fix: read instances uri for batch serve ([#983](https://github.com/googleapis/python-aiplatform/issues/983)) ([e0fec36](https://github.com/googleapis/python-aiplatform/commit/e0fec36686e373c13acca3203372572c760c7af4))
+* enable feature store online serving ([#918](https://github.com/googleapis/python-aiplatform/issues/918)) ([b8f5f82](https://github.com/googleapis/python-aiplatform/commit/b8f5f82ae43edfb933305a074c315e2f3239b4b1))
+* enable ingest from pd.DataFrame ([#977](https://github.com/googleapis/python-aiplatform/issues/977)) ([9289f2d](https://github.com/googleapis/python-aiplatform/commit/9289f2d3ce424f3f9754a3dd23883e25dec1300f))
+* Open LIT with a deployed model ([#963](https://github.com/googleapis/python-aiplatform/issues/963)) ([ea16849](https://github.com/googleapis/python-aiplatform/commit/ea16849f936d7a2e8402fd235decefe5972685ed))
+
+
+### Bug Fixes
+
+* Fixed BigQuery datasets that have colon in URI ([#855](https://github.com/googleapis/python-aiplatform/issues/855)) ([153578f](https://github.com/googleapis/python-aiplatform/commit/153578f19d57db96e3674b2d797c5352c107f936))
+* Fixed integration test for model.upload ([#975](https://github.com/googleapis/python-aiplatform/issues/975)) ([0ca3747](https://github.com/googleapis/python-aiplatform/commit/0ca374769f922fd427c5b6f58c9ce1ab40f18d18))
+* rename teardown fixture ([#1004](https://github.com/googleapis/python-aiplatform/issues/1004)) ([fcd0096](https://github.com/googleapis/python-aiplatform/commit/fcd00969dbbbf06887dfdbaa6bc65b135c24f95f))
+
+
+### Documentation
+
+* **samples:** replace deprecated fields in create_training_pipeline_tabular_forecasting_sample.py ([#981](https://github.com/googleapis/python-aiplatform/issues/981)) ([9ebc972](https://github.com/googleapis/python-aiplatform/commit/9ebc972bba972b1e1920db422ed28a721e90329d))
+
+## [1.9.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.8.1...v1.9.0) (2021-12-29)
+
+
+### Features
+
+* add create in Featurestore, EntityType, Feature; add create_entity_type in Featurestore; add create_feature, batch_create_features in EntityType; add ingest_from_* for bq and gcs in EntityType; add and update delete with force delete nested resources ([#872](https://www.github.com/googleapis/python-aiplatform/issues/872)) ([ba11c3d](https://www.github.com/googleapis/python-aiplatform/commit/ba11c3d3cd8d3869e2deb3207a8698fa7ce284ec))
+* Add LIT methods for Pandas DataFrame and TensorFlow saved model. ([#874](https://www.github.com/googleapis/python-aiplatform/issues/874)) ([03cf301](https://www.github.com/googleapis/python-aiplatform/commit/03cf301989a5802b122803eac7a2d03f2d1769fb))
+* Add support to create TensorboardExperiment ([#909](https://www.github.com/googleapis/python-aiplatform/issues/909)) ([96ce738](https://www.github.com/googleapis/python-aiplatform/commit/96ce7387ac58e0ec7cb6a7f6d6a6e422eae5da96))
+* Add support to create TensorboardRun ([#912](https://www.github.com/googleapis/python-aiplatform/issues/912)) ([8df74a2](https://www.github.com/googleapis/python-aiplatform/commit/8df74a29df0adb95fff5500fcc9d7a025012ab5e))
+
+
+### Bug Fixes
+
+* Fix timestamp proto util to default to timestamp at call time. ([#933](https://www.github.com/googleapis/python-aiplatform/issues/933)) ([d72a254](https://www.github.com/googleapis/python-aiplatform/commit/d72a254e97cf74f3fdd55a32a4af86737243593a))
+* Improve handling of undeploying model without redistributing remaining traffic ([#898](https://www.github.com/googleapis/python-aiplatform/issues/898)) ([8a8a4fa](https://www.github.com/googleapis/python-aiplatform/commit/8a8a4faa667bde2a4df04afa23a6dd5b1856f958))
+* issues/192254729 ([#914](https://www.github.com/googleapis/python-aiplatform/issues/914)) ([3ec620c](https://www.github.com/googleapis/python-aiplatform/commit/3ec620c64bd60ceb5b89918200e11e3fbff67370))
+* issues/192254729 ([#915](https://www.github.com/googleapis/python-aiplatform/issues/915)) ([0f22ff6](https://www.github.com/googleapis/python-aiplatform/commit/0f22ff61460a3f2bd55d2c10c4ee06e582f03944))
+* use open_in_new_tab in the render method. ([#926](https://www.github.com/googleapis/python-aiplatform/issues/926)) ([04618e0](https://www.github.com/googleapis/python-aiplatform/commit/04618e0563b8588eec2ccd8342c6085ca08b5adb))
+
+## [1.8.1](https://www.github.com/googleapis/python-aiplatform/compare/v1.8.0...v1.8.1) (2021-12-14)
+
+
+### Bug Fixes
+
+* add clarity to param model_name ([#888](https://www.github.com/googleapis/python-aiplatform/issues/888)) ([1d81783](https://www.github.com/googleapis/python-aiplatform/commit/1d81783b2f914dd7606ee884ca31c1a594e5135f))
+* add clarity to parameters per user feedback ([#886](https://www.github.com/googleapis/python-aiplatform/issues/886)) ([37ee0a1](https://www.github.com/googleapis/python-aiplatform/commit/37ee0a1dc6e0105e19aca18f44995a352bfc40cb))
+* add param for multi-label per user's feedback ([#887](https://www.github.com/googleapis/python-aiplatform/issues/887)) ([fda942f](https://www.github.com/googleapis/python-aiplatform/commit/fda942ffbe009077b47f36aad1c29603a451e38b))
+* add support for API base path overriding ([#908](https://www.github.com/googleapis/python-aiplatform/issues/908)) ([45c4086](https://www.github.com/googleapis/python-aiplatform/commit/45c4086dd07dd7d3d3b7417196ff61a7107d8a1a))
+* Important the correct constants and use v1 for tensorboard experiments ([#905](https://www.github.com/googleapis/python-aiplatform/issues/905)) ([48c2bf1](https://www.github.com/googleapis/python-aiplatform/commit/48c2bf1ea2fa42afea1b5d419527bfb8e49e0ac0))
+* incorrect uri for IOD yaml ([#889](https://www.github.com/googleapis/python-aiplatform/issues/889)) ([e108ef8](https://www.github.com/googleapis/python-aiplatform/commit/e108ef8250c77c8a8edeccb6b601cbe0b0380c89))
+* Minor docstring and snippet fixes ([#873](https://www.github.com/googleapis/python-aiplatform/issues/873)) ([578e06d](https://www.github.com/googleapis/python-aiplatform/commit/578e06df481c3d60074a7b8e9365f8361b04e32b))
+
+
+### Documentation
+
+* Update references to containers and notebook samples. ([#890](https://www.github.com/googleapis/python-aiplatform/issues/890)) ([67fa1f1](https://www.github.com/googleapis/python-aiplatform/commit/67fa1f179af66686339d797e5b368e96816ed1c5))
+* Updated docstrings with exception error classes ([#894](https://www.github.com/googleapis/python-aiplatform/issues/894)) ([f9aecd2](https://www.github.com/googleapis/python-aiplatform/commit/f9aecd22fe08a97e45187b4d11c755ac3b9dfadd))
+
+## [1.8.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.7.1...v1.8.0) (2021-12-03)
+
+
+### Features
+
+* Add cloud profiler to training_utils ([6d5c7c4](https://www.github.com/googleapis/python-aiplatform/commit/6d5c7c42d1c352f161e4738c6dbbf540a032017b))
+* add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method... ([#878](https://www.github.com/googleapis/python-aiplatform/issues/878)) ([ca813be](https://www.github.com/googleapis/python-aiplatform/commit/ca813be08ec2620380b5a12b0d6cdc079e27ba79))
+* add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method... ([#879](https://www.github.com/googleapis/python-aiplatform/issues/879)) ([47e93b2](https://www.github.com/googleapis/python-aiplatform/commit/47e93b20843f30805b73cd6db214c8743f8bfc97))
+* add featurestore module including Featurestore, EntityType, and Feature classes; add get, update, delete, list methods in all featurestore classes; add search method in Feature class ([#850](https://www.github.com/googleapis/python-aiplatform/issues/850)) ([66745a6](https://www.github.com/googleapis/python-aiplatform/commit/66745a6ce13fb8b32dd7fbf3eb86e71bd291869b))
+* Add prediction container URI builder method ([#805](https://www.github.com/googleapis/python-aiplatform/issues/805)) ([91dd3c0](https://www.github.com/googleapis/python-aiplatform/commit/91dd3c0d5de72fac5b1dc8a9bc23d6cb431061a4))
+* default to custom job display name if experiment name looks like a custom job ID ([#833](https://www.github.com/googleapis/python-aiplatform/issues/833)) ([8b9376e](https://www.github.com/googleapis/python-aiplatform/commit/8b9376e9c961a751799f5b80d1b19917c8c353f8))
+* Support uploading local models ([#779](https://www.github.com/googleapis/python-aiplatform/issues/779)) ([bffbd9d](https://www.github.com/googleapis/python-aiplatform/commit/bffbd9d359edb099e661736a0c77269bb3a0c746))
+* Tensorboard v1 protos release ([#847](https://www.github.com/googleapis/python-aiplatform/issues/847)) ([e0fc3d9](https://www.github.com/googleapis/python-aiplatform/commit/e0fc3d9e4e8a7911f21671ea49818c5f84798d12))
+* updating Tensorboard related code to use v1 ([#851](https://www.github.com/googleapis/python-aiplatform/issues/851)) ([b613b26](https://www.github.com/googleapis/python-aiplatform/commit/b613b264524aaab2cb65e63a5487770736faa7c8))
+* Upgrade Tensorboard from v1beta1 to v1 ([#849](https://www.github.com/googleapis/python-aiplatform/issues/849)) ([c40ec85](https://www.github.com/googleapis/python-aiplatform/commit/c40ec85e1fca2bee6813f52712d063f96264ec2c))
+
+
+### Bug Fixes
+
+* Import error for cloud_profiler ([#869](https://www.github.com/googleapis/python-aiplatform/issues/869)) ([0f124e9](https://www.github.com/googleapis/python-aiplatform/commit/0f124e93a1ddead16c0018970f34e45c73d5ed81))
+* Support multiple instances in custom predict sample ([#857](https://www.github.com/googleapis/python-aiplatform/issues/857)) ([8cb4839](https://www.github.com/googleapis/python-aiplatform/commit/8cb483918bdbaeae34935eef2b3cd997c1ae89a3))
+
+
+### Documentation
+
+* Added comment for evaluation_id to python examples ([#860](https://www.github.com/googleapis/python-aiplatform/issues/860)) ([004bf5f](https://www.github.com/googleapis/python-aiplatform/commit/004bf5fa4cb2d66e36de7ec52dee8e2c8dd438ee))
+* Reverted IDs in model_service snippets test ([#871](https://www.github.com/googleapis/python-aiplatform/issues/871)) ([da747b5](https://www.github.com/googleapis/python-aiplatform/commit/da747b5ffca3c12b8d64bc80bfe93da5afde0d43))
+* Update name of BQ source parameter in samples ([#859](https://www.github.com/googleapis/python-aiplatform/issues/859)) ([f11b598](https://www.github.com/googleapis/python-aiplatform/commit/f11b598f9069f77e86631ada53941876aea010bc))
+
+## [1.7.1](https://www.github.com/googleapis/python-aiplatform/compare/v1.7.0...v1.7.1) (2021-11-16)
+
+
+### Features
+
+* Add support for new Vertex regions ([#811](https://www.github.com/googleapis/python-aiplatform/issues/811)) ([8d04138](https://www.github.com/googleapis/python-aiplatform/commit/8d0413880486d03314ecab80347a713318c6944a))
+
+
+### Bug Fixes
+
+* add parameters_value in PipelineJob for schema > 2.0.0 ([#817](https://www.github.com/googleapis/python-aiplatform/issues/817)) ([900a449](https://www.github.com/googleapis/python-aiplatform/commit/900a44962ac85608dbcb3d23049db160d49d842a))
+* exclude support for python 3.10 ([#831](https://www.github.com/googleapis/python-aiplatform/issues/831)) ([0301a1d](https://www.github.com/googleapis/python-aiplatform/commit/0301a1de5719031c6c826fe4887ff5fb6bcfa956))
+
+
+### Miscellaneous Chores
+
+* release 1.7.1 ([#845](https://www.github.com/googleapis/python-aiplatform/issues/845)) ([ca04de6](https://www.github.com/googleapis/python-aiplatform/commit/ca04de6a95f8b22d0161e250d8d4314a35becfab))
+
+## [1.7.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.6.2...v1.7.0) (2021-11-06)
+
+
+### Features
+
+* Adds support for `google.protobuf.Value` pipeline parameters in the `parameter_values` field ([#807](https://www.github.com/googleapis/python-aiplatform/issues/807)) ([c97199d](https://www.github.com/googleapis/python-aiplatform/commit/c97199dd2cb712ef436ee9cbf6b8add27b42b174))
+* Adds support for `google.protobuf.Value` pipeline parameters in the `parameter_values` field ([#808](https://www.github.com/googleapis/python-aiplatform/issues/808)) ([726b620](https://www.github.com/googleapis/python-aiplatform/commit/726b620bea1223c80225c9a3c2b54342e9c14052))
+* PipelineJob switch to v1 API from v1beta1 API ([#750](https://www.github.com/googleapis/python-aiplatform/issues/750)) ([8db7e0c](https://www.github.com/googleapis/python-aiplatform/commit/8db7e0ca4e796fea47c1bdf4c0fccd514f2dd8c2))
+
+
+### Bug Fixes
+
+* Correct PipelineJob credentials description ([#816](https://www.github.com/googleapis/python-aiplatform/issues/816)) ([49aaa87](https://www.github.com/googleapis/python-aiplatform/commit/49aaa8719a3daabf7e0d23fa1cd1d64c19159a83))
+* Fixed docstrings for Dataset in AutoMLForecastingTrainingJob ([760887b](https://www.github.com/googleapis/python-aiplatform/commit/760887b196884707473896def9e8b69c9fc77423))
+
+
+### Documentation
+
+* Fix pydocs README to be consistent with repo README ([#821](https://www.github.com/googleapis/python-aiplatform/issues/821)) ([95dbd60](https://www.github.com/googleapis/python-aiplatform/commit/95dbd6020ee8f3037b0834eb39312b5d7e5fd8e1))
+* Update sample with feedback from b/191251050 ([#818](https://www.github.com/googleapis/python-aiplatform/issues/818)) ([6b2d938](https://www.github.com/googleapis/python-aiplatform/commit/6b2d93834734b6789c13ef3782b1b3632f5c6133))
+
+## [1.6.2](https://www.github.com/googleapis/python-aiplatform/compare/v1.6.1...v1.6.2) (2021-11-01)
+
+
+### Features
+
+* Add PipelineJob.submit to create PipelineJob without monitoring it's completion. ([#798](https://www.github.com/googleapis/python-aiplatform/issues/798)) ([7ab05d5](https://www.github.com/googleapis/python-aiplatform/commit/7ab05d5e127636d96365b7ea408974ccd6c2f0fe))
+* support new protobuf value param types for Pipeline Job client ([#797](https://www.github.com/googleapis/python-aiplatform/issues/797)) ([2fc05ca](https://www.github.com/googleapis/python-aiplatform/commit/2fc05cab03a2c7f8462b234b02d43bc7581ba845))
+
+
+### Bug Fixes
+
+* Add retries when polling during monitoring runs ([#786](https://www.github.com/googleapis/python-aiplatform/issues/786)) ([45401c0](https://www.github.com/googleapis/python-aiplatform/commit/45401c09f23ed616a7ca84b3d7f53b8a1db21c7c))
+* use version.py for versioning ([#804](https://www.github.com/googleapis/python-aiplatform/issues/804)) ([514031f](https://www.github.com/googleapis/python-aiplatform/commit/514031fce90b6e4606279d4903dc93b0f18b9f2a))
+* Widen system test timeout, handle tearing down failed training pipelines ([#791](https://www.github.com/googleapis/python-aiplatform/issues/791)) ([78879e2](https://www.github.com/googleapis/python-aiplatform/commit/78879e2482cac7ef5520f1d7fe900768147b948e))
+
+
+### Miscellaneous Chores
+
+* release 1.6.2 ([#809](https://www.github.com/googleapis/python-aiplatform/issues/809)) ([e50b049](https://www.github.com/googleapis/python-aiplatform/commit/e50b0497574411a9c7462d76dca489281ee48d83))
+
+## [1.6.1](https://www.github.com/googleapis/python-aiplatform/compare/v1.6.0...v1.6.1) (2021-10-25)
+
+
+### Features
+
+* Add debugging terminal support for CustomJob, HyperparameterTun… ([#699](https://www.github.com/googleapis/python-aiplatform/issues/699)) ([2deb505](https://www.github.com/googleapis/python-aiplatform/commit/2deb50502ae2bb8ba3f97d69b06b72b7625639a4))
+* add support for python 3.10 ([#769](https://www.github.com/googleapis/python-aiplatform/issues/769)) ([8344804](https://www.github.com/googleapis/python-aiplatform/commit/83448044508f5feb052ae7fc5a5a7ca917cee0d1))
+* Add training_utils folder and environment_variables for training ([141c008](https://www.github.com/googleapis/python-aiplatform/commit/141c008759aefe56a41e1eac654739c509d9754d))
+* enable reduction server ([#741](https://www.github.com/googleapis/python-aiplatform/issues/741)) ([8ef0ded](https://www.github.com/googleapis/python-aiplatform/commit/8ef0ded034db797adb4d458eba43537992d822bd))
+* enabling AutoML Forecasting training response to include BigQuery location of exported evaluated examples ([#657](https://www.github.com/googleapis/python-aiplatform/issues/657)) ([c1c2326](https://www.github.com/googleapis/python-aiplatform/commit/c1c2326b2342ab1b6f4c4ce3852e63376eae740d))
+* **PipelineJob:** allow PipelineSpec as param ([#774](https://www.github.com/googleapis/python-aiplatform/issues/774)) ([f90a1bd](https://www.github.com/googleapis/python-aiplatform/commit/f90a1bd775daa0892e16fd82fc1738fa9a912ec7))
+* pre batch creating TensorboardRuns and TensorboardTimeSeries in one_shot mode to speed up uploading ([#772](https://www.github.com/googleapis/python-aiplatform/issues/772)) ([c9f68c6](https://www.github.com/googleapis/python-aiplatform/commit/c9f68c6e840ba3cda04080623dfbcba6945d53e8))
+
+
+### Bug Fixes
+
+* cast resource labels to dict type ([#783](https://www.github.com/googleapis/python-aiplatform/issues/783)) ([255edc9](https://www.github.com/googleapis/python-aiplatform/commit/255edc92dc897619ddd705463aefb8a1723ae8cd))
+* Remove sync parameter from create_endpoint_sample ([#695](https://www.github.com/googleapis/python-aiplatform/issues/695)) ([0477f5a](https://www.github.com/googleapis/python-aiplatform/commit/0477f5a10ba1048e64c11fc3d7e1e375b19a10fe))
+
+
+### Miscellaneous Chores
+
+* release 1.6.1 ([#789](https://www.github.com/googleapis/python-aiplatform/issues/789)) ([4520d35](https://www.github.com/googleapis/python-aiplatform/commit/4520d350beb756549304de60d62ff637bb1807c5))
+
+## [1.6.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.5.0...v1.6.0) (2021-10-12)
+
+
+### Features
+
+* add featurestore service to aiplatform v1 ([#765](https://www.github.com/googleapis/python-aiplatform/issues/765)) ([68c88e4](https://www.github.com/googleapis/python-aiplatform/commit/68c88e48f62d5c2ff561862ba810a48389f7e41a))
+* Add one shot profile uploads to tensorboard uploader. ([#704](https://www.github.com/googleapis/python-aiplatform/issues/704)) ([a83f253](https://www.github.com/googleapis/python-aiplatform/commit/a83f2535b31e2aaff0306c7290265b864b9ddb40))
+* Added column_specs, training_encryption_spec_key_name, model_encryption_spec_key_name to AutoMLForecastingTrainingJob.init and various split methods to AutoMLForecastingTrainingJob.run ([#647](https://www.github.com/googleapis/python-aiplatform/issues/647)) ([7cb6976](https://www.github.com/googleapis/python-aiplatform/commit/7cb69764e0f9be9ca0fcb1641f4dc90e3b306bed))
+* Lazy load Endpoint class ([#655](https://www.github.com/googleapis/python-aiplatform/issues/655)) ([c795c6f](https://www.github.com/googleapis/python-aiplatform/commit/c795c6fbb87c4f71845cfbd2647c1adbc029bcef))
+
+## [1.5.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.4.3...v1.5.0) (2021-09-30)
+
+
+### Features
+
+* Add data plane code snippets for feature store service ([#713](https://www.github.com/googleapis/python-aiplatform/issues/713)) ([e3ea683](https://www.github.com/googleapis/python-aiplatform/commit/e3ea683bf754832340853a15bdb0a0662500a70f))
+* add flaky test diagnostic script ([#734](https://www.github.com/googleapis/python-aiplatform/issues/734)) ([09e48de](https://www.github.com/googleapis/python-aiplatform/commit/09e48de8b79fb5d601169663c9a8e1c33883f1cf))
+* add vizier service to aiplatform v1 BUILD.bazel ([#731](https://www.github.com/googleapis/python-aiplatform/issues/731)) ([1a580ae](https://www.github.com/googleapis/python-aiplatform/commit/1a580aec158b5e25b94f27a6a9daa3943124c485))
+* code snippets for feature store control plane ([#709](https://www.github.com/googleapis/python-aiplatform/issues/709)) ([8e06ced](https://www.github.com/googleapis/python-aiplatform/commit/8e06ced83ed2cc480d869318c4debef9c28ad214))
+* Updating the Tensorboard uploader to use the new batch write API so it runs more efficiently ([#710](https://www.github.com/googleapis/python-aiplatform/issues/710)) ([9d1b01a](https://www.github.com/googleapis/python-aiplatform/commit/9d1b01a91dc077bfe8edf023216b65b826d67d5f))
+
+
+### Bug Fixes
+
+* [#677](https://www.github.com/googleapis/python-aiplatform/issues/677) ([#728](https://www.github.com/googleapis/python-aiplatform/issues/728)) ([7f548e4](https://www.github.com/googleapis/python-aiplatform/commit/7f548e4b5322055a3c2befcdc9d4eef1bc2278ca))
+* **PipelineJob:** use name as output only field ([#719](https://www.github.com/googleapis/python-aiplatform/issues/719)) ([1c84464](https://www.github.com/googleapis/python-aiplatform/commit/1c84464e3130f9db81cd341306b334f9a490587f))
+* use the project id from BQ dataset instead of the default project id ([#717](https://www.github.com/googleapis/python-aiplatform/issues/717)) ([e87a255](https://www.github.com/googleapis/python-aiplatform/commit/e87a255705a5d04ade79f12c706dc842c0228118))
+
+## [1.4.3](https://www.github.com/googleapis/python-aiplatform/compare/v1.4.2...v1.4.3) (2021-09-17)
+
+
+### Features
+
+* **PipelineJob:** support dict, list, bool typed input parameters fr… ([#693](https://www.github.com/googleapis/python-aiplatform/issues/693)) ([243b75c](https://www.github.com/googleapis/python-aiplatform/commit/243b75c2655beeef47848410a40d86a072428ac3))
+
+
+### Bug Fixes
+
+* Update milli node_hours for image training ([#663](https://www.github.com/googleapis/python-aiplatform/issues/663)) ([64768c3](https://www.github.com/googleapis/python-aiplatform/commit/64768c3591f648932e023713d2a728ce5318bb8b))
+* XAI Metadata compatibility with Model.upload ([#705](https://www.github.com/googleapis/python-aiplatform/issues/705)) ([f0570cb](https://www.github.com/googleapis/python-aiplatform/commit/f0570cb999f024ca96e7daaa102c81b681c2a575))
+
+
+### Miscellaneous Chores
+
+* release 1.4.3 ([#715](https://www.github.com/googleapis/python-aiplatform/issues/715)) ([b610486](https://www.github.com/googleapis/python-aiplatform/commit/b6104868161a236fc5585855b5948a5e3294aea2))
+
+## [1.4.2](https://www.github.com/googleapis/python-aiplatform/compare/v1.4.1...v1.4.2) (2021-09-10)
+
+
+### Features
+
+* add explanation metadata `get_metadata_protobuf` for reuse ([#672](https://www.github.com/googleapis/python-aiplatform/issues/672)) ([efb6d18](https://www.github.com/googleapis/python-aiplatform/commit/efb6d18f868086bc53aceab60942eb837ced65b7))
+
+
+## [1.4.1](https://www.github.com/googleapis/python-aiplatform/compare/v1.4.0...v1.4.1) (2021-09-07)
+
+
+### Features
+
+* add prediction service RPC RawPredict to aiplatform_v1beta1 feat: add tensorboard service RPCs to aiplatform_v1beta1: BatchCreateTensorboardRuns, BatchCreateTensorboardTimeSeries, WriteTensorboardExperimentData feat: add model_deployment_monitori... ([#670](https://www.github.com/googleapis/python-aiplatform/issues/670)) ([b73cd94](https://www.github.com/googleapis/python-aiplatform/commit/b73cd9485f8713ac42e7efa9bfd952f67368b778))
+* add Vizier service to aiplatform v1 ([#671](https://www.github.com/googleapis/python-aiplatform/issues/671)) ([179150a](https://www.github.com/googleapis/python-aiplatform/commit/179150aed80d1386993a07870fe34f2b637ded18))
+* add XAI, model monitoring, and index services to aiplatform v1 ([#668](https://www.github.com/googleapis/python-aiplatform/issues/668)) ([1fbce55](https://www.github.com/googleapis/python-aiplatform/commit/1fbce55fd846f473f41c16c1185be893e2376bdd))
+* Update tensorboard uploader to use Dispatcher for handling different event types ([#651](https://www.github.com/googleapis/python-aiplatform/issues/651)) ([d20b520](https://www.github.com/googleapis/python-aiplatform/commit/d20b520ea936a6554a24099beb0e044f237ff741)), closes [#519](https://www.github.com/googleapis/python-aiplatform/issues/519)
+
+
+### Documentation
+
+* Add code sample for Pipelines ([#684](https://www.github.com/googleapis/python-aiplatform/issues/684)) ([4f0c18e](https://www.github.com/googleapis/python-aiplatform/commit/4f0c18e8989cf353019876a73aa57457332e88fb))
+
+## [1.4.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.3.0...v1.4.0) (2021-08-30)
+
+
+### Features
+
+* add filter and timestamp splits ([#627](https://www.github.com/googleapis/python-aiplatform/issues/627)) ([1a13577](https://www.github.com/googleapis/python-aiplatform/commit/1a135775966c8a2303ded529eba514dcf9db7205))
+* add labels to all resource creation apis ([#601](https://www.github.com/googleapis/python-aiplatform/issues/601)) ([4e7666a](https://www.github.com/googleapis/python-aiplatform/commit/4e7666a30b4472698ed980d9d746ba85ad4142d8))
+* add PipelineJob.list ([a58ea82](https://www.github.com/googleapis/python-aiplatform/commit/a58ea826c575b9b0c8cb69e47fc2f07a98bb285b))
+* add support for export_evaluated_data_items_config in AutoMLTab… ([#583](https://www.github.com/googleapis/python-aiplatform/issues/583)) ([2a6b0a3](https://www.github.com/googleapis/python-aiplatform/commit/2a6b0a369296698f79d75e93007e4c7319f3523c))
+* add util functions to get URLs for Tensorboard web app. ([#635](https://www.github.com/googleapis/python-aiplatform/issues/635)) ([8d88c00](https://www.github.com/googleapis/python-aiplatform/commit/8d88c006c5586b28d340448382a9292543448fd6))
+* Add wait_for_resource_creation to BatchPredictionJob and unblock async creation when model is pending creation. ([#660](https://www.github.com/googleapis/python-aiplatform/issues/660)) ([db580ad](https://www.github.com/googleapis/python-aiplatform/commit/db580ad43e97e0d877c29c0e8c077c37dee33ff3))
+* Added the VertexAiResourceNoun.to_dict() method ([#588](https://www.github.com/googleapis/python-aiplatform/issues/588)) ([b478075](https://www.github.com/googleapis/python-aiplatform/commit/b478075efb05553760514256fee9a63126a9916f))
+* expose base_output_dir for custom job ([#586](https://www.github.com/googleapis/python-aiplatform/issues/586)) ([2f138d1](https://www.github.com/googleapis/python-aiplatform/commit/2f138d1dfe4959d1b5f53a9dfef90a18de9908ec))
+* expose boot disk type and size for CustomTrainingJob, CustomPythonPackageTrainingJob, and CustomContainerTrainingJob ([#602](https://www.github.com/googleapis/python-aiplatform/issues/602)) ([355ea24](https://www.github.com/googleapis/python-aiplatform/commit/355ea24c6dd9b061ae0933df4dd07dd5b8c2232b))
+* split GAPIC samples by service ([#599](https://www.github.com/googleapis/python-aiplatform/issues/599)) ([5f15b4f](https://www.github.com/googleapis/python-aiplatform/commit/5f15b4f9a4bad2c9447747a8bdebaa99eab00b75))
+
+
+### Bug Fixes
+
+* Fixed bug in TabularDataset.column_names ([#590](https://www.github.com/googleapis/python-aiplatform/issues/590)) ([0fbcd59](https://www.github.com/googleapis/python-aiplatform/commit/0fbcd592cd7e9c4b0a131d777fa84e592a43a21c))
+* pipeline none values ([#649](https://www.github.com/googleapis/python-aiplatform/issues/649)) ([2f89343](https://www.github.com/googleapis/python-aiplatform/commit/2f89343adbd69610fc5cacc7121119fc7279186e))
+* Populate service_account and network in PipelineJob instead of pipeline_spec ([#658](https://www.github.com/googleapis/python-aiplatform/issues/658)) ([8fde2ce](https://www.github.com/googleapis/python-aiplatform/commit/8fde2ce4441139784bc0fdd62c88d4b833018765))
+* re-remove extra TB dependencies introduced due to merge conflict ([#593](https://www.github.com/googleapis/python-aiplatform/issues/593)) ([433b94a](https://www.github.com/googleapis/python-aiplatform/commit/433b94a78004de6d3a4726317d8bac32c358ace8))
+* Update BatchPredictionJob.iter_outputs() and BQ docstrings ([#631](https://www.github.com/googleapis/python-aiplatform/issues/631)) ([28f32fd](https://www.github.com/googleapis/python-aiplatform/commit/28f32fd11470ad86d2f103346b3e6be8f1adc2d8))
+
+## [1.3.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.2.0...v1.3.0) (2021-07-30)
+
+
+### Features
+
+* add get method for PipelineJob ([#561](https://www.github.com/googleapis/python-aiplatform/issues/561)) ([fe5e6e4](https://www.github.com/googleapis/python-aiplatform/commit/fe5e6e4576a6c8c73549effae99bced709e29402))
+* add Samples section to CONTRIBUTING.rst ([#558](https://www.github.com/googleapis/python-aiplatform/issues/558)) ([d35c466](https://www.github.com/googleapis/python-aiplatform/commit/d35c466e19ac9fa43b5668ce18520090b5e4edd9))
+* add tensorboard resource management ([#539](https://www.github.com/googleapis/python-aiplatform/issues/539)) ([6f8d3d1](https://www.github.com/googleapis/python-aiplatform/commit/6f8d3d1ed89f0aa6f2f0418ae752185104196c63))
+* add tf1 metadata builder ([#526](https://www.github.com/googleapis/python-aiplatform/issues/526)) ([918998c](https://www.github.com/googleapis/python-aiplatform/commit/918998c0bdc25b6a39d359a34f892dac1ca4efac))
+* add wait for creation and more informative exception when properties are not available ([#566](https://www.github.com/googleapis/python-aiplatform/issues/566)) ([e346117](https://www.github.com/googleapis/python-aiplatform/commit/e346117d5453358a32a1d6e584613ace5c2251d9))
+* Adds a new API method FindMostStableBuild ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds attribution_score_drift_threshold field ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds attribution_score_skew_thresholds field ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds BigQuery output table field to batch prediction job output config ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds CustomJob.enable_web_access field ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds CustomJob.web_access_uris field ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds Endpoint.network, Endpoint.private_endpoints fields and PrivateEndpoints message ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds Execution.State constants: CACHED and CANCELLED ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds Feature Store features ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds fields to Study message ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds IndexEndpoint.private_ip_ranges field ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds IndexEndpointService.deployed_index_id field ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds MetadataService.DeleteArtifact and DeleteExecution methods ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds ModelMonitoringObjectConfig.explanation_config field ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* Adds ModelMonitoringObjectConfig.ExplanationConfig message field ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+* column specs for tabular transformation ([#466](https://www.github.com/googleapis/python-aiplatform/issues/466)) ([71d0bd4](https://www.github.com/googleapis/python-aiplatform/commit/71d0bd4615b436eaa3ec3eade4445934552f1cb3))
+* enable_caching in PipelineJob to compile time settings ([#557](https://www.github.com/googleapis/python-aiplatform/issues/557)) ([c9da662](https://www.github.com/googleapis/python-aiplatform/commit/c9da662ec24709622bcc4a9e85d1938bead91923))
+* Removes breaking change from v1 version of AI Platform protos ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+
+
+### Bug Fixes
+
+* change default replica count to 1 for custom training job classes ([#579](https://www.github.com/googleapis/python-aiplatform/issues/579)) ([c24251f](https://www.github.com/googleapis/python-aiplatform/commit/c24251fdd230e73c2aadb4369266b78979a31015))
+* create pipeline job with user-specified job id ([#567](https://www.github.com/googleapis/python-aiplatform/issues/567)) ([df68ec3](https://www.github.com/googleapis/python-aiplatform/commit/df68ec3441eeb7670531f50aaed00df6f7e2a1a3))
+* **deps:** pin 'google-{api,cloud}-core', 'google-auth' to allow 2.x versions ([#556](https://www.github.com/googleapis/python-aiplatform/issues/556)) ([5d79795](https://www.github.com/googleapis/python-aiplatform/commit/5d797956737f2d0d4afa4d28fe1fa2f835992991))
+* enable self signed jwt for grpc ([#570](https://www.github.com/googleapis/python-aiplatform/issues/570)) ([6a99b12](https://www.github.com/googleapis/python-aiplatform/commit/6a99b125922b8fca7c997150b81b6925376e9d1d))
+
+
+### Documentation
+
+* fix spelling ([#565](https://www.github.com/googleapis/python-aiplatform/issues/565)) ([fe5c702](https://www.github.com/googleapis/python-aiplatform/commit/fe5c7020040fb0b3b558643b8bc3e12e76f4055f))
+
+## [1.2.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.1.1...v1.2.0) (2021-07-14)
+
+
+### Features
+
+* Add additional_experiments field to AutoMlTablesInputs ([#540](https://www.github.com/googleapis/python-aiplatform/issues/540)) ([96ee726](https://www.github.com/googleapis/python-aiplatform/commit/96ee7261d5c3ffac5598c618b7c7499fad34ab12))
+* add always_use_jwt_access ([#498](https://www.github.com/googleapis/python-aiplatform/issues/498)) ([6df4866](https://www.github.com/googleapis/python-aiplatform/commit/6df48663286db10b1b88f947fc5873a18084cf37))
+* add explain get_metadata function for tf2. ([#507](https://www.github.com/googleapis/python-aiplatform/issues/507)) ([f6f9a97](https://www.github.com/googleapis/python-aiplatform/commit/f6f9a97bb178d9859b8d43166a43792d88e57710))
+* Add structure for XAI explain (from XAI SDK) ([#502](https://www.github.com/googleapis/python-aiplatform/issues/502)) ([cb9ef11](https://www.github.com/googleapis/python-aiplatform/commit/cb9ef1115e58c230f3d009397a6e6a27fd376bed))
+* Add two new ModelType constants for Video Action Recognition training jobs ([96ee726](https://www.github.com/googleapis/python-aiplatform/commit/96ee7261d5c3ffac5598c618b7c7499fad34ab12))
+* Adds AcceleratorType.NVIDIA_TESLA_A100 constant ([f3a3d03](https://www.github.com/googleapis/python-aiplatform/commit/f3a3d03c8509dc49c24139155a572dacbe954f66))
+* Adds additional_experiments field to AutoMlForecastingInputs ([8077b3d](https://www.github.com/googleapis/python-aiplatform/commit/8077b3d728b6e168c8aad41291dd90144ab75633))
+* Adds additional_experiments field to AutoMlTablesInputs ([#544](https://www.github.com/googleapis/python-aiplatform/issues/544)) ([8077b3d](https://www.github.com/googleapis/python-aiplatform/commit/8077b3d728b6e168c8aad41291dd90144ab75633))
+* Adds AutoscalingMetricSpec message ([f3a3d03](https://www.github.com/googleapis/python-aiplatform/commit/f3a3d03c8509dc49c24139155a572dacbe954f66))
+* Adds BigQuery output table field to batch prediction job output config ([f3a3d03](https://www.github.com/googleapis/python-aiplatform/commit/f3a3d03c8509dc49c24139155a572dacbe954f66))
+* Adds fields to Study message ([f3a3d03](https://www.github.com/googleapis/python-aiplatform/commit/f3a3d03c8509dc49c24139155a572dacbe954f66))
+* Adds JobState.JOB_STATE_EXPIRED constant ([f3a3d03](https://www.github.com/googleapis/python-aiplatform/commit/f3a3d03c8509dc49c24139155a572dacbe954f66))
+* Adds PipelineService methods for Create, Get, List, Delete, Cancel ([f3a3d03](https://www.github.com/googleapis/python-aiplatform/commit/f3a3d03c8509dc49c24139155a572dacbe954f66))
+* Adds two new ModelType constants for Video Action Recognition training jobs ([8077b3d](https://www.github.com/googleapis/python-aiplatform/commit/8077b3d728b6e168c8aad41291dd90144ab75633))
+* Removes AcceleratorType.TPU_V2 and TPU_V3 constants ([#543](https://www.github.com/googleapis/python-aiplatform/issues/543)) ([f3a3d03](https://www.github.com/googleapis/python-aiplatform/commit/f3a3d03c8509dc49c24139155a572dacbe954f66))
+
+
+### Bug Fixes
+
+* Handle nested fields from BigQuery source when getting default column_names ([#522](https://www.github.com/googleapis/python-aiplatform/issues/522)) ([3fc1d44](https://www.github.com/googleapis/python-aiplatform/commit/3fc1d44ac0acbb4f58088e7eeb16d85818af1125))
+* log pipeline completion and raise pipeline failures ([#523](https://www.github.com/googleapis/python-aiplatform/issues/523)) ([2508fe9](https://www.github.com/googleapis/python-aiplatform/commit/2508fe9d8a75ac8b05f06a78589c657313bd1d3d))
+* making the uploader depend on tensorflow-proper ([#499](https://www.github.com/googleapis/python-aiplatform/issues/499)) ([b95e040](https://www.github.com/googleapis/python-aiplatform/commit/b95e0406566879e8f71cefda72b41dc6fe4e578f))
+* Set prediction client when listing Endpoints ([#512](https://www.github.com/googleapis/python-aiplatform/issues/512)) ([95639ee](https://www.github.com/googleapis/python-aiplatform/commit/95639ee1c2c9cb66624265383d4d27bed3ff7dbd))
+
+## [1.1.1](https://www.github.com/googleapis/python-aiplatform/compare/v1.1.0...v1.1.1) (2021-06-22)
+
+
+### Features
+
+* add cancel method to pipeline client ([#488](https://www.github.com/googleapis/python-aiplatform/issues/488)) ([3b19fff](https://www.github.com/googleapis/python-aiplatform/commit/3b19fff399b85c92e661eb83a48a4c6636423518))
+
+
+### Bug Fixes
+
+* check if training_task_metadata is populated before logging backingCustomJob ([#494](https://www.github.com/googleapis/python-aiplatform/issues/494)) ([2e627f8](https://www.github.com/googleapis/python-aiplatform/commit/2e627f876e1d7dd03e5d6bd2e81e6234e361a9df))
+
+
+### Documentation
+
+* omit mention of Python 2.7 in 'CONTRIBUTING.rst' ([#1127](https://www.github.com/googleapis/python-aiplatform/issues/1127)) ([#489](https://www.github.com/googleapis/python-aiplatform/issues/489)) ([cbc47f8](https://www.github.com/googleapis/python-aiplatform/commit/cbc47f862f291b00b85718498571e0c737cb26a6))
+
+
+### Miscellaneous Chores
+
+* release 1.1.1 ([1a38ce2](https://www.github.com/googleapis/python-aiplatform/commit/1a38ce2f9879e1c42c0c6b25b72bd4836e3a1f73))
+
+## [1.1.0](https://www.github.com/googleapis/python-aiplatform/compare/v1.0.1...v1.1.0) (2021-06-17)
+
+
+### Features
+
+* add aiplatform API Vizier service ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* add featurestore, index, metadata, monitoring, pipeline, and tensorboard services to aiplatform v1beta1 ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* add invalid_row_count to ImportFeatureValuesResponse and ImportFeatureValuesOperationMetadata ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* add pipeline client init and run to vertex AI ([1f1226f](https://www.github.com/googleapis/python-aiplatform/commit/1f1226fd8c745a7cd86c299fa0cfc2291947f3e7))
+* add tensorboard support for CustomTrainingJob, CustomContainerTrainingJob, CustomPythonPackageTrainingJob ([#462](https://www.github.com/googleapis/python-aiplatform/issues/462)) ([8cfd611](https://www.github.com/googleapis/python-aiplatform/commit/8cfd61179af06232173b91b4d9fd633028823624))
+* adds enhanced protos for time series forecasting ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* adds enhanced protos for time series forecasting ([#374](https://www.github.com/googleapis/python-aiplatform/issues/374)) ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* allow the prediction endpoint to be overridden ([#461](https://www.github.com/googleapis/python-aiplatform/issues/461)) ([c2cf612](https://www.github.com/googleapis/python-aiplatform/commit/c2cf61288326cad28ab474064b887687bc649d76))
+* AutoMlImageSegmentationInputs.ModelType adds MOBILE_TF_LOW_LATENCY constant ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* AutoMlVideoClassificationInputs.ModelType adds MOBILE_JETSON_VERSATILE_1 constant ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* Expose additional attributes into Vertex SDK to close gap with GAPIC ([#477](https://www.github.com/googleapis/python-aiplatform/issues/477)) ([572a27c](https://www.github.com/googleapis/python-aiplatform/commit/572a27c7929e5686b61950e09e17134564987d50))
+* ImageSegmentationPredictionResult.category_mask field changed to string data type ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* remove unsupported accelerator types ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* removes forecasting (time_series_forecasting proto) from public v1beta1 protos ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* removes unused protos from schema/ folders: schema/io_format.proto, schema/saved_query_metadata.proto ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* support additional_experiments for AutoML Tables and AutoML Forecasting ([#428](https://www.github.com/googleapis/python-aiplatform/issues/428)) ([b4211f2](https://www.github.com/googleapis/python-aiplatform/commit/b4211f2f60aead88107c08a18d30b0800b019593))
+* support self-signed JWT flow for service accounts ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+
+
+### Bug Fixes
+
+* add async client to %name_%version/init.py ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* add target_column docstring ([#473](https://www.github.com/googleapis/python-aiplatform/issues/473)) ([c0543cd](https://www.github.com/googleapis/python-aiplatform/commit/c0543cdd1e9ba0efd18d7d1a442906938fc6db9a))
+* configuring timeouts for aiplatform v1 methods ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* Enable MetadataStore to use credentials when aiplatfrom.init passed experiment and credentials. ([#460](https://www.github.com/googleapis/python-aiplatform/issues/460)) ([e7bf0d8](https://www.github.com/googleapis/python-aiplatform/commit/e7bf0d83d8bb0849a9bce886c958d13f5cbe5fab))
+* exclude docs and tests from package ([#481](https://www.github.com/googleapis/python-aiplatform/issues/481)) ([b209904](https://www.github.com/googleapis/python-aiplatform/commit/b2099049484f66f4348ddd4448c676feecb0b46e))
+* pass credentials to BQ and GCS clients ([#469](https://www.github.com/googleapis/python-aiplatform/issues/469)) ([481d172](https://www.github.com/googleapis/python-aiplatform/commit/481d172542ffd80e18f4fab5b01945be17d5e18c))
+* remove display_name from FeatureStore ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* Remove URI attribute from Endpoint sample ([#478](https://www.github.com/googleapis/python-aiplatform/issues/478)) ([e3cbdd8](https://www.github.com/googleapis/python-aiplatform/commit/e3cbdd8322c854f526c8564f8bb61fb6525598d7))
+
+
+### Documentation
+
+* changes product name to Vertex AI ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* correct link to fieldmask ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+* removes tinyurl links ([fdc968f](https://www.github.com/googleapis/python-aiplatform/commit/fdc968f49e89a5c7ca14692080c0ae7e8b6e0865))
+
+## [1.0.1](https://www.github.com/googleapis/python-aiplatform/compare/v1.0.0...v1.0.1) (2021-05-21)
+
+
+### Bug Fixes
+
+* use resource name location when passed full resource name ([#421](https://www.github.com/googleapis/python-aiplatform/issues/421)) ([f40f322](https://www.github.com/googleapis/python-aiplatform/commit/f40f32289e1fbeb93b35e4b66f65d15528a6481c))
+
+## [1.0.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.9.0...v1.0.0) (2021-05-19)
+
+
+### Features
+
+* add custom and hp tuning ([#388](https://www.github.com/googleapis/python-aiplatform/issues/388)) ([aab9e58](https://www.github.com/googleapis/python-aiplatform/commit/aab9e589426331bfe7ac3f6efa97109e0bd0db0d))
+* add tensorboard support to custom job and hyperparameter tuning job ([#404](https://www.github.com/googleapis/python-aiplatform/issues/404)) ([fa9bc39](https://www.github.com/googleapis/python-aiplatform/commit/fa9bc3943df55bc0d077ba9b02101ae792a6fb57))
+
+
+### Bug Fixes
+
+* tb-gcp-uploader to show flags in "--help" correctly ([#409](https://www.github.com/googleapis/python-aiplatform/issues/409)) ([9f603dd](https://www.github.com/googleapis/python-aiplatform/commit/9f603dd57868d893cb3be6cf70686fdce2706a6c))
+
+
+### Miscellaneous Chores
+
+* release 1.0.0 ([#407](https://www.github.com/googleapis/python-aiplatform/issues/407)) ([a2d7b68](https://www.github.com/googleapis/python-aiplatform/commit/a2d7b68e4016965f6e3771053f77e1745b44c403))
+
+## [0.9.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.8.0...v0.9.0) (2021-05-17)
+
+
+### Features
+
+* Add AutoML vision, Custom training job, and generic prediction samples ([#300](https://www.github.com/googleapis/python-aiplatform/issues/300)) ([cc1a708](https://www.github.com/googleapis/python-aiplatform/commit/cc1a7084f7715c94657d5a3b3374c0fc9a86a299))
+* Add VPC Peering support to CustomTrainingJob classes ([#378](https://www.github.com/googleapis/python-aiplatform/issues/378)) ([56273f7](https://www.github.com/googleapis/python-aiplatform/commit/56273f7d1329a3404e58af4666297e6d6325f6ed))
+* AutoML Forecasting, Metadata Experiment Tracking, Tensorboard uploader ([e94c9db](https://www.github.com/googleapis/python-aiplatform/commit/e94c9dbeac701390b25e6d70b0b0acc270636029))
+
+
+### Bug Fixes
+
+* **deps:** add packaging requirement ([#392](https://www.github.com/googleapis/python-aiplatform/issues/392)) ([47c1530](https://www.github.com/googleapis/python-aiplatform/commit/47c15300d6c8e879e1d7a10ad0c79e2bb4f18aee))
+* enable aiplatform unit tests ([dcc459d](https://www.github.com/googleapis/python-aiplatform/commit/dcc459d55890961a8aa3cadb696c023a991eea05))
+* rollback six to 1.15 ([#391](https://www.github.com/googleapis/python-aiplatform/issues/391)) ([066624b](https://www.github.com/googleapis/python-aiplatform/commit/066624b7c2ab3af281b7f63e47c990efbcd52673))
+
+## [0.8.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.7.1...v0.8.0) (2021-05-11)
+
+
+### Features
+
+* Add export model ([#353](https://www.github.com/googleapis/python-aiplatform/issues/353)) ([12c5be4](https://www.github.com/googleapis/python-aiplatform/commit/12c5be4690b23375468af16b00790c106232f539))
+* add mbsdk video dataset samples ([#307](https://www.github.com/googleapis/python-aiplatform/issues/307)) ([24d6920](https://www.github.com/googleapis/python-aiplatform/commit/24d6920a530995a3a5c6ad6b25d0b867f7aebe27))
+* Add service account support to Custom Training and Model deployment ([#342](https://www.github.com/googleapis/python-aiplatform/issues/342)) ([b4b1b12](https://www.github.com/googleapis/python-aiplatform/commit/b4b1b12d735e7c40717bd9ff8f8fd330d5e83738))
+* add services to aiplatform_v1beta1 ([#367](https://www.github.com/googleapis/python-aiplatform/issues/367)) ([beb4032](https://www.github.com/googleapis/python-aiplatform/commit/beb4032e2b8c62e65fabcd8ef7cc4cf3d90535a3))
+* Added create_training_pipeline_custom_job_sample and create_training_pipeline_custom_training_managed_dataset_sample and fixed create_training_pipeline_image_classification_sample ([#343](https://www.github.com/googleapis/python-aiplatform/issues/343)) ([1c6b998](https://www.github.com/googleapis/python-aiplatform/commit/1c6b998d9145309d79712f494a2b00b50a9a9bf4))
+* Added create_training_pipeline_custom_package_job_sample and create_training_pipeline_custom_container_job_sample and reworked create_training_pipeline_custom_job_sample ([#351](https://www.github.com/googleapis/python-aiplatform/issues/351)) ([7abf8ef](https://www.github.com/googleapis/python-aiplatform/commit/7abf8ef54c606aa1d1093490369bb797c22fc331))
+* Added default AutoMLTabularTrainingJob column transformations ([#357](https://www.github.com/googleapis/python-aiplatform/issues/357)) ([4fce8c4](https://www.github.com/googleapis/python-aiplatform/commit/4fce8c42504c6c5b86025d728819f61284ac5eef))
+* Added deploy_model_with_dedicated_resources_sample, deploy_model_with_automatic_resources_sample, upload_model and get_model samples ([#337](https://www.github.com/googleapis/python-aiplatform/issues/337)) ([ef4f6f8](https://www.github.com/googleapis/python-aiplatform/commit/ef4f6f8aa5f6a3eea5d9d88e4604410d97b1ef54))
+* Added explain tabular samples ([#348](https://www.github.com/googleapis/python-aiplatform/issues/348)) ([c95d1ce](https://www.github.com/googleapis/python-aiplatform/commit/c95d1cebec0a3e2bf6a25a76700d46a42e65376c))
+* **aiplatform:** Add support for setting User agent header ([#364](https://www.github.com/googleapis/python-aiplatform/issues/364)) ([d50d26d](https://www.github.com/googleapis/python-aiplatform/commit/d50d26d18b892767dcf2e37d30d85b2b405e2708))
+* expose env var in cust training class run func args ([#366](https://www.github.com/googleapis/python-aiplatform/issues/366)) ([7ae28b8](https://www.github.com/googleapis/python-aiplatform/commit/7ae28b84b0b8dd7068f8c0d0303776098816fab0))
+* MBSDK Tabular samples ([#338](https://www.github.com/googleapis/python-aiplatform/issues/338)) ([4241738](https://www.github.com/googleapis/python-aiplatform/commit/4241738bf93c34611b777e4926a939549eb4134e))
+* update featurestore ([#377](https://www.github.com/googleapis/python-aiplatform/issues/377)) ([bc17163](https://www.github.com/googleapis/python-aiplatform/commit/bc17163449a428f2caf17b521add3dde0b5c4391))
+
+
+### Bug Fixes
+
+* Add all supported uCAIP GA regions ([#350](https://www.github.com/googleapis/python-aiplatform/issues/350)) ([5e14c59](https://www.github.com/googleapis/python-aiplatform/commit/5e14c5969c53e50712295aa0343e7622d5db629d))
+* **aiplatform:** Fix doc formatting ([#359](https://www.github.com/googleapis/python-aiplatform/issues/359)) ([857f63d](https://www.github.com/googleapis/python-aiplatform/commit/857f63d475463189ebb89b25d0ca08d9544c3bf3))
+* Bump google-cloud-storage min version to 1.32.0 ([#371](https://www.github.com/googleapis/python-aiplatform/issues/371)) ([6fda925](https://www.github.com/googleapis/python-aiplatform/commit/6fda9255493684bed3820f2c81dca5599872b8b6))
+* default model_display_name to _CustomTrainingJob.display_name when model_serving_container_image_uri is provided ([#324](https://www.github.com/googleapis/python-aiplatform/issues/324)) ([a5fa7a2](https://www.github.com/googleapis/python-aiplatform/commit/a5fa7a224570901988e5e7579c46cc2b823caa9b))
+* env formatiing ([#379](https://www.github.com/googleapis/python-aiplatform/issues/379)) ([6bc4c61](https://www.github.com/googleapis/python-aiplatform/commit/6bc4c612d5471911f82ee5ada9fb3a9307ee836f))
+* remove Optional type hint on deploy ([#345](https://www.github.com/googleapis/python-aiplatform/issues/345)) ([79b0ab1](https://www.github.com/googleapis/python-aiplatform/commit/79b0ab13e6d08a12ac0a0971a8001e9ddb8baf56))
+
+## [0.7.1](https://www.github.com/googleapis/python-aiplatform/compare/v0.7.0...v0.7.1) (2021-04-14)
+
+
+### Bug Fixes
+
+* fix list failing without order_by and local sorting ([#320](https://www.github.com/googleapis/python-aiplatform/issues/320)) ([06e99db](https://www.github.com/googleapis/python-aiplatform/commit/06e99db849d954344aeb8bdefde41d1884e36315))
+
+## [0.7.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.6.0...v0.7.0) (2021-04-14)
+
+
+### Features
+
+* Add Custom Container Prediction support, move to single API endpoint ([#277](https://www.github.com/googleapis/python-aiplatform/issues/277)) ([ca7f6d6](https://www.github.com/googleapis/python-aiplatform/commit/ca7f6d64ea75349a841b53fe6ef6547942439e35))
+* Add initial Model Builder SDK samples ([#265](https://www.github.com/googleapis/python-aiplatform/issues/265)) ([1230dc6](https://www.github.com/googleapis/python-aiplatform/commit/1230dc68a34c5b747186d31a25d1b8f40bf7a97e))
+* Add list() method to all resource nouns ([#294](https://www.github.com/googleapis/python-aiplatform/issues/294)) ([3ec9386](https://www.github.com/googleapis/python-aiplatform/commit/3ec9386f8f766662c91922af66b8098ddfa1eb8f))
+* add support for multiple client versions, change aiplatform from compat.V1BETA1 to compat.V1 ([#290](https://www.github.com/googleapis/python-aiplatform/issues/290)) ([89e3212](https://www.github.com/googleapis/python-aiplatform/commit/89e321246b6223a2355947d8dbd0161b84523478))
+* Make aiplatform.Dataset private ([#296](https://www.github.com/googleapis/python-aiplatform/issues/296)) ([1f0d5f3](https://www.github.com/googleapis/python-aiplatform/commit/1f0d5f3e3f95ee5056545e9d4742b96e9380a22e))
+* parse project location when passed full resource name to get apis ([#297](https://www.github.com/googleapis/python-aiplatform/issues/297)) ([674227d](https://www.github.com/googleapis/python-aiplatform/commit/674227d2e7ed4a4a4e180213dc1178dde7d65a3a))
+
+
+### Bug Fixes
+
+* add quotes to logged snippet ([0ecd0a8](https://www.github.com/googleapis/python-aiplatform/commit/0ecd0a8bbc5a2fc645877d0eb3b930e1b03a270a))
+* make logging more informative during training ([#310](https://www.github.com/googleapis/python-aiplatform/issues/310)) ([9a4d991](https://www.github.com/googleapis/python-aiplatform/commit/9a4d99150a035b8dde7b4f9e72f25745af17b609))
+* remove TPU from accelerator test cases ([57f4fcf](https://www.github.com/googleapis/python-aiplatform/commit/57f4fcf7637467f6176436f6d2e1f6c8be909c4a))
+
+## [0.6.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.5.1...v0.6.0) (2021-03-22)
+
+
+### Features
+
+* add Vizier service ([#266](https://www.github.com/googleapis/python-aiplatform/issues/266)) ([e5c1b1a](https://www.github.com/googleapis/python-aiplatform/commit/e5c1b1a4909d701efeb27f29af43a95516c51475))
+
+
+### Bug Fixes
+
+* skip create data labeling job sample tests ([#254](https://www.github.com/googleapis/python-aiplatform/issues/254)) ([116a29b](https://www.github.com/googleapis/python-aiplatform/commit/116a29b1efcebb15bad14c3c36d3591c09ef10be))
+
+## [0.5.1](https://www.github.com/googleapis/python-aiplatform/compare/v0.5.0...v0.5.1) (2021-03-01)
+
+
+### Bug Fixes
+
+* fix create data labeling job samples tests ([#244](https://www.github.com/googleapis/python-aiplatform/issues/244)) ([3c440de](https://www.github.com/googleapis/python-aiplatform/commit/3c440dea14ad4d04b05ebf17ba4bcb031fe95b3e))
+* fix predict sample tests for proto-plus==1.14.2 ([#250](https://www.github.com/googleapis/python-aiplatform/issues/250)) ([b1c9d88](https://www.github.com/googleapis/python-aiplatform/commit/b1c9d88646f00b034e2576890406325db5384f10))
+* fix update export model sample, and add sample test ([#239](https://www.github.com/googleapis/python-aiplatform/issues/239)) ([20b8859](https://www.github.com/googleapis/python-aiplatform/commit/20b88592da3dd7344c7053d7fe652115ed42e4aa))
+
+
+### Documentation
+
+* update index.rst to include v1 ([#246](https://www.github.com/googleapis/python-aiplatform/issues/246)) ([82193ef](https://www.github.com/googleapis/python-aiplatform/commit/82193ef401258b17fd20895e2b0f6c95a39a24a1))
+
+## [0.5.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.4.0...v0.5.0) (2021-02-17)
+
+
+### Features
+
+* exposes v1 enhanced types and adds tests ([#226](https://www.github.com/googleapis/python-aiplatform/issues/226)) ([42b587d](https://www.github.com/googleapis/python-aiplatform/commit/42b587de2805b9efacb6e1eb5bf05e50ffb37797))
+* LRO metadata ([#204](https://www.github.com/googleapis/python-aiplatform/issues/204)) ([2863dc0](https://www.github.com/googleapis/python-aiplatform/commit/2863dc0ba2337a0e997b95e2cb8669abd62635e3))
+* moves manual enhanced lib edits outside of generated files ([#198](https://www.github.com/googleapis/python-aiplatform/issues/198)) ([a04a561](https://www.github.com/googleapis/python-aiplatform/commit/a04a5613cec36811db8768da5ea7c3229da3074b))
+* updates python-aiplatform to v1 ([#212](https://www.github.com/googleapis/python-aiplatform/issues/212)) ([efc00ed](https://www.github.com/googleapis/python-aiplatform/commit/efc00ed6bb838dceaee7ad9469cc51d1500a365d))
+
+
+### Bug Fixes
+
+* correct text sentiment analysis sample ([#222](https://www.github.com/googleapis/python-aiplatform/issues/222)) ([0befde3](https://www.github.com/googleapis/python-aiplatform/commit/0befde36bfd4ff1b5161b7ceb3bb55f6e7d8ea37))
+* **deps:** remove optional dependencies ([#187](https://www.github.com/googleapis/python-aiplatform/issues/187)) ([6589383](https://www.github.com/googleapis/python-aiplatform/commit/6589383f149fcf463d153fe76973bd874ff3967a))
+* Fix sample test ([#215](https://www.github.com/googleapis/python-aiplatform/issues/215)) ([cdeb0ec](https://www.github.com/googleapis/python-aiplatform/commit/cdeb0ec30c334ff2b5d5e06bc976e824d6e18c04))
+* reduces image size for test image ([#213](https://www.github.com/googleapis/python-aiplatform/issues/213)) ([3ed0e09](https://www.github.com/googleapis/python-aiplatform/commit/3ed0e0961f104762194d9ac598a81017ac9d2392))
+
+## [0.4.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.3.1...v0.4.0) (2021-01-08)
+
+
+### Features
+
+* add create_batch_prediction_job samples ([#67](https://www.github.com/googleapis/python-aiplatform/issues/67)) ([96a850f](https://www.github.com/googleapis/python-aiplatform/commit/96a850f2d24d7ae95f2cdec83a56362abecb85a2))
+* add create_hyperparameter_tuning_job_python_package sample ([#76](https://www.github.com/googleapis/python-aiplatform/issues/76)) ([5155dee](https://www.github.com/googleapis/python-aiplatform/commit/5155dee5edd86fb700a91dfca01bddd4d6393410))
+* add create_training_pipeline_custom_training_managed_dataset sample ([#75](https://www.github.com/googleapis/python-aiplatform/issues/75)) ([b012283](https://www.github.com/googleapis/python-aiplatform/commit/b012283c08cf8abc2974dc73ff7c2d3b8112a16b))
+* add custom_job samples ([#69](https://www.github.com/googleapis/python-aiplatform/issues/69)) ([fb165b3](https://www.github.com/googleapis/python-aiplatform/commit/fb165b3632119b361a1936f367128f7146b49685))
+* add data_labeling samples ([#78](https://www.github.com/googleapis/python-aiplatform/issues/78)) ([7daacd5](https://www.github.com/googleapis/python-aiplatform/commit/7daacd576dc96149c05e2908f276831337076316))
+* add get_custom_job and get_hyperparameter_tuning_job samples ([#68](https://www.github.com/googleapis/python-aiplatform/issues/68)) ([26da7a7](https://www.github.com/googleapis/python-aiplatform/commit/26da7a7d4c1f5db2d2c3b2faedccbd9899c14a47))
+* add schema namespace ([#140](https://www.github.com/googleapis/python-aiplatform/issues/140)) ([1cbd4a5](https://www.github.com/googleapis/python-aiplatform/commit/1cbd4a553fb8d035f687247ce87843167bf106ad))
+* add video action recognition samples ([#77](https://www.github.com/googleapis/python-aiplatform/issues/77)) ([4c60ad6](https://www.github.com/googleapis/python-aiplatform/commit/4c60ad67dcd9026cb989d6e81dec4813cbae962f))
+* Added tabular forecasting sample ([#156](https://www.github.com/googleapis/python-aiplatform/issues/156)) ([a23857b](https://www.github.com/googleapis/python-aiplatform/commit/a23857bc9be94c4a23ae7cf6f2eac75d8ea5ae95))
+* Added tabular forecasting samples ([#128](https://www.github.com/googleapis/python-aiplatform/issues/128)) ([69fc7fd](https://www.github.com/googleapis/python-aiplatform/commit/69fc7fd415e1b404530fd3e2881a94c0441791cf))
+* adds function/method enhancements, demo samples ([#122](https://www.github.com/googleapis/python-aiplatform/issues/122)) ([1a302d2](https://www.github.com/googleapis/python-aiplatform/commit/1a302d232d868a96bf6a41cbf92a550edcdb0673))
+* adds text batch prediction samples ([#82](https://www.github.com/googleapis/python-aiplatform/issues/82)) ([ad09c29](https://www.github.com/googleapis/python-aiplatform/commit/ad09c29c1685a904966e34894c1c4ea77baa2425))
+* initial generation of enhanced types ([#102](https://www.github.com/googleapis/python-aiplatform/issues/102)) ([5ddbf16](https://www.github.com/googleapis/python-aiplatform/commit/5ddbf16f35234dc1781de9d17310a345ac1524de))
+* update create_training_pipeline samples ([#142](https://www.github.com/googleapis/python-aiplatform/issues/142)) ([624a08d](https://www.github.com/googleapis/python-aiplatform/commit/624a08d65c2088c0d5272a7b1b88983a8c7e6284))
+* xai samples ([#83](https://www.github.com/googleapis/python-aiplatform/issues/83)) ([5cf3859](https://www.github.com/googleapis/python-aiplatform/commit/5cf38596d115da63cdddc8958b6ae8f455bdb9a6))
+
+
+### Bug Fixes
+
+* blacken on library, test files ([#135](https://www.github.com/googleapis/python-aiplatform/issues/135)) ([53cdbab](https://www.github.com/googleapis/python-aiplatform/commit/53cdbabdef6bd10488f49d0c3ed6f05149af32a6))
+* predict image samples params ([#150](https://www.github.com/googleapis/python-aiplatform/issues/150)) ([7983b44](https://www.github.com/googleapis/python-aiplatform/commit/7983b448158cf8166ada54c60fb896d5658a2162))
+
+
+### Documentation
+
+* update readme ([#81](https://www.github.com/googleapis/python-aiplatform/issues/81)) ([19dc31a](https://www.github.com/googleapis/python-aiplatform/commit/19dc31a7e63ec112e9d0dc72e22db04910137d07))
+
+## [0.3.1](https://www.github.com/googleapis/python-aiplatform/compare/v0.3.0...v0.3.1) (2020-11-13)
+
+
+### Features
+
+* add samples ([#56](https://www.github.com/googleapis/python-aiplatform/issues/56)) ([53cc5f5](https://www.github.com/googleapis/python-aiplatform/commit/53cc5f51bf3845fe688ee7b7a0494ff42c549f69))
+
+## 0.3.0 (2020-11-05)
+
+
+### Features
+
+* generate v1beta1 ([e80a4fc](https://www.github.com/googleapis/python-aiplatform/commit/e80a4fcbdc84bdd76b159520c93943ada88c7210))
+* python-aiplatform beta library ([#39](https://www.github.com/googleapis/python-aiplatform/issues/39)) ([81366dc](https://www.github.com/googleapis/python-aiplatform/commit/81366dcec173ed1e63b17e242c1ef74f623dd31b))
+* regenerate v1beta1 ([#4](https://www.github.com/googleapis/python-aiplatform/issues/4)) ([4ddc426](https://www.github.com/googleapis/python-aiplatform/commit/4ddc426a6b4b8cd319fa885e363c94b35ef777d9))
+
+
+### Bug Fixes
+
+* re-add py sessions to noxfile ([#22](https://www.github.com/googleapis/python-aiplatform/issues/22)) ([3c713d5](https://www.github.com/googleapis/python-aiplatform/commit/3c713d5cf47bf343bf53583296daed6161d4f4ed))
diff --git a/testbed/googleapis__python-aiplatform/CODE_OF_CONDUCT.md b/testbed/googleapis__python-aiplatform/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000000000000000000000000000000000..039f436812047176f9dd787fc07cb5f8af8dba63
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/CODE_OF_CONDUCT.md
@@ -0,0 +1,95 @@
+
+# Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, education, socio-economic status, nationality, personal appearance,
+race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when the Project
+Steward has a reasonable belief that an individual's behavior may have a
+negative impact on the project or its community.
+
+## Conflict Resolution
+
+We do not believe that all conflict is bad; healthy debate and disagreement
+often yield positive results. However, it is never okay to be disrespectful or
+to engage in behavior that violates the project’s code of conduct.
+
+If you see someone violating the code of conduct, you are encouraged to address
+the behavior directly with those involved. Many issues can be resolved quickly
+and easily, and this gives people more control over the outcome of their
+dispute. If you are unable to resolve the matter for any reason, or if the
+behavior is threatening or harassing, report it. We are dedicated to providing
+an environment where participants feel welcome and safe.
+
+
+Reports should be directed to *googleapis-stewards@google.com*, the
+Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to
+receive and address reported violations of the code of conduct. They will then
+work with a committee consisting of representatives from the Open Source
+Programs Office and the Google Open Source Strategy team. If for any reason you
+are uncomfortable reaching out to the Project Steward, please email
+opensource@google.com.
+
+We will investigate every complaint, but you may not receive a direct response.
+We will use our discretion in determining when and how to follow up on reported
+incidents, which may range from not taking action to permanent expulsion from
+the project and project-sponsored spaces. We will notify the accused of the
+report and provide them an opportunity to discuss it before any action is taken.
+The identity of the reporter will be omitted from the details of the report
+supplied to the accused. In potentially harmful situations, such as ongoing
+harassment or threats to anyone's safety, we may take action without notice.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/testbed/googleapis__python-aiplatform/CONTRIBUTING.rst b/testbed/googleapis__python-aiplatform/CONTRIBUTING.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ef68d68832ab13fe11d093ee4bc556484e0fd8c0
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/CONTRIBUTING.rst
@@ -0,0 +1,281 @@
+.. Generated by synthtool. DO NOT EDIT!
+############
+Contributing
+############
+
+#. **Please sign one of the contributor license agreements below.**
+#. Fork the repo, develop and test your code changes, add docs.
+#. Make sure that your commit messages clearly describe the changes.
+#. Send a pull request. (Please Read: `Faster Pull Request Reviews`_)
+
+.. _Faster Pull Request Reviews: https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
+
+.. contents:: Here are some guidelines for hacking on the Google Cloud Client libraries.
+
+***************
+Adding Features
+***************
+
+In order to add a feature:
+
+- The feature must be documented in both the API and narrative
+ documentation.
+
+- The feature must work fully on the following CPython versions:
+ 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows.
+
+- The feature must not add unnecessary dependencies (where
+ "unnecessary" is of course subjective, but new dependencies should
+ be discussed).
+
+****************************
+Using a Development Checkout
+****************************
+
+You'll have to create a development environment using a Git checkout:
+
+- While logged into your GitHub account, navigate to the
+ ``python-aiplatform`` `repo`_ on GitHub.
+
+- Fork and clone the ``python-aiplatform`` repository to your GitHub account by
+ clicking the "Fork" button.
+
+- Clone your fork of ``python-aiplatform`` from your GitHub account to your local
+ computer, substituting your account username and specifying the destination
+ as ``hack-on-python-aiplatform``. E.g.::
+
+ $ cd ${HOME}
+ $ git clone git@github.com:USERNAME/python-aiplatform.git hack-on-python-aiplatform
+ $ cd hack-on-python-aiplatform
+ # Configure remotes such that you can pull changes from the googleapis/python-aiplatform
+ # repository into your local repository.
+ $ git remote add upstream git@github.com:googleapis/python-aiplatform.git
+ # fetch and merge changes from upstream into main
+ $ git fetch upstream
+ $ git merge upstream/main
+
+Now your local repo is set up such that you will push changes to your GitHub
+repo, from which you can submit a pull request.
+
+To work on the codebase and run the tests, we recommend using ``nox``,
+but you can also use a ``virtualenv`` of your own creation.
+
+.. _repo: https://github.com/googleapis/python-aiplatform
+
+Using ``nox``
+=============
+
+We use `nox `__ to instrument our tests.
+
+- To test your changes, run unit tests with ``nox``::
+ $ nox -s unit
+
+- To run a single unit test::
+
+ $ nox -s unit-3.12 -- -k
+
+
+ .. note::
+
+ The unit tests and system tests are described in the
+ ``noxfile.py`` files in each directory.
+
+.. nox: https://pypi.org/project/nox/
+
+*****************************************
+I'm getting weird errors... Can you help?
+*****************************************
+
+If the error mentions ``Python.h`` not being found,
+install ``python-dev`` and try again.
+On Debian/Ubuntu::
+
+ $ sudo apt-get install python-dev
+
+************
+Coding Style
+************
+- We use the automatic code formatter ``black``. You can run it using
+ the nox session ``blacken``. This will eliminate many lint errors. Run via::
+
+ $ nox -s blacken
+
+- PEP8 compliance is required, with exceptions defined in the linter configuration.
+ If you have ``nox`` installed, you can test that you have not introduced
+ any non-compliant code via::
+
+ $ nox -s lint
+
+- In order to make ``nox -s lint`` run faster, you can set some environment
+ variables::
+
+ export GOOGLE_CLOUD_TESTING_REMOTE="upstream"
+ export GOOGLE_CLOUD_TESTING_BRANCH="main"
+
+ By doing this, you are specifying the location of the most up-to-date
+ version of ``python-aiplatform``. The
+ remote name ``upstream`` should point to the official ``googleapis``
+ checkout and the branch should be the default branch on that remote (``main``).
+
+- This repository contains configuration for the
+ `pre-commit `__ tool, which automates checking
+ our linters during a commit. If you have it installed on your ``$PATH``,
+ you can enable enforcing those checks via:
+
+.. code-block:: bash
+
+ $ pre-commit install
+ pre-commit installed at .git/hooks/pre-commit
+
+Exceptions to PEP8:
+
+- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for
+ "Function-Under-Test"), which is PEP8-incompliant, but more readable.
+ Some also use a local variable, ``MUT`` (short for "Module-Under-Test").
+
+********************
+Running System Tests
+********************
+
+- To run system tests, you can execute::
+
+ # Run all system tests
+ $ nox -s system
+
+ # Run a single system test
+ $ nox -s system-3.8 -- -k
+
+
+ .. note::
+
+ System tests are only configured to run under Python 3.8.
+ For expediency, we do not run them in older versions of Python 3.
+
+ This alone will not run the tests. You'll need to change some local
+ auth settings and change some configuration in your project to
+ run all the tests.
+
+- System tests will be run against an actual project. You should use local credentials from gcloud when possible. See `Best practices for application authentication `__. Some tests require a service account. For those tests see `Authenticating as a service account `__.
+
+*************
+Test Coverage
+*************
+
+- The codebase *must* have 100% test statement coverage after each commit.
+ You can test coverage via ``nox -s cover``.
+
+******************************************************
+Documentation Coverage and Building HTML Documentation
+******************************************************
+
+If you fix a bug, and the bug requires an API or behavior modification, all
+documentation in this package which references that API or behavior must be
+changed to reflect the bug fix, ideally in the same commit that fixes the bug
+or adds the feature.
+
+Build the docs via:
+
+ $ nox -s docs
+
+*************************
+Samples and code snippets
+*************************
+
+Code samples and snippets live in the `samples/` catalogue. Feel free to
+provide more examples, but make sure to write tests for those examples.
+Each folder containing example code requires its own `noxfile.py` script
+which automates testing. If you decide to create a new folder, you can
+base it on the `samples/snippets` folder (providing `noxfile.py` and
+the requirements files).
+
+The tests will run against a real Google Cloud Project, so you should
+configure them just like the System Tests.
+
+- To run sample tests, you can execute::
+
+ # Run all tests in a folder
+ $ cd samples/snippets
+ $ nox -s py-3.8
+
+ # Run a single sample test
+ $ cd samples/snippets
+ $ nox -s py-3.8 -- -k
+
+********************************************
+Note About ``README`` as it pertains to PyPI
+********************************************
+
+The `description on PyPI`_ for the project comes directly from the
+``README``. Due to the reStructuredText (``rst``) parser used by
+PyPI, relative links which will work on GitHub (e.g. ``CONTRIBUTING.rst``
+instead of
+``https://github.com/googleapis/python-aiplatform/blob/main/CONTRIBUTING.rst``)
+may cause problems creating links or rendering the description.
+
+.. _description on PyPI: https://pypi.org/project/google-cloud-aiplatform
+
+
+*************************
+Supported Python Versions
+*************************
+
+We support:
+
+- `Python 3.8`_
+- `Python 3.9`_
+- `Python 3.10`_
+- `Python 3.11`_
+- `Python 3.12`_
+
+.. _Python 3.8: https://docs.python.org/3.8/
+.. _Python 3.9: https://docs.python.org/3.9/
+.. _Python 3.10: https://docs.python.org/3.10/
+.. _Python 3.11: https://docs.python.org/3.11/
+.. _Python 3.12: https://docs.python.org/3.12/
+
+
+Supported versions can be found in our ``noxfile.py`` `config`_.
+
+.. _config: https://github.com/googleapis/python-aiplatform/blob/main/noxfile.py
+
+
+We also explicitly decided to support Python 3 beginning with version 3.8.
+Reasons for this include:
+
+- Encouraging use of newest versions of Python 3
+- Taking the lead of `prominent`_ open-source `projects`_
+- `Unicode literal support`_ which allows for a cleaner codebase that
+ works in both Python 2 and Python 3
+
+.. _prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django
+.. _projects: http://flask.pocoo.org/docs/0.10/python3/
+.. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/
+
+**********
+Versioning
+**********
+
+This library follows `Semantic Versioning`_.
+
+.. _Semantic Versioning: http://semver.org/
+
+Some packages are currently in major version zero (``0.y.z``), which means that
+anything may change at any time and the public API should not be considered
+stable.
+
+******************************
+Contributor License Agreements
+******************************
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+ intellectual property**, then you'll need to sign an
+ `individual CLA `__.
+- **If you work for a company that wants to allow you to contribute your work**,
+ then you'll need to sign a
+ `corporate CLA `__.
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
diff --git a/testbed/googleapis__python-aiplatform/LICENSE b/testbed/googleapis__python-aiplatform/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/testbed/googleapis__python-aiplatform/MANIFEST.in b/testbed/googleapis__python-aiplatform/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..e0a66705318ea6130a0a4c5cff04a80c309c2a1d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/MANIFEST.in
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+include README.rst LICENSE
+recursive-include google *.json *.proto py.typed
+recursive-include tests *
+global-exclude *.py[co]
+global-exclude __pycache__
+
+# Exclude scripts for samples readmegen
+prune scripts/readme-gen
diff --git a/testbed/googleapis__python-aiplatform/README.rst b/testbed/googleapis__python-aiplatform/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b22aee61c27d5908afa11a292eb6c802ac17733a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/README.rst
@@ -0,0 +1,573 @@
+Vertex AI SDK for Python
+=================================================
+
+
+Gemini API and Generative AI on Vertex AI
+-----------------------------------------
+
+.. note::
+
+ For Gemini API and Generative AI on Vertex AI, please reference `Vertex Generative AI SDK for Python`_
+.. _Vertex Generative AI SDK for Python: https://cloud.google.com/vertex-ai/generative-ai/docs/reference/python/latest
+
+-----------------------------------------
+
+|GA| |pypi| |versions| |unit-tests| |system-tests| |sample-tests|
+
+`Vertex AI`_: Google Vertex AI is an integrated suite of machine learning tools and services for building and using ML models with AutoML or custom code. It offers both novices and experts the best workbench for the entire machine learning development lifecycle.
+
+- `Client Library Documentation`_
+- `Product Documentation`_
+
+.. |GA| image:: https://img.shields.io/badge/support-ga-gold.svg
+ :target: https://github.com/googleapis/google-cloud-python/blob/main/README.rst#general-availability
+.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-aiplatform.svg
+ :target: https://pypi.org/project/google-cloud-aiplatform/
+.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-aiplatform.svg
+ :target: https://pypi.org/project/google-cloud-aiplatform/
+.. |unit-tests| image:: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-unit-tests.svg
+ :target: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-unit-tests.html
+.. |system-tests| image:: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-system-tests.svg
+ :target: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-system-tests.html
+.. |sample-tests| image:: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-sample-tests.svg
+ :target: https://storage.googleapis.com/cloud-devrel-public/python-aiplatform/badges/sdk-sample-tests.html
+.. _Vertex AI: https://cloud.google.com/vertex-ai/docs
+.. _Client Library Documentation: https://cloud.google.com/python/docs/reference/aiplatform/latest
+.. _Product Documentation: https://cloud.google.com/vertex-ai/docs
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. `Enable the Vertex AI API.`_
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Enable the Vertex AI API.: https://cloud.google.com/vertex-ai/docs/start/use-vertex-ai-python-sdk
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _virtualenv: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+ pip install virtualenv
+ virtualenv
+ source /bin/activate
+ /bin/pip install google-cloud-aiplatform
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+ pip install virtualenv
+ virtualenv
+ \Scripts\activate
+ \Scripts\pip.exe install google-cloud-aiplatform
+
+
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.8
+
+Deprecated Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+Python <= 3.7.
+
+The last version of this library compatible with Python 3.6 is google-cloud-aiplatform==1.12.1.
+
+Overview
+~~~~~~~~
+This section provides a brief overview of the Vertex AI SDK for Python. You can also reference the notebooks in `vertex-ai-samples`_ for examples.
+
+.. _vertex-ai-samples: https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/main/notebooks/community/sdk
+
+All publicly available SDK features can be found in the :code:`google/cloud/aiplatform` directory.
+Under the hood, Vertex SDK builds on top of GAPIC, which stands for Google API CodeGen.
+The GAPIC library code sits in :code:`google/cloud/aiplatform_v1` and :code:`google/cloud/aiplatform_v1beta1`,
+and it is auto-generated from Google's service proto files.
+
+For most developers' programmatic needs, they can follow these steps to figure out which libraries to import:
+
+1. Look through :code:`google/cloud/aiplatform` first -- Vertex SDK's APIs will almost always be easier to use and more concise comparing with GAPIC
+2. If the feature that you are looking for cannot be found there, look through :code:`aiplatform_v1` to see if it's available in GAPIC
+3. If it is still in beta phase, it will be available in :code:`aiplatform_v1beta1`
+
+If none of the above scenarios could help you find the right tools for your task, please feel free to open a github issue and send us a feature request.
+
+Importing
+^^^^^^^^^
+Vertex AI SDK resource based functionality can be used by importing the following namespace:
+
+.. code-block:: Python
+
+ from google.cloud import aiplatform
+
+Initialization
+^^^^^^^^^^^^^^
+Initialize the SDK to store common configurations that you use with the SDK.
+
+.. code-block:: Python
+
+ aiplatform.init(
+ # your Google Cloud Project ID or number
+ # environment default used is not set
+ project='my-project',
+
+ # the Vertex AI region you will use
+ # defaults to us-central1
+ location='us-central1',
+
+ # Google Cloud Storage bucket in same region as location
+ # used to stage artifacts
+ staging_bucket='gs://my_staging_bucket',
+
+ # custom google.auth.credentials.Credentials
+ # environment default credentials used if not set
+ credentials=my_credentials,
+
+ # customer managed encryption key resource name
+ # will be applied to all Vertex AI resources if set
+ encryption_spec_key_name=my_encryption_key_name,
+
+ # the name of the experiment to use to track
+ # logged metrics and parameters
+ experiment='my-experiment',
+
+ # description of the experiment above
+ experiment_description='my experiment description'
+ )
+
+Datasets
+^^^^^^^^
+Vertex AI provides managed tabular, text, image, and video datasets. In the SDK, datasets can be used downstream to
+train models.
+
+To create a tabular dataset:
+
+.. code-block:: Python
+
+ my_dataset = aiplatform.TabularDataset.create(
+ display_name="my-dataset", gcs_source=['gs://path/to/my/dataset.csv'])
+
+You can also create and import a dataset in separate steps:
+
+.. code-block:: Python
+
+ from google.cloud import aiplatform
+
+ my_dataset = aiplatform.TextDataset.create(
+ display_name="my-dataset")
+
+ my_dataset.import_data(
+ gcs_source=['gs://path/to/my/dataset.csv'],
+ import_schema_uri=aiplatform.schema.dataset.ioformat.text.multi_label_classification
+ )
+
+To get a previously created Dataset:
+
+.. code-block:: Python
+
+ dataset = aiplatform.ImageDataset('projects/my-project/location/us-central1/datasets/{DATASET_ID}')
+
+Vertex AI supports a variety of dataset schemas. References to these schemas are available under the
+:code:`aiplatform.schema.dataset` namespace. For more information on the supported dataset schemas please refer to the
+`Preparing data docs`_.
+
+.. _Preparing data docs: https://cloud.google.com/ai-platform-unified/docs/datasets/prepare
+
+Training
+^^^^^^^^
+The Vertex AI SDK for Python allows you train Custom and AutoML Models.
+
+You can train custom models using a custom Python script, custom Python package, or container.
+
+**Preparing Your Custom Code**
+
+Vertex AI custom training enables you to train on Vertex AI datasets and produce Vertex AI models. To do so your
+script must adhere to the following contract:
+
+It must read datasets from the environment variables populated by the training service:
+
+.. code-block:: Python
+
+ os.environ['AIP_DATA_FORMAT'] # provides format of data
+ os.environ['AIP_TRAINING_DATA_URI'] # uri to training split
+ os.environ['AIP_VALIDATION_DATA_URI'] # uri to validation split
+ os.environ['AIP_TEST_DATA_URI'] # uri to test split
+
+Please visit `Using a managed dataset in a custom training application`_ for a detailed overview.
+
+.. _Using a managed dataset in a custom training application: https://cloud.google.com/vertex-ai/docs/training/using-managed-datasets
+
+It must write the model artifact to the environment variable populated by the training service:
+
+.. code-block:: Python
+
+ os.environ['AIP_MODEL_DIR']
+
+**Running Training**
+
+.. code-block:: Python
+
+ job = aiplatform.CustomTrainingJob(
+ display_name="my-training-job",
+ script_path="training_script.py",
+ container_uri="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-2:latest",
+ requirements=["gcsfs==0.7.1"],
+ model_serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-2:latest",
+ )
+
+ model = job.run(my_dataset,
+ replica_count=1,
+ machine_type="n1-standard-4",
+ accelerator_type='NVIDIA_TESLA_K80',
+ accelerator_count=1)
+
+In the code block above `my_dataset` is managed dataset created in the `Dataset` section above. The `model` variable is a managed Vertex AI model that can be deployed or exported.
+
+
+AutoMLs
+-------
+The Vertex AI SDK for Python supports AutoML tabular, image, text, video, and forecasting.
+
+To train an AutoML tabular model:
+
+.. code-block:: Python
+
+ dataset = aiplatform.TabularDataset('projects/my-project/location/us-central1/datasets/{DATASET_ID}')
+
+ job = aiplatform.AutoMLTabularTrainingJob(
+ display_name="train-automl",
+ optimization_prediction_type="regression",
+ optimization_objective="minimize-rmse",
+ )
+
+ model = job.run(
+ dataset=dataset,
+ target_column="target_column_name",
+ training_fraction_split=0.6,
+ validation_fraction_split=0.2,
+ test_fraction_split=0.2,
+ budget_milli_node_hours=1000,
+ model_display_name="my-automl-model",
+ disable_early_stopping=False,
+ )
+
+
+Models
+------
+To get a model:
+
+
+.. code-block:: Python
+
+ model = aiplatform.Model('/projects/my-project/locations/us-central1/models/{MODEL_ID}')
+
+
+
+To upload a model:
+
+.. code-block:: Python
+
+ model = aiplatform.Model.upload(
+ display_name='my-model',
+ artifact_uri="gs://python/to/my/model/dir",
+ serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-2:latest",
+ )
+
+
+
+To deploy a model:
+
+
+.. code-block:: Python
+
+ endpoint = model.deploy(machine_type="n1-standard-4",
+ min_replica_count=1,
+ max_replica_count=5
+ machine_type='n1-standard-4',
+ accelerator_type='NVIDIA_TESLA_K80',
+ accelerator_count=1)
+
+
+Please visit `Importing models to Vertex AI`_ for a detailed overview:
+
+.. _Importing models to Vertex AI: https://cloud.google.com/vertex-ai/docs/general/import-model
+
+Model Evaluation
+----------------
+
+The Vertex AI SDK for Python currently supports getting model evaluation metrics for all AutoML models.
+
+To list all model evaluations for a model:
+
+.. code-block:: Python
+
+ model = aiplatform.Model('projects/my-project/locations/us-central1/models/{MODEL_ID}')
+
+ evaluations = model.list_model_evaluations()
+
+
+To get the model evaluation resource for a given model:
+
+.. code-block:: Python
+
+ model = aiplatform.Model('projects/my-project/locations/us-central1/models/{MODEL_ID}')
+
+ # returns the first evaluation with no arguments, you can also pass the evaluation ID
+ evaluation = model.get_model_evaluation()
+
+ eval_metrics = evaluation.metrics
+
+
+You can also create a reference to your model evaluation directly by passing in the resource name of the model evaluation:
+
+.. code-block:: Python
+
+ evaluation = aiplatform.ModelEvaluation(
+ evaluation_name='projects/my-project/locations/us-central1/models/{MODEL_ID}/evaluations/{EVALUATION_ID}')
+
+Alternatively, you can create a reference to your evaluation by passing in the model and evaluation IDs:
+
+.. code-block:: Python
+
+ evaluation = aiplatform.ModelEvaluation(
+ evaluation_name={EVALUATION_ID},
+ model_id={MODEL_ID})
+
+
+Batch Prediction
+----------------
+
+To create a batch prediction job:
+
+.. code-block:: Python
+
+ model = aiplatform.Model('/projects/my-project/locations/us-central1/models/{MODEL_ID}')
+
+ batch_prediction_job = model.batch_predict(
+ job_display_name='my-batch-prediction-job',
+ instances_format='csv',
+ machine_type='n1-standard-4',
+ gcs_source=['gs://path/to/my/file.csv'],
+ gcs_destination_prefix='gs://path/to/my/batch_prediction/results/',
+ service_account='my-sa@my-project.iam.gserviceaccount.com'
+ )
+
+You can also create a batch prediction job asynchronously by including the `sync=False` argument:
+
+.. code-block:: Python
+
+ batch_prediction_job = model.batch_predict(..., sync=False)
+
+ # wait for resource to be created
+ batch_prediction_job.wait_for_resource_creation()
+
+ # get the state
+ batch_prediction_job.state
+
+ # block until job is complete
+ batch_prediction_job.wait()
+
+
+Endpoints
+---------
+
+To create an endpoint:
+
+.. code-block:: Python
+
+ endpoint = aiplatform.Endpoint.create(display_name='my-endpoint')
+
+To deploy a model to a created endpoint:
+
+.. code-block:: Python
+
+ model = aiplatform.Model('/projects/my-project/locations/us-central1/models/{MODEL_ID}')
+
+ endpoint.deploy(model,
+ min_replica_count=1,
+ max_replica_count=5,
+ machine_type='n1-standard-4',
+ accelerator_type='NVIDIA_TESLA_K80',
+ accelerator_count=1)
+
+To get predictions from endpoints:
+
+.. code-block:: Python
+
+ endpoint.predict(instances=[[6.7, 3.1, 4.7, 1.5], [4.6, 3.1, 1.5, 0.2]])
+
+To undeploy models from an endpoint:
+
+.. code-block:: Python
+
+ endpoint.undeploy_all()
+
+To delete an endpoint:
+
+.. code-block:: Python
+
+ endpoint.delete()
+
+
+Pipelines
+---------
+
+To create a Vertex AI Pipeline run and monitor until completion:
+
+.. code-block:: Python
+
+ # Instantiate PipelineJob object
+ pl = PipelineJob(
+ display_name="My first pipeline",
+
+ # Whether or not to enable caching
+ # True = always cache pipeline step result
+ # False = never cache pipeline step result
+ # None = defer to cache option for each pipeline component in the pipeline definition
+ enable_caching=False,
+
+ # Local or GCS path to a compiled pipeline definition
+ template_path="pipeline.json",
+
+ # Dictionary containing input parameters for your pipeline
+ parameter_values=parameter_values,
+
+ # GCS path to act as the pipeline root
+ pipeline_root=pipeline_root,
+ )
+
+ # Execute pipeline in Vertex AI and monitor until completion
+ pl.run(
+ # Email address of service account to use for the pipeline run
+ # You must have iam.serviceAccounts.actAs permission on the service account to use it
+ service_account=service_account,
+
+ # Whether this function call should be synchronous (wait for pipeline run to finish before terminating)
+ # or asynchronous (return immediately)
+ sync=True
+ )
+
+To create a Vertex AI Pipeline without monitoring until completion, use `submit` instead of `run`:
+
+.. code-block:: Python
+
+ # Instantiate PipelineJob object
+ pl = PipelineJob(
+ display_name="My first pipeline",
+
+ # Whether or not to enable caching
+ # True = always cache pipeline step result
+ # False = never cache pipeline step result
+ # None = defer to cache option for each pipeline component in the pipeline definition
+ enable_caching=False,
+
+ # Local or GCS path to a compiled pipeline definition
+ template_path="pipeline.json",
+
+ # Dictionary containing input parameters for your pipeline
+ parameter_values=parameter_values,
+
+ # GCS path to act as the pipeline root
+ pipeline_root=pipeline_root,
+ )
+
+ # Submit the Pipeline to Vertex AI
+ pl.submit(
+ # Email address of service account to use for the pipeline run
+ # You must have iam.serviceAccounts.actAs permission on the service account to use it
+ service_account=service_account,
+ )
+
+
+Explainable AI: Get Metadata
+----------------------------
+
+To get metadata in dictionary format from TensorFlow 1 models:
+
+.. code-block:: Python
+
+ from google.cloud.aiplatform.explain.metadata.tf.v1 import saved_model_metadata_builder
+
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
+ 'gs://python/to/my/model/dir', tags=[tf.saved_model.tag_constants.SERVING]
+ )
+ generated_md = builder.get_metadata()
+
+To get metadata in dictionary format from TensorFlow 2 models:
+
+.. code-block:: Python
+
+ from google.cloud.aiplatform.explain.metadata.tf.v2 import saved_model_metadata_builder
+
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder('gs://python/to/my/model/dir')
+ generated_md = builder.get_metadata()
+
+To use Explanation Metadata in endpoint deployment and model upload:
+
+.. code-block:: Python
+
+ explanation_metadata = builder.get_metadata_protobuf()
+
+ # To deploy a model to an endpoint with explanation
+ model.deploy(..., explanation_metadata=explanation_metadata)
+
+ # To deploy a model to a created endpoint with explanation
+ endpoint.deploy(..., explanation_metadata=explanation_metadata)
+
+ # To upload a model with explanation
+ aiplatform.Model.upload(..., explanation_metadata=explanation_metadata)
+
+
+Cloud Profiler
+----------------------------
+
+Cloud Profiler allows you to profile your remote Vertex AI Training jobs on demand and visualize the results in Vertex AI Tensorboard.
+
+To start using the profiler with TensorFlow, update your training script to include the following:
+
+.. code-block:: Python
+
+ from google.cloud.aiplatform.training_utils import cloud_profiler
+ ...
+ cloud_profiler.init()
+
+Next, run the job with with a Vertex AI TensorBoard instance. For full details on how to do this, visit https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview
+
+Finally, visit your TensorBoard in your Google Cloud Console, navigate to the "Profile" tab, and click the `Capture Profile` button. This will allow users to capture profiling statistics for the running jobs.
+
+
+Next Steps
+~~~~~~~~~~
+
+- Read the `Client Library Documentation`_ for Vertex AI
+ API to see other available methods on the client.
+- Read the `Vertex AI API Product documentation`_ to learn
+ more about the product and see How-to Guides.
+- View this `README`_ to see the full list of Cloud
+ APIs that we cover.
+
+.. _Vertex AI API Product documentation: https://cloud.google.com/vertex-ai/docs
+.. _README: https://github.com/googleapis/google-cloud-python/blob/main/README.rst
diff --git a/testbed/googleapis__python-aiplatform/SECURITY.md b/testbed/googleapis__python-aiplatform/SECURITY.md
new file mode 100644
index 0000000000000000000000000000000000000000..8b58ae9c01ae3b07eeba325544a99071e0713f31
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
+
+The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
+
+We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
diff --git a/testbed/googleapis__python-aiplatform/gemini_docs/README.md b/testbed/googleapis__python-aiplatform/gemini_docs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7ac132fac1b16b691be613adbf9322a3aa95003
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/gemini_docs/README.md
@@ -0,0 +1,326 @@
+# Vertex Generative AI SDK for Python
+The Vertex Generative AI SDK helps developers use Google's generative AI
+[Gemini models](http://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/overview)
+to build AI-powered features and applications.
+The SDKs support use cases like the following:
+
+- Generate text from texts, images and videos (multimodal generation)
+- Build stateful multi-turn conversations (chat)
+- Function calling
+
+## Installation
+
+To install the
+[google-cloud-aiplatform](https://pypi.org/project/google-cloud-aiplatform/)
+Python package, run the following command:
+
+```shell
+pip3 install --upgrade --user "google-cloud-aiplatform>=1.38"
+```
+
+## Usage
+
+For detailed instructions, see [quickstart](http://cloud.google.com/vertex-ai/docs/generative-ai/start/quickstarts/quickstart-multimodal) and [Introduction to multimodal classes in the Vertex AI SDK](http://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/sdk-for-gemini/gemini-sdk-overview-reference).
+
+#### Imports:
+```python
+import vertexai
+```
+
+#### Initialization:
+
+```python
+vertexai.init(project='my-project', location='us-central1')
+```
+
+#### Basic generation:
+```python
+from vertexai.generative_models import GenerativeModel
+model = GenerativeModel("gemini-pro")
+print(model.generate_content("Why is sky blue?"))
+```
+
+#### Using images and videos
+```python
+from vertexai.generative_models import GenerativeModel, Image
+vision_model = GenerativeModel("gemini-pro-vision")
+
+# Local image
+image = Image.load_from_file("image.jpg")
+print(vision_model.generate_content(["What is shown in this image?", image]))
+
+# Image from Cloud Storage
+image_part = generative_models.Part.from_uri("gs://download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg", mime_type="image/jpeg")
+print(vision_model.generate_content([image_part, "Describe this image?"]))
+
+# Text and video
+video_part = Part.from_uri("gs://cloud-samples-data/video/animals.mp4", mime_type="video/mp4")
+print(vision_model.generate_content(["What is in the video? ", video_part]))
+```
+
+#### Chat
+```python
+from vertexai.generative_models import GenerativeModel, Image
+vision_model = GenerativeModel("gemini-ultra-vision")
+vision_chat = vision_model.start_chat()
+image = Image.load_from_file("image.jpg")
+print(vision_chat.send_message(["I like this image.", image]))
+print(vision_chat.send_message("What things do I like?."))
+```
+
+#### System instructions
+```python
+from vertexai.generative_models import GenerativeModel
+model = GenerativeModel(
+ "gemini-1.0-pro",
+ system_instruction=[
+ "Talk like a pirate.",
+ "Don't use rude words.",
+ ],
+)
+print(model.generate_content("Why is sky blue?"))
+```
+
+#### Function calling
+
+```python
+# First, create tools that the model is can use to answer your questions.
+# Describe a function by specifying it's schema (JsonSchema format)
+get_current_weather_func = generative_models.FunctionDeclaration(
+ name="get_current_weather",
+ description="Get the current weather in a given location",
+ parameters={
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA"
+ },
+ "unit": {
+ "type": "string",
+ "enum": [
+ "celsius",
+ "fahrenheit",
+ ]
+ }
+ },
+ "required": [
+ "location"
+ ]
+ },
+)
+# Tool is a collection of related functions
+weather_tool = generative_models.Tool(
+ function_declarations=[get_current_weather_func],
+)
+
+# Use tools in chat:
+model = GenerativeModel(
+ "gemini-pro",
+ # You can specify tools when creating a model to avoid having to send them with every request.
+ tools=[weather_tool],
+)
+chat = model.start_chat()
+# Send a message to the model. The model will respond with a function call.
+print(chat.send_message("What is the weather like in Boston?"))
+# Then send a function response to the model. The model will use it to answer.
+print(chat.send_message(
+ Part.from_function_response(
+ name="get_current_weather",
+ response={
+ "content": {"weather": "super nice"},
+ }
+ ),
+))
+```
+
+
+#### Automatic Function calling
+
+Note: The `FunctionDeclaration.from_func` converter does not support nested types for parameters. Please provide full `FunctionDeclaration` instead.
+
+```python
+from vertexai.preview.generative_models import GenerativeModel, Tool, FunctionDeclaration, AutomaticFunctionCallingResponder
+
+# First, create functions that the model can use to answer your questions.
+def get_current_weather(location: str, unit: str = "centigrade"):
+ """Gets weather in the specified location.
+
+ Args:
+ location: The location for which to get the weather.
+ unit: Optional. Temperature unit. Can be Centigrade or Fahrenheit. Defaults to Centigrade.
+ """
+ return dict(
+ location=location,
+ unit=unit,
+ weather="Super nice, but maybe a bit hot.",
+ )
+
+# Infer function schema
+get_current_weather_func = FunctionDeclaration.from_func(get_current_weather)
+# Tool is a collection of related functions
+weather_tool = Tool(
+ function_declarations=[get_current_weather_func],
+)
+
+# Use tools in chat:
+model = GenerativeModel(
+ "gemini-pro",
+ # You can specify tools when creating a model to avoid having to send them with every request.
+ tools=[weather_tool],
+)
+
+# Activate automatic function calling:
+afc_responder = AutomaticFunctionCallingResponder(
+ # Optional:
+ max_automatic_function_calls=5,
+)
+chat = model.start_chat(responder=afc_responder)
+# Send a message to the model. The model will respond with a function call.
+# The SDK will automatically call the requested function and respond to the model.
+# The model will use the function call response to answer the original question.
+print(chat.send_message("What is the weather like in Boston?"))
+```
+
+#### Evaluation
+
+- To perform bring-your-own-response(BYOR) evaluation, provide the model responses in the `response` column in the dataset. If a pairwise metric is used for BYOR evaluation, provide the baseline model responses in the `baseline_model_response` column.
+
+```python
+import pandas as pd
+from vertexai.evaluation import EvalTask, MetricPromptTemplateExamples
+
+eval_dataset = pd.DataFrame({
+ "prompt" : [...],
+ "reference": [...],
+ "response" : [...],
+ "baseline_model_response": [...],
+})
+eval_task = EvalTask(
+ dataset=eval_dataset,
+ metrics=[
+ "bleu",
+ "rouge_l_sum",
+ MetricPromptTemplateExamples.Pointwise.FLUENCY,
+ MetricPromptTemplateExamples.Pairwise.SAFETY
+ ],
+ experiment="my-experiment",
+)
+eval_result = eval_task.evaluate(experiment_run_name="eval-experiment-run")
+```
+- To perform evaluation with Gemini model inference, specify the `model` parameter with a `GenerativeModel` instance. The input column name to the model is `prompt` and must be present in the dataset.
+
+```python
+from vertexai.evaluation import EvalTask
+from vertexai.generative_models import GenerativeModel
+
+eval_dataset = pd.DataFrame({
+ "reference": [...],
+ "prompt" : [...],
+})
+result = EvalTask(
+ dataset=eval_dataset,
+ metrics=["exact_match", "bleu", "rouge_1", "rouge_l_sum"],
+ experiment="my-experiment",
+).evaluate(
+ model=GenerativeModel("gemini-1.5-pro"),
+ experiment_run_name="gemini-eval-run"
+)
+```
+
+- If a `prompt_template` is specified, the `prompt` column is not required. Prompts can be assembled from the evaluation dataset, and all prompt template variable names must be present in the dataset columns.
+
+```python
+import pandas as pd
+from vertexai.evaluation import EvalTask, MetricPromptTemplateExamples
+from vertexai.generative_models import GenerativeModel
+
+eval_dataset = pd.DataFrame({
+ "context" : [...],
+ "instruction": [...],
+})
+result = EvalTask(
+ dataset=eval_dataset,
+ metrics=[MetricPromptTemplateExamples.Pointwise.SUMMARIZATION_QUALITY],
+).evaluate(
+ model=GenerativeModel("gemini-1.5-pro"),
+ prompt_template="{instruction}. Article: {context}. Summary:",
+)
+```
+
+- To perform evaluation with custom model inference, specify the `model`
+parameter with a custom inference function. The input column name to the
+custom inference function is `prompt` and must be present in the dataset.
+
+```python
+from openai import OpenAI
+from vertexai.evaluation import EvalTask, MetricPromptTemplateExamples
+
+
+client = OpenAI()
+def custom_model_fn(input: str) -> str:
+ response = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "user", "content": input}
+ ]
+ )
+ return response.choices[0].message.content
+
+eval_dataset = pd.DataFrame({
+ "prompt" : [...],
+ "reference": [...],
+})
+result = EvalTask(
+ dataset=eval_dataset,
+ metrics=[MetricPromptTemplateExamples.Pointwise.SAFETY],
+ experiment="my-experiment",
+).evaluate(
+ model=custom_model_fn,
+ experiment_run_name="gpt-eval-run"
+)
+```
+
+- To perform pairwise metric evaluation with model inference step, specify
+the `baseline_model` input to a `PairwiseMetric` instance and the candidate
+`model` input to the `EvalTask.evaluate()` function. The input column name
+to both models is `prompt` and must be present in the dataset.
+
+```python
+import pandas as pd
+from vertexai.evaluation import EvalTask, MetricPromptTemplateExamples, PairwiseMetric
+from vertexai.generative_models import GenerativeModel
+
+baseline_model = GenerativeModel("gemini-1.0-pro")
+candidate_model = GenerativeModel("gemini-1.5-pro")
+
+pairwise_groundedness = PairwiseMetric(
+ metric_prompt_template=MetricPromptTemplateExamples.get_prompt_template(
+ "pairwise_groundedness"
+ ),
+ baseline_model=baseline_model,
+)
+eval_dataset = pd.DataFrame({
+ "prompt" : [...],
+})
+result = EvalTask(
+ dataset=eval_dataset,
+ metrics=[pairwise_groundedness],
+ experiment="my-pairwise-experiment",
+).evaluate(
+ model=candidate_model,
+ experiment_run_name="gemini-pairwise-eval-run",
+)
+```
+
+## Documentation
+
+You can find complete documentation for the Vertex AI SDKs and the Gemini model in the Google Cloud [documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview)
+
+## Contributing
+
+See [Contributing](https://github.com/googleapis/python-aiplatform/blob/main/CONTRIBUTING.rst) for more information on contributing to the Vertex AI Python SDK.
+
+## License
+
+The contents of this repository are licensed under the [Apache License, version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
\ No newline at end of file
diff --git a/testbed/googleapis__python-aiplatform/gemini_docs/conf.py b/testbed/googleapis__python-aiplatform/gemini_docs/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b55aa99c4d2dba69ad8311dfcd4d12e3b460565
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/gemini_docs/conf.py
@@ -0,0 +1,440 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# google-cloud-aiplatform documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+# For plugins that can not read conf.py.
+# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
+sys.path.insert(0, os.path.abspath("."))
+
+__version__ = ""
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.5.5"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ "sphinx.ext.autodoc",
+ "sphinx.ext.autosummary",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.coverage",
+ "sphinx.ext.doctest",
+ "sphinx.ext.napoleon",
+ "sphinx.ext.todo",
+ "sphinx.ext.viewcode",
+ "recommonmark",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_options = {"members": True}
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The root toctree document.
+root_doc = "index"
+
+# General information about the project.
+project = "google-cloud-vertexai"
+copyright = "2019, Google"
+author = "Google APIs"
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = [
+ "_build",
+ "**/.nox/**/*",
+ "samples/AUTHORING_GUIDE.md",
+ "samples/CONTRIBUTING.md",
+ "samples/snippets/README.rst",
+]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "alabaster"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+ "description": "Google Cloud Client Libraries for google-cloud-aiplatform",
+ "github_user": "googleapis",
+ "github_repo": "python-aiplatform",
+ "github_banner": True,
+ "font_family": "'Roboto', Georgia, sans",
+ "head_font_family": "'Roboto', Georgia, serif",
+ "code_font_family": "'Roboto Mono', 'Consolas', monospace",
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "google-cloud-aiplatform-doc"
+
+# -- Options for warnings ------------------------------------------------------
+
+
+suppress_warnings = [
+ # Temporarily suppress this to avoid "more than one target found for
+ # cross-reference" warning, which are intractable for us to avoid while in
+ # a mono-repo.
+ # See https://github.com/sphinx-doc/sphinx/blob
+ # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
+ "ref.python"
+]
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #'preamble': '',
+ # Latex figure (float) alignment
+ #'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (
+ root_doc,
+ "google-cloud-aiplatform.tex",
+ "google-cloud-aiplatform Documentation",
+ author,
+ "manual",
+ )
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (
+ root_doc,
+ "google-cloud-aiplatform",
+ "google-cloud-aiplatform Documentation",
+ [author],
+ 1,
+ )
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (
+ root_doc,
+ "google-cloud-aiplatform",
+ "google-cloud-aiplatform Documentation",
+ author,
+ "google-cloud-aiplatform",
+ "google-cloud-aiplatform Library",
+ "APIs",
+ )
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+ "python": ("https://python.readthedocs.org/en/latest/", None),
+ "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
+ "google.api_core": (
+ "https://googleapis.dev/python/google-api-core/latest/",
+ None,
+ ),
+ "grpc": ("https://grpc.github.io/grpc/python/", None),
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
+ "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
+}
+
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = False
+napoleon_use_admonition_for_notes = False
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
+
+
+def adopt_members_reexported_from_private_modules(public_module: str):
+ """Remaps the module items that come from internal modules.
+
+ A public module might be exporting items that are imported from private modules.
+ This function changes the `__module__` of such items to the public module.
+
+ Example:
+ `package/public.py`:
+
+ ```
+ from package._private import _PrivateClass as PublicClass
+ __all__ = ["PublicClass"]
+ ```
+
+ Calling this function on the `package.public` module will change:
+ ```
+ package._private._PrivateClass.__name__ = "PublicClass"
+ package._private._PrivateClass.__module__ = "package.public"
+ ```
+ """
+ for name, cls in public_module.__dict__.items():
+ if name in public_module.__all__:
+ if "._" in cls.__module__:
+ cls.__name__ = name
+ cls.__module__ = public_module.__name__
+
+
+def setup(*args, **kwargs):
+ # 1. Giving pretty module names to the GA and preview classes
+ # 2. Giving pretty class names to the preview classes
+ # 3. Making Sphinx automodule render the class members instead of
+ # dismissing the exported private classes as "Alias of".
+ from vertexai import evaluation
+ from vertexai import language_models
+ from vertexai import vision_models
+ from vertexai.preview import (
+ language_models as preview_language_models,
+ )
+ from vertexai.preview import (
+ vision_models as preview_vision_models,
+ )
+
+ # There are many possible ways to select which classes to fix.
+ # We select the publicly exported members that have an internal module ("*._*").
+
+ # Setting the modules of the GA classes
+ adopt_members_reexported_from_private_modules(evaluation)
+ adopt_members_reexported_from_private_modules(language_models)
+ adopt_members_reexported_from_private_modules(vision_models)
+
+ # Setting the modules of the public preview classes
+ # Selecting the members that still have an internal module after the GA fixes.
+ adopt_members_reexported_from_private_modules(preview_language_models)
+ adopt_members_reexported_from_private_modules(preview_vision_models)
diff --git a/testbed/googleapis__python-aiplatform/gemini_docs/index.rst b/testbed/googleapis__python-aiplatform/gemini_docs/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cb50fed0997f6fce033425ad57f91400808699f2
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/gemini_docs/index.rst
@@ -0,0 +1,8 @@
+.. include:: README.md
+
+API Reference
+-------------
+.. toctree::
+ :maxdepth: 2
+
+ vertexai/vertexai
diff --git a/testbed/googleapis__python-aiplatform/gemini_docs/vertexai/vertexai.rst b/testbed/googleapis__python-aiplatform/gemini_docs/vertexai/vertexai.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3d8ff1dec5fc78a43bfcb412b3dd24a764ab71c0
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/gemini_docs/vertexai/vertexai.rst
@@ -0,0 +1,97 @@
+Vertex AI SDK
+=============================================
+
+.. automodule:: vertexai
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.generative_models
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.preview.generative_models
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.preview.prompts
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.prompts._prompts
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.prompts._prompt_management
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.language_models
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.language_models._language_models
+ :no-members:
+ :private-members: _TunableModelMixin
+
+.. automodule:: vertexai.preview
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.preview.language_models
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.vision_models
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.preview.vision_models
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.preview.tuning
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.preview.tuning.sft
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.evaluation
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.preview.reasoning_engines
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.resources
+ :no-members:
+
+.. automodule:: vertexai.resources.preview
+ :no-members:
+
+.. automodule:: vertexai.resources.preview.ml_monitoring
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+.. automodule:: vertexai.resources.preview.ml_monitoring.spec
+ :members:
+ :show-inheritance:
+ :inherited-members:
diff --git a/testbed/googleapis__python-aiplatform/mypy.ini b/testbed/googleapis__python-aiplatform/mypy.ini
new file mode 100644
index 0000000000000000000000000000000000000000..574c5aed394b3cee50209c2e79dc8a415f297dc2
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.7
+namespace_packages = True
diff --git a/testbed/googleapis__python-aiplatform/noxfile.py b/testbed/googleapis__python-aiplatform/noxfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..282461dc212a8867a782db9563eb1ee9218e6a17
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/noxfile.py
@@ -0,0 +1,591 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+
+from __future__ import absolute_import
+import os
+import pathlib
+import re
+import shutil
+import warnings
+
+import nox
+
+FLAKE8_VERSION = "flake8==6.1.0"
+BLACK_VERSION = "black==22.3.0"
+ISORT_VERSION = "isort==5.10.1"
+LINT_PATHS = ["docs", "google", "vertexai", "tests", "noxfile.py", "setup.py"]
+
+DEFAULT_PYTHON_VERSION = "3.8"
+
+DOCS_DEPENDENCIES = (
+ "sphinx==5.0.2",
+ "alabaster",
+ "google-cloud-aiplatform[evaluation]",
+ "recommonmark",
+)
+
+DOCFX_DEPENDENCIES = (
+ "gcp-sphinx-docfx-yaml",
+ "sphinxcontrib-applehelp==1.0.4",
+ "sphinxcontrib-devhelp==1.0.2",
+ "sphinxcontrib-htmlhelp==2.0.1",
+ "sphinxcontrib-qthelp==1.0.3",
+ "sphinxcontrib-serializinghtml==1.1.5",
+ "alabaster",
+ "google-cloud-aiplatform[evaluation]",
+ "recommonmark",
+)
+
+UNIT_TEST_PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11", "3.12"]
+UNIT_TEST_LANGCHAIN_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12"]
+UNIT_TEST_STANDARD_DEPENDENCIES = [
+ "mock",
+ "asyncmock",
+ "pytest",
+ "pytest-cov",
+ "pytest-asyncio",
+ # Preventing: py.test: error: unrecognized arguments: -n=auto --dist=loadscope
+ "pytest-xdist",
+]
+UNIT_TEST_EXTERNAL_DEPENDENCIES = []
+UNIT_TEST_LOCAL_DEPENDENCIES = []
+UNIT_TEST_DEPENDENCIES = []
+UNIT_TEST_EXTRAS = [
+ "testing",
+]
+UNIT_TEST_EXTRAS_BY_PYTHON = {}
+
+SYSTEM_TEST_PYTHON_VERSIONS = ["3.10"]
+SYSTEM_TEST_STANDARD_DEPENDENCIES = [
+ "mock",
+ "pytest",
+ "google-cloud-testutils",
+]
+SYSTEM_TEST_EXTERNAL_DEPENDENCIES = []
+SYSTEM_TEST_LOCAL_DEPENDENCIES = []
+SYSTEM_TEST_DEPENDENCIES = []
+SYSTEM_TEST_EXTRAS = [
+ "testing",
+]
+SYSTEM_TEST_EXTRAS_BY_PYTHON = {}
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
+nox.options.sessions = [
+ "unit",
+ "unit_ray",
+ "unit_langchain",
+ "system",
+ "cover",
+ "lint",
+ "lint_setup_py",
+ "blacken",
+ "docs",
+]
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def lint(session):
+ """Run linters.
+
+ Returns a failure if the linters find linting errors or sufficiently
+ serious code quality issues.
+ """
+ session.install(FLAKE8_VERSION, BLACK_VERSION)
+ session.run(
+ "black",
+ "--check",
+ "--diff",
+ *LINT_PATHS,
+ )
+ session.run("flake8", *LINT_PATHS)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def blacken(session):
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ session.run(
+ "black",
+ *LINT_PATHS,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def format(session):
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run(
+ "isort",
+ "--fss",
+ *LINT_PATHS,
+ )
+ session.run(
+ "black",
+ *LINT_PATHS,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def lint_setup_py(session):
+ """Verify that setup.py is valid (including RST check)."""
+ session.install("docutils", "pygments")
+ session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
+
+
+def install_unittest_dependencies(session, *constraints):
+ standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
+ session.install(*standard_deps, *constraints)
+
+ if UNIT_TEST_EXTERNAL_DEPENDENCIES:
+ warnings.warn(
+ "'unit_test_external_dependencies' is deprecated. Instead, please "
+ "use 'unit_test_dependencies' or 'unit_test_local_dependencies'.",
+ DeprecationWarning,
+ )
+ session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints)
+
+ if UNIT_TEST_LOCAL_DEPENDENCIES:
+ session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints)
+
+ if UNIT_TEST_EXTRAS_BY_PYTHON:
+ extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
+ elif UNIT_TEST_EXTRAS:
+ extras = UNIT_TEST_EXTRAS
+ else:
+ extras = []
+
+ if extras:
+ session.install("-e", f".[{','.join(extras)}]", *constraints)
+ else:
+ session.install("-e", ".", *constraints)
+
+
+def default(session):
+ # Install all test dependencies, then install this package in-place.
+
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ install_unittest_dependencies(session, "-c", constraints_path)
+
+ # Run py.test against the unit tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=unit_{session.python}_sponge_log.xml",
+ "--cov=google",
+ "--cov-append",
+ "--cov-config=.coveragerc",
+ "--cov-report=",
+ "--cov-fail-under=0",
+ "--ignore=tests/unit/vertex_ray",
+ "--ignore=tests/unit/vertex_langchain",
+ "--ignore=tests/unit/architecture",
+ os.path.join("tests", "unit"),
+ *session.posargs,
+ )
+
+ # Run tests that require isolation.
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=unit_{session.python}_test_vertexai_import_sponge_log.xml",
+ os.path.join("tests", "unit", "architecture", "test_vertexai_import.py"),
+ *session.posargs,
+ )
+
+
+@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
+def unit(session):
+ """Run the unit test suite."""
+ # First run the minimal GenAI tests
+ unit_genai_minimal_dependencies(session)
+
+ # Then run the default full test suite
+ default(session)
+
+
+def unit_genai_minimal_dependencies(session):
+ # Install minimal test dependencies, then install this package in-place.
+
+ standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
+ session.install(*standard_deps)
+ session.install("-e", ".")
+
+ # Run py.test against the unit tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=unit_{session.python}_sponge_log.xml",
+ # These tests require the PIL module
+ # "--ignore=TestGenerativeModels::test_image_mime_types",
+ os.path.join("tests", "unit", "vertexai", "test_generative_models.py"),
+ *session.posargs,
+ )
+
+
+@nox.session(python="3.10")
+@nox.parametrize("ray", ["2.9.3", "2.33.0"])
+def unit_ray(session, ray):
+ # Install all test dependencies, then install this package in-place.
+
+ constraints_path = str(CURRENT_DIRECTORY / "testing" / f"constraints-ray-{ray}.txt")
+ standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
+ session.install(*standard_deps, "-c", constraints_path)
+
+ # Install ray extras
+ session.install("-e", ".[ray_testing]", "-c", constraints_path)
+
+ # Run py.test against the unit tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=unit_ray_{ray}_sponge_log.xml",
+ "--cov=google",
+ "--cov-append",
+ "--cov-config=.coveragerc",
+ "--cov-report=",
+ "--cov-fail-under=0",
+ os.path.join("tests", "unit", "vertex_ray"),
+ *session.posargs,
+ )
+
+
+@nox.session(python=UNIT_TEST_LANGCHAIN_PYTHON_VERSIONS)
+def unit_langchain(session):
+ # Install all test dependencies, then install this package in-place.
+
+ constraints_path = str(CURRENT_DIRECTORY / "testing" / "constraints-langchain.txt")
+ standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
+ session.install(*standard_deps, "-c", constraints_path)
+
+ # Install langchain extras
+ session.install("-e", ".[langchain_testing]", "-c", constraints_path)
+
+ # Run py.test against the unit tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ "--junitxml=unit_langchain_sponge_log.xml",
+ "--cov=google",
+ "--cov-append",
+ "--cov-config=.coveragerc",
+ "--cov-report=",
+ "--cov-fail-under=0",
+ os.path.join("tests", "unit", "vertex_langchain"),
+ *session.posargs,
+ )
+
+
+def install_systemtest_dependencies(session, *constraints):
+ # Use pre-release gRPC for system tests.
+ # Exclude version 1.52.0rc1 which has a known issue.
+ # See https://github.com/grpc/grpc/issues/32163
+ session.install("--pre", "grpcio!=1.52.0rc1")
+
+ session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_EXTERNAL_DEPENDENCIES:
+ session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_LOCAL_DEPENDENCIES:
+ session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_DEPENDENCIES:
+ session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_EXTRAS_BY_PYTHON:
+ extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
+ elif SYSTEM_TEST_EXTRAS:
+ extras = SYSTEM_TEST_EXTRAS
+ else:
+ extras = []
+
+ if extras:
+ session.install("-e", f".[{','.join(extras)}]", *constraints)
+ else:
+ session.install("-e", ".", *constraints)
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+def system(session):
+ """Run the system test suite."""
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ system_test_path = os.path.join("tests", "system.py")
+ system_test_folder_path = os.path.join("tests", "system")
+
+ # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
+ if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
+ session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
+ # Install pyopenssl for mTLS testing.
+ if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
+ session.install("pyopenssl")
+
+ system_test_exists = os.path.exists(system_test_path)
+ system_test_folder_exists = os.path.exists(system_test_folder_path)
+ # Sanity check: only run tests if found.
+ if not system_test_exists and not system_test_folder_exists:
+ session.skip("System tests were not found")
+
+ install_systemtest_dependencies(session, "-c", constraints_path)
+
+ # Run py.test against the system tests.
+ if system_test_exists:
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_path,
+ *session.posargs,
+ )
+ if system_test_folder_exists:
+ session.run(
+ "py.test",
+ "-v",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_folder_path,
+ *session.posargs,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def cover(session):
+ """Run the final coverage report.
+
+ This outputs the coverage report aggregating coverage from the unit
+ test runs (not system test runs), and then erases coverage data.
+ """
+ session.install("coverage", "pytest-cov")
+ session.run("coverage", "report", "--show-missing", "--fail-under=85")
+
+ session.run("coverage", "erase")
+
+
+@nox.session(python="3.9")
+def docs(session):
+ """Build the docs for this library."""
+
+ session.install("-e", ".")
+ session.install(
+ *DOCS_DEPENDENCIES,
+ "google-cloud-aiplatform[prediction]",
+ )
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
+
+
+@nox.session(python="3.10")
+def docfx(session):
+ """Build the docfx yaml files for this library."""
+
+ session.install("-e", ".")
+ session.install(
+ *DOCFX_DEPENDENCIES,
+ "google-cloud-aiplatform[prediction]",
+ )
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-D",
+ (
+ "extensions=sphinx.ext.autodoc,"
+ "sphinx.ext.autosummary,"
+ "docfx_yaml.extension,"
+ "sphinx.ext.intersphinx,"
+ "sphinx.ext.coverage,"
+ "sphinx.ext.napoleon,"
+ "sphinx.ext.todo,"
+ "sphinx.ext.viewcode,"
+ "recommonmark"
+ ),
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
+
+
+@nox.session(python="3.9")
+def gemini_docs(session):
+ """Build the docs for library related to Gemini."""
+
+ session.install("-e", ".")
+ session.install(*DOCS_DEPENDENCIES)
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-b",
+ "html",
+ "-d",
+ os.path.join("gemini_docs", "_build", "doctrees", ""),
+ os.path.join("gemini_docs", ""),
+ os.path.join("gemini_docs", "_build", "html", ""),
+ )
+
+
+@nox.session(python="3.10")
+def gemini_docfx(session):
+ """Build the docfx yaml files for library related to Gemini."""
+
+ session.install("-e", ".")
+ session.install(*DOCFX_DEPENDENCIES)
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-D",
+ (
+ "extensions=sphinx.ext.autodoc,"
+ "sphinx.ext.autosummary,"
+ "docfx_yaml.extension,"
+ "sphinx.ext.intersphinx,"
+ "sphinx.ext.coverage,"
+ "sphinx.ext.napoleon,"
+ "sphinx.ext.todo,"
+ "sphinx.ext.viewcode,"
+ "recommonmark"
+ ),
+ "-b",
+ "html",
+ "-d",
+ os.path.join("gemini_docs", "_build", "doctrees", ""),
+ os.path.join("gemini_docs", ""),
+ os.path.join("gemini_docs", "_build", "html", ""),
+ )
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+def prerelease_deps(session):
+ """Run all tests with prerelease versions of dependencies installed."""
+
+ # Install all dependencies
+ session.install("-e", ".[all, tests, tracing]")
+ unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES
+ session.install(*unit_deps_all)
+ system_deps_all = (
+ SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES
+ )
+ session.install(*system_deps_all)
+
+ # Because we test minimum dependency versions on the minimum Python
+ # version, the first version we test with in the unit tests sessions has a
+ # constraints file containing all dependencies and extras.
+ with open(
+ CURRENT_DIRECTORY
+ / "testing"
+ / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt",
+ encoding="utf-8",
+ ) as constraints_file:
+ constraints_text = constraints_file.read()
+
+ # Ignore leading whitespace and comment lines.
+ constraints_deps = [
+ match.group(1)
+ for match in re.finditer(
+ r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE
+ )
+ ]
+
+ session.install(*constraints_deps)
+
+ prerel_deps = [
+ "protobuf",
+ # dependency of grpc
+ "six",
+ "googleapis-common-protos",
+ # Exclude version 1.52.0rc1 which has a known issue. See https://github.com/grpc/grpc/issues/32163
+ "grpcio!=1.52.0rc1",
+ "grpcio-status",
+ "google-api-core",
+ "proto-plus",
+ "google-cloud-testutils",
+ # dependencies of google-cloud-testutils"
+ "click",
+ ]
+
+ for dep in prerel_deps:
+ session.install("--pre", "--no-deps", "--upgrade", dep)
+
+ # Remaining dependencies
+ other_deps = [
+ "requests",
+ "google-auth",
+ ]
+ session.install(*other_deps)
+
+ # Print out prerelease package versions
+ session.run(
+ "python", "-c", "import google.protobuf; print(google.protobuf.__version__)"
+ )
+ session.run("python", "-c", "import grpc; print(grpc.__version__)")
+
+ session.run("py.test", "tests/unit")
+
+ system_test_path = os.path.join("tests", "system.py")
+ system_test_folder_path = os.path.join("tests", "system")
+
+ # Only run system tests if found.
+ if os.path.exists(system_test_path):
+ session.run(
+ "py.test",
+ "--verbose",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_path,
+ *session.posargs,
+ )
+ if os.path.exists(system_test_folder_path):
+ session.run(
+ "py.test",
+ "--verbose",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_folder_path,
+ *session.posargs,
+ )
diff --git a/testbed/googleapis__python-aiplatform/owlbot.py b/testbed/googleapis__python-aiplatform/owlbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..0caac7dcb8acd55a9771019af36398e74c455a88
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/owlbot.py
@@ -0,0 +1,205 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This script is used to synthesize generated parts of this library."""
+
+import re
+
+import synthtool as s
+import synthtool.gcp as gcp
+from synthtool.languages import python
+
+common = gcp.CommonTemplates()
+
+default_version = "v1"
+
+has_generator_updates = False
+for library in s.get_staging_dirs(default_version):
+ # ---------------------------------------------------------------------
+ # Patch each version of the library
+ # ---------------------------------------------------------------------
+
+ # https://github.com/googleapis/gapic-generator-python/issues/413
+ s.replace(
+ library
+ / f"google/cloud/aiplatform_{library.name}/services/prediction_service/client.py",
+ "request.instances = instances",
+ "request.instances.extend(instances)",
+ )
+
+ # Remove test_predict_flattened/test_predict_flattened_async due to gapic generator bug
+ # https://github.com/googleapis/gapic-generator-python/issues/414
+ s.replace(
+ library
+ / f"tests/unit/gapic/aiplatform_{library.name}/test_prediction_service.py",
+ """def test_predict_flattened.*?def test_predict_flattened_error""",
+ "def test_predict_flattened_error",
+ flags=re.MULTILINE | re.DOTALL,
+ )
+
+ # Remove test_explain_flattened/test_explain_flattened_async due to gapic generator bug
+ # https://github.com/googleapis/gapic-generator-python/issues/414
+ s.replace(
+ library
+ / f"tests/unit/gapic/aiplatform_{library.name}/test_prediction_service.py",
+ """def test_explain_flattened.*?def test_explain_flattened_error""",
+ "def test_explain_flattened_error",
+ flags=re.MULTILINE | re.DOTALL,
+ )
+
+ s.move(
+ library,
+ excludes=[
+ ".coveragerc",
+ ".pre-commit-config.yaml",
+ "setup.py",
+ "README.rst",
+ "docs/index.rst",
+ "docs/summary_overview.md",
+ f"docs/definition_{library.name}/services.rst",
+ f"docs/instance_{library.name}/services.rst",
+ f"docs/params_{library.name}/services.rst",
+ f"docs/prediction_{library.name}/services.rst",
+ f"scripts/fixup_aiplatform_{library.name}_keywords.py",
+ f"scripts/fixup_definition_{library.name}_keywords.py",
+ f"scripts/fixup_instance_{library.name}_keywords.py",
+ f"scripts/fixup_params_{library.name}_keywords.py",
+ f"scripts/fixup_prediction_{library.name}_keywords.py",
+ "google/cloud/aiplatform/__init__.py",
+ f"google/cloud/aiplatform/{library.name}/schema/**/services/",
+ "**/gapic_version.py", # exclude gapic_version.py to avoid reverting the version to 0.1.0
+ ".kokoro/samples",
+ "noxfile.py",
+ "testing",
+ "docs/conf.py",
+ ],
+ )
+ has_generator_updates = True
+
+s.remove_staging_dirs()
+
+# only run post processor when there are changes to the generated code
+if has_generator_updates:
+ # ----------------------------------------------------------------------------
+ # Add templated files
+ # ----------------------------------------------------------------------------
+
+ templated_files = common.py_library(
+ cov_level=98,
+ system_test_python_versions=["3.8"],
+ unit_test_python_versions=["3.8", "3.9", "3.10", "3.11", "3.12"],
+ unit_test_extras=["testing"],
+ system_test_extras=["testing"],
+ microgenerator=True,
+ )
+ s.move(
+ templated_files,
+ excludes=[
+ ".coveragerc",
+ ".pre-commit-config.yaml",
+ ".kokoro/continuous/common.cfg",
+ ".kokoro/presubmit/presubmit.cfg",
+ ".kokoro/continuous/prerelease-deps.cfg",
+ ".kokoro/presubmit/prerelease-deps.cfg",
+ ".kokoro/docs/docs-presubmit.cfg",
+ ".kokoro/build.sh",
+ ".kokoro/release.sh",
+ ".kokoro/release/common.cfg",
+ ".kokoro/requirements*",
+ # exclude sample configs so periodic samples are tested against main
+ # instead of pypi
+ ".kokoro/samples/python3.7/common.cfg",
+ ".kokoro/samples/python3.8/common.cfg",
+ ".kokoro/samples/python3.9/common.cfg",
+ ".kokoro/samples/python3.10/common.cfg",
+ ".kokoro/samples/python3.11/common.cfg",
+ ".kokoro/samples/python3.12/common.cfg",
+ ".kokoro/samples/python3.7/periodic.cfg",
+ ".kokoro/samples/python3.8/periodic.cfg",
+ ".kokoro/samples/python3.9/periodic.cfg",
+ ".kokoro/samples/python3.10/periodic.cfg",
+ ".kokoro/samples/python3.11/periodic.cfg",
+ ".kokoro/samples/python3.12/periodic.cfg",
+ ".github/CODEOWNERS",
+ ".github/PULL_REQUEST_TEMPLATE.md",
+ ".github/workflows", # exclude gh actions as credentials are needed for tests
+ "README.rst",
+ ".github/release-please.yml", # use release please manifest
+ "noxfile.py",
+ "testing",
+ "docs/conf.py",
+ ],
+ ) # the microgenerator has a good coveragerc file
+
+ python.py_samples(skip_readmes=True)
+
+ python.configure_previous_major_version_branches()
+
+ # Update samples config to use `ucaip-sample-tests` project
+ s.replace(
+ ".kokoro/samples/python3.*/common.cfg",
+ """env_vars: \{
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-.*?"
+ \}""",
+ """env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "ucaip-sample-tests"
+ }""",
+ )
+
+ s.replace(
+ ".kokoro/test-samples-impl.sh",
+ "python3.9",
+ "python3",
+ )
+
+ # Update publish-docs to include gemini docs workflow.
+ s.replace(
+ ".kokoro/publish-docs.sh",
+ "# build docs",
+ """\
+# build Gemini docs
+nox -s gemini_docs
+# create metadata
+python3 -m docuploader create-metadata \\
+ --name="vertexai" \\
+ --version=$(python3 setup.py --version) \\
+ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \\
+ --distribution-name="google-cloud-vertexai" \\
+ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \\
+ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \\
+ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
+cat docs.metadata
+# upload docs
+python3 -m docuploader upload gemini_docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}"
+# Gemini docfx yaml files
+nox -s gemini_docfx
+# create metadata.
+python3 -m docuploader create-metadata \\
+ --name="vertexai" \\
+ --version=$(python3 setup.py --version) \\
+ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \\
+ --distribution-name="google-cloud-vertexai" \\
+ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \\
+ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \\
+ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) \\
+ --stem="/vertex-ai/generative-ai/docs/reference/python"
+cat docs.metadata
+# upload docs
+python3 -m docuploader upload gemini_docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}"
+# build docs""",
+ )
+
+ s.shell.run(["nox", "-s", "blacken"], hide_output=False)
diff --git a/testbed/googleapis__python-aiplatform/pypi/README.md b/testbed/googleapis__python-aiplatform/pypi/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ad547baf32f02b59a21a6757bc690842d8785d9
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/pypi/README.md
@@ -0,0 +1,4 @@
+# vertexai package
+
+The pypi package in the `_vertex_ai_placeholder` directory is being used to
+upload a package with `vertexai` namespace on PyPi which depends on `google-cloud-aiplatform`.
diff --git a/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/LICENSE b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/README.md b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bb218063b8e453116c167393989331473dc12feb
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/README.md
@@ -0,0 +1,6 @@
+vertexai
+========
+
+To use the Vertex GAPIC clients, please install the `google-cloud-aiplatform` PyPi package by running `pip install google-cloud-aiplatform`.
+
+To use the Vertex AI SDK, please install the `vertexai` PyPi package by running `pip install vertexai`.
\ No newline at end of file
diff --git a/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/pyproject.toml b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..090733dfa6ef8ab142fd56746d5bb1d91e2834eb
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/pyproject.toml
@@ -0,0 +1,17 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "vertexai"
+dynamic = ["version", "dependencies", "optional-dependencies"]
+authors = [
+ { name="Google LLC", email="googleapis-packages@google.com" },
+]
+license = {text = "Apache 2.0"}
+description = "Please run pip install vertexai to use the Vertex SDK."
+readme = "README.md"
+requires-python = ">=3.8"
+
+[project.urls]
+repository = "https://github.com/googleapis/python-aiplatform.git"
\ No newline at end of file
diff --git a/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/setup.py b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..c392916712978ae0c018ff2ffc9daadb049711b6
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/setup.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import io
+import os
+
+import setuptools # type: ignore
+
+name = "vertexai"
+description = "Vertex AI API client library"
+
+package_root = os.path.abspath(os.path.dirname(__file__))
+readme_filename = os.path.join(package_root, "README.md")
+with io.open(readme_filename, encoding="utf-8") as readme_file:
+ readme = readme_file.read()
+
+version = {}
+with open(os.path.join(package_root, "version.py")) as fp:
+ exec(fp.read(), version)
+version = version["__version__"]
+
+tensorboard_extra_require = ["tensorflow >=2.3.0, <3.0.0dev; python_version<='3.11'"]
+metadata_extra_require = ["pandas >= 1.0.0", "numpy>=1.15.0"]
+xai_extra_require = ["tensorflow >=2.3.0, <3.0.0dev"]
+lit_extra_require = [
+ "tensorflow >= 2.3.0, <3.0.0dev",
+ "pandas >= 1.0.0",
+ "lit-nlp == 0.4.0",
+ "explainable-ai-sdk >= 1.0.0",
+]
+profiler_extra_require = [
+ "tensorboard-plugin-profile >= 2.4.0, <3.0.0dev",
+ "werkzeug >= 2.0.0, <2.1.0dev",
+ "tensorflow >=2.4.0, <3.0.0dev",
+]
+featurestore_extra_require = [
+ "google-cloud-bigquery-storage",
+ "pandas >= 1.0.0",
+ "pyarrow >= 6.0.1",
+]
+pipelines_extra_require = [
+ "pyyaml>=5.3.1,<7",
+]
+datasets_extra_require = [
+ "pyarrow >= 3.0.0, < 8.0dev; python_version<'3.11'",
+ "pyarrow >= 10.0.1; python_version=='3.11'",
+ "pyarrow >= 14.0.0; python_version>='3.12'",
+]
+
+vizier_extra_require = [
+ "google-vizier>=0.1.6",
+]
+
+prediction_extra_require = [
+ "docker >= 5.0.3",
+ "fastapi >= 0.71.0, <=0.109.1",
+ "httpx >=0.23.0, <0.25.0", # Optional dependency of fastapi
+ "starlette >= 0.17.1",
+ "uvicorn[standard] >= 0.16.0",
+]
+
+endpoint_extra_require = ["requests >= 2.28.1"]
+
+private_endpoints_extra_require = [
+ "urllib3 >=1.21.1, <1.27",
+ "requests >= 2.28.1",
+]
+
+autologging_extra_require = ["mlflow>=1.27.0,<=2.1.1"]
+
+preview_extra_require = [
+ "cloudpickle < 3.0",
+ "google-cloud-logging < 4.0",
+]
+
+ray_extra_require = [
+ # Cluster only supports 2.4.0 and 2.9.3
+ (
+ "ray[default] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!="
+ " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2; python_version<'3.11'"
+ ),
+ # Ray Data v2.4 in Python 3.11 is broken, but got fixed in Ray v2.5.
+ "ray[default] >= 2.5, <= 2.9.3; python_version=='3.11'",
+ "google-cloud-bigquery-storage",
+ "google-cloud-bigquery",
+ "pandas >= 1.0.0, < 2.2.0",
+ "pyarrow >= 6.0.1",
+ # Workaround for https://github.com/ray-project/ray/issues/36990.
+ # TODO(b/295406381): Remove this pin when we drop support of ray<=2.5.
+ "pydantic < 2",
+ "immutabledict",
+]
+
+genai_requires = (
+ "pydantic < 3",
+ "docstring_parser < 1",
+)
+
+ray_testing_extra_require = ray_extra_require + [
+ "pytest-xdist",
+ # ray train extras required for prediction tests
+ (
+ "ray[train] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!="
+ " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2"
+ ),
+ # Framework version constraints copied from testing_extra_require
+ "scikit-learn",
+ "tensorflow",
+ "torch >= 2.0.0, < 2.1.0",
+ "xgboost",
+ "xgboost_ray",
+]
+
+reasoning_engine_extra_require = [
+ "cloudpickle >= 2.2.1, < 3.0",
+ "pydantic < 3",
+]
+
+rapid_evaluation_extra_require = [
+ "nest_asyncio >= 1.0.0, < 1.6.0",
+ "pandas >= 1.0.0, < 2.2.0",
+]
+
+langchain_extra_require = [
+ "langchain >= 0.1.13, < 0.2",
+ "langchain-core < 0.2",
+ "langchain-google-vertexai < 0.2",
+]
+
+langchain_testing_extra_require = langchain_extra_require + [
+ "pytest-xdist",
+]
+
+full_extra_require = list(
+ set(
+ tensorboard_extra_require
+ + metadata_extra_require
+ + xai_extra_require
+ + lit_extra_require
+ + featurestore_extra_require
+ + pipelines_extra_require
+ + datasets_extra_require
+ + endpoint_extra_require
+ + vizier_extra_require
+ + prediction_extra_require
+ + private_endpoints_extra_require
+ + autologging_extra_require
+ + preview_extra_require
+ + ray_extra_require
+ + reasoning_engine_extra_require
+ + rapid_evaluation_extra_require
+ )
+)
+testing_extra_require = (
+ full_extra_require
+ + profiler_extra_require
+ + [
+ "bigframes; python_version>='3.10'",
+ # google-api-core 2.x is required since kfp requires protobuf > 4
+ "google-api-core >= 2.11, < 3.0.0",
+ "grpcio-testing",
+ "ipython",
+ "kfp >= 2.6.0, < 3.0.0",
+ "pyfakefs",
+ "pytest-asyncio",
+ "pytest-xdist",
+ "scikit-learn",
+ # Lazy import requires > 2.12.0
+ "tensorflow == 2.13.0; python_version<='3.11'",
+ "tensorflow == 2.16.1; python_version>'3.11'",
+ # TODO(jayceeli) torch 2.1.0 has conflict with pyfakefs, will check if
+ # future versions fix this issue
+ "torch >= 2.0.0, < 2.1.0; python_version<='3.11'",
+ "torch >= 2.2.0; python_version>'3.11'",
+ "requests-toolbelt < 1.0.0",
+ "immutabledict",
+ "xgboost",
+ ]
+)
+
+
+setuptools.setup(
+ name=name,
+ version=version,
+ description=description,
+ long_description=readme,
+ author="Google LLC",
+ author_email="vertex-sdk-dev-pypi@google.com",
+ license="Apache 2.0",
+ url="https://github.com/googleapis/python-aiplatform",
+ platforms="Posix; MacOS X; Windows",
+ include_package_data=True,
+ install_requires=[f"google-cloud-aiplatform[all] == {version}"],
+ extras_require={
+ "endpoint": endpoint_extra_require,
+ "full": full_extra_require,
+ "metadata": metadata_extra_require,
+ "tensorboard": tensorboard_extra_require,
+ "testing": testing_extra_require,
+ "xai": xai_extra_require,
+ "lit": lit_extra_require,
+ "cloud_profiler": profiler_extra_require,
+ "pipelines": pipelines_extra_require,
+ "vizier": vizier_extra_require,
+ "prediction": prediction_extra_require,
+ "datasets": datasets_extra_require,
+ "private_endpoints": private_endpoints_extra_require,
+ "autologging": autologging_extra_require,
+ "preview": preview_extra_require,
+ "ray": ray_extra_require,
+ "ray_testing": ray_testing_extra_require,
+ "reasoningengine": reasoning_engine_extra_require,
+ "rapid_evaluation": rapid_evaluation_extra_require,
+ "langchain": langchain_extra_require,
+ "langchain_testing": langchain_testing_extra_require,
+ },
+ python_requires=">=3.8",
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Topic :: Internet",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ zip_safe=False,
+)
diff --git a/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/version.py b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..f07dc7e22b0f11a94f55f1551400f67075c77267
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/pypi/_vertex_ai_placeholder/version.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+__version__ = "1.75.0"
diff --git a/testbed/googleapis__python-aiplatform/release-please-config.json b/testbed/googleapis__python-aiplatform/release-please-config.json
new file mode 100644
index 0000000000000000000000000000000000000000..eb1f551bfd6e48caf649903466949ca555403a2c
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/release-please-config.json
@@ -0,0 +1,48 @@
+{
+ "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
+ "packages": {
+ ".": {
+ "release-type": "python",
+ "extra-files": [
+ "google/cloud/aiplatform/version.py",
+ "google/cloud/aiplatform/gapic_version.py",
+ "google/cloud/aiplatform_v1/gapic_version.py",
+ "google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py",
+ "google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py",
+ "google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py",
+ "google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py",
+ "google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py",
+ "google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py",
+ "google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py",
+ "google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py",
+ "google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py",
+ "google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py",
+ "google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py",
+ "google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py",
+ "google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py",
+ "google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py",
+ "google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py",
+ "google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py",
+ "google/cloud/aiplatform_v1beta1/gapic_version.py",
+ "pypi/_vertex_ai_placeholder/version.py",
+ {
+ "type": "json",
+ "path": "samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json",
+ "jsonpath": "$.clientLibrary.version"
+ },
+ {
+ "type": "json",
+ "path": "samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json",
+ "jsonpath": "$.clientLibrary.version"
+ }
+ ]
+ }
+ },
+ "release-type": "python",
+ "plugins": [
+ {
+ "type": "sentence-case"
+ }
+ ],
+ "initial-version": "0.1.0"
+}
diff --git a/testbed/googleapis__python-aiplatform/renovate.json b/testbed/googleapis__python-aiplatform/renovate.json
new file mode 100644
index 0000000000000000000000000000000000000000..39b2a0ec929660c4db978654ede22ce079c22b4a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/renovate.json
@@ -0,0 +1,12 @@
+{
+ "extends": [
+ "config:base",
+ "group:all",
+ ":preserveSemverRanges",
+ ":disableDependencyDashboard"
+ ],
+ "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py"],
+ "pip_requirements": {
+ "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"]
+ }
+}
diff --git a/testbed/googleapis__python-aiplatform/sdk_schema_tests/__init__.py b/testbed/googleapis__python-aiplatform/sdk_schema_tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ee14b78201436dc3040c85fe2dae2006288804a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/sdk_schema_tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/sdk_schema_tests/common_contract.py b/testbed/googleapis__python-aiplatform/sdk_schema_tests/common_contract.py
new file mode 100644
index 0000000000000000000000000000000000000000..9626372cacc1faeee1c770493080335e3644f5a8
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/sdk_schema_tests/common_contract.py
@@ -0,0 +1,24 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+expected_generate_content_common_arg_keys = (
+ "self",
+ "contents",
+ "generation_config",
+ "safety_settings",
+ "tools",
+ "tool_config",
+ "stream",
+)
diff --git a/testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/__init__.py b/testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ee14b78201436dc3040c85fe2dae2006288804a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/method_signature_tests.py b/testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/method_signature_tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3417f33520e985ea063fcd703b34fbdc20553ea
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/sdk_schema_tests/method_signature_tests/method_signature_tests.py
@@ -0,0 +1,54 @@
+"""Tests for method_signature."""
+
+from inspect import signature
+import unittest
+
+from vertexai.generative_models import GenerativeModel as VertexAIGenerativeModel
+from google.generativeai import GenerativeModel as GoogleAIGenerativeModel
+from sdk_schema_tests import common_contract
+
+
+_VERTEX_AI_SDK_NAME = "Vertex AI SDK"
+_GOOGLE_AI_SDK_NAME = "Google AI SDK"
+
+
+class TestGenerativeModelMethodSignatures(unittest.TestCase):
+ """Tests for method signatures of GenerativeModel."""
+
+ def _test_method_argument_key_in_both_sdks(
+ self,
+ method_under_test,
+ expected_method_arg_keys,
+ sdk_name
+ ):
+ method_signature = signature(method_under_test)
+ actual_method_arg_keys = method_signature.parameters.keys()
+ for expected_arg_key in expected_method_arg_keys:
+ self.assertIn(
+ member=expected_arg_key,
+ container=actual_method_arg_keys,
+ msg=(
+ f"[{sdk_name}][method {method_under_test.__name__}]: expected"
+ f" common arugment {expected_arg_key} not found in actual arugment"
+ f" list: {actual_method_arg_keys}"
+ ),
+ )
+
+ def test_generate_content_method_signature(self):
+ expected_common_arg_keys = (
+ common_contract.expected_generate_content_common_arg_keys
+ )
+ test_arguments = [
+ {
+ "method_under_test": VertexAIGenerativeModel.generate_content,
+ "expected_method_arg_keys": expected_common_arg_keys,
+ "sdk_name": _VERTEX_AI_SDK_NAME,
+ },
+ {
+ "method_under_test": GoogleAIGenerativeModel.generate_content,
+ "expected_method_arg_keys": expected_common_arg_keys,
+ "sdk_name": _GOOGLE_AI_SDK_NAME,
+ },
+ ]
+ for test_argument in test_arguments:
+ self._test_method_argument_key_in_both_sdks(**test_argument)
diff --git a/testbed/googleapis__python-aiplatform/setup.cfg b/testbed/googleapis__python-aiplatform/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..052350089505ef9b7fb22c27031f0f0940e3a477
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/setup.cfg
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+[bdist_wheel]
+universal = 1
diff --git a/testbed/googleapis__python-aiplatform/setup.py b/testbed/googleapis__python-aiplatform/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..605230a189577ee9c7cb73af33c60841bad99699
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/setup.py
@@ -0,0 +1,300 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import io
+import os
+
+import setuptools # type: ignore
+
+name = "google-cloud-aiplatform"
+description = "Vertex AI API client library"
+
+package_root = os.path.abspath(os.path.dirname(__file__))
+readme_filename = os.path.join(package_root, "README.rst")
+with io.open(readme_filename, encoding="utf-8") as readme_file:
+ readme = readme_file.read()
+
+version = {}
+with open(os.path.join(package_root, "google/cloud/aiplatform/version.py")) as fp:
+ exec(fp.read(), version)
+version = version["__version__"]
+
+packages = [
+ package
+ for package in setuptools.PEP420PackageFinder.find()
+ if package.startswith("google") or package.startswith("vertexai")
+]
+
+# Add vertex_ray relative packages
+packages += [
+ package.replace("google.cloud.aiplatform.vertex_ray", "vertex_ray")
+ for package in setuptools.PEP420PackageFinder.find()
+ if package.startswith("google.cloud.aiplatform.vertex_ray")
+]
+
+profiler_extra_require = [
+ "tensorboard-plugin-profile >= 2.4.0, <2.18.0", # <3.0.0dev",
+ "werkzeug >= 2.0.0, <2.1.0dev",
+ "tensorflow >=2.4.0, <3.0.0dev",
+]
+tensorboard_extra_require = [
+ "tensorflow >=2.3.0, <3.0.0dev; python_version<='3.11'"
+] + profiler_extra_require
+
+metadata_extra_require = ["pandas >= 1.0.0", "numpy>=1.15.0"]
+xai_extra_require = ["tensorflow >=2.3.0, <3.0.0dev"]
+lit_extra_require = [
+ "tensorflow >= 2.3.0, <3.0.0dev",
+ "pandas >= 1.0.0",
+ "lit-nlp == 0.4.0",
+ "explainable-ai-sdk >= 1.0.0",
+]
+featurestore_extra_require = [
+ "google-cloud-bigquery-storage",
+ "pandas >= 1.0.0",
+ "pyarrow >= 6.0.1",
+]
+pipelines_extra_require = [
+ "pyyaml>=5.3.1,<7",
+]
+datasets_extra_require = [
+ "pyarrow >= 3.0.0, < 8.0dev; python_version<'3.11'",
+ "pyarrow >= 10.0.1; python_version=='3.11'",
+ "pyarrow >= 14.0.0; python_version>='3.12'",
+]
+
+vizier_extra_require = [
+ "google-vizier>=0.1.6",
+]
+
+prediction_extra_require = [
+ "docker >= 5.0.3",
+ "fastapi >= 0.71.0, <=0.114.0",
+ "httpx >=0.23.0, <0.25.0", # Optional dependency of fastapi
+ "starlette >= 0.17.1",
+ "uvicorn[standard] >= 0.16.0",
+]
+
+endpoint_extra_require = ["requests >= 2.28.1"]
+
+private_endpoints_extra_require = [
+ "urllib3 >=1.21.1, <1.27",
+ "requests >= 2.28.1",
+]
+
+autologging_extra_require = ["mlflow>=1.27.0,<=2.16.0"]
+
+preview_extra_require = []
+
+ray_extra_require = [
+ # Cluster only supports 2.9.3 and 2.33.0. Keep 2.4.0 for our testing environment.
+ # Note that testing is submiting a job in a cluster with Ray 2.9.3 remotely.
+ (
+ "ray[default] >= 2.4, <= 2.33.0,!= 2.5.*,!= 2.6.*,!= 2.7.*,!="
+ " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2, !=2.10.*, !=2.11.*, !=2.12.*, !=2.13.*, !="
+ " 2.14.*, !=2.15.*, !=2.16.*, !=2.17.*, !=2.18.*, !=2.19.*, !=2.20.*, !="
+ " 2.21.*, !=2.22.*, !=2.23.*, !=2.24.*, !=2.25.*, !=2.26.*, !=2.27.*, !="
+ " 2.28.*, !=2.29.*, !=2.30.*, !=2.31.*, !=2.32.*; python_version<'3.11'"
+ ),
+ # To avoid ImportError: cannot import name 'packaging' from 'pkg_resources'
+ "setuptools < 70.0.0",
+ # Ray Data v2.4 in Python 3.11 is broken, but got fixed in Ray v2.5.
+ "ray[default] >= 2.5, <= 2.33.0; python_version=='3.11'",
+ "google-cloud-bigquery-storage",
+ "google-cloud-bigquery",
+ "pandas >= 1.0.0",
+ "pyarrow >= 6.0.1",
+ "immutabledict",
+]
+
+genai_requires = (
+ "pydantic < 3",
+ "typing_extensions",
+ "docstring_parser < 1",
+)
+
+ray_testing_extra_require = ray_extra_require + [
+ "pytest-xdist",
+ # ray train extras required for prediction tests
+ "ray[train]",
+ # Framework version constraints copied from testing_extra_require
+ "scikit-learn<1.6.0",
+ "tensorflow",
+ "torch >= 2.0.0, < 2.1.0",
+ "xgboost",
+ "xgboost_ray",
+]
+
+reasoning_engine_extra_require = [
+ "cloudpickle >= 3.0, < 4.0",
+ "google-cloud-trace < 2",
+ "opentelemetry-sdk < 2",
+ "opentelemetry-exporter-gcp-trace < 2",
+ "pydantic >= 2.6.3, < 3",
+ "typing_extensions",
+]
+
+evaluation_extra_require = [
+ "pandas >= 1.0.0",
+ "tqdm>=4.23.0",
+]
+
+langchain_extra_require = [
+ "langchain >= 0.1.16, < 0.4",
+ "langchain-core < 0.4",
+ "langchain-google-vertexai < 3",
+ "openinference-instrumentation-langchain >= 0.1.19, < 0.2",
+]
+
+langchain_testing_extra_require = list(
+ set(
+ langchain_extra_require
+ + reasoning_engine_extra_require
+ + ["absl-py", "pytest-xdist"]
+ )
+)
+
+tokenization_extra_require = ["sentencepiece >= 0.2.0"]
+tokenization_testing_extra_require = tokenization_extra_require + ["nltk"]
+
+full_extra_require = list(
+ set(
+ tensorboard_extra_require
+ + metadata_extra_require
+ + xai_extra_require
+ + lit_extra_require
+ + featurestore_extra_require
+ + pipelines_extra_require
+ + datasets_extra_require
+ + endpoint_extra_require
+ + vizier_extra_require
+ + prediction_extra_require
+ + private_endpoints_extra_require
+ + autologging_extra_require
+ + preview_extra_require
+ + ray_extra_require
+ + evaluation_extra_require
+ )
+)
+testing_extra_require = (
+ full_extra_require
+ + profiler_extra_require
+ + tokenization_testing_extra_require
+ + [
+ # aiohttp is required for async rest tests (need google-auth[aiohttp],
+ # but can't specify extras in constraints files)
+ "aiohttp",
+ "bigframes; python_version>='3.10'",
+ # google-api-core 2.x is required since kfp requires protobuf > 4
+ "google-api-core >= 2.11, < 3.0.0",
+ "grpcio-testing",
+ "ipython",
+ "kfp >= 2.6.0, < 3.0.0",
+ "pytest-asyncio",
+ "pytest-xdist",
+ "scikit-learn<1.6.0; python_version<='3.10'",
+ "scikit-learn; python_version>'3.10'",
+ # Lazy import requires > 2.12.0
+ "tensorflow == 2.13.0; python_version<='3.11'",
+ "tensorflow == 2.16.1; python_version>'3.11'",
+ # TODO(jayceeli) torch 2.1.0 has conflict with pyfakefs, will check if
+ # future versions fix this issue
+ "torch >= 2.0.0, < 2.1.0; python_version<='3.11'",
+ "torch >= 2.2.0; python_version>'3.11'",
+ "requests-toolbelt < 1.0.0",
+ "immutabledict",
+ "xgboost",
+ ]
+)
+
+
+setuptools.setup(
+ name=name,
+ version=version,
+ description=description,
+ long_description=readme,
+ packages=packages,
+ package_dir={"vertex_ray": "google/cloud/aiplatform/vertex_ray"},
+ package_data={"": ["*.html.j2"]},
+ entry_points={
+ "console_scripts": [
+ "tb-gcp-uploader=google.cloud.aiplatform.tensorboard.uploader_main:run_main"
+ ],
+ },
+ namespace_packages=("google", "google.cloud"),
+ author="Google LLC",
+ author_email="googleapis-packages@google.com",
+ license="Apache 2.0",
+ url="https://github.com/googleapis/python-aiplatform",
+ platforms="Posix; MacOS X; Windows",
+ include_package_data=True,
+ install_requires=(
+ (
+ "google-api-core[grpc] >= 1.34.1,"
+ " <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*"
+ ),
+ "google-auth >= 2.14.1, <3.0.0dev",
+ "proto-plus >= 1.22.3, <2.0.0dev",
+ "protobuf>=3.20.2,<6.0.0dev,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
+ "packaging >= 14.3",
+ "google-cloud-storage >= 1.32.0, < 3.0.0dev",
+ "google-cloud-bigquery >= 1.15.0, < 4.0.0dev, !=3.20.0",
+ "google-cloud-resource-manager >= 1.3.3, < 3.0.0dev",
+ "shapely < 3.0.0dev",
+ )
+ + genai_requires,
+ extras_require={
+ "endpoint": endpoint_extra_require,
+ "full": full_extra_require,
+ "metadata": metadata_extra_require,
+ "tensorboard": tensorboard_extra_require,
+ "testing": testing_extra_require,
+ "xai": xai_extra_require,
+ "lit": lit_extra_require,
+ "cloud_profiler": profiler_extra_require,
+ "pipelines": pipelines_extra_require,
+ "vizier": vizier_extra_require,
+ "prediction": prediction_extra_require,
+ "datasets": datasets_extra_require,
+ "private_endpoints": private_endpoints_extra_require,
+ "autologging": autologging_extra_require,
+ "preview": preview_extra_require,
+ "ray": ray_extra_require,
+ "ray_testing": ray_testing_extra_require,
+ "reasoningengine": reasoning_engine_extra_require,
+ "evaluation": evaluation_extra_require,
+ "langchain": langchain_extra_require,
+ "langchain_testing": langchain_testing_extra_require,
+ "tokenization": tokenization_extra_require,
+ },
+ python_requires=">=3.8",
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Topic :: Internet",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ zip_safe=False,
+)
diff --git a/testbed/googleapis__python-aiplatform/testing/constraints-langchain.txt b/testbed/googleapis__python-aiplatform/testing/constraints-langchain.txt
new file mode 100644
index 0000000000000000000000000000000000000000..14ba97ad539c1dd55791ccdf8bdbb282779aba5d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/testing/constraints-langchain.txt
@@ -0,0 +1,3 @@
+langchain
+langchain-core
+langchain-google-vertexai
\ No newline at end of file
diff --git a/testbed/googleapis__python-aiplatform/testing/constraints-ray-2.33.0.txt b/testbed/googleapis__python-aiplatform/testing/constraints-ray-2.33.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7a955ba26cea4ef1e54a799c4387271a83d15e1d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/testing/constraints-ray-2.33.0.txt
@@ -0,0 +1,13 @@
+ray==2.33.0
+# Below constraints are inherited from constraints-3.10.txt
+google-api-core
+proto-plus==1.22.3
+protobuf
+mock==4.0.2
+google-cloud-storage==2.2.1 # Increased for kfp 2.0 compatibility
+packaging==24.1 # Increased to unbreak canonicalize_version error (b/377774673)
+grpcio-testing==1.34.0
+mlflow==1.30.1 # Pinned to speed up installation
+pytest-xdist==3.3.1 # Pinned to unbreak unit tests
+IPython # Added to test supernova rich html buttons
+
diff --git a/testbed/googleapis__python-aiplatform/testing/constraints-ray-2.4.0.txt b/testbed/googleapis__python-aiplatform/testing/constraints-ray-2.4.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..de876f6dc240d9171b54535f0768ee57286b237e
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/testing/constraints-ray-2.4.0.txt
@@ -0,0 +1,13 @@
+ray==2.4.0
+# Below constraints are inherited from constraints-3.10.txt
+google-api-core
+proto-plus==1.22.3
+protobuf
+mock==4.0.2
+google-cloud-storage==2.2.1 # Increased for kfp 2.0 compatibility
+packaging==20.0 # Increased for compatibility with MLFlow
+grpcio-testing==1.34.0
+mlflow==1.30.1 # Pinned to speed up installation
+pytest-xdist==3.3.1 # Pinned to unbreak unit tests
+IPython # Added to test supernova rich html buttons
+
diff --git a/testbed/googleapis__python-aiplatform/tests/system/__init__.py b/testbed/googleapis__python-aiplatform/tests/system/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8e1c3845db5b44e0d5727e3354929c81d631f15
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/e2e_base.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/e2e_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..467442106186c009bf83ab35854ec2414a2f78a9
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/e2e_base.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import asyncio
+import importlib
+import logging
+import os
+import pytest
+import uuid
+
+from typing import Any, Dict, Generator
+
+from google.api_core import exceptions
+from google.cloud import aiplatform
+import vertexai
+from google.cloud import bigquery
+from google.cloud import resourcemanager
+from google.cloud import storage
+from google.cloud.aiplatform import initializer
+
+_PROJECT = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
+_VPC_NETWORK_URI = os.getenv("_VPC_NETWORK_URI")
+_LOCATION = "us-central1"
+
+
+class TestEndToEnd(metaclass=abc.ABCMeta):
+ @property
+ @classmethod
+ @abc.abstractmethod
+ def _temp_prefix(cls) -> str:
+ """Prefix to staging bucket and display names created by this end-to-end test.
+ Keep the string as short as possible and use kebab case, starting with a lowercase letter.
+
+ Example: `"temp-vertex-hpt-test"`
+ """
+ pass
+
+ @classmethod
+ def _make_display_name(cls, key: str) -> str:
+ """Helper method to make unique display_names.
+
+ Args:
+ key (str): Required. Identifier for the display name.
+ Returns:
+ Unique display name.
+ """
+ return f"{cls._temp_prefix}-{key}-{uuid.uuid4()}"
+
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+ importlib.reload(vertexai)
+
+ @pytest.fixture(scope="class")
+ def shared_state(self) -> Generator[Dict[str, Any], None, None]:
+ shared_state = {}
+ yield shared_state
+
+ @pytest.fixture(scope="class")
+ def prepare_staging_bucket(
+ self, shared_state: Dict[str, Any]
+ ) -> Generator[storage.bucket.Bucket, None, None]:
+ """Create a staging bucket and store bucket resource object in shared state."""
+
+ staging_bucket_name = f"{self._temp_prefix.lower()}-{uuid.uuid4()}"[:63]
+ shared_state["staging_bucket_name"] = staging_bucket_name
+
+ storage_client = storage.Client(project=_PROJECT)
+ shared_state["storage_client"] = storage_client
+
+ bucket = storage_client.create_bucket(
+ staging_bucket_name, project=_PROJECT, location=_LOCATION
+ )
+
+ # TODO(#1415) Once PR Is merged, use the added utilities to
+ # provide create/view access to Pipeline's default service account (compute)
+ project_number = (
+ resourcemanager.ProjectsClient()
+ .get_project(name=f"projects/{_PROJECT}")
+ .name.split("/", 1)[1]
+ )
+
+ service_account = f"{project_number}-compute@developer.gserviceaccount.com"
+ bucket_iam_policy = bucket.get_iam_policy()
+ bucket_iam_policy.setdefault("roles/storage.objectCreator", set()).add(
+ f"serviceAccount:{service_account}"
+ )
+ bucket_iam_policy.setdefault("roles/storage.objectViewer", set()).add(
+ f"serviceAccount:{service_account}"
+ )
+ bucket.set_iam_policy(bucket_iam_policy)
+
+ shared_state["bucket"] = bucket
+ yield
+
+ @pytest.fixture(scope="class")
+ def delete_staging_bucket(self, shared_state: Dict[str, Any]):
+ """Delete the staging bucket and all it's contents"""
+
+ yield
+
+ # Get the staging bucket used for testing and wipe it
+ bucket = shared_state["bucket"]
+ bucket.delete(force=True)
+
+ @pytest.fixture(scope="class")
+ def prepare_bigquery_dataset(
+ self, shared_state: Dict[str, Any]
+ ) -> Generator[bigquery.dataset.Dataset, None, None]:
+ """Create a bigquery dataset and store bigquery resource object in shared state."""
+
+ bigquery_client = bigquery.Client(project=_PROJECT)
+ shared_state["bigquery_client"] = bigquery_client
+
+ dataset_name = f"{self._temp_prefix.lower()}_{uuid.uuid4()}".replace("-", "_")
+ dataset_id = f"{_PROJECT}.{dataset_name}"
+ shared_state["bigquery_dataset_id"] = dataset_id
+
+ dataset = bigquery.Dataset(dataset_id)
+ dataset.location = _LOCATION
+ shared_state["bigquery_dataset"] = bigquery_client.create_dataset(dataset)
+
+ yield
+
+ @pytest.fixture(scope="class")
+ def delete_bigquery_dataset(self, shared_state: Dict[str, Any]):
+ """Delete the bigquery dataset"""
+
+ yield
+
+ # Get the bigquery dataset id used for testing and wipe it
+ bigquery_dataset = shared_state["bigquery_dataset"]
+ bigquery_client = shared_state["bigquery_client"]
+ bigquery_client.delete_dataset(
+ bigquery_dataset.dataset_id, delete_contents=True, not_found_ok=True
+ ) # Make an API request.
+
+ @pytest.fixture(scope="class")
+ def bigquery_dataset(self) -> Generator[bigquery.dataset.Dataset, None, None]:
+ """Create a bigquery dataset and store bigquery resource object in shared state."""
+
+ bigquery_client = bigquery.Client(project=_PROJECT)
+
+ dataset_name = f"{self._temp_prefix.lower()}_{uuid.uuid4()}".replace("-", "_")
+ dataset_id = f"{_PROJECT}.{dataset_name}"
+
+ dataset = bigquery.Dataset(dataset_id)
+ dataset.location = _LOCATION
+ dataset = bigquery_client.create_dataset(dataset)
+
+ yield dataset
+
+ bigquery_client.delete_dataset(
+ dataset.dataset_id, delete_contents=True, not_found_ok=True
+ ) # Make an API request.
+
+ @pytest.fixture(scope="class")
+ def tear_down_resources(self, shared_state: Dict[str, Any]):
+ """Delete every Vertex AI resource created during test"""
+
+ yield
+
+ if "resources" not in shared_state:
+ return
+
+ # TODO(b/218310362): Add resource deletion system tests
+ # Bring all Endpoints to the front of the list
+ # Ensures Models are undeployed first before we attempt deletion
+ shared_state["resources"].sort(
+ key=lambda r: 1
+ if isinstance(r, aiplatform.Endpoint)
+ or isinstance(r, aiplatform.MatchingEngineIndexEndpoint)
+ or isinstance(r, aiplatform.Experiment)
+ else 2
+ )
+
+ for resource in shared_state["resources"]:
+ try:
+ if isinstance(
+ resource,
+ (
+ aiplatform.Endpoint,
+ aiplatform.Featurestore,
+ aiplatform.MatchingEngineIndexEndpoint,
+ ),
+ ):
+ # For endpoint, undeploy model then delete endpoint
+ # For featurestore, force delete its entity_types and features with the featurestore
+ resource.delete(force=True)
+ elif isinstance(resource, aiplatform.Experiment):
+ resource.delete(delete_backing_tensorboard_runs=True)
+ else:
+ resource.delete()
+ except (exceptions.GoogleAPIError, RuntimeError) as e:
+ logging.exception(f"Could not delete resource: {resource} due to: {e}")
+
+ @pytest.fixture(scope="session")
+ def event_loop(event_loop):
+ loop = asyncio.get_event_loop()
+ yield loop
+ loop.close()
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_custom_job.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_custom_job.py
new file mode 100644
index 0000000000000000000000000000000000000000..f48bd9e29a5a56042c3105f7aaff30f9a91e21b8
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_custom_job.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+
+import pytest
+from unittest import mock
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform.constants import base as constants
+from google.cloud.aiplatform.utils import resource_manager_utils
+from google.cloud.aiplatform.compat.types import job_state as gca_job_state
+from tests.system.aiplatform import e2e_base
+
+_PREBUILT_CONTAINER_IMAGE = (
+ "us-docker.pkg.dev/vertex-ai/training/sklearn-cpu.1-0:latest"
+)
+_CUSTOM_CONTAINER_IMAGE = "python:3.8"
+
+_DIR_NAME = os.path.dirname(os.path.abspath(__file__))
+_LOCAL_TRAINING_SCRIPT_PATH = os.path.join(
+ _DIR_NAME, "test_resources/custom_job_script.py"
+)
+
+
+@mock.patch.object(
+ constants,
+ "AIPLATFORM_DEPENDENCY_PATH",
+ "google-cloud-aiplatform @ git+https://github.com/googleapis/"
+ f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}#egg=google-cloud-aiplatform"
+ if os.environ.get("KOKORO_GIT_COMMIT")
+ else constants.AIPLATFORM_DEPENDENCY_PATH,
+)
+@mock.patch.object(
+ constants,
+ "AIPLATFORM_AUTOLOG_DEPENDENCY_PATH",
+ "google-cloud-aiplatform[autologging] @ git+https://github.com/googleapis/"
+ f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}#egg=google-cloud-aiplatform"
+ if os.environ.get("KOKORO_GIT_COMMIT")
+ else constants.AIPLATFORM_AUTOLOG_DEPENDENCY_PATH,
+)
+@pytest.mark.usefixtures(
+ "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources"
+)
+class TestCustomJob(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "temp-vertex-sdk-custom-job"
+
+ def setup_class(cls):
+ cls._experiment_name = cls._make_display_name("experiment")[:60]
+ cls._experiment_run_name = cls._make_display_name("experiment-run")[:60]
+
+ project_number = resource_manager_utils.get_project_number(e2e_base._PROJECT)
+ cls._service_account = f"{project_number}-compute@developer.gserviceaccount.com"
+
+ def test_from_local_script_prebuilt_container(self, shared_state):
+ shared_state["resources"] = []
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ )
+
+ display_name = self._make_display_name("custom-job")
+
+ custom_job = aiplatform.CustomJob.from_local_script(
+ display_name=display_name,
+ script_path=_LOCAL_TRAINING_SCRIPT_PATH,
+ container_uri=_PREBUILT_CONTAINER_IMAGE,
+ requirements=["scikit-learn", "pandas"],
+ )
+ try:
+ custom_job.run()
+ finally:
+ shared_state["resources"].append(custom_job)
+
+ assert custom_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
+
+ def test_from_local_script_custom_container(self, shared_state):
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ )
+
+ display_name = self._make_display_name("custom-job")
+
+ custom_job = aiplatform.CustomJob.from_local_script(
+ display_name=display_name,
+ script_path=_LOCAL_TRAINING_SCRIPT_PATH,
+ container_uri=_CUSTOM_CONTAINER_IMAGE,
+ requirements=["scikit-learn", "pandas"],
+ )
+ try:
+ custom_job.run()
+ finally:
+ shared_state["resources"].append(custom_job)
+
+ assert custom_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
+
+ def test_from_local_script_enable_autolog_prebuilt_container(self, shared_state):
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ experiment=self._experiment_name,
+ )
+
+ shared_state["resources"].append(
+ aiplatform.metadata.metadata._experiment_tracker.experiment
+ )
+
+ display_name = self._make_display_name("custom-job")
+
+ custom_job = aiplatform.CustomJob.from_local_script(
+ display_name=display_name,
+ script_path=_LOCAL_TRAINING_SCRIPT_PATH,
+ container_uri=_PREBUILT_CONTAINER_IMAGE,
+ requirements=["scikit-learn", "pandas"],
+ enable_autolog=True,
+ )
+
+ try:
+ with aiplatform.start_run(self._experiment_run_name) as run:
+ shared_state["resources"].append(run)
+ custom_job.run(
+ experiment=self._experiment_name,
+ experiment_run=run,
+ service_account=self._service_account,
+ )
+ finally:
+ shared_state["resources"].append(custom_job)
+
+ assert custom_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
+
+ def test_from_local_script_enable_autolog_custom_container(self, shared_state):
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ )
+
+ display_name = self._make_display_name("custom-job")
+
+ custom_job = aiplatform.CustomJob.from_local_script(
+ display_name=display_name,
+ script_path=_LOCAL_TRAINING_SCRIPT_PATH,
+ container_uri=_CUSTOM_CONTAINER_IMAGE,
+ requirements=["scikit-learn", "pandas"],
+ enable_autolog=True,
+ )
+
+ # Let the job auto-create the experiment run.
+ try:
+ custom_job.run(
+ experiment=self._experiment_name,
+ service_account=self._service_account,
+ )
+ finally:
+ shared_state["resources"].append(custom_job)
+ experiment_run_resource = aiplatform.Context.get(
+ custom_job.job_spec.experiment_run
+ )
+ if experiment_run_resource:
+ shared_state["resources"].append(experiment_run_resource)
+
+ assert custom_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_dataset.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9488dcaa077459726b7213f7c7b997d1f1b284c
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_dataset.py
@@ -0,0 +1,436 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import uuid
+import pytest
+import importlib
+
+import pandas as pd
+import re
+
+from datetime import datetime
+
+from google.api_core import exceptions
+from google.api_core import client_options
+
+from google.cloud import aiplatform
+from google.cloud import bigquery
+from google.cloud import storage
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.compat.services import (
+ dataset_service_client_v1 as dataset_service,
+)
+
+from test_utils.vpcsc_config import vpcsc_config
+
+from tests.system.aiplatform import e2e_base
+
+_TEST_PROJECT = e2e_base._PROJECT
+_TEST_LOCATION = e2e_base._LOCATION
+TEST_BUCKET = os.environ.get(
+ "GCLOUD_TEST_SAMPLES_BUCKET", "cloud-samples-data-us-central1"
+)
+
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_API_ENDPOINT = f"{_TEST_LOCATION}-aiplatform.googleapis.com"
+_TEST_IMAGE_DATASET_ID = "1997950066622464000" # permanent_50_flowers_dataset
+_TEST_TEXT_DATASET_ID = (
+ "6203215905493614592" # permanent_text_entity_extraction_dataset
+)
+_TEST_DATASET_DISPLAY_NAME = "permanent_50_flowers_dataset"
+_TEST_DATASET_LABELS = {"test": "labels"}
+_TEST_DATASET_DESCRIPTION = "test description"
+_TEST_TABULAR_CLASSIFICATION_GCS_SOURCE = "gs://ucaip-sample-resources/iris_1000.csv"
+_TEST_FORECASTING_BQ_SOURCE = (
+ "bq://ucaip-sample-tests:ucaip_test_us_central1.2020_sales_train"
+)
+_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE = "gs://ucaip-samples-us-central1/sdk_system_test_resources/text_entity_extraction_dataset_small.jsonl"
+_TEST_IMAGE_OBJECT_DETECTION_GCS_SOURCE = "gs://cloud-samples-data-us-central1/ai-platform-unified/datasets/images/isg_data.jsonl"
+_TEST_TEXT_ENTITY_IMPORT_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_extraction_io_format_1.0.0.yaml"
+_TEST_IMAGE_OBJ_DET_SEGMENTATION_IMPORT_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_segmentation_io_format_1.0.0.yaml"
+
+# create_from_dataframe
+_TEST_BOOL_COL = "bool_col"
+_TEST_BOOL_ARR_COL = "bool_array_col"
+_TEST_DOUBLE_COL = "double_col"
+_TEST_DOUBLE_ARR_COL = "double_array_col"
+_TEST_INT_COL = "int64_col"
+_TEST_INT_ARR_COL = "int64_array_col"
+_TEST_STR_COL = "string_col"
+_TEST_STR_ARR_COL = "string_array_col"
+_TEST_BYTES_COL = "bytes_col"
+_TEST_TIMESTAMP_COL = "timestamp_col"
+_TEST_DATETIME_COL = "datetime_col"
+_TEST_DF_COLUMN_NAMES = [
+ _TEST_BOOL_COL,
+ _TEST_BOOL_ARR_COL,
+ _TEST_DOUBLE_COL,
+ _TEST_DOUBLE_ARR_COL,
+ _TEST_INT_COL,
+ _TEST_INT_ARR_COL,
+ _TEST_STR_COL,
+ _TEST_STR_ARR_COL,
+ _TEST_BYTES_COL,
+ _TEST_TIMESTAMP_COL,
+ _TEST_DATETIME_COL,
+]
+
+_TEST_TIME_NOW = datetime.now()
+_TEST_TIMESTAMP_WITH_TIMEZONE = pd.Timestamp(_TEST_TIME_NOW, tz="US/Pacific")
+_TEST_TIMESTAMP_WITHOUT_TIMEZONE = pd.Timestamp(_TEST_TIME_NOW)
+
+_TEST_DATAFRAME = pd.DataFrame(
+ data=[
+ [
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ _TEST_TIMESTAMP_WITH_TIMEZONE,
+ _TEST_TIMESTAMP_WITHOUT_TIMEZONE,
+ ],
+ [
+ True,
+ [True, True],
+ 2.2,
+ [2.2, 4.4],
+ 2,
+ [2, 3],
+ "test1",
+ ["test2", "test3"],
+ b"0",
+ _TEST_TIMESTAMP_WITH_TIMEZONE,
+ _TEST_TIMESTAMP_WITHOUT_TIMEZONE,
+ ],
+ ],
+ columns=_TEST_DF_COLUMN_NAMES,
+)
+_TEST_DATAFRAME_BQ_SCHEMA = [
+ bigquery.SchemaField(name="bool_col", field_type="BOOL"),
+ bigquery.SchemaField(name="bool_array_col", field_type="BOOL", mode="REPEATED"),
+ bigquery.SchemaField(name="double_col", field_type="FLOAT"),
+ bigquery.SchemaField(name="double_array_col", field_type="FLOAT", mode="REPEATED"),
+ bigquery.SchemaField(name="int64_col", field_type="INTEGER"),
+ bigquery.SchemaField(name="int64_array_col", field_type="INTEGER", mode="REPEATED"),
+ bigquery.SchemaField(name="string_col", field_type="STRING"),
+ bigquery.SchemaField(name="string_array_col", field_type="STRING", mode="REPEATED"),
+ bigquery.SchemaField(name="bytes_col", field_type="STRING"),
+ bigquery.SchemaField(name="timestamp_col", field_type="TIMESTAMP"),
+ bigquery.SchemaField(name="datetime_col", field_type="DATETIME"),
+]
+
+
+class TestDataset(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "temp-vertex-sdk-dataset-test"
+
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ @pytest.fixture()
+ def storage_client(self):
+ yield storage.Client(project=_TEST_PROJECT)
+
+ @pytest.fixture()
+ def staging_bucket(self, storage_client):
+ new_staging_bucket = f"temp-sdk-integration-{uuid.uuid4()}"
+ bucket = storage_client.create_bucket(new_staging_bucket)
+
+ yield bucket
+
+ bucket.delete(force=True)
+
+ @pytest.fixture()
+ def dataset_gapic_client(self):
+ gapic_client = dataset_service.DatasetServiceClient(
+ client_options=client_options.ClientOptions(api_endpoint=_TEST_API_ENDPOINT)
+ )
+
+ yield gapic_client
+
+ # TODO(vinnys): Remove pytest skip once persistent resources are accessible
+ @pytest.mark.skip(reason="System tests cannot access persistent test resources")
+ def test_get_existing_dataset(self):
+ """Retrieve a known existing dataset, ensure SDK successfully gets the
+ dataset resource."""
+
+ flowers_dataset = aiplatform.ImageDataset(dataset_name=_TEST_IMAGE_DATASET_ID)
+ assert flowers_dataset.name == _TEST_IMAGE_DATASET_ID
+ assert flowers_dataset.display_name == _TEST_DATASET_DISPLAY_NAME
+
+ def test_get_nonexistent_dataset(self):
+ """Ensure attempting to retrieve a dataset that doesn't exist raises
+ a Google API core 404 exception."""
+
+ # AI Platform service returns 404
+ with pytest.raises(exceptions.NotFound):
+ aiplatform.ImageDataset(dataset_name="0")
+
+ def test_get_new_dataset_and_import(self, dataset_gapic_client):
+ """Retrieve new, empty dataset and import a text dataset using import().
+ Then verify data items were successfully imported."""
+
+ try:
+ text_dataset = aiplatform.TextDataset.create(
+ display_name=self._make_display_name(key="get_new_dataset_and_import"),
+ )
+
+ my_dataset = aiplatform.TextDataset(dataset_name=text_dataset.name)
+
+ data_items_pre_import = dataset_gapic_client.list_data_items(
+ parent=my_dataset.resource_name
+ )
+
+ assert len(list(data_items_pre_import)) == 0
+
+ # Blocking call to import
+ my_dataset.import_data(
+ gcs_source=_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE,
+ import_schema_uri=_TEST_TEXT_ENTITY_IMPORT_SCHEMA,
+ import_request_timeout=500,
+ )
+
+ data_items_post_import = dataset_gapic_client.list_data_items(
+ parent=my_dataset.resource_name
+ )
+
+ assert len(list(data_items_post_import)) == 51
+ finally:
+ text_dataset.delete()
+
+ @vpcsc_config.skip_if_inside_vpcsc
+ def test_create_and_import_image_dataset(self, dataset_gapic_client):
+ """Use the Dataset.create() method to create a new image obj detection
+ dataset and import images. Then confirm images were successfully imported."""
+
+ try:
+ img_dataset = aiplatform.ImageDataset.create(
+ display_name=self._make_display_name(key="create_image_dataset"),
+ gcs_source=_TEST_IMAGE_OBJECT_DETECTION_GCS_SOURCE,
+ import_schema_uri=_TEST_IMAGE_OBJ_DET_SEGMENTATION_IMPORT_SCHEMA,
+ create_request_timeout=None,
+ )
+
+ finally:
+ if img_dataset is not None:
+ img_dataset.delete()
+
+ def test_create_tabular_dataset(self):
+ """Use the Dataset.create() method to create a new tabular dataset.
+ Then confirm the dataset was successfully created and references GCS source."""
+
+ try:
+ tabular_dataset = aiplatform.TabularDataset.create(
+ display_name=self._make_display_name(key="create_tabular_dataset"),
+ gcs_source=[_TEST_TABULAR_CLASSIFICATION_GCS_SOURCE],
+ create_request_timeout=None,
+ )
+
+ gapic_metadata = tabular_dataset.to_dict()["metadata"]
+ gcs_source_uris = gapic_metadata["inputConfig"]["gcsSource"]["uri"]
+
+ assert len(gcs_source_uris) == 1
+ assert _TEST_TABULAR_CLASSIFICATION_GCS_SOURCE == gcs_source_uris[0]
+ assert (
+ tabular_dataset.metadata_schema_uri
+ == aiplatform.schema.dataset.metadata.tabular
+ )
+
+ finally:
+ if tabular_dataset is not None:
+ tabular_dataset.delete()
+
+ def test_create_tabular_dataset_from_dataframe(self, bigquery_dataset):
+ table_id = f"test_table{uuid.uuid4()}"
+ bq_staging_table = (
+ f"bq://{_TEST_PROJECT}.{bigquery_dataset.dataset_id}.{table_id}"
+ )
+ try:
+ tabular_dataset = aiplatform.TabularDataset.create_from_dataframe(
+ df_source=_TEST_DATAFRAME,
+ staging_path=bq_staging_table,
+ display_name=self._make_display_name(
+ key="create_and_import_dataset_from_dataframe"
+ ),
+ )
+
+ """Use the Dataset.create_from_dataframe() method to create a new tabular dataset.
+ Then confirm the dataset was successfully created and references the BQ source."""
+ gapic_metadata = tabular_dataset.to_dict()["metadata"]
+ bq_source = gapic_metadata["inputConfig"]["bigquerySource"]["uri"]
+
+ assert bq_staging_table == bq_source
+ assert (
+ tabular_dataset.metadata_schema_uri
+ == aiplatform.schema.dataset.metadata.tabular
+ )
+ bigquery_client = bigquery.Client(
+ project=_TEST_PROJECT,
+ credentials=initializer.global_config.credentials,
+ )
+ table = bigquery_client.get_table(
+ f"{_TEST_PROJECT}.{bigquery_dataset.dataset_id}.{table_id}"
+ )
+ assert (
+ table.schema[-1]
+ == bigquery.SchemaField(name="datetime_col", field_type="DATETIME")
+ if re.match(
+ r"3.*",
+ bigquery.__version__,
+ )
+ else bigquery.SchemaField(name="datetime_col", field_type="TIMESTAMP")
+ )
+ finally:
+ if tabular_dataset is not None:
+ tabular_dataset.delete()
+
+ def test_create_tabular_dataset_from_dataframe_with_provided_schema(
+ self, bigquery_dataset
+ ):
+ """Use the Dataset.create_from_dataframe() method to create a new tabular dataset,
+ passing in the optional `bq_schema` argument. Then confirm the dataset was successfully
+ created and references the BQ source."""
+
+ try:
+ bq_staging_table = f"bq://{_TEST_PROJECT}.{bigquery_dataset.dataset_id}.test_table{uuid.uuid4()}"
+
+ tabular_dataset = aiplatform.TabularDataset.create_from_dataframe(
+ df_source=_TEST_DATAFRAME,
+ staging_path=bq_staging_table,
+ display_name=self._make_display_name(
+ key="create_and_import_dataset_from_dataframe"
+ ),
+ bq_schema=_TEST_DATAFRAME_BQ_SCHEMA,
+ )
+
+ gapic_metadata = tabular_dataset.to_dict()["metadata"]
+ bq_source = gapic_metadata["inputConfig"]["bigquerySource"]["uri"]
+
+ assert bq_staging_table == bq_source
+ assert (
+ tabular_dataset.metadata_schema_uri
+ == aiplatform.schema.dataset.metadata.tabular
+ )
+ finally:
+ tabular_dataset.delete()
+
+ def test_create_time_series_dataset(self):
+ """Use the Dataset.create() method to create a new time series dataset.
+ Then confirm the dataset was successfully created and references GCS source."""
+
+ try:
+ time_series_dataset = aiplatform.TimeSeriesDataset.create(
+ display_name=self._make_display_name(key="create_time_series_dataset"),
+ bq_source=[_TEST_FORECASTING_BQ_SOURCE],
+ create_request_timeout=None,
+ )
+
+ gapic_metadata = time_series_dataset.to_dict()["metadata"]
+ bq_source_uri = gapic_metadata["inputConfig"]["bigquerySource"]["uri"]
+
+ assert _TEST_FORECASTING_BQ_SOURCE == bq_source_uri
+ assert (
+ time_series_dataset.metadata_schema_uri
+ == aiplatform.schema.dataset.metadata.time_series
+ )
+
+ finally:
+ if time_series_dataset is not None:
+ time_series_dataset.delete()
+
+ def test_export_data(self, storage_client, staging_bucket):
+ """Get an existing dataset, export data to a newly created folder in
+ Google Cloud Storage, then verify data was successfully exported."""
+
+ dataset = aiplatform.TextDataset(dataset_name=_TEST_TEXT_DATASET_ID)
+
+ exported_files = dataset.export_data(output_dir=f"gs://{staging_bucket.name}")
+
+ assert len(exported_files) # Ensure at least one GCS path was returned
+
+ exported_file = exported_files[0]
+ bucket, prefix = utils.extract_bucket_and_prefix_from_gcs_path(exported_file)
+
+ bucket = storage_client.get_bucket(bucket)
+ blob = bucket.get_blob(prefix)
+
+ assert blob # Verify the returned GCS export path exists
+
+ def test_export_data_for_custom_training(self, staging_bucket):
+ """Get an existing dataset, export data to a newly created folder in
+ Google Cloud Storage, then verify data was successfully exported."""
+
+ # pylint: disable=protected-access
+ # Custom training data export should be generic, hence using the base
+ # _Dataset class here in test. In practice, users shuold be able to
+ # use this function in any inhericted classes of _Dataset.
+ dataset = aiplatform.datasets._Dataset(dataset_name=_TEST_IMAGE_DATASET_ID)
+
+ split = {
+ "training_filter": "labels.aiplatform.googleapis.com/ml_use=training",
+ "validation_filter": "labels.aiplatform.googleapis.com/ml_use=validation",
+ "test_filter": "labels.aiplatform.googleapis.com/ml_use=test",
+ }
+
+ export_data_response = dataset.export_data_for_custom_training(
+ output_dir=f"gs://{staging_bucket.name}",
+ annotation_schema_uri="gs://google-cloud-aiplatform/schema/dataset/annotation/image_classification_1.0.0.yaml",
+ split=split,
+ )
+
+ # Ensure three output paths (training, validation and test) are provided
+ assert len(export_data_response["exportedFiles"]) == 3
+ # Ensure data stats are calculated and correct
+ assert int(export_data_response["dataStats"]["trainingDataItemsCount"]) == 40
+ assert int(export_data_response["dataStats"]["validationDataItemsCount"]) == 5
+ assert int(export_data_response["dataStats"]["testDataItemsCount"]) == 5
+ assert int(export_data_response["dataStats"]["trainingAnnotationsCount"]) == 40
+ assert int(export_data_response["dataStats"]["validationAnnotationsCount"]) == 5
+ assert int(export_data_response["dataStats"]["testAnnotationsCount"]) == 5
+
+ def test_update_dataset(self):
+ """Create a new dataset and use update() method to change its display_name, labels, and description.
+ Then confirm these fields of the dataset was successfully modifed."""
+
+ try:
+ dataset = aiplatform.ImageDataset.create()
+ labels = dataset.labels
+
+ dataset = dataset.update(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ labels=_TEST_DATASET_LABELS,
+ description=_TEST_DATASET_DESCRIPTION,
+ update_request_timeout=None,
+ )
+ labels.update(_TEST_DATASET_LABELS)
+
+ assert dataset.display_name == _TEST_DATASET_DISPLAY_NAME
+ assert dataset.labels == labels
+ assert dataset.gca_resource.description == _TEST_DATASET_DESCRIPTION
+
+ finally:
+ dataset.delete()
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_forecasting.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_forecasting.py
new file mode 100644
index 0000000000000000000000000000000000000000..f42692abccf6c2d8a856141fc669cf6085aa5ea9
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_forecasting.py
@@ -0,0 +1,395 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import training_jobs
+
+from google.cloud.aiplatform.compat.types import job_state
+from google.cloud.aiplatform.compat.types import pipeline_state
+import pytest
+from tests.system.aiplatform import e2e_base
+
+_TRAINING_DATASET_BQ_PATH = (
+ "bq://ucaip-sample-tests:ucaip_test_us_central1.2020_sales_train"
+)
+_PREDICTION_DATASET_BQ_PATH = (
+ "bq://ucaip-sample-tests:ucaip_test_us_central1.2021_sales_predict"
+)
+
+
+@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
+class TestEndToEndForecasting1(e2e_base.TestEndToEnd):
+ """End to end system test of the Vertex SDK with forecasting data."""
+
+ _temp_prefix = "temp-vertex-sdk-e2e-forecasting"
+
+ @pytest.mark.parametrize(
+ "training_job",
+ [
+ training_jobs.AutoMLForecastingTrainingJob,
+ ],
+ )
+ def test_end_to_end_forecasting(self, shared_state, training_job):
+ """Builds a dataset, trains models, and gets batch predictions."""
+ resources = []
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ )
+ try:
+ ds = aiplatform.TimeSeriesDataset.create(
+ display_name=self._make_display_name("dataset"),
+ bq_source=[_TRAINING_DATASET_BQ_PATH],
+ sync=False,
+ create_request_timeout=180.0,
+ )
+ resources.append(ds)
+
+ time_column = "date"
+ time_series_identifier_column = "store_name"
+ target_column = "sale_dollars"
+ column_specs = {
+ time_column: "timestamp",
+ target_column: "numeric",
+ "city": "categorical",
+ "zip_code": "categorical",
+ "county": "categorical",
+ }
+
+ job = training_job(
+ display_name=self._make_display_name("train-housing-forecasting"),
+ optimization_objective="minimize-rmse",
+ column_specs=column_specs,
+ )
+ resources.append(job)
+
+ model = job.run(
+ dataset=ds,
+ target_column=target_column,
+ time_column=time_column,
+ time_series_identifier_column=time_series_identifier_column,
+ available_at_forecast_columns=[time_column],
+ unavailable_at_forecast_columns=[target_column],
+ time_series_attribute_columns=["city", "zip_code", "county"],
+ forecast_horizon=30,
+ context_window=30,
+ data_granularity_unit="day",
+ data_granularity_count=1,
+ budget_milli_node_hours=1000,
+ holiday_regions=["GLOBAL"],
+ hierarchy_group_total_weight=1,
+ window_stride_length=1,
+ model_display_name=self._make_display_name("forecasting-liquor-model"),
+ sync=False,
+ )
+ resources.append(model)
+
+ batch_prediction_job = model.batch_predict(
+ job_display_name=self._make_display_name("forecasting-liquor-model"),
+ instances_format="bigquery",
+ predictions_format="csv",
+ machine_type="n1-standard-4",
+ bigquery_source=_PREDICTION_DATASET_BQ_PATH,
+ gcs_destination_prefix=(
+ f'gs://{shared_state["staging_bucket_name"]}/bp_results/'
+ ),
+ sync=False,
+ )
+ resources.append(batch_prediction_job)
+
+ batch_prediction_job.wait()
+ model.wait()
+ assert job.state == pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ assert batch_prediction_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
+ finally:
+ for resource in resources:
+ resource.delete()
+
+
+@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
+class TestEndToEndForecasting2(e2e_base.TestEndToEnd):
+ """End to end system test of the Vertex SDK with forecasting data."""
+
+ _temp_prefix = "temp-vertex-sdk-e2e-forecasting"
+
+ @pytest.mark.parametrize(
+ "training_job",
+ [
+ training_jobs.SequenceToSequencePlusForecastingTrainingJob,
+ ],
+ )
+ def test_end_to_end_forecasting(self, shared_state, training_job):
+ """Builds a dataset, trains models, and gets batch predictions."""
+ resources = []
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ )
+ try:
+ ds = aiplatform.TimeSeriesDataset.create(
+ display_name=self._make_display_name("dataset"),
+ bq_source=[_TRAINING_DATASET_BQ_PATH],
+ sync=False,
+ create_request_timeout=180.0,
+ )
+ resources.append(ds)
+
+ time_column = "date"
+ time_series_identifier_column = "store_name"
+ target_column = "sale_dollars"
+ column_specs = {
+ time_column: "timestamp",
+ target_column: "numeric",
+ "city": "categorical",
+ "zip_code": "categorical",
+ "county": "categorical",
+ }
+
+ job = training_job(
+ display_name=self._make_display_name("train-housing-forecasting"),
+ optimization_objective="minimize-rmse",
+ column_specs=column_specs,
+ )
+ resources.append(job)
+
+ model = job.run(
+ dataset=ds,
+ target_column=target_column,
+ time_column=time_column,
+ time_series_identifier_column=time_series_identifier_column,
+ available_at_forecast_columns=[time_column],
+ unavailable_at_forecast_columns=[target_column],
+ time_series_attribute_columns=["city", "zip_code", "county"],
+ forecast_horizon=30,
+ context_window=30,
+ data_granularity_unit="day",
+ data_granularity_count=1,
+ budget_milli_node_hours=1000,
+ holiday_regions=["GLOBAL"],
+ hierarchy_group_total_weight=1,
+ window_stride_length=1,
+ model_display_name=self._make_display_name("forecasting-liquor-model"),
+ sync=False,
+ )
+ resources.append(model)
+
+ batch_prediction_job = model.batch_predict(
+ job_display_name=self._make_display_name("forecasting-liquor-model"),
+ instances_format="bigquery",
+ predictions_format="csv",
+ machine_type="n1-standard-4",
+ bigquery_source=_PREDICTION_DATASET_BQ_PATH,
+ gcs_destination_prefix=(
+ f'gs://{shared_state["staging_bucket_name"]}/bp_results/'
+ ),
+ sync=False,
+ )
+ resources.append(batch_prediction_job)
+
+ batch_prediction_job.wait()
+ model.wait()
+ assert job.state == pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ assert batch_prediction_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
+ finally:
+ for resource in resources:
+ resource.delete()
+
+
+@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
+class TestEndToEndForecasting3(e2e_base.TestEndToEnd):
+ """End to end system test of the Vertex SDK with forecasting data."""
+
+ _temp_prefix = "temp-vertex-sdk-e2e-forecasting"
+
+ @pytest.mark.parametrize(
+ "training_job",
+ [
+ training_jobs.TemporalFusionTransformerForecastingTrainingJob,
+ ],
+ )
+ def test_end_to_end_forecasting(self, shared_state, training_job):
+ """Builds a dataset, trains models, and gets batch predictions."""
+ resources = []
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ )
+ try:
+ ds = aiplatform.TimeSeriesDataset.create(
+ display_name=self._make_display_name("dataset"),
+ bq_source=[_TRAINING_DATASET_BQ_PATH],
+ sync=False,
+ create_request_timeout=180.0,
+ )
+ resources.append(ds)
+
+ time_column = "date"
+ time_series_identifier_column = "store_name"
+ target_column = "sale_dollars"
+ column_specs = {
+ time_column: "timestamp",
+ target_column: "numeric",
+ "city": "categorical",
+ "zip_code": "categorical",
+ "county": "categorical",
+ }
+
+ job = training_job(
+ display_name=self._make_display_name("train-housing-forecasting"),
+ optimization_objective="minimize-rmse",
+ column_specs=column_specs,
+ )
+ resources.append(job)
+
+ model = job.run(
+ dataset=ds,
+ target_column=target_column,
+ time_column=time_column,
+ time_series_identifier_column=time_series_identifier_column,
+ available_at_forecast_columns=[time_column],
+ unavailable_at_forecast_columns=[target_column],
+ time_series_attribute_columns=["city", "zip_code", "county"],
+ forecast_horizon=30,
+ context_window=30,
+ data_granularity_unit="day",
+ data_granularity_count=1,
+ budget_milli_node_hours=1000,
+ holiday_regions=["GLOBAL"],
+ hierarchy_group_total_weight=1,
+ window_stride_length=1,
+ model_display_name=self._make_display_name("forecasting-liquor-model"),
+ sync=False,
+ )
+ resources.append(model)
+
+ batch_prediction_job = model.batch_predict(
+ job_display_name=self._make_display_name("forecasting-liquor-model"),
+ instances_format="bigquery",
+ predictions_format="csv",
+ machine_type="n1-standard-4",
+ bigquery_source=_PREDICTION_DATASET_BQ_PATH,
+ gcs_destination_prefix=(
+ f'gs://{shared_state["staging_bucket_name"]}/bp_results/'
+ ),
+ sync=False,
+ )
+ resources.append(batch_prediction_job)
+
+ batch_prediction_job.wait()
+ model.wait()
+ assert job.state == pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ assert batch_prediction_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
+ finally:
+ for resource in resources:
+ resource.delete()
+
+
+@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
+class TestEndToEndForecasting4(e2e_base.TestEndToEnd):
+ """End to end system test of the Vertex SDK with forecasting data."""
+
+ _temp_prefix = "temp-vertex-sdk-e2e-forecasting"
+
+ @pytest.mark.parametrize(
+ "training_job",
+ [
+ training_jobs.TimeSeriesDenseEncoderForecastingTrainingJob,
+ ],
+ )
+ def test_end_to_end_forecasting(self, shared_state, training_job):
+ """Builds a dataset, trains models, and gets batch predictions."""
+ resources = []
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ )
+ try:
+ ds = aiplatform.TimeSeriesDataset.create(
+ display_name=self._make_display_name("dataset"),
+ bq_source=[_TRAINING_DATASET_BQ_PATH],
+ sync=False,
+ create_request_timeout=180.0,
+ )
+ resources.append(ds)
+
+ time_column = "date"
+ time_series_identifier_column = "store_name"
+ target_column = "sale_dollars"
+ column_specs = {
+ time_column: "timestamp",
+ target_column: "numeric",
+ "city": "categorical",
+ "zip_code": "categorical",
+ "county": "categorical",
+ }
+
+ job = training_job(
+ display_name=self._make_display_name("train-housing-forecasting"),
+ optimization_objective="minimize-rmse",
+ column_specs=column_specs,
+ )
+ resources.append(job)
+
+ model = job.run(
+ dataset=ds,
+ target_column=target_column,
+ time_column=time_column,
+ time_series_identifier_column=time_series_identifier_column,
+ available_at_forecast_columns=[time_column],
+ unavailable_at_forecast_columns=[target_column],
+ time_series_attribute_columns=["city", "zip_code", "county"],
+ forecast_horizon=30,
+ context_window=30,
+ data_granularity_unit="day",
+ data_granularity_count=1,
+ budget_milli_node_hours=1000,
+ holiday_regions=["GLOBAL"],
+ hierarchy_group_total_weight=1,
+ window_stride_length=1,
+ model_display_name=self._make_display_name("forecasting-liquor-model"),
+ sync=False,
+ )
+ resources.append(model)
+
+ batch_prediction_job = model.batch_predict(
+ job_display_name=self._make_display_name("forecasting-liquor-model"),
+ instances_format="bigquery",
+ predictions_format="csv",
+ machine_type="n1-standard-4",
+ bigquery_source=_PREDICTION_DATASET_BQ_PATH,
+ gcs_destination_prefix=(
+ f'gs://{shared_state["staging_bucket_name"]}/bp_results/'
+ ),
+ sync=False,
+ )
+ resources.append(batch_prediction_job)
+
+ batch_prediction_job.wait()
+ model.wait()
+ assert job.state == pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ assert batch_prediction_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
+ finally:
+ for resource in resources:
+ resource.delete()
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_metadata_schema.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_metadata_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..87afff662b47418b9471d7e5eccda6c0c5c7abca
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_metadata_schema.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import json
+
+import pytest
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform.metadata.schema.google import (
+ artifact_schema as google_artifact_schema,
+)
+from google.cloud.aiplatform.metadata.schema.system import (
+ artifact_schema as system_artifact_schema,
+)
+from google.cloud.aiplatform.metadata.schema.system import (
+ execution_schema as system_execution_schema,
+)
+from tests.system.aiplatform import e2e_base
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestMetadataSchema(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "tmpvrtxmlmdsdk-e2e"
+
+ def setup_class(cls):
+ # Truncating the name because of resource id constraints from the service
+ cls.artifact_display_name = cls._make_display_name("base-artifact")[:30]
+ cls.artifact_id = cls._make_display_name("base-artifact-id")[:30]
+ cls.artifact_uri = cls._make_display_name("base-uri")
+ cls.artifact_metadata = {"test_property": "test_value"}
+ cls.artifact_description = cls._make_display_name("base-description")
+ cls.execution_display_name = cls._make_display_name("base-execution")[:30]
+ cls.execution_description = cls._make_display_name("base-description")
+
+ def test_system_dataset_artifact_create(self, shared_state):
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ artifact = system_artifact_schema.Dataset(
+ display_name=self.artifact_display_name,
+ uri=self.artifact_uri,
+ metadata=self.artifact_metadata,
+ description=self.artifact_description,
+ ).create()
+
+ shared_state["resources"] = [artifact]
+
+ assert artifact.display_name == self.artifact_display_name
+ assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
+ self.artifact_metadata, sort_keys=True
+ )
+ assert artifact.schema_title == "system.Dataset"
+ assert artifact.description == self.artifact_description
+ assert "/metadataStores/default/artifacts/" in artifact.resource_name
+
+ def test_google_dataset_artifact_create(self, shared_state):
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ vertex_dataset_name = f"projects/{e2e_base._PROJECT}/locations/{e2e_base._LOCATION}/datasets/dataset"
+ artifact = google_artifact_schema.VertexDataset(
+ vertex_dataset_name=vertex_dataset_name,
+ display_name=self.artifact_display_name,
+ metadata=self.artifact_metadata,
+ description=self.artifact_description,
+ ).create()
+
+ shared_state["resources"].append(artifact)
+
+ expected_metadata = self.artifact_metadata.copy()
+ expected_metadata["resourceName"] = vertex_dataset_name
+
+ assert artifact.display_name == self.artifact_display_name
+ assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
+ expected_metadata, sort_keys=True
+ )
+ assert artifact.schema_title == "google.VertexDataset"
+ assert artifact.description == self.artifact_description
+ assert "/metadataStores/default/artifacts/" in artifact.resource_name
+ assert (
+ artifact.uri
+ == f"https://{e2e_base._LOCATION}-aiplatform.googleapis.com/v1/{vertex_dataset_name}"
+ )
+
+ def test_execution_create_using_system_schema_class(self, shared_state):
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ execution = system_execution_schema.CustomJobExecution(
+ display_name=self.execution_display_name,
+ description=self.execution_description,
+ ).create()
+
+ shared_state["resources"].append(execution)
+
+ assert execution.display_name == self.execution_display_name
+ assert execution.schema_title == "system.CustomJobExecution"
+ assert execution.description == self.execution_description
+ assert "/metadataStores/default/executions/" in execution.resource_name
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_tabular.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_tabular.py
new file mode 100644
index 0000000000000000000000000000000000000000..20b49999d381f861b88c7936cbf399aecb877a65
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_e2e_tabular.py
@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+
+import pytest
+
+from google.cloud import storage
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform.compat.types import (
+ job_state as gca_job_state,
+ pipeline_state as gca_pipeline_state,
+)
+from tests.system.aiplatform import e2e_base
+
+
+_DATASET_TRAINING_SRC = "gs://cloud-samples-data-us-central1/vertex-ai/structured_data/california_housing/california-housing-data.csv"
+_DATASET_BATCH_PREDICT_SRC = "gs://cloud-samples-data-us-central1/vertex-ai/batch-prediction/california_housing_batch_predict.jsonl"
+_DIR_NAME = os.path.dirname(os.path.abspath(__file__))
+_LOCAL_TRAINING_SCRIPT_PATH = os.path.join(
+ _DIR_NAME, "test_resources/california_housing_training_script.py"
+)
+_INSTANCE = {
+ "longitude": -124.35,
+ "latitude": 40.54,
+ "housing_median_age": 52.0,
+ "total_rooms": 1820.0,
+ "total_bedrooms": 300.0,
+ "population": 806,
+ "households": 270.0,
+ "median_income": 3.014700,
+}
+
+
+@pytest.mark.usefixtures(
+ "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources"
+)
+class TestEndToEndTabular(e2e_base.TestEndToEnd):
+ """End to end system test of the Vertex SDK with tabular data adapted from
+ reference notebook http://shortn/_eyoNx3SN0X"""
+
+ _temp_prefix = "temp-vertex-sdk-e2e-tabular"
+
+ def test_end_to_end_tabular(self, shared_state):
+ """Build dataset, train a custom and AutoML model, deploy, and get predictions"""
+
+ # Collection of resources generated by this test, to be deleted during teardown
+ shared_state["resources"] = []
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ )
+
+ # Create and import to single managed dataset for both training jobs
+
+ ds = aiplatform.TabularDataset.create(
+ display_name=self._make_display_name("dataset"),
+ gcs_source=[_DATASET_TRAINING_SRC],
+ sync=False,
+ create_request_timeout=180.0,
+ )
+
+ shared_state["resources"].extend([ds])
+
+ # Define both training jobs
+
+ custom_job = aiplatform.CustomTrainingJob(
+ display_name=self._make_display_name("train-housing-custom"),
+ script_path=_LOCAL_TRAINING_SCRIPT_PATH,
+ container_uri="gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest",
+ requirements=["gcsfs==0.7.1"],
+ model_serving_container_image_uri="gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest",
+ )
+
+ automl_job = aiplatform.AutoMLTabularTrainingJob(
+ display_name=self._make_display_name("train-housing-automl"),
+ optimization_prediction_type="regression",
+ optimization_objective="minimize-rmse",
+ )
+
+ # Kick off both training jobs, AutoML job will take approx one hour to run
+
+ custom_model = custom_job.run(
+ ds,
+ replica_count=1,
+ model_display_name=self._make_display_name("custom-housing-model"),
+ timeout=1234,
+ restart_job_on_worker_restart=True,
+ enable_web_access=True,
+ sync=False,
+ create_request_timeout=None,
+ disable_retries=True,
+ )
+
+ automl_model = automl_job.run(
+ dataset=ds,
+ target_column="median_house_value",
+ model_display_name=self._make_display_name("automl-housing-model"),
+ sync=False,
+ )
+
+ shared_state["resources"].extend(
+ [automl_job, automl_model, custom_job, custom_model]
+ )
+
+ # Deploy both models after training completes
+ custom_endpoint = custom_model.deploy(machine_type="n1-standard-4", sync=False)
+ automl_endpoint = automl_model.deploy(machine_type="n1-standard-4", sync=False)
+ shared_state["resources"].extend([automl_endpoint, custom_endpoint])
+
+ custom_batch_prediction_job = custom_model.batch_predict(
+ job_display_name=self._make_display_name("custom-housing-model"),
+ instances_format="jsonl",
+ machine_type="n1-standard-4",
+ gcs_source=_DATASET_BATCH_PREDICT_SRC,
+ gcs_destination_prefix=f'gs://{shared_state["staging_bucket_name"]}/bp_results/',
+ sync=False,
+ )
+
+ shared_state["resources"].append(custom_batch_prediction_job)
+
+ in_progress_done_check = custom_job.done()
+ custom_job.wait_for_resource_creation()
+
+ automl_job.wait_for_resource_creation()
+ # custom_batch_prediction_job.wait_for_resource_creation()
+
+ # Send online prediction with same instance to both deployed models
+ # This sample is taken from an observation where median_house_value = 94600
+ custom_endpoint.wait()
+
+ # Check scheduling is correctly set
+ assert (
+ custom_job._gca_resource.training_task_inputs["scheduling"]["timeout"]
+ == "1234s"
+ )
+ assert (
+ custom_job._gca_resource.training_task_inputs["scheduling"][
+ "restartJobOnWorkerRestart"
+ ]
+ is True
+ )
+
+ custom_prediction = custom_endpoint.predict([_INSTANCE], timeout=180.0)
+
+ custom_batch_prediction_job.wait()
+
+ automl_endpoint.wait()
+ automl_prediction = automl_endpoint.predict(
+ [{k: str(v) for k, v in _INSTANCE.items()}], # Cast int values to strings
+ timeout=180.0,
+ )
+
+ # Test lazy loading of Endpoint, check getter was never called after predict()
+ custom_endpoint = aiplatform.Endpoint(custom_endpoint.resource_name)
+ custom_endpoint.predict([_INSTANCE])
+
+ completion_done_check = custom_job.done()
+ assert custom_endpoint._skipped_getter_call()
+
+ assert (
+ custom_job.state
+ == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ assert (
+ automl_job.state
+ == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ assert (
+ custom_batch_prediction_job.state
+ == gca_job_state.JobState.JOB_STATE_SUCCEEDED
+ )
+
+ # Ensure batch prediction errors output file is empty
+ batch_predict_gcs_output_path = (
+ custom_batch_prediction_job.output_info.gcs_output_directory
+ )
+ client = storage.Client()
+
+ for blob in client.list_blobs(
+ bucket_or_name=shared_state["staging_bucket_name"],
+ prefix=f"bp_results/{batch_predict_gcs_output_path.split('/')[-1]}",
+ ):
+ # There are always 2 files in this output path: 1 with errors, 1 with predictions
+ if "errors" in blob.name:
+ error_output_filestr = blob.download_as_string().decode()
+ assert not error_output_filestr
+
+ # Ensure a single prediction was returned
+ assert len(custom_prediction.predictions) == 1
+ assert len(automl_prediction.predictions) == 1
+
+ # Ensure the models are remotely accurate
+ try:
+ automl_result = automl_prediction.predictions[0]["value"]
+ custom_result = custom_prediction.predictions[0][0]
+ assert 200000 > automl_result > 50000
+ assert 200000 > custom_result > 50000
+ except KeyError as e:
+ raise RuntimeError("Unexpected prediction response structure:", e)
+
+ # Check done() method works correctly
+ assert in_progress_done_check is False
+ assert completion_done_check is True
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_experiments.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_experiments.py
new file mode 100644
index 0000000000000000000000000000000000000000..9dbaf218863ee39d547dd6ccc62dfd04aa662154
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_experiments.py
@@ -0,0 +1,769 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tempfile
+
+import uuid
+import pytest
+
+from google.api_core import exceptions
+from google.cloud import storage
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform.utils import rest_utils
+from google.cloud.aiplatform.metadata.schema.google import (
+ artifact_schema as google_artifact_schema,
+)
+from tests.system.aiplatform import e2e_base
+from tests.system.aiplatform import test_model_upload
+
+import numpy as np
+import sklearn
+from sklearn.linear_model import LinearRegression
+
+
+_RUN = "run-1"
+_PARAMS = {"sdk-param-test-1": 0.1, "sdk-param-test-2": 0.2}
+_METRICS = {"sdk-metric-test-1": 0.8, "sdk-metric-test-2": 100.0}
+
+_RUN_2 = "run-2"
+_PARAMS_2 = {"sdk-param-test-1": 0.2, "sdk-param-test-2": 0.4}
+_METRICS_2 = {"sdk-metric-test-1": 1.6, "sdk-metric-test-2": 200.0}
+
+_READ_TIME_SERIES_BATCH_SIZE = 20
+
+_TIME_SERIES_METRIC_KEY = "accuracy"
+
+_CLASSIFICATION_METRICS = {
+ "display_name": "my-classification-metrics",
+ "labels": ["cat", "dog"],
+ "matrix": [[9, 1], [1, 9]],
+ "fpr": [0.1, 0.5, 0.9],
+ "tpr": [0.1, 0.7, 0.9],
+ "threshold": [0.9, 0.5, 0.1],
+}
+
+
+@pytest.mark.usefixtures(
+ "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources"
+)
+class TestExperiments(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "tmpvrtxsdk-e2e"
+
+ def setup_class(cls):
+ cls._experiment_name = cls._make_display_name("")[:64]
+ cls._experiment_name_2 = cls._make_display_name("")[:64]
+ cls._experiment_model_name = cls._make_display_name("sklearn-model")[:64]
+ cls._dataset_artifact_name = cls._make_display_name("")[:64]
+ cls._dataset_artifact_uri = cls._make_display_name("ds-uri")
+ cls._pipeline_job_id = cls._make_display_name("job-id")
+
+ def test_create_experiment(self, shared_state):
+
+ # Truncating the name because of resource id constraints from the service
+ tensorboard = aiplatform.Tensorboard.create(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ display_name=self._experiment_name,
+ )
+
+ shared_state["resources"] = [tensorboard]
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ experiment_tensorboard=tensorboard,
+ )
+
+ shared_state["resources"].append(
+ aiplatform.metadata.metadata._experiment_tracker.experiment
+ )
+
+ def test_get_experiment(self):
+ experiment = aiplatform.Experiment(
+ experiment_name=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ assert experiment.name == self._experiment_name
+
+ def test_start_run(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ run = aiplatform.start_run(_RUN)
+ assert run.name == _RUN
+
+ def test_get_run(self):
+ run = aiplatform.ExperimentRun(
+ run_name=_RUN,
+ experiment=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ assert run.name == _RUN
+ assert run.state == aiplatform.gapic.Execution.State.RUNNING
+
+ def test_log_params(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN, resume=True)
+ aiplatform.log_params(_PARAMS)
+ run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
+ assert run.get_params() == _PARAMS
+
+ def test_log_metrics(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN, resume=True)
+ aiplatform.log_metrics(_METRICS)
+ run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
+ assert run.get_metrics() == _METRICS
+
+ def test_log_time_series_metrics(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+
+ aiplatform.start_run(_RUN, resume=True)
+
+ for i in range(5):
+ aiplatform.log_time_series_metrics({_TIME_SERIES_METRIC_KEY: i})
+
+ run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
+
+ time_series_result = run.get_time_series_data_frame()[
+ [_TIME_SERIES_METRIC_KEY, "step"]
+ ].to_dict("list")
+
+ assert time_series_result == {
+ "step": list(range(1, 6)),
+ _TIME_SERIES_METRIC_KEY: [float(value) for value in range(5)],
+ }
+
+ def test_get_time_series_data_frame_batch_read_success(self, shared_state):
+ tensorboard = aiplatform.Tensorboard.create(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ display_name=self._experiment_name_2,
+ )
+ shared_state["resources"] = [tensorboard]
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name_2,
+ experiment_tensorboard=tensorboard,
+ )
+ shared_state["resources"].append(
+ aiplatform.metadata.metadata._experiment_tracker.experiment
+ )
+ aiplatform.start_run(_RUN)
+ for i in range(_READ_TIME_SERIES_BATCH_SIZE + 1):
+ aiplatform.log_time_series_metrics({f"{_TIME_SERIES_METRIC_KEY}-{i}": 1})
+
+ run = aiplatform.ExperimentRun(
+ run_name=_RUN, experiment=self._experiment_name_2
+ )
+ time_series_result = run.get_time_series_data_frame()
+
+ assert len(time_series_result) > _READ_TIME_SERIES_BATCH_SIZE
+
+ def test_log_classification_metrics(self, shared_state):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN, resume=True)
+ classification_metrics = aiplatform.log_classification_metrics(
+ display_name=_CLASSIFICATION_METRICS["display_name"],
+ labels=_CLASSIFICATION_METRICS["labels"],
+ matrix=_CLASSIFICATION_METRICS["matrix"],
+ fpr=_CLASSIFICATION_METRICS["fpr"],
+ tpr=_CLASSIFICATION_METRICS["tpr"],
+ threshold=_CLASSIFICATION_METRICS["threshold"],
+ )
+
+ run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
+ metrics = run.get_classification_metrics()[0]
+ metric_artifact = aiplatform.Artifact(metrics.pop("id"))
+ assert metrics == _CLASSIFICATION_METRICS
+ assert isinstance(
+ classification_metrics, google_artifact_schema.ClassificationMetrics
+ )
+ metric_artifact.delete()
+
+ def test_log_model(self, shared_state):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN, resume=True)
+
+ train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
+ train_y = np.dot(train_x, np.array([1, 2])) + 3
+ model = LinearRegression()
+ model.fit(train_x, train_y)
+
+ model_artifact = aiplatform.log_model(
+ model=model,
+ artifact_id=self._experiment_model_name,
+ uri=f"gs://{shared_state['staging_bucket_name']}/sklearn-model",
+ input_example=train_x,
+ )
+ shared_state["resources"].append(model_artifact)
+
+ run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
+ experiment_model = run.get_experiment_models()[0]
+ assert "sklearn-model" in experiment_model.name
+ assert (
+ experiment_model.uri
+ == f"gs://{shared_state['staging_bucket_name']}/sklearn-model"
+ )
+ assert experiment_model.get_model_info() == {
+ "model_class": "sklearn.linear_model._base.LinearRegression",
+ "framework_name": "sklearn",
+ "framework_version": sklearn.__version__,
+ "input_example": {
+ "type": "numpy.ndarray",
+ "data": train_x.tolist(),
+ },
+ }
+ experiment_model.delete()
+
+ def test_create_artifact(self, shared_state):
+ ds = aiplatform.Artifact.create(
+ schema_title="system.Dataset",
+ resource_id=self._dataset_artifact_name,
+ uri=self._dataset_artifact_uri,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ shared_state["resources"].append(ds)
+ assert ds.uri == self._dataset_artifact_uri
+
+ def test_get_artifact_by_uri(self):
+ ds = aiplatform.Artifact.get_with_uri(
+ uri=self._dataset_artifact_uri,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ assert ds.uri == self._dataset_artifact_uri
+ assert ds.name == self._dataset_artifact_name
+
+ def test_log_execution_and_artifact(self, shared_state):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN, resume=True)
+
+ with aiplatform.start_execution(
+ schema_title="system.ContainerExecution",
+ resource_id=self._make_display_name("execution"),
+ ) as execution:
+
+ shared_state["resources"].append(execution)
+
+ ds = aiplatform.Artifact(
+ artifact_name=self._dataset_artifact_name,
+ )
+ execution.assign_input_artifacts([ds])
+
+ model = aiplatform.Artifact.create(schema_title="system.Model")
+ shared_state["resources"].append(model)
+
+ storage_client = storage.Client(project=e2e_base._PROJECT)
+ model_blob = storage.Blob.from_string(
+ uri=test_model_upload._XGBOOST_MODEL_URI, client=storage_client
+ )
+ model_path = tempfile.mktemp() + ".my_model.xgb"
+ model_blob.download_to_filename(filename=model_path)
+
+ vertex_model = aiplatform.Model.upload_xgboost_model_file(
+ display_name=self._make_display_name("model"),
+ model_file_path=model_path,
+ )
+ shared_state["resources"].append(vertex_model)
+
+ execution.assign_output_artifacts([model, vertex_model])
+
+ input_artifacts = execution.get_input_artifacts()
+ assert input_artifacts[0].name == ds.name
+
+ output_artifacts = execution.get_output_artifacts()
+ # system.Model, google.VertexModel
+ output_artifacts.sort(key=lambda artifact: artifact.schema_title, reverse=True)
+
+ shared_state["resources"].append(output_artifacts[-1])
+
+ assert output_artifacts[0].name == model.name
+ assert output_artifacts[1].uri == rest_utils.make_gcp_resource_rest_url(
+ resource=vertex_model
+ )
+
+ run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
+ executions = run.get_executions()
+ assert executions[0].name == execution.name
+
+ artifacts = run.get_artifacts()
+
+ # system.Model, system.Dataset, google.VertexTensorboardRun, google.VertexModel
+ artifacts.sort(key=lambda artifact: artifact.schema_title, reverse=True)
+ assert artifacts.pop().uri == rest_utils.make_gcp_resource_rest_url(
+ resource=vertex_model
+ )
+
+ # tensorboard run artifact is also included
+ assert sorted([artifact.name for artifact in artifacts]) == sorted(
+ [ds.name, model.name, run._tensorboard_run_id(run.resource_id)]
+ )
+
+ def test_end_run(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN, resume=True)
+ aiplatform.end_run()
+ run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
+ assert run.state == aiplatform.gapic.Execution.State.COMPLETE
+
+ def test_run_context_manager(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ with aiplatform.start_run(_RUN_2) as run:
+ run.log_params(_PARAMS_2)
+ run.log_metrics(_METRICS_2)
+ assert run.state == aiplatform.gapic.Execution.State.RUNNING
+
+ assert run.state == aiplatform.gapic.Execution.State.COMPLETE
+
+ def test_add_pipeline_job_to_experiment(self, shared_state):
+ import kfp.v2.dsl as dsl
+ import kfp.v2.compiler as compiler
+ from kfp.v2.dsl import component, Metrics, Output
+
+ @component
+ def trainer(
+ learning_rate: float, dropout_rate: float, metrics: Output[Metrics]
+ ):
+ metrics.log_metric("accuracy", 0.8)
+ metrics.log_metric("mse", 1.2)
+
+ @dsl.pipeline(name=self._make_display_name("pipeline"))
+ def pipeline(learning_rate: float, dropout_rate: float):
+ trainer(learning_rate=learning_rate, dropout_rate=dropout_rate)
+
+ compiler.Compiler().compile(
+ pipeline_func=pipeline, package_path="pipeline.json"
+ )
+
+ job = aiplatform.PipelineJob(
+ display_name=self._make_display_name("experiment pipeline job"),
+ template_path="pipeline.json",
+ job_id=self._pipeline_job_id,
+ pipeline_root=f'gs://{shared_state["staging_bucket_name"]}',
+ parameter_values={"learning_rate": 0.1, "dropout_rate": 0.2},
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ job.submit(
+ experiment=self._experiment_name,
+ )
+
+ shared_state["resources"].append(job)
+
+ job.wait()
+
+ test_experiment = job.get_associated_experiment()
+
+ assert test_experiment.name == self._experiment_name
+
+ def test_get_experiments_df(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+
+ df = aiplatform.get_experiment_df()
+
+ pipelines_param_and_metrics = {
+ "param.dropout_rate": 0.2,
+ "param.learning_rate": 0.1,
+ "metric.accuracy": 0.8,
+ "metric.mse": 1.2,
+ }
+
+ true_df_dict_1 = {f"metric.{key}": value for key, value in _METRICS.items()}
+ for key, value in _PARAMS.items():
+ true_df_dict_1[f"param.{key}"] = value
+
+ true_df_dict_1["experiment_name"] = self._experiment_name
+ true_df_dict_1["run_name"] = _RUN
+ true_df_dict_1["state"] = aiplatform.gapic.Execution.State.COMPLETE.name
+ true_df_dict_1["run_type"] = aiplatform.metadata.constants.SYSTEM_EXPERIMENT_RUN
+ true_df_dict_1[f"time_series_metric.{_TIME_SERIES_METRIC_KEY}"] = 4.0
+
+ true_df_dict_2 = {f"metric.{key}": value for key, value in _METRICS_2.items()}
+ for key, value in _PARAMS_2.items():
+ true_df_dict_2[f"param.{key}"] = value
+
+ true_df_dict_2["experiment_name"] = self._experiment_name
+ true_df_dict_2["run_name"] = _RUN_2
+ true_df_dict_2["state"] = aiplatform.gapic.Execution.State.COMPLETE.name
+ true_df_dict_2["run_type"] = aiplatform.metadata.constants.SYSTEM_EXPERIMENT_RUN
+ true_df_dict_2[f"time_series_metric.{_TIME_SERIES_METRIC_KEY}"] = 0.0
+ true_df_dict_2.update(pipelines_param_and_metrics)
+
+ true_df_dict_3 = {
+ "experiment_name": self._experiment_name,
+ "run_name": self._pipeline_job_id,
+ "run_type": aiplatform.metadata.constants.SYSTEM_PIPELINE_RUN,
+ "state": aiplatform.gapic.Execution.State.COMPLETE.name,
+ "time_series_metric.accuracy": 0.0,
+ }
+
+ true_df_dict_3.update(pipelines_param_and_metrics)
+
+ for key in pipelines_param_and_metrics.keys():
+ true_df_dict_1[key] = 0.0
+ true_df_dict_2[key] = 0.0
+
+ for key in _PARAMS.keys():
+ true_df_dict_3[f"param.{key}"] = 0.0
+
+ for key in _METRICS.keys():
+ true_df_dict_3[f"metric.{key}"] = 0.0
+
+ assert sorted(
+ [true_df_dict_1, true_df_dict_2, true_df_dict_3],
+ key=lambda d: d["run_name"],
+ ) == sorted(df.fillna(0.0).to_dict("records"), key=lambda d: d["run_name"])
+
+ def test_get_experiments_df_include_time_series_false(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+
+ df = aiplatform.get_experiment_df(include_time_series=False)
+
+ pipelines_param_and_metrics = {
+ "param.dropout_rate": 0.2,
+ "param.learning_rate": 0.1,
+ "metric.accuracy": 0.8,
+ "metric.mse": 1.2,
+ }
+
+ true_df_dict_1 = {f"metric.{key}": value for key, value in _METRICS.items()}
+ for key, value in _PARAMS.items():
+ true_df_dict_1[f"param.{key}"] = value
+
+ true_df_dict_1["experiment_name"] = self._experiment_name
+ true_df_dict_1["run_name"] = _RUN
+ true_df_dict_1["state"] = aiplatform.gapic.Execution.State.COMPLETE.name
+ true_df_dict_1["run_type"] = aiplatform.metadata.constants.SYSTEM_EXPERIMENT_RUN
+
+ true_df_dict_2 = {f"metric.{key}": value for key, value in _METRICS_2.items()}
+ for key, value in _PARAMS_2.items():
+ true_df_dict_2[f"param.{key}"] = value
+
+ true_df_dict_2["experiment_name"] = self._experiment_name
+ true_df_dict_2["run_name"] = _RUN_2
+ true_df_dict_2["state"] = aiplatform.gapic.Execution.State.COMPLETE.name
+ true_df_dict_2["run_type"] = aiplatform.metadata.constants.SYSTEM_EXPERIMENT_RUN
+ true_df_dict_2.update(pipelines_param_and_metrics)
+
+ true_df_dict_3 = {
+ "experiment_name": self._experiment_name,
+ "run_name": self._pipeline_job_id,
+ "run_type": aiplatform.metadata.constants.SYSTEM_PIPELINE_RUN,
+ "state": aiplatform.gapic.Execution.State.COMPLETE.name,
+ }
+
+ true_df_dict_3.update(pipelines_param_and_metrics)
+
+ for key in pipelines_param_and_metrics.keys():
+ true_df_dict_1[key] = 0.0
+ true_df_dict_2[key] = 0.0
+
+ for key in _PARAMS.keys():
+ true_df_dict_3[f"param.{key}"] = 0.0
+
+ for key in _METRICS.keys():
+ true_df_dict_3[f"metric.{key}"] = 0.0
+
+ assert sorted(
+ [true_df_dict_1, true_df_dict_2, true_df_dict_3],
+ key=lambda d: d["run_name"],
+ ) == sorted(df.fillna(0.0).to_dict("records"), key=lambda d: d["run_name"])
+
+ def test_delete_run_does_not_exist_raises_exception(self):
+ run = aiplatform.ExperimentRun(
+ run_name=_RUN,
+ experiment=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ run.delete(delete_backing_tensorboard_run=True)
+
+ with pytest.raises(exceptions.NotFound):
+ aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
+
+ def test_delete_run_success(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN)
+ run = aiplatform.ExperimentRun(
+ run_name=_RUN,
+ experiment=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ aiplatform.end_run()
+
+ run.delete(delete_backing_tensorboard_run=True)
+
+ with pytest.raises(exceptions.NotFound):
+ aiplatform.ExperimentRun(
+ run_name=_RUN,
+ experiment=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ def test_reuse_run_success(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN)
+ run = aiplatform.ExperimentRun(
+ run_name=_RUN,
+ experiment=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ aiplatform.end_run()
+ run.delete(delete_backing_tensorboard_run=True)
+
+ aiplatform.start_run(_RUN)
+ aiplatform.end_run()
+
+ run = aiplatform.ExperimentRun(
+ run_name=_RUN,
+ experiment=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ assert run.name == _RUN
+
+ def test_delete_run_then_tensorboard_success(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN, resume=True)
+ run = aiplatform.ExperimentRun(
+ run_name=_RUN,
+ experiment=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ aiplatform.end_run()
+ run.delete()
+ tensorboard_run_artifact = aiplatform.metadata.artifact.Artifact(
+ artifact_name=f"{self._experiment_name}-{_RUN}-tb-run"
+ )
+ tensorboard_run_resource = aiplatform.TensorboardRun(
+ tensorboard_run_artifact.metadata["resourceName"]
+ )
+ tensorboard_run_resource.delete()
+ tensorboard_run_artifact.delete()
+
+ aiplatform.start_run(_RUN)
+ aiplatform.end_run()
+
+ run = aiplatform.ExperimentRun(
+ run_name=_RUN,
+ experiment=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ assert run.name == _RUN
+
+ def test_delete_wout_backing_tensorboard_reuse_run_raises_exception(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ aiplatform.start_run(_RUN, resume=True)
+ run = aiplatform.ExperimentRun(
+ run_name=_RUN,
+ experiment=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ aiplatform.end_run()
+ run.delete()
+
+ with pytest.raises(ValueError):
+ aiplatform.start_run(_RUN)
+
+ def test_delete_experiment_does_not_exist_raises_exception(self):
+ experiment = aiplatform.Experiment(
+ experiment_name=self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ experiment.delete(delete_backing_tensorboard_runs=True)
+
+ with pytest.raises(exceptions.NotFound):
+ aiplatform.Experiment(experiment_name=self._experiment_name)
+
+ def test_init_associates_global_tensorboard_to_experiment(self, shared_state):
+
+ tensorboard = aiplatform.Tensorboard.create(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ display_name=self._make_display_name("")[:64],
+ )
+
+ shared_state["resources"] = [tensorboard]
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment_tensorboard=tensorboard,
+ )
+
+ assert (
+ aiplatform.metadata.metadata._experiment_tracker._global_tensorboard
+ == tensorboard
+ )
+
+ new_experiment_name = self._make_display_name("")[:64]
+ new_experiment_resource = aiplatform.Experiment.create(
+ experiment_name=new_experiment_name
+ )
+
+ shared_state["resources"].append(new_experiment_resource)
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=new_experiment_name,
+ )
+
+ assert (
+ new_experiment_resource._lookup_backing_tensorboard().resource_name
+ == tensorboard.resource_name
+ )
+
+ assert (
+ new_experiment_resource._metadata_context.metadata.get(
+ aiplatform.metadata.constants._BACKING_TENSORBOARD_RESOURCE_KEY
+ )
+ == tensorboard.resource_name
+ )
+
+ def test_get_backing_tensorboard_resource_returns_tensorboard(self, shared_state):
+ tensorboard = aiplatform.Tensorboard.create(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ display_name=self._make_display_name("")[:64],
+ )
+ shared_state["resources"] = [tensorboard]
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ experiment_tensorboard=tensorboard,
+ )
+ experiment = aiplatform.Experiment(
+ self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ assert (
+ experiment.get_backing_tensorboard_resource().resource_name
+ == tensorboard.resource_name
+ )
+
+ def test_get_backing_tensorboard_resource_returns_none(self):
+ new_experiment_name = f"example-{uuid.uuid1()}"
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=new_experiment_name,
+ experiment_tensorboard=False,
+ )
+ new_experiment = aiplatform.Experiment(
+ new_experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ assert new_experiment.get_backing_tensorboard_resource() is None
+
+ def test_delete_backing_tensorboard_experiment_run_success(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ experiment=self._experiment_name,
+ )
+ experiment = aiplatform.Experiment(
+ self._experiment_name,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ experiment.get_backing_tensorboard_resource().delete()
+ run = aiplatform.start_run(_RUN)
+ aiplatform.end_run()
+
+ assert experiment.get_backing_tensorboard_resource() is None
+ assert run.name == _RUN
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_featurestore.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_featurestore.py
new file mode 100644
index 0000000000000000000000000000000000000000..56013aef6becad9611ea04163ea2e94794d4bcb7
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_featurestore.py
@@ -0,0 +1,714 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import datetime
+import logging
+import pytest
+
+from google.cloud import aiplatform
+from tests.system.aiplatform import e2e_base
+
+import pandas as pd
+
+_TEST_USERS_ENTITY_TYPE_GCS_SRC = (
+ "gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/users.avro"
+)
+
+_TEST_READ_INSTANCE_SRC = "gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/movie_prediction.csv"
+
+_TEST_FEATURESTORE_ID = "movie_prediction"
+_TEST_USER_ENTITY_TYPE_ID = "users"
+_TEST_MOVIE_ENTITY_TYPE_ID = "movies"
+_TEST_MOVIE_ENTITY_TYPE_UPDATE_LABELS = {"my_key_update": "my_value_update"}
+
+_TEST_USER_AGE_FEATURE_ID = "age"
+_TEST_USER_GENDER_FEATURE_ID = "gender"
+_TEST_USER_LIKED_GENRES_FEATURE_ID = "liked_genres"
+
+_TEST_MOVIE_TITLE_FEATURE_ID = "title"
+_TEST_MOVIE_GENRES_FEATURE_ID = "genres"
+_TEST_MOVIE_AVERAGE_RATING_FEATURE_ID = "average_rating"
+
+
+@pytest.mark.usefixtures(
+ "prepare_staging_bucket",
+ "delete_staging_bucket",
+ "prepare_bigquery_dataset",
+ "delete_bigquery_dataset",
+ "tear_down_resources",
+)
+class TestFeaturestore(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "temp_vertex_sdk_e2e_featurestore_test"
+
+ def test_create_get_list_featurestore(self, shared_state):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ featurestore_id = self._make_display_name(key=_TEST_FEATURESTORE_ID).replace(
+ "-", "_"
+ )[:60]
+ featurestore = aiplatform.Featurestore.create(
+ featurestore_id=featurestore_id, online_store_fixed_node_count=1
+ )
+
+ shared_state["resources"] = [featurestore]
+ shared_state["featurestore"] = featurestore
+ shared_state["featurestore_name"] = featurestore.resource_name
+
+ get_featurestore = aiplatform.Featurestore(
+ featurestore_name=featurestore.resource_name
+ )
+ assert featurestore.resource_name == get_featurestore.resource_name
+
+ list_featurestores = aiplatform.Featurestore.list()
+ assert get_featurestore.resource_name in [
+ featurestore.resource_name for featurestore in list_featurestores
+ ]
+
+ def test_create_get_list_entity_types(self, shared_state):
+
+ assert shared_state["featurestore"]
+ assert shared_state["featurestore_name"]
+
+ featurestore = shared_state["featurestore"]
+ featurestore_name = shared_state["featurestore_name"]
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ # Users
+ user_entity_type = featurestore.create_entity_type(
+ entity_type_id=_TEST_USER_ENTITY_TYPE_ID
+ )
+ shared_state["user_entity_type"] = user_entity_type
+ shared_state["user_entity_type_name"] = user_entity_type.resource_name
+
+ get_user_entity_type = featurestore.get_entity_type(
+ entity_type_id=_TEST_USER_ENTITY_TYPE_ID
+ )
+ assert user_entity_type.resource_name == get_user_entity_type.resource_name
+
+ # Movies
+ movie_entity_type = aiplatform.EntityType.create(
+ entity_type_id=_TEST_MOVIE_ENTITY_TYPE_ID,
+ featurestore_name=featurestore_name,
+ )
+ shared_state["movie_entity_type"] = movie_entity_type
+ shared_state["movie_entity_type_name"] = movie_entity_type.resource_name
+
+ get_movie_entity_type = aiplatform.EntityType(
+ entity_type_name=movie_entity_type.resource_name
+ )
+ assert movie_entity_type.resource_name == get_movie_entity_type.resource_name
+
+ list_entity_types = aiplatform.EntityType.list(
+ featurestore_name=featurestore_name
+ )
+ assert get_movie_entity_type.resource_name in [
+ entity_type.resource_name for entity_type in list_entity_types
+ ]
+
+ # Update information about the movie entity type.
+ assert movie_entity_type.labels != _TEST_MOVIE_ENTITY_TYPE_UPDATE_LABELS
+
+ movie_entity_type.update(
+ labels=_TEST_MOVIE_ENTITY_TYPE_UPDATE_LABELS,
+ )
+
+ assert movie_entity_type.labels == _TEST_MOVIE_ENTITY_TYPE_UPDATE_LABELS
+
+ def test_create_get_list_features(self, shared_state):
+
+ assert shared_state["user_entity_type"]
+ assert shared_state["user_entity_type_name"]
+ user_entity_type = shared_state["user_entity_type"]
+ user_entity_type_name = shared_state["user_entity_type_name"]
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ # User Features
+ user_age_feature = user_entity_type.create_feature(
+ feature_id=_TEST_USER_AGE_FEATURE_ID, value_type="INT64"
+ )
+ shared_state["user_age_feature_resource_name"] = user_age_feature.resource_name
+ get_user_age_feature = user_entity_type.get_feature(
+ feature_id=_TEST_USER_AGE_FEATURE_ID
+ )
+ assert user_age_feature.resource_name == get_user_age_feature.resource_name
+
+ user_gender_feature = aiplatform.Feature.create(
+ feature_id=_TEST_USER_GENDER_FEATURE_ID,
+ value_type="STRING",
+ entity_type_name=user_entity_type_name,
+ )
+ shared_state[
+ "user_gender_feature_resource_name"
+ ] = user_gender_feature.resource_name
+
+ get_user_gender_feature = aiplatform.Feature(
+ feature_name=user_gender_feature.resource_name
+ )
+ assert (
+ user_gender_feature.resource_name == get_user_gender_feature.resource_name
+ )
+
+ user_liked_genres_feature = user_entity_type.create_feature(
+ feature_id=_TEST_USER_LIKED_GENRES_FEATURE_ID,
+ value_type="STRING_ARRAY",
+ )
+ shared_state[
+ "user_liked_genres_feature_resource_name"
+ ] = user_liked_genres_feature.resource_name
+
+ get_user_liked_genres_feature = aiplatform.Feature(
+ feature_name=user_liked_genres_feature.resource_name
+ )
+ assert (
+ user_liked_genres_feature.resource_name
+ == get_user_liked_genres_feature.resource_name
+ )
+
+ list_user_features = user_entity_type.list_features()
+ list_user_feature_resource_names = [
+ feature.resource_name for feature in list_user_features
+ ]
+
+ assert get_user_age_feature.resource_name in list_user_feature_resource_names
+ assert get_user_gender_feature.resource_name in list_user_feature_resource_names
+ assert (
+ get_user_liked_genres_feature.resource_name
+ in list_user_feature_resource_names
+ )
+
+ def test_ingest_feature_values(self, shared_state, caplog):
+
+ assert shared_state["user_entity_type"]
+ user_entity_type = shared_state["user_entity_type"]
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ user_entity_type.ingest_from_gcs(
+ feature_ids=[
+ _TEST_USER_AGE_FEATURE_ID,
+ _TEST_USER_GENDER_FEATURE_ID,
+ _TEST_USER_LIKED_GENRES_FEATURE_ID,
+ ],
+ feature_time="update_time",
+ gcs_source_uris=_TEST_USERS_ENTITY_TYPE_GCS_SRC,
+ gcs_source_type="avro",
+ entity_id_field="user_id",
+ worker_count=1,
+ )
+
+ assert "EntityType feature values imported." in caplog.text
+
+ caplog.clear()
+
+ def test_batch_create_features(self, shared_state):
+ assert shared_state["movie_entity_type"]
+ movie_entity_type = shared_state["movie_entity_type"]
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ movie_feature_configs = {
+ _TEST_MOVIE_TITLE_FEATURE_ID: {"value_type": "STRING"},
+ _TEST_MOVIE_GENRES_FEATURE_ID: {"value_type": "STRING_ARRAY"},
+ _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID: {"value_type": "DOUBLE"},
+ }
+
+ movie_entity_type.batch_create_features(feature_configs=movie_feature_configs)
+
+ get_movie_title_feature = movie_entity_type.get_feature(
+ feature_id=_TEST_MOVIE_TITLE_FEATURE_ID
+ )
+ get_movie_genres_feature = movie_entity_type.get_feature(
+ feature_id=_TEST_MOVIE_GENRES_FEATURE_ID
+ )
+ get_movie_avg_rating_feature = movie_entity_type.get_feature(
+ feature_id=_TEST_MOVIE_AVERAGE_RATING_FEATURE_ID
+ )
+
+ list_movie_features = movie_entity_type.list_features()
+ movie_feature_resource_names = [
+ feature.resource_name for feature in list_movie_features
+ ]
+
+ assert get_movie_title_feature.resource_name in movie_feature_resource_names
+ assert get_movie_genres_feature.resource_name in movie_feature_resource_names
+ assert (
+ get_movie_avg_rating_feature.resource_name in movie_feature_resource_names
+ )
+
+ def test_ingest_feature_values_from_df_using_feature_time_column_and_online_read_multiple_entities(
+ self, shared_state, caplog
+ ):
+
+ assert shared_state["movie_entity_type"]
+ movie_entity_type = shared_state["movie_entity_type"]
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ read_feature_ids = ["average_rating", "title", "genres"]
+
+ movie_entity_views_df_before_ingest = movie_entity_type.read(
+ entity_ids=["movie_01", "movie_02"],
+ feature_ids=read_feature_ids,
+ )
+ expected_data_before_ingest = [
+ {
+ "entity_id": "movie_01",
+ "average_rating": None,
+ "title": None,
+ "genres": None,
+ },
+ {
+ "entity_id": "movie_02",
+ "average_rating": None,
+ "title": None,
+ "genres": None,
+ },
+ ]
+ expected_movie_entity_views_df_before_ingest = pd.DataFrame(
+ data=expected_data_before_ingest, columns=read_feature_ids
+ )
+
+ movie_entity_views_df_before_ingest.equals(
+ expected_movie_entity_views_df_before_ingest
+ )
+
+ movies_df = pd.DataFrame(
+ data=[
+ {
+ "movie_id": "movie_01",
+ "average_rating": 4.9,
+ "title": "The Shawshank Redemption",
+ "genres": ["Drama"],
+ "update_time": "2021-08-20 20:44:11.094375+00:00",
+ },
+ {
+ "movie_id": "movie_02",
+ "average_rating": 4.2,
+ "title": "The Shining",
+ "genres": ["Horror"],
+ "update_time": "2021-08-20 20:44:11.094375+00:00",
+ },
+ ],
+ columns=["movie_id", "average_rating", "title", "genres", "update_time"],
+ )
+ movies_df["update_time"] = pd.to_datetime(movies_df["update_time"], utc=True)
+ feature_time_column = "update_time"
+
+ movie_entity_type.ingest_from_df(
+ feature_ids=[
+ _TEST_MOVIE_TITLE_FEATURE_ID,
+ _TEST_MOVIE_GENRES_FEATURE_ID,
+ _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
+ ],
+ feature_time=feature_time_column,
+ df_source=movies_df,
+ entity_id_field="movie_id",
+ )
+
+ movie_entity_views_df_after_ingest = movie_entity_type.read(
+ entity_ids=["movie_01", "movie_02"],
+ feature_ids=read_feature_ids,
+ )
+ expected_data_after_ingest = [
+ {
+ "movie_id": "movie_01",
+ "average_rating": 4.9,
+ "title": "The Shawshank Redemption",
+ "genres": ["Drama"],
+ },
+ {
+ "movie_id": "movie_02",
+ "average_rating": 4.2,
+ "title": "The Shining",
+ "genres": ["Horror"],
+ },
+ ]
+ expected_movie_entity_views_df_after_ingest = pd.DataFrame(
+ data=expected_data_after_ingest, columns=read_feature_ids
+ )
+
+ movie_entity_views_df_after_ingest.equals(
+ expected_movie_entity_views_df_after_ingest
+ )
+
+ assert "EntityType feature values imported." in caplog.text
+ caplog.clear()
+
+ def test_ingest_feature_values_from_df_using_feature_time_datetime_and_online_read_single_entity(
+ self, shared_state, caplog
+ ):
+ assert shared_state["movie_entity_type"]
+ movie_entity_type = shared_state["movie_entity_type"]
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ movies_df = pd.DataFrame(
+ data=[
+ {
+ "movie_id": "movie_03",
+ "average_rating": 4.5,
+ "title": "Cinema Paradiso",
+ "genres": ["Romance"],
+ },
+ {
+ "movie_id": "movie_04",
+ "average_rating": 4.6,
+ "title": "The Dark Knight",
+ "genres": ["Action"],
+ },
+ ],
+ columns=["movie_id", "average_rating", "title", "genres"],
+ )
+
+ feature_time_datetime_str = datetime.datetime.now().isoformat(
+ sep=" ", timespec="milliseconds"
+ )
+ feature_time_datetime = datetime.datetime.strptime(
+ feature_time_datetime_str, "%Y-%m-%d %H:%M:%S.%f"
+ )
+
+ movie_entity_type.ingest_from_df(
+ feature_ids=[
+ _TEST_MOVIE_TITLE_FEATURE_ID,
+ _TEST_MOVIE_GENRES_FEATURE_ID,
+ _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
+ ],
+ feature_time=feature_time_datetime,
+ df_source=movies_df,
+ entity_id_field="movie_id",
+ )
+
+ movie_entity_views_df_avg_rating = movie_entity_type.read(
+ entity_ids="movie_04",
+ feature_ids="average_rating",
+ )
+ expected_data_avg_rating = [
+ {"movie_id": "movie_04", "average_rating": 4.6},
+ ]
+ expected_movie_entity_views_df_avg_rating = pd.DataFrame(
+ data=expected_data_avg_rating, columns=["average_rating"]
+ )
+
+ movie_entity_views_df_avg_rating.equals(
+ expected_movie_entity_views_df_avg_rating
+ )
+
+ assert "EntityType feature values imported." in caplog.text
+
+ caplog.clear()
+
+ def test_write_features(self, shared_state, caplog):
+ assert shared_state["movie_entity_type"]
+ movie_entity_type = shared_state["movie_entity_type"]
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ # Create pandas DataFrame
+ movies_df = pd.DataFrame(
+ data=[
+ {
+ "entity_id": "movie_01",
+ "average_rating": 4.9,
+ "title": "The Shawshank Redemption",
+ "genres": ["Drama", "Action"],
+ },
+ {
+ "entity_id": "movie_02",
+ "average_rating": 4.4,
+ "title": "The Shining",
+ "genres": ["Horror", "Action"],
+ },
+ ],
+ columns=["entity_id", "average_rating", "title", "genres"],
+ )
+ movies_df = movies_df.set_index("entity_id")
+
+ # Write feature values
+ movie_entity_type.preview.write_feature_values(instances=movies_df)
+ movie_entity_type.write_feature_values(
+ instances={"movie_02": {"average_rating": 4.5}}
+ )
+
+ # Ensure writing feature values overwrites previous values
+ movie_entity_df_avg_rating_genres = movie_entity_type.read(
+ entity_ids="movie_02", feature_ids=["average_rating", "genres"]
+ )
+ expected_data_avg_rating = [
+ {
+ "entity_id": "movie_02",
+ "average_rating": 4.5,
+ "genres": ["Horror", "Action"],
+ },
+ ]
+ expected_movie_entity_df_avg_rating_genres = pd.DataFrame(
+ data=expected_data_avg_rating,
+ columns=["entity_id", "average_rating", "genres"],
+ )
+ expected_movie_entity_df_avg_rating_genres.equals(
+ movie_entity_df_avg_rating_genres
+ )
+
+ assert "EntityType feature values written." in caplog.text
+
+ caplog.clear()
+
+ def test_search_features(self, shared_state):
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ list_searched_features = aiplatform.Feature.search()
+ assert len(list_searched_features) >= 1
+
+ def test_batch_serve_to_df(self, shared_state, caplog):
+
+ assert shared_state["featurestore"]
+ assert shared_state["user_age_feature_resource_name"]
+ assert shared_state["user_gender_feature_resource_name"]
+ assert shared_state["user_liked_genres_feature_resource_name"]
+
+ featurestore = shared_state["featurestore"]
+
+ user_age_feature_resource_name = shared_state["user_age_feature_resource_name"]
+ user_gender_feature_resource_name = shared_state[
+ "user_gender_feature_resource_name"
+ ]
+ user_liked_genres_feature_resource_name = shared_state[
+ "user_liked_genres_feature_resource_name"
+ ]
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ caplog.set_level(logging.INFO)
+
+ read_instances_df = pd.DataFrame(
+ data=[
+ ["alice", "movie_01", "2021-09-15T08:28:14Z"],
+ ["bob", "movie_02", "2021-09-15T08:28:14Z"],
+ ["dav", "movie_03", "2021-09-15T08:28:14Z"],
+ ["eve", "movie_04", "2021-09-15T08:28:14Z"],
+ ["alice", "movie_03", "2021-09-14T09:35:15Z"],
+ ["bob", "movie_04", "2020-02-14T09:35:15Z"],
+ ],
+ columns=["users", "movies", "timestamp"],
+ )
+ read_instances_df["timestamp"] = pd.to_datetime(
+ read_instances_df["timestamp"], utc=True
+ )
+
+ df = featurestore.batch_serve_to_df(
+ serving_feature_ids={
+ _TEST_USER_ENTITY_TYPE_ID: [
+ _TEST_USER_AGE_FEATURE_ID,
+ _TEST_USER_GENDER_FEATURE_ID,
+ _TEST_USER_LIKED_GENRES_FEATURE_ID,
+ ],
+ _TEST_MOVIE_ENTITY_TYPE_ID: [
+ _TEST_MOVIE_TITLE_FEATURE_ID,
+ _TEST_MOVIE_GENRES_FEATURE_ID,
+ _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
+ ],
+ },
+ read_instances_df=read_instances_df,
+ feature_destination_fields={
+ user_age_feature_resource_name: "user_age_dest",
+ user_gender_feature_resource_name: "user_gender_dest",
+ user_liked_genres_feature_resource_name: "user_liked_genres_dest",
+ },
+ )
+
+ expected_df_columns = [
+ "timestamp",
+ "entity_type_users",
+ "user_age_dest",
+ "user_gender_dest",
+ "user_liked_genres_dest",
+ "entity_type_movies",
+ "title",
+ "genres",
+ "average_rating",
+ ]
+
+ assert isinstance(df, pd.DataFrame)
+ assert list(df.columns) == expected_df_columns
+ assert df.size == 54
+ assert "Featurestore feature values served." in caplog.text
+
+ caplog.clear()
+
+ def test_batch_serve_to_gcs(self, shared_state, caplog):
+
+ assert shared_state["featurestore"]
+ assert shared_state["bucket"]
+ assert shared_state["user_age_feature_resource_name"]
+ assert shared_state["user_gender_feature_resource_name"]
+ assert shared_state["user_liked_genres_feature_resource_name"]
+
+ featurestore = shared_state["featurestore"]
+ bucket_name = shared_state["staging_bucket_name"]
+ user_age_feature_resource_name = shared_state["user_age_feature_resource_name"]
+ user_gender_feature_resource_name = shared_state[
+ "user_gender_feature_resource_name"
+ ]
+ user_liked_genres_feature_resource_name = shared_state[
+ "user_liked_genres_feature_resource_name"
+ ]
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ caplog.set_level(logging.INFO)
+
+ featurestore.batch_serve_to_gcs(
+ serving_feature_ids={
+ _TEST_USER_ENTITY_TYPE_ID: [
+ _TEST_USER_AGE_FEATURE_ID,
+ _TEST_USER_GENDER_FEATURE_ID,
+ _TEST_USER_LIKED_GENRES_FEATURE_ID,
+ ],
+ _TEST_MOVIE_ENTITY_TYPE_ID: [
+ _TEST_MOVIE_TITLE_FEATURE_ID,
+ _TEST_MOVIE_GENRES_FEATURE_ID,
+ _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
+ ],
+ },
+ read_instances_uri=_TEST_READ_INSTANCE_SRC,
+ feature_destination_fields={
+ user_age_feature_resource_name: "user_age_dest",
+ user_gender_feature_resource_name: "user_gender_dest",
+ user_liked_genres_feature_resource_name: "user_liked_genres_dest",
+ },
+ gcs_destination_output_uri_prefix=f"gs://{bucket_name}/featurestore_test/tfrecord",
+ gcs_destination_type="tfrecord",
+ )
+ assert "Featurestore feature values served." in caplog.text
+
+ caplog.clear()
+
+ def test_batch_serve_to_bq(self, shared_state, caplog):
+
+ assert shared_state["featurestore"]
+ assert shared_state["bigquery_dataset"]
+ assert shared_state["user_age_feature_resource_name"]
+ assert shared_state["user_gender_feature_resource_name"]
+ assert shared_state["user_liked_genres_feature_resource_name"]
+
+ featurestore = shared_state["featurestore"]
+ bigquery_dataset_id = shared_state["bigquery_dataset_id"]
+ user_age_feature_resource_name = shared_state["user_age_feature_resource_name"]
+ user_gender_feature_resource_name = shared_state[
+ "user_gender_feature_resource_name"
+ ]
+ user_liked_genres_feature_resource_name = shared_state[
+ "user_liked_genres_feature_resource_name"
+ ]
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ caplog.set_level(logging.INFO)
+
+ featurestore.batch_serve_to_bq(
+ serving_feature_ids={
+ _TEST_USER_ENTITY_TYPE_ID: [
+ _TEST_USER_AGE_FEATURE_ID,
+ _TEST_USER_GENDER_FEATURE_ID,
+ _TEST_USER_LIKED_GENRES_FEATURE_ID,
+ ],
+ _TEST_MOVIE_ENTITY_TYPE_ID: [
+ _TEST_MOVIE_TITLE_FEATURE_ID,
+ _TEST_MOVIE_GENRES_FEATURE_ID,
+ _TEST_MOVIE_AVERAGE_RATING_FEATURE_ID,
+ ],
+ },
+ read_instances_uri=_TEST_READ_INSTANCE_SRC,
+ feature_destination_fields={
+ user_age_feature_resource_name: "user_age_dest",
+ user_gender_feature_resource_name: "user_gender_dest",
+ user_liked_genres_feature_resource_name: "user_liked_genres_dest",
+ },
+ bq_destination_output_uri=f"bq://{bigquery_dataset_id}.test_table",
+ )
+
+ assert "Featurestore feature values served." in caplog.text
+ caplog.clear()
+
+ def test_online_reads(self, shared_state):
+ assert shared_state["user_entity_type"]
+ assert shared_state["movie_entity_type"]
+
+ user_entity_type = shared_state["user_entity_type"]
+ movie_entity_type = shared_state["movie_entity_type"]
+
+ user_entity_views = user_entity_type.read(entity_ids="alice")
+ assert isinstance(user_entity_views, pd.DataFrame)
+
+ movie_entity_views = movie_entity_type.read(
+ entity_ids=["movie_01", "movie_04"],
+ feature_ids=[_TEST_MOVIE_TITLE_FEATURE_ID, _TEST_MOVIE_GENRES_FEATURE_ID],
+ )
+ assert isinstance(movie_entity_views, pd.DataFrame)
+
+ movie_entity_views = movie_entity_type.read(
+ entity_ids="movie_01",
+ feature_ids=[_TEST_MOVIE_TITLE_FEATURE_ID, _TEST_MOVIE_GENRES_FEATURE_ID],
+ )
+ assert isinstance(movie_entity_views, pd.DataFrame)
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_initializer.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_initializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f50e68c2488cbd2ec3a6522318824d39761ce6de
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_initializer.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from google.auth import credentials as auth_credentials
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer as aiplatform_initializer
+from tests.system.aiplatform import e2e_base
+
+
+class TestInitializer(e2e_base.TestEndToEnd):
+ """Tests the _set_google_auth_default() functionality in initializer._Config."""
+
+ _temp_prefix = "test_initializer_"
+
+ def test_init_calls_set_google_auth_default(self):
+ aiplatform.init(project=e2e_base._PROJECT)
+
+ # init() with only creds shouldn't overwrite the project
+ creds = auth_credentials.AnonymousCredentials()
+ aiplatform.init(credentials=creds)
+
+ assert aiplatform.initializer.global_config.project == e2e_base._PROJECT
+ assert aiplatform.initializer.global_config.credentials == creds
+
+ # init() with only project shouldn't overwrite creds
+ aiplatform.init(project=e2e_base._PROJECT)
+ assert aiplatform.initializer.global_config.credentials == creds
+
+ def test_init_rest_async_incorrect_credentials(self):
+ # Async REST credentials must be explicitly set using
+ # _set_async_rest_credentials() for async REST transport.
+ creds = auth_credentials.AnonymousCredentials()
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport="rest",
+ )
+
+ # System tests are run on Python 3.10 which has async deps.
+ with pytest.raises(ValueError):
+ # Expect a ValueError for passing in sync credentials.
+ aiplatform_initializer._set_async_rest_credentials(credentials=creds)
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_language_models.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_language_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e5b005b477bd9c66dadcdefda5d3f85f10efeb1
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_language_models.py
@@ -0,0 +1,559 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=protected-access, g-multiple-import
+
+import pytest
+
+from google import auth
+from google.cloud import aiplatform
+from google.cloud.aiplatform.compat.types import (
+ job_state as gca_job_state,
+)
+from tests.system.aiplatform import e2e_base
+from google.cloud.aiplatform.utils import gcs_utils
+from vertexai import language_models
+from vertexai.preview import (
+ language_models as preview_language_models,
+)
+from vertexai.preview.language_models import (
+ ChatModel,
+ CodeGenerationModel,
+ InputOutputTextPair,
+ TextGenerationModel,
+ TextGenerationResponse,
+ TextEmbeddingModel,
+)
+
+STAGING_DIR_URI = "gs://ucaip-samples-us-central1/tmp/staging"
+
+
+class TestLanguageModels(e2e_base.TestEndToEnd):
+ """System tests for language models."""
+
+ _temp_prefix = "temp_language_models_test_"
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_text_generation(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ model = TextGenerationModel.from_pretrained("google/text-bison@001")
+ grounding_source = language_models.GroundingSource.WebSearch()
+ response = model.predict(
+ "What is the best recipe for cupcakes? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ stop_sequences=["# %%"],
+ grounding_source=grounding_source,
+ )
+ assert response.text or response.is_blocked
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_text_generation_preview_count_tokens(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ model = preview_language_models.TextGenerationModel.from_pretrained(
+ "google/text-bison@001"
+ )
+
+ response = model.count_tokens(["How are you doing?"])
+
+ assert response.total_tokens
+ assert response.total_billable_characters
+
+ @pytest.mark.asyncio
+ async def test_text_generation_model_predict_async(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ model = TextGenerationModel.from_pretrained("google/text-bison@001")
+ grounding_source = language_models.GroundingSource.WebSearch()
+ response = await model.predict_async(
+ "What is the best recipe for cupcakes? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ stop_sequences=["# %%"],
+ grounding_source=grounding_source,
+ )
+ assert response.text or response.is_blocked
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_text_generation_streaming(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ model = TextGenerationModel.from_pretrained("google/text-bison@001")
+
+ for response in model.predict_streaming(
+ "What is the best recipe for cupcakes? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ ):
+ assert response.text or response.is_blocked
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_preview_text_generation_from_pretrained(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ model = preview_language_models.TextGenerationModel.from_pretrained(
+ "google/text-bison@001"
+ )
+
+ response = model.predict(
+ "What is the best recipe for cupcakes? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ stop_sequences=["# %%"],
+ )
+ assert response.text or response.is_blocked
+
+ assert isinstance(model, preview_language_models.TextGenerationModel)
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_chat_on_chat_model(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+ chat_model = ChatModel.from_pretrained("google/chat-bison@001")
+ grounding_source = language_models.GroundingSource.WebSearch()
+ chat = chat_model.start_chat(
+ context="My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.",
+ examples=[
+ InputOutputTextPair(
+ input_text="Who do you work for?",
+ output_text="I work for Ned.",
+ ),
+ InputOutputTextPair(
+ input_text="What do I like?",
+ output_text="Ned likes watching movies.",
+ ),
+ ],
+ temperature=0.0,
+ stop_sequences=["# %%"],
+ )
+
+ message1 = "Are my favorite movies based on a book series?"
+ response1 = chat.send_message(
+ message1,
+ grounding_source=grounding_source,
+ )
+ assert response1.text
+ assert response1.grounding_metadata
+ assert len(chat.message_history) == 2
+ assert chat.message_history[0].author == chat.USER_AUTHOR
+ assert chat.message_history[0].content == message1
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
+
+ message2 = "When were these books published?"
+ response2 = chat.send_message(
+ message2, temperature=0.1, grounding_source=grounding_source
+ )
+ assert response2.text
+ assert response2.grounding_metadata
+ assert len(chat.message_history) == 4
+ assert chat.message_history[2].author == chat.USER_AUTHOR
+ assert chat.message_history[2].content == message2
+ assert chat.message_history[3].author == chat.MODEL_AUTHOR
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_chat_model_preview_count_tokens(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ chat_model = ChatModel.from_pretrained("google/chat-bison@001")
+
+ chat = chat_model.start_chat()
+
+ chat.send_message("What should I do today?")
+
+ response_with_history = chat.count_tokens("Any ideas?")
+
+ response_without_history = chat_model.start_chat().count_tokens(
+ "What should I do today?"
+ )
+
+ assert (
+ response_with_history.total_tokens > response_without_history.total_tokens
+ )
+ assert (
+ response_with_history.total_billable_characters
+ > response_without_history.total_billable_characters
+ )
+
+ @pytest.mark.asyncio
+ async def test_chat_model_async(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ chat_model = ChatModel.from_pretrained("google/chat-bison@001")
+ grounding_source = language_models.GroundingSource.WebSearch()
+ chat = chat_model.start_chat(
+ context="My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.",
+ examples=[
+ InputOutputTextPair(
+ input_text="Who do you work for?",
+ output_text="I work for Ned.",
+ ),
+ InputOutputTextPair(
+ input_text="What do I like?",
+ output_text="Ned likes watching movies.",
+ ),
+ ],
+ temperature=0.0,
+ stop_sequences=["# %%"],
+ )
+
+ message1 = "Are my favorite movies based on a book series?"
+ response1 = await chat.send_message_async(
+ message1,
+ grounding_source=grounding_source,
+ )
+ assert response1.text
+ assert response1.grounding_metadata
+ assert len(chat.message_history) == 2
+ assert chat.message_history[0].author == chat.USER_AUTHOR
+ assert chat.message_history[0].content == message1
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
+
+ message2 = "When were these books published?"
+ response2 = await chat.send_message_async(
+ message2,
+ temperature=0.1,
+ grounding_source=grounding_source,
+ )
+ assert response2.text
+ assert response2.grounding_metadata
+ assert len(chat.message_history) == 4
+ assert chat.message_history[2].author == chat.USER_AUTHOR
+ assert chat.message_history[2].content == message2
+ assert chat.message_history[3].author == chat.MODEL_AUTHOR
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_chat_model_send_message_streaming(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ chat_model = ChatModel.from_pretrained("google/chat-bison@001")
+ chat = chat_model.start_chat(
+ context="My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.",
+ examples=[
+ InputOutputTextPair(
+ input_text="Who do you work for?",
+ output_text="I work for Ned.",
+ ),
+ InputOutputTextPair(
+ input_text="What do I like?",
+ output_text="Ned likes watching movies.",
+ ),
+ ],
+ temperature=0.0,
+ )
+
+ message1 = "Are my favorite movies based on a book series?"
+ for response in chat.send_message_streaming(message1):
+ assert isinstance(response, TextGenerationResponse)
+ assert len(chat.message_history) == 2
+ assert chat.message_history[0].author == chat.USER_AUTHOR
+ assert chat.message_history[0].content == message1
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
+
+ message2 = "When were these books published?"
+ for response2 in chat.send_message_streaming(
+ message2,
+ temperature=0.1,
+ ):
+ assert isinstance(response2, TextGenerationResponse)
+ assert len(chat.message_history) == 4
+ assert chat.message_history[2].author == chat.USER_AUTHOR
+ assert chat.message_history[2].content == message2
+ assert chat.message_history[3].author == chat.MODEL_AUTHOR
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_text_embedding(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ model = TextEmbeddingModel.from_pretrained("google/textembedding-gecko@001")
+ # One short text, one llong text (to check truncation)
+ texts = ["What is life?", "What is life?" * 1000]
+ embeddings = model.get_embeddings(texts)
+ assert len(embeddings) == 2
+ assert len(embeddings[0].values) == 768
+ assert embeddings[0].statistics.token_count > 0
+ assert not embeddings[0].statistics.truncated
+
+ assert len(embeddings[1].values) == 768
+ assert embeddings[1].statistics.token_count > 1000
+ assert embeddings[1].statistics.truncated
+
+ @pytest.mark.asyncio
+ async def test_text_embedding_async(self):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ model = TextEmbeddingModel.from_pretrained("google/textembedding-gecko@001")
+ # One short text, one llong text (to check truncation)
+ texts = ["What is life?", "What is life?" * 1000]
+ embeddings = await model.get_embeddings_async(texts)
+ assert len(embeddings) == 2
+ assert len(embeddings[0].values) == 768
+ assert embeddings[0].statistics.token_count > 0
+ assert not embeddings[0].statistics.truncated
+
+ assert len(embeddings[1].values) == 768
+ assert embeddings[1].statistics.token_count > 1000
+ assert embeddings[1].statistics.truncated
+
+ # TODO(b/339907038): Re-enable test after timeout issue is fixed.
+ @pytest.mark.skip(reason="Causes system tests timeout")
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_tuning(self, shared_state, api_transport):
+ """Test tuning, listing and loading models."""
+ credentials, _ = auth.default(
+ scopes=["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ credentials=credentials,
+ api_transport=api_transport,
+ )
+
+ model = language_models.TextGenerationModel.from_pretrained("text-bison@001")
+
+ import pandas
+
+ training_data = pandas.DataFrame(
+ data=[
+ {"input_text": "Input 0", "output_text": "Output 0"},
+ {"input_text": "Input 1", "output_text": "Output 1"},
+ {"input_text": "Input 2", "output_text": "Output 2"},
+ {"input_text": "Input 3", "output_text": "Output 3"},
+ {"input_text": "Input 4", "output_text": "Output 4"},
+ {"input_text": "Input 5", "output_text": "Output 5"},
+ {"input_text": "Input 6", "output_text": "Output 6"},
+ {"input_text": "Input 7", "output_text": "Output 7"},
+ {"input_text": "Input 8", "output_text": "Output 8"},
+ {"input_text": "Input 9", "output_text": "Output 9"},
+ ]
+ )
+
+ dataset_uri = (
+ STAGING_DIR_URI + "/veretx_llm_tuning_training_data.text-bison.dummy.jsonl"
+ )
+ gcs_utils._upload_pandas_df_to_gcs(
+ df=training_data, upload_gcs_path=dataset_uri
+ )
+
+ tuning_job = model.tune_model(
+ training_data=training_data,
+ train_steps=1,
+ tuning_job_location="europe-west4",
+ tuned_model_location="us-central1",
+ learning_rate_multiplier=2.0,
+ tuning_evaluation_spec=preview_language_models.TuningEvaluationSpec(
+ evaluation_data=dataset_uri,
+ evaluation_interval=37,
+ enable_early_stopping=True,
+ ),
+ )
+ tuned_model1 = tuning_job.get_tuned_model()
+
+ # According to the Pipelines design, external resources created by a pipeline
+ # must not be modified or deleted. Otherwise caching will break next pipeline runs.
+ shared_state.setdefault("resources", [])
+ shared_state["resources"].append(tuned_model1._endpoint)
+ shared_state["resources"].extend(
+ aiplatform.Model(model_name=deployed_model.model)
+ for deployed_model in tuned_model1._endpoint.list_models()
+ )
+ # Deleting the Endpoint is a little less bad since the LLM SDK will recreate it, but it's not advised for the same reason.
+
+ # Testing the new model returned by the `tuning_job.get_tuned_model` method
+ response1 = tuned_model1.predict(
+ "What is the best recipe for cupcakes? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ )
+ assert response1.text or response1.is_blocked
+
+ # Testing listing and getting tuned models
+ tuned_model_names = model.list_tuned_model_names()
+ assert tuned_model_names
+ tuned_model_name = tuned_model_names[0]
+
+ tuned_model = TextGenerationModel.get_tuned_model(tuned_model_name)
+
+ tuned_model_response = tuned_model.predict(
+ "What is the best recipe for cupcakes? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ )
+ assert tuned_model_response.text or tuned_model_response.is_blocked
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_batch_prediction_for_text_generation(self, api_transport):
+ source_uri = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/batch_prediction_prompts1.jsonl"
+ destination_uri_prefix = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/predictions/text-bison@001_"
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ model = TextGenerationModel.from_pretrained("text-bison@001")
+ job = model.batch_predict(
+ dataset=source_uri,
+ destination_uri_prefix=destination_uri_prefix,
+ model_parameters={"temperature": 0, "top_p": 1, "top_k": 5},
+ )
+
+ job.wait_for_resource_creation()
+ job.wait()
+ gapic_job = job._gca_resource
+ job.delete()
+
+ assert gapic_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_batch_prediction_for_textembedding(self, api_transport):
+ source_uri = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/batch_prediction_prompts_textembedding_dummy1.jsonl"
+ destination_uri_prefix = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/predictions/textembedding-gecko@001_"
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ model = TextEmbeddingModel.from_pretrained("textembedding-gecko@001")
+ job = model.batch_predict(
+ dataset=source_uri,
+ destination_uri_prefix=destination_uri_prefix,
+ model_parameters={},
+ )
+
+ job.wait_for_resource_creation()
+ job.wait()
+ gapic_job = job._gca_resource
+ job.delete()
+
+ assert gapic_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_batch_prediction_for_code_generation(self, api_transport):
+ source_uri = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/code-bison.batch_prediction_prompts.1.jsonl"
+ destination_uri_prefix = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/predictions/code-bison@001_"
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ model = CodeGenerationModel.from_pretrained("code-bison@001")
+ job = model.batch_predict(
+ dataset=source_uri,
+ destination_uri_prefix=destination_uri_prefix,
+ model_parameters={"temperature": 0},
+ )
+
+ job.wait_for_resource_creation()
+ job.wait()
+ gapic_job = job._gca_resource
+ job.delete()
+
+ assert gapic_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_code_generation_streaming(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ model = language_models.CodeGenerationModel.from_pretrained("code-bison@001")
+
+ for response in model.predict_streaming(
+ prefix="def reverse_string(s):",
+ # code-bison does not support suffix
+ # suffix=" return s",
+ max_output_tokens=128,
+ temperature=0.0,
+ ):
+ assert response.text
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_code_chat_model_send_message_streaming(self, api_transport):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ api_transport=api_transport,
+ )
+
+ chat_model = language_models.CodeChatModel.from_pretrained("codechat-bison@001")
+ chat = chat_model.start_chat()
+
+ message1 = "Please help write a function to calculate the max of two numbers"
+ for response in chat.send_message_streaming(message1):
+ assert response.text
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_matching_engine_index.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_matching_engine_index.py
new file mode 100644
index 0000000000000000000000000000000000000000..113b48c06fd95e17b615a768d360d28128655265
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_matching_engine_index.py
@@ -0,0 +1,667 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import uuid
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform.matching_engine.matching_engine_index_endpoint import (
+ Namespace,
+)
+from google.cloud import aiplatform_v1
+from tests.system.aiplatform import e2e_base
+
+# project
+_TEST_INDEX_DISPLAY_NAME = "index_display_name"
+_TEST_STREAM_INDEX_DISPLAY_NAME = "stream_index_display_name"
+_TEST_INDEX_DESCRIPTION = "index_description"
+_TEST_INDEX_DISTANCE_MEASURE_TYPE = "SQUARED_L2_DISTANCE"
+
+_TEST_INDEX_CONFIG_DIMENSIONS = 100
+_TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT = 150
+_TEST_LEAF_NODE_EMBEDDING_COUNT = 123
+_TEST_LEAF_NODES_TO_SEARCH_PERCENT = 50
+
+
+_TEST_CONTENTS_DELTA_URI = (
+ "gs://cloud-samples-data-us-central1/vertex-ai/matching_engine/glove100/initial"
+)
+_TEST_CONTENTS_DELTA_URI_UPDATE = (
+ "gs://cloud-samples-data-us-central1/vertex-ai/matching_engine/glove100/incremental"
+)
+_TEST_IS_COMPLETE_OVERWRITE = True
+_TEST_INDEX_DISTANCE_MEASURE_TYPE = "SQUARED_L2_DISTANCE"
+
+
+_TEST_LABELS = {"my_key": "my_value"}
+_TEST_DISPLAY_NAME_UPDATE = "my new display name"
+_TEST_DESCRIPTION_UPDATE = "my description update"
+_TEST_LABELS_UPDATE = {"my_key_update": "my_value_update"}
+
+# ENDPOINT
+_TEST_INDEX_ENDPOINT_DISPLAY_NAME = "endpoint_name"
+_TEST_PUBLIC_INDEX_ENDPOINT_DISPLAY_NAME = "public_endpoint_name"
+_TEST_INDEX_ENDPOINT_DESCRIPTION = "my endpoint"
+_TEST_PUBLIC_INDEX_ENDPOINT_DESCRIPTION = "my public endpoint"
+
+# DEPLOYED INDEX
+_TEST_DEPLOYED_INDEX_ID = f"deployed_index_id_{uuid.uuid4()}".replace("-", "_")
+_TEST_DEPLOYED_INDEX_DISPLAY_NAME = f"deployed_index_display_name_{uuid.uuid4()}"
+_TEST_DEPLOYED_INDEX_ID_PUBLIC = f"deployed_index_id_{uuid.uuid4()}".replace("-", "_")
+_TEST_DEPLOYED_INDEX_DISPLAY_NAME_PUBLIC = f"deployed_index_display_name_{uuid.uuid4()}"
+_TEST_DEPLOYED_STREAM_INDEX_ID = f"deployed_index_id_{uuid.uuid4()}".replace("-", "_")
+_TEST_DEPLOYED_STREAM_INDEX_DISPLAY_NAME = f"deployed_index_display_name_{uuid.uuid4()}"
+_TEST_DEPLOYED_STREAM_INDEX_ID_PUBLIC = f"deployed_index_id_{uuid.uuid4()}".replace(
+ "-", "_"
+)
+_TEST_DEPLOYED_STREAM_INDEX_DISPLAY_NAME_PUBLIC = (
+ f"deployed_index_display_name_{uuid.uuid4()}"
+)
+_TEST_MIN_REPLICA_COUNT_UPDATED = 4
+_TEST_MAX_REPLICA_COUNT_UPDATED = 4
+
+# QUERY
+_TEST_MATCH_QUERY = query = [
+ -0.11333,
+ 0.48402,
+ 0.090771,
+ -0.22439,
+ 0.034206,
+ -0.55831,
+ 0.041849,
+ -0.53573,
+ 0.18809,
+ -0.58722,
+ 0.015313,
+ -0.014555,
+ 0.80842,
+ -0.038519,
+ 0.75348,
+ 0.70502,
+ -0.17863,
+ 0.3222,
+ 0.67575,
+ 0.67198,
+ 0.26044,
+ 0.4187,
+ -0.34122,
+ 0.2286,
+ -0.53529,
+ 1.2582,
+ -0.091543,
+ 0.19716,
+ -0.037454,
+ -0.3336,
+ 0.31399,
+ 0.36488,
+ 0.71263,
+ 0.1307,
+ -0.24654,
+ -0.52445,
+ -0.036091,
+ 0.55068,
+ 0.10017,
+ 0.48095,
+ 0.71104,
+ -0.053462,
+ 0.22325,
+ 0.30917,
+ -0.39926,
+ 0.036634,
+ -0.35431,
+ -0.42795,
+ 0.46444,
+ 0.25586,
+ 0.68257,
+ -0.20821,
+ 0.38433,
+ 0.055773,
+ -0.2539,
+ -0.20804,
+ 0.52522,
+ -0.11399,
+ -0.3253,
+ -0.44104,
+ 0.17528,
+ 0.62255,
+ 0.50237,
+ -0.7607,
+ -0.071786,
+ 0.0080131,
+ -0.13286,
+ 0.50097,
+ 0.18824,
+ -0.54722,
+ -0.42664,
+ 0.4292,
+ 0.14877,
+ -0.0072514,
+ -0.16484,
+ -0.059798,
+ 0.9895,
+ -0.61738,
+ 0.054169,
+ 0.48424,
+ -0.35084,
+ -0.27053,
+ 0.37829,
+ 0.11503,
+ -0.39613,
+ 0.24266,
+ 0.39147,
+ -0.075256,
+ 0.65093,
+ -0.20822,
+ -0.17456,
+ 0.53571,
+ -0.16537,
+ 0.13582,
+ -0.56016,
+ 0.016964,
+ 0.1277,
+ 0.94071,
+ -0.22608,
+ -0.021106,
+]
+
+_TEST_FILTER = [Namespace("name", ["allow_token"], ["deny_token"])]
+
+
+# STREAM UPDATE
+_TEST_DATAPOINT_1 = aiplatform_v1.types.index.IndexDatapoint(
+ datapoint_id="upsert_0",
+ feature_vector=_TEST_MATCH_QUERY,
+ restricts=[
+ aiplatform_v1.types.index.IndexDatapoint.Restriction(
+ namespace="Color", allow_list=["red"]
+ )
+ ],
+ numeric_restricts=[
+ aiplatform_v1.types.index.IndexDatapoint.NumericRestriction(
+ namespace="cost",
+ value_int=1,
+ )
+ ],
+)
+_TEST_DATAPOINT_2 = aiplatform_v1.types.index.IndexDatapoint(
+ datapoint_id="upsert_1",
+ feature_vector=_TEST_MATCH_QUERY,
+ numeric_restricts=[
+ aiplatform_v1.types.index.IndexDatapoint.NumericRestriction(
+ namespace="cost",
+ value_double=0.1,
+ )
+ ],
+ crowding_tag=aiplatform_v1.types.index.IndexDatapoint.CrowdingTag(
+ crowding_attribute="crowding"
+ ),
+)
+_TEST_DATAPOINT_3 = aiplatform_v1.types.index.IndexDatapoint(
+ datapoint_id="5",
+ feature_vector=_TEST_MATCH_QUERY,
+ numeric_restricts=[
+ aiplatform_v1.types.index.IndexDatapoint.NumericRestriction(
+ namespace="cost",
+ value_float=1.1,
+ )
+ ],
+)
+_TEST_STREAM_INDEX_DATAPOINTS = [
+ _TEST_DATAPOINT_1,
+ _TEST_DATAPOINT_2,
+ _TEST_DATAPOINT_3,
+]
+
+
+class TestMatchingEngine(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "temp_vertex_sdk_e2e_matching_engine_test"
+
+ def test_create_get_list_matching_engine_index(self, shared_state):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ # Clean up resources from previous test runs.
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list():
+ for deployed_index in index_endpoint.deployed_indexes:
+ index_endpoint.undeploy_index(deployed_index_id=deployed_index.id)
+ index_endpoint.delete()
+
+ for index in aiplatform.MatchingEngineIndex.list():
+ index.delete()
+
+ # Create an index
+ index = aiplatform.MatchingEngineIndex.create_tree_ah_index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ contents_delta_uri=_TEST_CONTENTS_DELTA_URI,
+ dimensions=_TEST_INDEX_CONFIG_DIMENSIONS,
+ approximate_neighbors_count=_TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT,
+ distance_measure_type=_TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ leaf_node_embedding_count=_TEST_LEAF_NODE_EMBEDDING_COUNT,
+ leaf_nodes_to_search_percent=_TEST_LEAF_NODES_TO_SEARCH_PERCENT,
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+
+ shared_state["resources"] = [index]
+ shared_state["index"] = index
+ shared_state["index_name"] = index.resource_name
+
+ # Verify that the retrieved index is the same
+ get_index = aiplatform.MatchingEngineIndex(index_name=index.resource_name)
+ assert index.resource_name == get_index.resource_name
+
+ # Create index and check that it is listed
+ list_indexes = aiplatform.MatchingEngineIndex.list()
+ assert get_index.resource_name in [
+ index.resource_name for index in list_indexes
+ ]
+
+ # Update the index metadata
+ updated_index = get_index.update_metadata(
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ )
+
+ assert updated_index.name == get_index.name
+ # TODO: Reinstate assertions once b/220005272 is fixed.
+ # assert updated_index.display_name == _TEST_DISPLAY_NAME_UPDATE
+ # assert updated_index.description == _TEST_DESCRIPTION_UPDATE
+ # assert updated_index.labels == _TEST_LABELS_UPDATE
+
+ # Update the index embeddings
+ updated_index = get_index.update_embeddings(
+ contents_delta_uri=_TEST_CONTENTS_DELTA_URI_UPDATE,
+ is_complete_overwrite=_TEST_IS_COMPLETE_OVERWRITE,
+ )
+
+ assert updated_index.name == get_index.name
+
+ # Create endpoint and check that it is listed
+ psa_index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ network=e2e_base._VPC_NETWORK_URI,
+ labels=_TEST_LABELS,
+ )
+ assert psa_index_endpoint.resource_name in [
+ index_endpoint.resource_name
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list()
+ ]
+
+ assert psa_index_endpoint.labels == _TEST_LABELS
+ assert psa_index_endpoint.display_name == _TEST_INDEX_ENDPOINT_DISPLAY_NAME
+ assert psa_index_endpoint.description == _TEST_INDEX_ENDPOINT_DESCRIPTION
+
+ # Create endpoint and check that it is listed
+ public_index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_PUBLIC_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_PUBLIC_INDEX_ENDPOINT_DESCRIPTION,
+ public_endpoint_enabled=True,
+ labels=_TEST_LABELS,
+ )
+ assert public_index_endpoint.resource_name in [
+ index_endpoint.resource_name
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list()
+ ]
+
+ assert public_index_endpoint.labels == _TEST_LABELS
+ assert (
+ public_index_endpoint.display_name
+ == _TEST_PUBLIC_INDEX_ENDPOINT_DISPLAY_NAME
+ )
+ assert (
+ public_index_endpoint.description == _TEST_PUBLIC_INDEX_ENDPOINT_DESCRIPTION
+ )
+
+ shared_state["resources"].append(psa_index_endpoint)
+
+ # Deploy endpoint
+ psa_index_endpoint = psa_index_endpoint.deploy_index(
+ index=index,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ )
+
+ # Deploy public endpoint
+ public_index_endpoint = public_index_endpoint.deploy_index(
+ index=index,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID_PUBLIC,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME_PUBLIC,
+ min_replica_count=_TEST_MIN_REPLICA_COUNT_UPDATED,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT_UPDATED,
+ )
+
+ # Update endpoint
+ updated_index_endpoint = psa_index_endpoint.update(
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ )
+
+ assert updated_index_endpoint.labels == _TEST_LABELS_UPDATE
+ assert updated_index_endpoint.display_name == _TEST_DISPLAY_NAME_UPDATE
+ assert updated_index_endpoint.description == _TEST_DESCRIPTION_UPDATE
+
+ # Mutate deployed index
+ psa_index_endpoint.mutate_deployed_index(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ min_replica_count=_TEST_MIN_REPLICA_COUNT_UPDATED,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT_UPDATED,
+ )
+
+ # deployed index on private endpoint.
+ deployed_index = psa_index_endpoint.deployed_indexes[0]
+
+ assert deployed_index.id == _TEST_DEPLOYED_INDEX_ID
+ assert deployed_index.index == index.resource_name
+ assert (
+ deployed_index.automatic_resources.min_replica_count
+ == _TEST_MIN_REPLICA_COUNT_UPDATED
+ )
+ assert (
+ deployed_index.automatic_resources.max_replica_count
+ == _TEST_MAX_REPLICA_COUNT_UPDATED
+ )
+
+ # deployed index on public endpoint.
+ deployed_index_public = public_index_endpoint.deployed_indexes[0]
+
+ assert deployed_index_public.id == _TEST_DEPLOYED_INDEX_ID_PUBLIC
+ assert deployed_index_public.index == index.resource_name
+ assert (
+ deployed_index_public.automatic_resources.min_replica_count
+ == _TEST_MIN_REPLICA_COUNT_UPDATED
+ )
+ assert (
+ deployed_index_public.automatic_resources.max_replica_count
+ == _TEST_MAX_REPLICA_COUNT_UPDATED
+ )
+
+ # TODO: Test `psa_index_endpoint.match` request. This requires running this test in a VPC.
+ # results = psa_index_endpoint.match(
+ # deployed_index_id=_TEST_DEPLOYED_INDEX_ID, queries=[_TEST_MATCH_QUERY]
+ # )
+
+ # assert results[0][0].id == 870
+
+ # TODO: Test `psa_index_endpoint.match` with filter.
+ # This requires uploading a new content of the Matching Engine Index to Cloud Storage.
+ # results = psa_index_endpoint.match(
+ # deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ # queries=[_TEST_MATCH_QUERY],
+ # num_neighbors=1,
+ # filter=_TEST_FILTER,
+ # )
+ # assert results[0][0].id == 9999
+
+ # FindNeighbors query for public index
+ results = public_index_endpoint.find_neighbors(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID_PUBLIC,
+ queries=[_TEST_MATCH_QUERY],
+ )
+ assert results[0][0].id == "0"
+
+ # Undeploy index from private endpoint
+ psa_index_endpoint = psa_index_endpoint.undeploy_index(
+ deployed_index_id=deployed_index.id
+ )
+
+ # Undeploy index from public endpoint
+ public_index_endpoint = public_index_endpoint.undeploy_index(
+ deployed_index_id=deployed_index_public.id
+ )
+
+ # Delete index and check that it is no longer listed
+ index.delete()
+ list_indexes = aiplatform.MatchingEngineIndex.list()
+ assert get_index.resource_name not in [
+ index.resource_name for index in list_indexes
+ ]
+
+ # Delete index endpoint and check that it is no longer listed
+ psa_index_endpoint.delete()
+ assert psa_index_endpoint.resource_name not in [
+ index_endpoint.resource_name
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list()
+ ]
+
+ # Delete public index endpoint
+ public_index_endpoint.delete()
+ assert public_index_endpoint.resource_name not in [
+ index_endpoint.resource_name
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list()
+ ]
+
+ def test_matching_engine_stream_index(self, shared_state):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ # Clean up resources from previous test runs.
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list():
+ for deployed_index in index_endpoint.deployed_indexes:
+ index_endpoint.undeploy_index(deployed_index_id=deployed_index.id)
+ index_endpoint.delete()
+
+ for index in aiplatform.MatchingEngineIndex.list():
+ index.delete()
+
+ # Create an index
+ stream_index = aiplatform.MatchingEngineIndex.create_tree_ah_index(
+ display_name=_TEST_STREAM_INDEX_DISPLAY_NAME,
+ contents_delta_uri=_TEST_CONTENTS_DELTA_URI,
+ dimensions=_TEST_INDEX_CONFIG_DIMENSIONS,
+ approximate_neighbors_count=_TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT,
+ distance_measure_type=_TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ leaf_node_embedding_count=_TEST_LEAF_NODE_EMBEDDING_COUNT,
+ leaf_nodes_to_search_percent=_TEST_LEAF_NODES_TO_SEARCH_PERCENT,
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ index_update_method="STREAM_UPDATE",
+ )
+
+ shared_state["resources"].append(stream_index)
+ shared_state["stream_index"] = stream_index
+ shared_state["stream_index_name"] = stream_index.resource_name
+
+ # Verify that the retrieved index is the same
+ get_index = aiplatform.MatchingEngineIndex(
+ index_name=stream_index.resource_name
+ )
+ assert stream_index.resource_name == get_index.resource_name
+
+ # Create index and check that it is listed
+ list_indexes = aiplatform.MatchingEngineIndex.list()
+ assert get_index.resource_name in [
+ index.resource_name for index in list_indexes
+ ]
+
+ # Update the index metadata
+ updated_index = get_index.update_metadata(
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ )
+
+ assert updated_index.name == get_index.name
+
+ # Update the index embeddings
+ updated_index = get_index.update_embeddings(
+ contents_delta_uri=_TEST_CONTENTS_DELTA_URI_UPDATE,
+ is_complete_overwrite=_TEST_IS_COMPLETE_OVERWRITE,
+ )
+
+ assert updated_index.name == get_index.name
+
+ # Create endpoint and check that it is listed
+ psa_index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ network=e2e_base._VPC_NETWORK_URI,
+ labels=_TEST_LABELS,
+ )
+ assert psa_index_endpoint.resource_name in [
+ index_endpoint.resource_name
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list()
+ ]
+
+ assert psa_index_endpoint.labels == _TEST_LABELS
+ assert psa_index_endpoint.display_name == _TEST_INDEX_ENDPOINT_DISPLAY_NAME
+ assert psa_index_endpoint.description == _TEST_INDEX_ENDPOINT_DESCRIPTION
+
+ # Create endpoint and check that it is listed
+ public_index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_PUBLIC_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_PUBLIC_INDEX_ENDPOINT_DESCRIPTION,
+ public_endpoint_enabled=True,
+ labels=_TEST_LABELS,
+ )
+ assert public_index_endpoint.resource_name in [
+ index_endpoint.resource_name
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list()
+ ]
+
+ assert public_index_endpoint.labels == _TEST_LABELS
+ assert (
+ public_index_endpoint.display_name
+ == _TEST_PUBLIC_INDEX_ENDPOINT_DISPLAY_NAME
+ )
+ assert (
+ public_index_endpoint.description == _TEST_PUBLIC_INDEX_ENDPOINT_DESCRIPTION
+ )
+
+ shared_state["resources"].append(psa_index_endpoint)
+
+ # Deploy endpoint
+ psa_index_endpoint = psa_index_endpoint.deploy_index(
+ index=stream_index,
+ deployed_index_id=_TEST_DEPLOYED_STREAM_INDEX_ID,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ )
+
+ # Deploy public endpoint
+ public_index_endpoint = public_index_endpoint.deploy_index(
+ index=stream_index,
+ deployed_index_id=_TEST_DEPLOYED_STREAM_INDEX_ID_PUBLIC,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME_PUBLIC,
+ min_replica_count=_TEST_MIN_REPLICA_COUNT_UPDATED,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT_UPDATED,
+ )
+
+ # Update endpoint
+ updated_index_endpoint = psa_index_endpoint.update(
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ )
+
+ assert updated_index_endpoint.labels == _TEST_LABELS_UPDATE
+ assert updated_index_endpoint.display_name == _TEST_DISPLAY_NAME_UPDATE
+ assert updated_index_endpoint.description == _TEST_DESCRIPTION_UPDATE
+
+ # Mutate deployed index
+ psa_index_endpoint.mutate_deployed_index(
+ deployed_index_id=_TEST_DEPLOYED_STREAM_INDEX_ID,
+ min_replica_count=_TEST_MIN_REPLICA_COUNT_UPDATED,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT_UPDATED,
+ )
+
+ # deployed index on private endpoint.
+ deployed_index = psa_index_endpoint.deployed_indexes[0]
+
+ assert deployed_index.id == _TEST_DEPLOYED_STREAM_INDEX_ID
+ assert deployed_index.index == stream_index.resource_name
+ assert (
+ deployed_index.automatic_resources.min_replica_count
+ == _TEST_MIN_REPLICA_COUNT_UPDATED
+ )
+ assert (
+ deployed_index.automatic_resources.max_replica_count
+ == _TEST_MAX_REPLICA_COUNT_UPDATED
+ )
+
+ # deployed index on public endpoint.
+ deployed_index_public = public_index_endpoint.deployed_indexes[0]
+
+ assert deployed_index_public.id == _TEST_DEPLOYED_STREAM_INDEX_ID_PUBLIC
+ assert deployed_index_public.index == stream_index.resource_name
+ assert (
+ deployed_index_public.automatic_resources.min_replica_count
+ == _TEST_MIN_REPLICA_COUNT_UPDATED
+ )
+ assert (
+ deployed_index_public.automatic_resources.max_replica_count
+ == _TEST_MAX_REPLICA_COUNT_UPDATED
+ )
+
+ # TODO: Test `psa_index_endpoint.match` request. This requires running this test in a VPC.
+ # results = psa_index_endpoint.match(
+ # deployed_index_id=_TEST_DEPLOYED_INDEX_ID, queries=[_TEST_MATCH_QUERY]
+ # )
+
+ # assert results[0][0].id == 870
+
+ # TODO: Test `psa_index_endpoint.match` with filter.
+ # This requires uploading a new content of the Matching Engine Index to Cloud Storage.
+ # results = psa_index_endpoint.match(
+ # deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ # queries=[_TEST_MATCH_QUERY],
+ # num_neighbors=1,
+ # filter=_TEST_FILTER,
+ # )
+ # assert results[0][0].id == 9999
+
+ # Upsert datapoint to stream index
+ stream_index.upsert_datapoints(datapoints=_TEST_STREAM_INDEX_DATAPOINTS)
+
+ # Remove datapoint upserted to stream index
+ stream_index.remove_datapoints(datapoint_ids="upsert_0")
+
+ # Undeploy index from private endpoint
+ psa_index_endpoint = psa_index_endpoint.undeploy_index(
+ deployed_index_id=deployed_index.id
+ )
+
+ # Undeploy index from public endpoint
+ public_index_endpoint = public_index_endpoint.undeploy_index(
+ deployed_index_id=deployed_index_public.id
+ )
+
+ # Delete index and check that it is no longer listed
+ stream_index.delete()
+ list_indexes = aiplatform.MatchingEngineIndex.list()
+ assert get_index.resource_name not in [
+ index.resource_name for index in list_indexes
+ ]
+
+ # Delete index endpoint and check that it is no longer listed
+ psa_index_endpoint.delete()
+ assert psa_index_endpoint.resource_name not in [
+ index_endpoint.resource_name
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list()
+ ]
+
+ # Delete public index endpoint
+ public_index_endpoint.delete()
+ assert public_index_endpoint.resource_name not in [
+ index_endpoint.resource_name
+ for index_endpoint in aiplatform.MatchingEngineIndexEndpoint.list()
+ ]
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_interactions.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_interactions.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f24ccc7535090809c1b37b2ac5be4f6c9a11b3f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_interactions.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import json
+import pytest
+
+from google.cloud import aiplatform
+
+from tests.system.aiplatform import e2e_base
+
+_PERMANENT_IRIS_ENDPOINT_ID = "4966625964059525120"
+_PREDICTION_INSTANCE = {
+ "petal_length": "3.0",
+ "petal_width": "3.0",
+ "sepal_length": "3.0",
+ "sepal_width": "3.0",
+}
+
+
+class TestModelInteractions(e2e_base.TestEndToEnd):
+ _temp_prefix = ""
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ endpoint = aiplatform.Endpoint(_PERMANENT_IRIS_ENDPOINT_ID)
+
+ def test_prediction(self):
+ # test basic predict
+ prediction_response = self.endpoint.predict(instances=[_PREDICTION_INSTANCE])
+ assert len(prediction_response.predictions) == 1
+
+ # test predict(use_raw_predict = True)
+ prediction_with_raw_predict = self.endpoint.predict(
+ instances=[_PREDICTION_INSTANCE], use_raw_predict=True
+ )
+ assert (
+ prediction_with_raw_predict.deployed_model_id
+ == prediction_response.deployed_model_id
+ )
+ assert (
+ prediction_with_raw_predict.model_resource_name
+ == prediction_response.model_resource_name
+ )
+ assert (
+ prediction_with_raw_predict.model_version_id
+ == prediction_response.model_version_id
+ )
+
+ # test raw_predict
+ raw_prediction_response = self.endpoint.raw_predict(
+ json.dumps({"instances": [_PREDICTION_INSTANCE]}),
+ {"Content-Type": "application/json"},
+ )
+ assert raw_prediction_response.status_code == 200
+ assert len(json.loads(raw_prediction_response.text)) == 1
+
+ @pytest.mark.asyncio
+ async def test_endpoint_predict_async(self):
+ # Test the Endpoint.predict_async method.
+ prediction_response = await self.endpoint.predict_async(
+ instances=[_PREDICTION_INSTANCE]
+ )
+ assert len(prediction_response.predictions) == 1
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_monitoring.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_monitoring.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4c04ae3c854bba7a7fabcaab4559ee3aad57f01
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_monitoring.py
@@ -0,0 +1,440 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import time
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import model_monitoring
+from google.cloud.aiplatform.compat.types import job_state as gca_job_state
+from google.api_core import exceptions as core_exceptions
+from tests.system.aiplatform import e2e_base
+
+from google.cloud.aiplatform_v1.types import (
+ io as gca_io,
+ model_monitoring as gca_model_monitoring,
+)
+
+# constants used for testing
+USER_EMAIL = "rosiezou@cloudadvocacyorg.joonix.net"
+NOTIFICATION_CHANNEL = (
+ "projects/ucaip-sample-tests/notificationChannels/11578134490450491958"
+)
+PERMANENT_CHURN_MODEL_ID = "5295507484113371136"
+CHURN_MODEL_PATH = "gs://mco-mm/churn"
+DEFAULT_INPUT = {
+ "cnt_ad_reward": 0,
+ "cnt_challenge_a_friend": 0,
+ "cnt_completed_5_levels": 1,
+ "cnt_level_complete_quickplay": 3,
+ "cnt_level_end_quickplay": 5,
+ "cnt_level_reset_quickplay": 2,
+ "cnt_level_start_quickplay": 6,
+ "cnt_post_score": 34,
+ "cnt_spend_virtual_currency": 0,
+ "cnt_use_extra_steps": 0,
+ "cnt_user_engagement": 120,
+ "country": "Denmark",
+ "dayofweek": 3,
+ "julianday": 254,
+ "language": "da-dk",
+ "month": 9,
+ "operating_system": "IOS",
+ "user_pseudo_id": "104B0770BAE16E8B53DF330C95881893",
+}
+
+JOB_NAME = "churn"
+
+# Sampling rate (optional, default=.8)
+LOG_SAMPLE_RATE = 0.8
+
+# Monitoring Interval in hours
+MONITOR_INTERVAL = 1
+
+# URI to training dataset.
+DATASET_BQ_URI = "bq://mco-mm.bqmlga4.train"
+
+# Prediction target column name in training dataset.
+TARGET = "churned"
+
+# Skew and drift thresholds.
+DEFAULT_THRESHOLD_VALUE = 0.001
+SKEW_THRESHOLDS = {
+ "country": DEFAULT_THRESHOLD_VALUE,
+ "cnt_user_engagement": DEFAULT_THRESHOLD_VALUE,
+}
+DRIFT_THRESHOLDS = {
+ "country": DEFAULT_THRESHOLD_VALUE,
+ "cnt_user_engagement": DEFAULT_THRESHOLD_VALUE,
+}
+ATTRIB_SKEW_THRESHOLDS = {
+ "country": DEFAULT_THRESHOLD_VALUE,
+ "cnt_user_engagement": DEFAULT_THRESHOLD_VALUE,
+}
+ATTRIB_DRIFT_THRESHOLDS = {
+ "country": DEFAULT_THRESHOLD_VALUE,
+ "cnt_user_engagement": DEFAULT_THRESHOLD_VALUE,
+}
+
+# global test constants
+sampling_strategy = model_monitoring.RandomSampleConfig(sample_rate=LOG_SAMPLE_RATE)
+
+email_alert_config = model_monitoring.EmailAlertConfig(
+ user_emails=[USER_EMAIL], enable_logging=True
+)
+
+alert_config = model_monitoring.AlertConfig(
+ user_emails=[USER_EMAIL],
+ enable_logging=True,
+ notification_channels=[NOTIFICATION_CHANNEL],
+)
+
+schedule_config = model_monitoring.ScheduleConfig(monitor_interval=MONITOR_INTERVAL)
+
+skew_config = model_monitoring.SkewDetectionConfig(
+ data_source=DATASET_BQ_URI,
+ skew_thresholds=SKEW_THRESHOLDS,
+ attribute_skew_thresholds=ATTRIB_SKEW_THRESHOLDS,
+ target_field=TARGET,
+)
+
+drift_config = model_monitoring.DriftDetectionConfig(
+ drift_thresholds=DRIFT_THRESHOLDS,
+ attribute_drift_thresholds=ATTRIB_DRIFT_THRESHOLDS,
+)
+
+drift_config2 = model_monitoring.DriftDetectionConfig(
+ drift_thresholds=DRIFT_THRESHOLDS,
+)
+
+objective_config = model_monitoring.ObjectiveConfig(skew_config, drift_config)
+
+objective_config2 = model_monitoring.ObjectiveConfig(skew_config, drift_config2)
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestModelDeploymentMonitoring(e2e_base.TestEndToEnd):
+ _temp_prefix = "temp_e2e_model_monitoring_test_"
+
+ def test_create_endpoint(self, shared_state):
+ # initial setup
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ self.endpoint = aiplatform.Endpoint.create(self._make_display_name("endpoint"))
+ shared_state["resources"] = [self.endpoint]
+ self.model = aiplatform.Model(PERMANENT_CHURN_MODEL_ID)
+ self.endpoint.deploy(
+ self.model,
+ deployed_model_display_name=self._make_display_name(key=JOB_NAME),
+ )
+ self.endpoint.deploy(
+ self.model,
+ deployed_model_display_name=self._make_display_name(key=JOB_NAME),
+ traffic_percentage=50,
+ )
+
+ def test_mdm_two_models_one_valid_config(self, shared_state):
+ """
+ Enable model monitoring on two existing models deployed to the same endpoint.
+ """
+ assert len(shared_state["resources"]) == 1
+ self.endpoint = shared_state["resources"][0]
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ # test model monitoring configurations
+ job = aiplatform.ModelDeploymentMonitoringJob.create(
+ display_name=self._make_display_name(key=JOB_NAME),
+ logging_sampling_strategy=sampling_strategy,
+ schedule_config=schedule_config,
+ alert_config=email_alert_config,
+ objective_configs=objective_config,
+ create_request_timeout=3600,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ endpoint=self.endpoint,
+ )
+
+ gapic_job = job._gca_resource
+ assert (
+ gapic_job.logging_sampling_strategy.random_sample_config.sample_rate
+ == LOG_SAMPLE_RATE
+ )
+ assert (
+ gapic_job.model_deployment_monitoring_schedule_config.monitor_interval.seconds
+ == MONITOR_INTERVAL * 3600
+ )
+ assert (
+ gapic_job.model_monitoring_alert_config.email_alert_config.user_emails
+ == [USER_EMAIL]
+ )
+ assert gapic_job.model_monitoring_alert_config.enable_logging
+ assert len(gapic_job.model_deployment_monitoring_objective_configs) == 2
+
+ gca_obj_config = gapic_job.model_deployment_monitoring_objective_configs[
+ 0
+ ].objective_config
+
+ expected_training_dataset = (
+ gca_model_monitoring.ModelMonitoringObjectiveConfig.TrainingDataset(
+ bigquery_source=gca_io.BigQuerySource(input_uri=DATASET_BQ_URI),
+ target_field=TARGET,
+ )
+ )
+ assert gca_obj_config.training_dataset == expected_training_dataset
+ assert (
+ gca_obj_config.training_prediction_skew_detection_config
+ == skew_config.as_proto()
+ )
+ assert (
+ gca_obj_config.prediction_drift_detection_config == drift_config.as_proto()
+ )
+
+ # delete this job and re-configure it to only enable drift detection for faster testing
+ job.delete()
+ job_resource = job._gca_resource.name
+
+ # test job delete
+ with pytest.raises(core_exceptions.NotFound):
+ job.api_client.get_model_deployment_monitoring_job(name=job_resource)
+
+ # TODO(b/275569167) Uncomment this after timeout issue is resolved
+ @pytest.mark.skip(reason="System tests timing out")
+ def test_mdm_pause_and_update_config(self, shared_state):
+ """Test objective config updates for existing MDM job"""
+ assert len(shared_state["resources"]) == 1
+ self.endpoint = shared_state["resources"][0]
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ job = aiplatform.ModelDeploymentMonitoringJob.create(
+ display_name=self._make_display_name(key=JOB_NAME),
+ logging_sampling_strategy=sampling_strategy,
+ schedule_config=schedule_config,
+ alert_config=email_alert_config,
+ objective_configs=model_monitoring.ObjectiveConfig(
+ drift_detection_config=drift_config
+ ),
+ create_request_timeout=3600,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ endpoint=self.endpoint,
+ )
+ # test unsuccessful job update when it's pending
+ DRIFT_THRESHOLDS["cnt_user_engagement"] += 0.01
+ new_obj_config = model_monitoring.ObjectiveConfig(
+ drift_detection_config=model_monitoring.DriftDetectionConfig(
+ drift_thresholds=DRIFT_THRESHOLDS,
+ attribute_drift_thresholds=ATTRIB_DRIFT_THRESHOLDS,
+ )
+ )
+ if job.state == gca_job_state.JobState.JOB_STATE_PENDING:
+ with pytest.raises(core_exceptions.FailedPrecondition):
+ job.update(objective_configs=new_obj_config)
+
+ # generate traffic to force MDM job to come online
+ for i in range(2000):
+ DEFAULT_INPUT["cnt_user_engagement"] += i
+ self.endpoint.predict([DEFAULT_INPUT], use_raw_predict=True)
+
+ # test job update
+ while True:
+ time.sleep(1)
+ if job.state == gca_job_state.JobState.JOB_STATE_RUNNING:
+ job.update(objective_configs=new_obj_config)
+ break
+
+ # verify job update
+ while True:
+ time.sleep(1)
+ if job.state == gca_job_state.JobState.JOB_STATE_RUNNING:
+ gca_obj_config = (
+ job._gca_resource.model_deployment_monitoring_objective_configs[
+ 0
+ ].objective_config
+ )
+ assert (
+ gca_obj_config.prediction_drift_detection_config
+ == new_obj_config.drift_detection_config.as_proto()
+ )
+ break
+
+ # test pause
+ job.pause()
+ while job.state != gca_job_state.JobState.JOB_STATE_PAUSED:
+ time.sleep(1)
+ job.delete()
+
+ # confirm deletion
+ with pytest.raises(core_exceptions.NotFound):
+ job.state
+
+ def test_mdm_two_models_two_valid_configs(self, shared_state):
+ assert len(shared_state["resources"]) == 1
+ self.endpoint = shared_state["resources"][0]
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ [deployed_model1, deployed_model2] = list(
+ map(lambda x: x.id, self.endpoint.list_models())
+ )
+ all_configs = {
+ deployed_model1: objective_config,
+ deployed_model2: objective_config2,
+ }
+ job = aiplatform.ModelDeploymentMonitoringJob.create(
+ display_name=self._make_display_name(key=JOB_NAME),
+ logging_sampling_strategy=sampling_strategy,
+ schedule_config=schedule_config,
+ alert_config=email_alert_config,
+ objective_configs=all_configs,
+ create_request_timeout=3600,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ endpoint=self.endpoint,
+ )
+
+ gapic_job = job._gca_resource
+ assert (
+ gapic_job.logging_sampling_strategy.random_sample_config.sample_rate
+ == LOG_SAMPLE_RATE
+ )
+ assert (
+ gapic_job.model_deployment_monitoring_schedule_config.monitor_interval.seconds
+ == MONITOR_INTERVAL * 3600
+ )
+ assert (
+ gapic_job.model_monitoring_alert_config.email_alert_config.user_emails
+ == [USER_EMAIL]
+ )
+ assert gapic_job.model_monitoring_alert_config.enable_logging
+
+ expected_training_dataset = (
+ gca_model_monitoring.ModelMonitoringObjectiveConfig.TrainingDataset(
+ bigquery_source=gca_io.BigQuerySource(input_uri=DATASET_BQ_URI),
+ target_field=TARGET,
+ )
+ )
+
+ for config in gapic_job.model_deployment_monitoring_objective_configs:
+ gca_obj_config = config.objective_config
+ deployed_model_id = config.deployed_model_id
+ assert gca_obj_config.training_dataset == expected_training_dataset
+ assert (
+ gca_obj_config.training_prediction_skew_detection_config
+ == all_configs[deployed_model_id].skew_detection_config.as_proto()
+ )
+ assert (
+ gca_obj_config.prediction_drift_detection_config
+ == all_configs[deployed_model_id].drift_detection_config.as_proto()
+ )
+
+ job.delete()
+
+ def test_mdm_invalid_config_incorrect_model_id(self, shared_state):
+ assert len(shared_state["resources"]) == 1
+ self.endpoint = shared_state["resources"][0]
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ with pytest.raises(ValueError) as e:
+ aiplatform.ModelDeploymentMonitoringJob.create(
+ display_name=self._make_display_name(key=JOB_NAME),
+ logging_sampling_strategy=sampling_strategy,
+ schedule_config=schedule_config,
+ alert_config=email_alert_config,
+ objective_configs=objective_config,
+ create_request_timeout=3600,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ endpoint=self.endpoint,
+ deployed_model_ids=[""],
+ )
+ assert "Invalid model ID" in str(e.value)
+
+ def test_mdm_invalid_config_xai(self, shared_state):
+ assert len(shared_state["resources"]) == 1
+ self.endpoint = shared_state["resources"][0]
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ with pytest.raises(RuntimeError) as e:
+ objective_config.explanation_config = model_monitoring.ExplanationConfig()
+ aiplatform.ModelDeploymentMonitoringJob.create(
+ display_name=self._make_display_name(key=JOB_NAME),
+ logging_sampling_strategy=sampling_strategy,
+ schedule_config=schedule_config,
+ alert_config=email_alert_config,
+ objective_configs=objective_config,
+ create_request_timeout=3600,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ endpoint=self.endpoint,
+ )
+ assert (
+ "`explanation_config` should only be enabled if the model has `explanation_spec populated"
+ in str(e.value)
+ )
+
+ def test_mdm_two_models_invalid_configs_xai(self, shared_state):
+ assert len(shared_state["resources"]) == 1
+ self.endpoint = shared_state["resources"][0]
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ [deployed_model1, deployed_model2] = list(
+ map(lambda x: x.id, self.endpoint.list_models())
+ )
+ objective_config.explanation_config = model_monitoring.ExplanationConfig()
+ all_configs = {
+ deployed_model1: objective_config,
+ deployed_model2: objective_config2,
+ }
+ with pytest.raises(RuntimeError) as e:
+ objective_config.explanation_config = model_monitoring.ExplanationConfig()
+ aiplatform.ModelDeploymentMonitoringJob.create(
+ display_name=self._make_display_name(key=JOB_NAME),
+ logging_sampling_strategy=sampling_strategy,
+ schedule_config=schedule_config,
+ alert_config=email_alert_config,
+ objective_configs=all_configs,
+ create_request_timeout=3600,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ endpoint=self.endpoint,
+ )
+ assert (
+ "`explanation_config` should only be enabled if the model has `explanation_spec populated"
+ in str(e.value)
+ )
+
+ def test_mdm_notification_channel_alert_config(self, shared_state):
+ self.endpoint = shared_state["resources"][0]
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ # Reset objective_config.explanation_config
+ objective_config.explanation_config = None
+ # test model monitoring configurations
+ job = aiplatform.ModelDeploymentMonitoringJob.create(
+ display_name=self._make_display_name(key=JOB_NAME),
+ logging_sampling_strategy=sampling_strategy,
+ schedule_config=schedule_config,
+ alert_config=alert_config,
+ objective_configs=objective_config,
+ create_request_timeout=3600,
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ endpoint=self.endpoint,
+ )
+
+ gapic_job = job._gca_resource
+ assert (
+ gapic_job.model_monitoring_alert_config.email_alert_config.user_emails
+ == [USER_EMAIL]
+ )
+ assert gapic_job.model_monitoring_alert_config.enable_logging
+ assert gapic_job.model_monitoring_alert_config.notification_channels == [
+ NOTIFICATION_CHANNEL
+ ]
+
+ job.delete()
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_upload.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_upload.py
new file mode 100644
index 0000000000000000000000000000000000000000..d622b28a1c3db3685c5fc40877d2e91947b2fdd0
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_upload.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import tempfile
+
+import pytest
+
+from google.cloud import aiplatform
+from google.cloud import storage
+
+from tests.system.aiplatform import e2e_base
+
+
+_XGBOOST_MODEL_URI = "gs://cloud-samples-data-us-central1/vertex-ai/google-cloud-aiplatform-ci-artifacts/models/iris_xgboost/model.bst"
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestModelUploadAndUpdate(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "temp_vertex_sdk_e2e_model_upload_test"
+
+ def test_upload_and_deploy_xgboost_model(self, shared_state):
+ """Upload XGBoost model from local file and deploy it for prediction. Additionally, update model name, description and labels"""
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ storage_client = storage.Client(project=e2e_base._PROJECT)
+ model_blob = storage.Blob.from_string(
+ uri=_XGBOOST_MODEL_URI, client=storage_client
+ )
+ model_path = tempfile.mktemp() + ".my_model.xgb"
+ model_blob.download_to_filename(filename=model_path)
+
+ model = aiplatform.Model.upload_xgboost_model_file(
+ model_file_path=model_path,
+ )
+ shared_state["resources"] = [model]
+
+ staging_bucket = storage.Blob.from_string(
+ uri=model.uri, client=storage_client
+ ).bucket
+ # Checking that the bucket is auto-generated
+ assert "-vertex-staging-" in staging_bucket.name
+
+ # Currently we need to explicitly specify machine type.
+ # See https://github.com/googleapis/python-aiplatform/issues/773
+ endpoint = model.deploy(machine_type="n1-standard-2")
+ shared_state["resources"].append(endpoint)
+
+ # test model update
+ model = model.update(
+ display_name="new_name",
+ description="new_description",
+ labels={"my_label": "updated"},
+ )
+ assert model.display_name == "new_name"
+ assert model.description == "new_description"
+ assert model.labels == {"my_label": "updated"}
+
+ assert len(endpoint.list_models()) == 1
+ endpoint.deploy(model, traffic_percentage=100)
+ assert len(endpoint.list_models()) == 2
+ traffic_split = {
+ deployed_model.id: 50 for deployed_model in endpoint.list_models()
+ }
+ endpoint.update(traffic_split=traffic_split)
+ assert endpoint.traffic_split == traffic_split
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_version_management.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_version_management.py
new file mode 100644
index 0000000000000000000000000000000000000000..e58b06ff98dd930c9f1cad3e3c6c56de6127f44c
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_model_version_management.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import tempfile
+import uuid
+
+import pytest
+
+from google.cloud import aiplatform
+from google.cloud import storage
+from google.cloud.aiplatform.models import ModelRegistry
+
+from tests.system.aiplatform import e2e_base
+from tests.system.aiplatform import test_model_upload
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestVersionManagement(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "temp_vertex_sdk_e2e_model_upload_test"
+
+ def test_upload_deploy_manage_versioned_model(self, shared_state):
+ """Upload XGBoost model from local file and deploy it for prediction. Additionally, update model name, description and labels"""
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ storage_client = storage.Client(project=e2e_base._PROJECT)
+ model_blob = storage.Blob.from_string(
+ uri=test_model_upload._XGBOOST_MODEL_URI, client=storage_client
+ )
+ model_path = tempfile.mktemp() + ".my_model.xgb"
+ model_blob.download_to_filename(filename=model_path)
+
+ model_id = "my_model_id" + uuid.uuid4().hex
+ version_description = "My description"
+ version_aliases = ["system-test-model", "testing"]
+
+ model = aiplatform.Model.upload_xgboost_model_file(
+ model_file_path=model_path,
+ version_aliases=version_aliases,
+ model_id=model_id,
+ version_description=version_description,
+ )
+ shared_state["resources"] = [model]
+
+ staging_bucket = storage.Blob.from_string(
+ uri=model.uri, client=storage_client
+ ).bucket
+ # Checking that the bucket is auto-generated
+ assert "-vertex-staging-" in staging_bucket.name
+
+ assert model.version_description == version_description
+ assert model.version_aliases == version_aliases
+ assert "default" in model.version_aliases
+
+ model2 = aiplatform.Model.upload_xgboost_model_file(
+ model_file_path=model_path, parent_model=model_id, is_default_version=False
+ )
+ shared_state["resources"].append(model2)
+
+ assert model2.version_id == "2"
+ assert model2.resource_name == model.resource_name
+ assert model2.version_aliases == []
+
+ # Test that VersionInfo properties are correct.
+ model_info = model2.versioning_registry.get_version_info("testing")
+ version_list = model2.versioning_registry.list_versions()
+ assert len(version_list) == 2
+ list_info = version_list[0]
+ assert model_info.version_id == list_info.version_id == model.version_id
+ assert (
+ model_info.version_aliases
+ == list_info.version_aliases
+ == model.version_aliases
+ )
+ assert (
+ model_info.version_description
+ == list_info.version_description
+ == model.version_description
+ )
+ assert (
+ model_info.model_display_name
+ == list_info.model_display_name
+ == model.display_name
+ )
+ assert (
+ model_info.version_update_time
+ == list_info.version_update_time
+ == model.version_update_time
+ )
+
+ # Test that get_model yields a new instance of `model`
+ model_clone = model2.versioning_registry.get_model()
+ assert model.resource_name == model_clone.resource_name
+ assert model.version_id == model_clone.version_id
+ assert model.name == model_clone.name
+
+ # Test add and removal of aliases
+ registry = ModelRegistry(model)
+ registry.add_version_aliases(["new-alias"], "default")
+ registry.remove_version_aliases(["testing"], "new-alias")
+ model = registry.get_model("new-alias")
+ assert "testing" not in model.version_aliases
+
+ # Test deletion of a model version
+ registry.delete_version("2")
+ versions = registry.list_versions()
+ assert "2" not in [version.version_id for version in versions]
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_persistent_resource.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_persistent_resource.py
new file mode 100644
index 0000000000000000000000000000000000000000..66268d18185d3b7ba3ca6f48343d73faad906bf4
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_persistent_resource.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import importlib
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import persistent_resource
+from google.cloud.aiplatform.compat.types import (
+ machine_resources_v1 as gca_machine_resources,
+)
+from google.cloud.aiplatform.compat.types import (
+ persistent_resource_v1 as gca_persistent_resource,
+)
+from tests.system.aiplatform import e2e_base
+import pytest
+
+
+_TEST_MACHINE_TYPE = "n1-standard-4"
+_TEST_INITIAL_REPLICA_COUNT = 2
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestPersistentResource(e2e_base.TestEndToEnd):
+ _temp_prefix = "test-pr-e2e"
+
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+ self.resources = []
+
+ def test_create_persistent_resource(self, shared_state):
+ # PersistentResource ID must be shorter than 64 characters.
+ # IE: "test-pr-e2e-ea3ae19d-3d94-4818-8ecd-1a7a63d7418c"
+ resource_id = self._make_display_name("")
+ resource_pools = [
+ gca_persistent_resource.ResourcePool(
+ machine_spec=gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ ),
+ replica_count=_TEST_INITIAL_REPLICA_COUNT,
+ )
+ ]
+
+ test_resource = persistent_resource.PersistentResource.create(
+ persistent_resource_id=resource_id, resource_pools=resource_pools
+ )
+
+ shared_state["resources"] = [test_resource]
+
+ assert (
+ test_resource.state
+ == gca_persistent_resource.PersistentResource.State.RUNNING
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_pipeline_job.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_pipeline_job.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeff93f1d2e9fbe35858cbe453dfa9543df7304c
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_pipeline_job.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from google.cloud import aiplatform
+from tests.system.aiplatform import e2e_base
+from kfp import compiler
+from kfp import dsl
+
+from google.protobuf.json_format import MessageToDict
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestPipelineJob(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "tmpvrtxsdk-e2e"
+
+ def test_add_pipeline_job_to_experiment(self, shared_state):
+
+ # Components:
+ @dsl.component
+ def train(
+ number_of_epochs: int,
+ learning_rate: float,
+ ):
+ print(f"number_of_epochs={number_of_epochs}")
+ print(f"learning_rate={learning_rate}")
+
+ # Pipeline:
+ @dsl.pipeline
+ def training_pipeline(number_of_epochs: int = 10):
+ train(
+ number_of_epochs=number_of_epochs,
+ learning_rate=0.1,
+ )
+
+ # Submitting the pipeline:
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ ir_file = "pipeline.yaml"
+ compiler.Compiler().compile(
+ pipeline_func=training_pipeline,
+ package_path=ir_file,
+ )
+ job = aiplatform.PipelineJob(
+ template_path=ir_file,
+ display_name="display_name",
+ )
+ job.submit()
+
+ shared_state.setdefault("resources", []).append(job)
+
+ job.wait()
+
+ list_with_read_mask = aiplatform.PipelineJob.list(enable_simple_view=True)
+ list_without_read_mask = aiplatform.PipelineJob.list()
+
+ # enable_simple_view=True should apply the `read_mask` filter to limit PipelineJob fields returned
+ assert "serviceAccount" in MessageToDict(
+ list_without_read_mask[0].gca_resource._pb
+ )
+ assert "serviceAccount" not in MessageToDict(
+ list_with_read_mask[0].gca_resource._pb
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_pipeline_job_schedule.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_pipeline_job_schedule.py
new file mode 100644
index 0000000000000000000000000000000000000000..7eab611a2149e6c7c67ea980f5d770cd611f7720
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_pipeline_job_schedule.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import pipeline_job_schedules
+from google.cloud.aiplatform.compat.types import schedule as gca_schedule
+from tests.system.aiplatform import e2e_base
+from kfp import compiler
+from kfp import dsl
+
+import pytest
+from google.protobuf.json_format import MessageToDict
+
+
+@pytest.mark.usefixtures(
+ "tear_down_resources", "prepare_staging_bucket", "delete_staging_bucket"
+)
+class TestPipelineJobSchedule(e2e_base.TestEndToEnd):
+ _temp_prefix = "tmpvrtxsdk-e2e-pjs"
+
+ def test_create_get_pause_resume_update_list(self, shared_state):
+ # Components:
+ @dsl.component
+ def train(
+ number_of_epochs: int,
+ learning_rate: float,
+ ):
+ print(f"number_of_epochs={number_of_epochs}")
+ print(f"learning_rate={learning_rate}")
+
+ # Pipeline:
+ @dsl.pipeline(name="system-test-training-pipeline")
+ def training_pipeline(number_of_epochs: int = 2):
+ train(
+ number_of_epochs=number_of_epochs,
+ learning_rate=0.1,
+ )
+
+ # Creating the pipeline job schedule.
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ ir_file = "pipeline.yaml"
+ compiler.Compiler().compile(
+ pipeline_func=training_pipeline,
+ package_path=ir_file,
+ )
+ job = aiplatform.PipelineJob(
+ template_path=ir_file,
+ display_name="display_name",
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job, display_name="pipeline_job_schedule_display_name"
+ )
+
+ max_run_count = 2
+ cron = "*/5 * * * *"
+ pipeline_job_schedule.create(
+ cron=cron,
+ max_run_count=max_run_count,
+ max_concurrent_run_count=2,
+ )
+
+ shared_state.setdefault("resources", []).append(pipeline_job_schedule)
+
+ # Pausing the pipeline job schedule.
+ pipeline_job_schedule.pause()
+ assert pipeline_job_schedule.state == gca_schedule.Schedule.State.PAUSED
+
+ # Before updating, confirm cron is correctly set from the create step.
+ assert pipeline_job_schedule.cron == cron
+
+ # Updating the pipeline job schedule.
+ new_cron = "* * * * *"
+ pipeline_job_schedule.update(cron=new_cron)
+ assert pipeline_job_schedule.cron == new_cron
+
+ # Resuming the pipeline job schedule.
+ pipeline_job_schedule.resume(catch_up=True)
+ assert pipeline_job_schedule.state == gca_schedule.Schedule.State.ACTIVE
+
+ pipeline_job_schedule.wait()
+
+ # Confirming that correct number of runs were scheduled and completed by this pipeline job schedule.
+ list_jobs_with_read_mask = pipeline_job_schedule.list_jobs()
+ assert len(list_jobs_with_read_mask) == max_run_count
+
+ list_jobs_without_read_mask = pipeline_job_schedule.list_jobs(
+ enable_simple_view=False
+ )
+
+ # enable_simple_view=True should apply the `read_mask` filter to limit PipelineJob fields returned
+ assert "serviceAccount" in MessageToDict(
+ list_jobs_without_read_mask[0].gca_resource._pb
+ )
+ assert "serviceAccount" not in MessageToDict(
+ list_jobs_with_read_mask[0].gca_resource._pb
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_prediction_cpr.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_prediction_cpr.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f12e939af2e9c48ac6b87a136db14a15d002c5b
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_prediction_cpr.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import datetime
+import json
+import logging
+import os
+import pytest
+import subprocess
+
+from tests.system.aiplatform.test_resources.cpr_user_code.predictor import (
+ SklearnPredictor,
+)
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform.prediction import LocalModel
+
+from tests.system.aiplatform import e2e_base
+
+_TIMESTAMP = f"{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}"
+_IMAGE_URI = f"gcr.io/ucaip-sample-tests/prediction-cpr/sklearn:{_TIMESTAMP}"
+_DIR_NAME = os.path.dirname(os.path.abspath(__file__))
+_USER_CODE_DIR = os.path.join(_DIR_NAME, "test_resources/cpr_user_code")
+_REQUIREMENTS_FILE = "requirements.txt"
+_DIR_NAME = os.path.dirname(os.path.abspath(__file__))
+_LOCAL_MODEL_DIR = os.path.join(_DIR_NAME, "test_resources/cpr_model")
+_ARTIFACT_URI = "gs://cloud-aiplatform-us-central1/vertex-ai/prediction-cpr/sklearn"
+_PREDICTION_INPUT = [[4.6, 3.1, 1.5, 0.2]]
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestPredictionCpr(e2e_base.TestEndToEnd):
+ """End to end system test of the Vertex SDK with Prediction custom prediction routines."""
+
+ _temp_prefix = "temp-vertex-sdk-e2e-prediction-cpr"
+
+ def test_build_cpr_model_upload_and_deploy(self, shared_state, caplog):
+ """Creates a CPR model from custom predictor, uploads it and deploys."""
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+
+ local_model = LocalModel.build_cpr_model(
+ _USER_CODE_DIR,
+ _IMAGE_URI,
+ predictor=SklearnPredictor,
+ requirements_path=os.path.join(_USER_CODE_DIR, _REQUIREMENTS_FILE),
+ )
+
+ with local_model.deploy_to_local_endpoint(
+ artifact_uri=_LOCAL_MODEL_DIR,
+ ) as local_endpoint:
+ local_predict_response = local_endpoint.predict(
+ request=f'{{"instances": {_PREDICTION_INPUT}}}',
+ headers={"Content-Type": "application/json"},
+ )
+ assert len(json.loads(local_predict_response.content)["predictions"]) == 1
+
+ interactive_local_endpoint = local_model.deploy_to_local_endpoint(
+ artifact_uri=_LOCAL_MODEL_DIR,
+ )
+ interactive_local_endpoint.serve()
+ interactive_local_predict_response = interactive_local_endpoint.predict(
+ request=f'{{"instances": {_PREDICTION_INPUT}}}',
+ headers={"Content-Type": "application/json"},
+ )
+ interactive_local_endpoint.stop()
+ assert (
+ len(json.loads(interactive_local_predict_response.content)["predictions"])
+ == 1
+ )
+
+ # Configure docker.
+ logging.info(
+ subprocess.run(["gcloud", "auth", "configure-docker"], capture_output=True)
+ )
+
+ local_model.push_image()
+
+ model = models.Model.upload(
+ local_model=local_model,
+ display_name=f"cpr_e2e_test_{_TIMESTAMP}",
+ artifact_uri=_ARTIFACT_URI,
+ serving_container_deployment_timeout=3600,
+ serving_container_shared_memory_size_mb=20,
+ )
+ shared_state["resources"] = [model]
+
+ # Currently we need to explicitly specify machine type.
+ # See https://github.com/googleapis/python-aiplatform/issues/773
+ endpoint = model.deploy(machine_type="n1-standard-2")
+ shared_state["resources"].append(endpoint)
+ predict_response = endpoint.predict(instances=_PREDICTION_INPUT)
+ assert len(predict_response.predictions) == 1
+
+ caplog.clear()
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_private_endpoint.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_private_endpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..443b5b8e57e6281cbd5e7ac4471e82853228afe5
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_private_endpoint.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import pytest
+
+from google.cloud import aiplatform
+
+from tests.system.aiplatform import e2e_base
+
+# permanent_custom_mnist_model
+_MODEL_ID = "6430031960164270080"
+_PRIVATE_ENDPOINT_NETWORK = "projects/580378083368/global/networks/private-endpoint-vpc"
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestPrivateEndpoint(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "temp_vertex_sdk_e2e"
+
+ def test_create_deploy_delete_private_endpoint(self, shared_state):
+ # Collection of resources generated by this test, to be deleted during teardown
+ shared_state["resources"] = []
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ private_endpoint = aiplatform.PrivateEndpoint.create(
+ display_name=self._make_display_name("private_endpoint_test"),
+ network=_PRIVATE_ENDPOINT_NETWORK,
+ )
+ shared_state["resources"].append(private_endpoint)
+
+ # Verify that the retrieved private Endpoint is the same
+ my_private_endpoint = aiplatform.PrivateEndpoint(
+ endpoint_name=private_endpoint.resource_name
+ )
+ assert private_endpoint.resource_name == my_private_endpoint.resource_name
+ assert private_endpoint.display_name == my_private_endpoint.display_name
+
+ # Verify the endpoint is in the private Endpoint list
+ list_private_endpoint = aiplatform.PrivateEndpoint.list()
+ assert private_endpoint.resource_name in [
+ private_endpoint.resource_name for private_endpoint in list_private_endpoint
+ ]
+
+ # Retrieve permanent model, deploy to private Endpoint, then undeploy
+ my_model = aiplatform.Model(model_name=_MODEL_ID)
+
+ my_private_endpoint.deploy(model=my_model)
+ assert my_private_endpoint._gca_resource.deployed_models
+
+ deployed_model_id = my_private_endpoint.list_models()[0].id
+ my_private_endpoint.undeploy(deployed_model_id=deployed_model_id)
+ assert not my_private_endpoint._gca_resource.deployed_models
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_project_id_inference.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_project_id_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..4150ed35e28c8180a6057f43232375408d5254c0
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_project_id_inference.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform.compat.types import job_state as gca_job_state
+from tests.system.aiplatform import e2e_base
+
+_SCRIPT = """
+from google.cloud import aiplatform
+# Not initializing the Vertex SDK explicitly
+# Checking the project ID
+print(aiplatform.initializer.global_config.project)
+assert not aiplatform.initializer.global_config.project.endswith("-tp")
+"""
+
+
+@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
+class TestProjectIDInference(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "temp-vertex-sdk-project-id-inference"
+
+ def test_project_id_inference(self, shared_state):
+ # Collection of resources generated by this test, to be deleted during teardown
+ shared_state["resources"] = []
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=shared_state["staging_bucket_name"],
+ )
+
+ worker_pool_specs = [
+ {
+ "machine_spec": {"machine_type": "n1-standard-4"},
+ "replica_count": 1,
+ "container_spec": {
+ "image_uri": "python:3.9",
+ "command": [
+ "sh",
+ "-exc",
+ """python3 -m pip install git+https://github.com/googleapis/python-aiplatform@main
+ "$0" "$@"
+ """,
+ "python3",
+ "-c",
+ _SCRIPT,
+ ],
+ "args": [],
+ },
+ }
+ ]
+
+ custom_job = aiplatform.CustomJob(
+ display_name=self._make_display_name("custom"),
+ worker_pool_specs=worker_pool_specs,
+ )
+ custom_job.run()
+
+ shared_state["resources"].append(custom_job)
+
+ assert custom_job.state == gca_job_state.JobState.JOB_STATE_SUCCEEDED
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/__init__.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8e1c3845db5b44e0d5727e3354929c81d631f15
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/cpr_user_code/predictor.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/cpr_user_code/predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dffe48ce1f416925ce6643aee4c33c2deb2033e
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/cpr_user_code/predictor.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import joblib
+import numpy as np
+from typing import Any
+
+from google.cloud.aiplatform.utils import prediction_utils
+from google.cloud.aiplatform.prediction.predictor import Predictor
+
+
+class SklearnPredictor(Predictor):
+ """Interface for Predictor class that users would be implementing."""
+
+ def __init__(self):
+ return
+
+ def load(self, artifacts_uri: str):
+ """Loads the model artifact.
+
+ Args:
+ artifacts_uri (str):
+ Required. The value of the environment variable AIP_STORAGE_URI.
+ """
+ prediction_utils.download_model_artifacts(artifacts_uri)
+ self._model = joblib.load("model.joblib")
+
+ def predict(self, instances: Any) -> Any:
+ """Performs prediction.
+
+ Args:
+ instances (Any):
+ Required. The instances to perform prediction.
+
+ Returns:
+ Prediction results.
+ """
+ instances = instances["instances"]
+ inputs = np.asarray(instances)
+ outputs = self._model.predict(inputs)
+ return {"predictions": outputs.tolist()}
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/cpr_user_code/requirements.txt b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/cpr_user_code/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..415fe18c4f6eb27a9ea08b27fc459c91be44ab66
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/cpr_user_code/requirements.txt
@@ -0,0 +1,2 @@
+scikit-learn
+google-cloud-aiplatform[prediction]
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/custom_job_script.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/custom_job_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad96e811adc4e64c680486d3638537fc83a38e05
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_resources/custom_job_script.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pandas as pd
+from sklearn.linear_model import LinearRegression
+
+# Create Dataset
+data = {"A": [1.1, 2.2, 4.1, 5.2], "B": [200, 212.12, 22, 123], "Y": [1, 0, 1, 0]}
+df = pd.DataFrame(data)
+X = df[["A", "B"]]
+Y = df["Y"]
+
+# Train model
+model = LinearRegression().fit(X, Y)
+model.score(X, Y)
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_telemetry.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_telemetry.py
new file mode 100644
index 0000000000000000000000000000000000000000..314c7c49f0df2cce5226d3b47c3a99b849a1731d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_telemetry.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google import auth
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import telemetry
+from tests.system.aiplatform import e2e_base
+
+from vertexai.generative_models import GenerativeModel
+
+GEMINI_MODEL_NAME = "gemini-1.0-pro-002"
+
+
+class TestTelemetry(e2e_base.TestEndToEnd):
+ """Tests the telemetry tool context manager."""
+
+ _temp_prefix = "test_telemetry_"
+
+ def setup_method(self):
+ super().setup_method()
+ credentials, _ = auth.default(
+ scopes=["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ credentials=credentials,
+ )
+
+ def test_single_context_manager(self):
+ with telemetry.tool_context_manager("context"):
+ model = GenerativeModel(GEMINI_MODEL_NAME)
+
+ model.generate_content("Why is the sky blue?")
+
+ def test_nested_context_manager(self):
+ with telemetry.tool_context_manager("outer"):
+ with telemetry.tool_context_manager("inner"):
+ model = GenerativeModel(GEMINI_MODEL_NAME)
+
+ model.generate_content("Why is the sky blue?")
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_tensorboard.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_tensorboard.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1262640dc44f0b6e9265340d628b4196f94ed8c
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_tensorboard.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from google.cloud import aiplatform
+from tests.system.aiplatform import e2e_base
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestTensorboard(e2e_base.TestEndToEnd):
+
+ _temp_prefix = "temp-vertex-sdk-e2e-test"
+
+ def test_create_and_get_tensorboard(self, shared_state):
+
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ display_name = self._make_display_name("tensorboard")
+
+ tb = aiplatform.Tensorboard.create(
+ display_name=display_name,
+ create_request_timeout=None,
+ )
+
+ shared_state["resources"] = [tb]
+ shared_state["tensorboard"] = tb
+
+ get_tb = aiplatform.Tensorboard(tb.resource_name)
+
+ assert tb.resource_name == get_tb.resource_name
+
+ list_tb = aiplatform.Tensorboard.list()
+
+ assert len(list_tb) > 0
+
+ def test_create_and_get_tensorboard_experiment(self, shared_state):
+ assert shared_state["tensorboard"]
+ tb = shared_state["tensorboard"]
+
+ tb_experiment = aiplatform.TensorboardExperiment.create(
+ tensorboard_experiment_id="vertex-sdk-e2e-test-experiment",
+ tensorboard_name=tb.resource_name,
+ display_name=self._make_display_name("tensorboard_experiment"),
+ description="Vertex SDK Integration test.",
+ labels={"test": "labels"},
+ create_request_timeout=None,
+ )
+
+ shared_state["resources"].append(tb_experiment)
+ shared_state["tensorboard_experiment"] = tb_experiment
+
+ get_tb_experiment = aiplatform.TensorboardExperiment(
+ tb_experiment.resource_name
+ )
+
+ assert tb_experiment.resource_name == get_tb_experiment.resource_name
+
+ list_tb_experiment = aiplatform.TensorboardExperiment.list(
+ tensorboard_name=tb.resource_name
+ )
+
+ assert len(list_tb_experiment) > 0
+
+ def test_create_and_get_tensorboard_run(self, shared_state):
+ assert shared_state["tensorboard_experiment"]
+ tb_experiment = shared_state["tensorboard_experiment"]
+
+ tb_run = aiplatform.TensorboardRun.create(
+ tensorboard_run_id="test-run",
+ tensorboard_experiment_name=tb_experiment.resource_name,
+ description="Vertex SDK Integration test run",
+ labels={"test": "labels"},
+ create_request_timeout=None,
+ )
+
+ shared_state["resources"].append(tb_run)
+ shared_state["tensorboard_run"] = tb_run
+
+ get_tb_run = aiplatform.TensorboardRun(tb_run.resource_name)
+
+ assert tb_run.resource_name == get_tb_run.resource_name
+
+ list_tb_run = aiplatform.TensorboardRun.list(
+ tensorboard_experiment_name=tb_experiment.resource_name
+ )
+
+ assert len(list_tb_run) > 0
+
+ def test_create_and_get_tensorboard_time_series(self, shared_state):
+ assert shared_state["tensorboard_run"]
+ tb_run = shared_state["tensorboard_run"]
+
+ tb_time_series = aiplatform.TensorboardTimeSeries.create(
+ display_name="test-time-series",
+ tensorboard_run_name=tb_run.resource_name,
+ description="Vertex SDK Integration test run",
+ )
+
+ shared_state["resources"].append(tb_time_series)
+ shared_state["tensorboard_time_series"] = tb_time_series
+
+ get_tb_time_series = aiplatform.TensorboardTimeSeries(
+ tb_time_series.resource_name
+ )
+
+ assert tb_time_series.resource_name == get_tb_time_series.resource_name
+
+ list_tb_time_series = aiplatform.TensorboardTimeSeries.list(
+ tensorboard_run_name=tb_run.resource_name
+ )
+
+ assert len(list_tb_time_series) > 0
+
+ def test_write_tensorboard_scalar_data(self, shared_state):
+ assert shared_state["tensorboard_time_series"]
+ assert shared_state["tensorboard_run"]
+ tb_run = shared_state["tensorboard_run"]
+ tb_time_series = shared_state["tensorboard_time_series"]
+
+ tb_run.write_tensorboard_scalar_data(
+ time_series_data={tb_time_series.display_name: 1.0},
+ step=1,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_vision_models.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_vision_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..b17f807f0d7028bcf1295bd78615a38e8bdd739a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_vision_models.py
@@ -0,0 +1,549 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=protected-access
+
+import os
+import tempfile
+
+from google.cloud import aiplatform
+from tests.system.aiplatform import e2e_base
+from vertexai import vision_models as ga_vision_models
+from vertexai.preview import vision_models
+from PIL import Image as PIL_Image
+
+
+def _create_blank_image(
+ width: int = 100,
+ height: int = 100,
+) -> vision_models.Image:
+ with tempfile.TemporaryDirectory() as temp_dir:
+ image_path = os.path.join(temp_dir, "image.png")
+ pil_image = PIL_Image.new(mode="RGB", size=(width, height))
+ pil_image.save(image_path, format="PNG")
+ return vision_models.Image.load_from_file(image_path)
+
+
+def _load_image_from_gcs(
+ gcs_uri: str = "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png",
+) -> vision_models.Image:
+ return vision_models.Image.load_from_file(gcs_uri)
+
+
+def _load_video_from_gcs(
+ gcs_uri: str = "gs://cloud-samples-data/vertex-ai-vision/highway_vehicles.mp4",
+) -> vision_models.Video:
+ return vision_models.Video.load_from_file(gcs_uri)
+
+
+class VisionModelTestSuite(e2e_base.TestEndToEnd):
+ """System tests for vision models."""
+
+ _temp_prefix = "temp_vision_models_test_"
+
+ def test_image_captioning_model_get_captions(self):
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+
+ model = ga_vision_models.ImageCaptioningModel.from_pretrained("imagetext")
+ image = _create_blank_image()
+ captions = model.get_captions(
+ image=image,
+ # Optional:
+ number_of_results=2,
+ language="en",
+ )
+ assert len(captions) == 2
+
+ def test_image_q_and_a_model_ask_question(self):
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+
+ model = ga_vision_models.ImageQnAModel.from_pretrained("imagetext")
+ image = _create_blank_image()
+ answers = model.ask_question(
+ image=image,
+ question="What color is the car in this image?",
+ # Optional:
+ number_of_results=2,
+ )
+ assert len(answers) == 2
+
+ def test_multi_modal_embedding_model(self):
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+
+ model = ga_vision_models.MultiModalEmbeddingModel.from_pretrained(
+ "multimodalembedding@001"
+ )
+ image = _create_blank_image()
+ embeddings = model.get_embeddings(
+ image=image,
+ # Optional:
+ contextual_text="this is a car",
+ )
+ # The service is expected to return the embeddings of size 1408
+ assert len(embeddings.image_embedding) == 1408
+ assert len(embeddings.text_embedding) == 1408
+
+ def test_multi_modal_embedding_model_with_gcs_uri(self):
+ aiplatform.init(project=e2e_base._PROJECT, location=e2e_base._LOCATION)
+
+ model = ga_vision_models.MultiModalEmbeddingModel.from_pretrained(
+ "multimodalembedding@001"
+ )
+ image = _load_image_from_gcs()
+ video = _load_video_from_gcs()
+ video_segment_config = vision_models.VideoSegmentConfig()
+ embeddings = model.get_embeddings(
+ image=image,
+ video=video,
+ # Optional:
+ contextual_text="this is a car",
+ video_segment_config=video_segment_config,
+ )
+ # The service is expected to return the embeddings of size 1408
+ assert len(embeddings.image_embedding) == 1408
+ assert len(embeddings.video_embeddings[0].embedding) == 1408
+ assert len(embeddings.text_embedding) == 1408
+
+ def test_image_generation_model_generate_images(self):
+ """Tests the image generation model generating images."""
+ model = vision_models.ImageGenerationModel.from_pretrained(
+ "imagegeneration@001"
+ )
+
+ # TODO(b/295946075): The service stopped supporting image sizes.
+ # width = 1024
+ # height = 768
+ number_of_images = 4
+ seed = 1
+ guidance_scale = 15
+ language = "en"
+
+ prompt1 = "Astronaut riding a horse"
+ negative_prompt1 = "bad quality"
+ image_response = model.generate_images(
+ prompt=prompt1,
+ # Optional:
+ negative_prompt=negative_prompt1,
+ number_of_images=number_of_images,
+ # TODO(b/295946075): The service stopped supporting image sizes.
+ # width=width,
+ # height=height,
+ seed=seed,
+ guidance_scale=guidance_scale,
+ language=language,
+ )
+
+ assert len(image_response.images) == number_of_images
+ for idx, image in enumerate(image_response):
+ # TODO(b/295946075): The service stopped supporting image sizes.
+ # assert image._pil_image.size == (width, height)
+ assert image.generation_parameters
+ assert image.generation_parameters["prompt"] == prompt1
+ assert image.generation_parameters["negative_prompt"] == negative_prompt1
+ # TODO(b/295946075): The service stopped supporting image sizes.
+ # assert image.generation_parameters["width"] == width
+ # assert image.generation_parameters["height"] == height
+ assert image.generation_parameters["seed"] == seed
+ assert image.generation_parameters["guidance_scale"] == guidance_scale
+ assert image.generation_parameters["index_of_image_in_batch"] == idx
+ assert image.generation_parameters["language"] == language
+
+ for width, height in [(1, 1), (9, 16), (16, 9), (4, 3), (3, 4)]:
+ prompt_aspect_ratio = "A street lit up on a rainy night"
+ model = vision_models.ImageGenerationModel.from_pretrained(
+ "imagegeneration@006"
+ )
+
+ number_of_images = 4
+ seed = 1
+ guidance_scale = 15
+ language = "en"
+ aspect_ratio = f"{width}:{height}"
+
+ image_response = model.generate_images(
+ prompt=prompt_aspect_ratio,
+ number_of_images=number_of_images,
+ aspect_ratio=aspect_ratio,
+ seed=seed,
+ guidance_scale=guidance_scale,
+ language=language,
+ )
+
+ assert len(image_response.images) == number_of_images
+ for idx, image in enumerate(image_response):
+ assert image.generation_parameters
+ assert image.generation_parameters["prompt"] == prompt_aspect_ratio
+ assert image.generation_parameters["aspect_ratio"] == aspect_ratio
+ assert image.generation_parameters["seed"] == seed
+ assert image.generation_parameters["guidance_scale"] == guidance_scale
+ assert image.generation_parameters["index_of_image_in_batch"] == idx
+ assert image.generation_parameters["language"] == language
+ assert (
+ abs(
+ float(image.size[0]) / float(image.size[1])
+ - float(width) / float(height)
+ )
+ <= 0.001
+ )
+
+ person_generation_prompts = [
+ "A street lit up on a rainy night",
+ "A woman walking down a street lit up on a rainy night",
+ "A child walking down a street lit up on a rainy night",
+ "A man walking down a street lit up on a rainy night",
+ ]
+
+ person_generation_levels = ["dont_allow", "allow_adult", "allow_all"]
+
+ for i in range(0, 3):
+ for j in range(0, i + 1):
+ image_response = model.generate_images(
+ prompt=person_generation_prompts[j],
+ number_of_images=number_of_images,
+ seed=seed,
+ guidance_scale=guidance_scale,
+ language=language,
+ person_generation=person_generation_levels[j],
+ )
+ if i == j:
+ assert len(image_response.images) == number_of_images
+ else:
+ assert len(image_response.images) < number_of_images
+ for idx, image in enumerate(image_response):
+ assert (
+ image.generation_parameters["person_generation"]
+ == person_generation_levels[j]
+ )
+ assert (
+ image.generation_parameters["prompt"]
+ == person_generation_prompts[j]
+ )
+ assert image.generation_parameters["seed"] == seed
+ assert (
+ image.generation_parameters["guidance_scale"] == guidance_scale
+ )
+ assert image.generation_parameters["index_of_image_in_batch"] == idx
+ assert image.generation_parameters["language"] == language
+
+ # Test saving and loading images
+ with tempfile.TemporaryDirectory() as temp_dir:
+ image_path = os.path.join(temp_dir, "image.png")
+ image_response[0].save(location=image_path)
+ image1 = vision_models.GeneratedImage.load_from_file(image_path)
+ # assert image1._pil_image.size == (width, height)
+ assert image1.generation_parameters
+ assert image1.generation_parameters["prompt"] == prompt1
+ assert image1.generation_parameters["language"] == language
+
+ # Preparing mask
+ mask_path = os.path.join(temp_dir, "mask.png")
+ mask_pil_image = PIL_Image.new(mode="RGB", size=image1._pil_image.size)
+ mask_pil_image.save(mask_path, format="PNG")
+ mask_image = vision_models.Image.load_from_file(mask_path)
+
+ # Test generating image from base image
+ prompt2 = "Ancient book style"
+ edit_mode = "inpainting-insert"
+ mask_mode = "foreground"
+ mask_dilation = 0.06
+ product_position = "fixed"
+ output_mime_type = "image/jpeg"
+ compression_quality = 0.90
+ image_response2 = model.edit_image(
+ prompt=prompt2,
+ # Optional:
+ number_of_images=number_of_images,
+ seed=seed,
+ guidance_scale=guidance_scale,
+ base_image=image1,
+ mask=mask_image,
+ edit_mode=edit_mode,
+ mask_mode=mask_mode,
+ mask_dilation=mask_dilation,
+ product_position=product_position,
+ output_mime_type=output_mime_type,
+ compression_quality=compression_quality,
+ language=language,
+ )
+ assert len(image_response2.images) == number_of_images
+ for idx, image in enumerate(image_response2):
+ # TODO(b/295946075): The service stopped supporting image sizes.
+ # assert image._pil_image.size == (width, height)
+ assert image.generation_parameters
+ assert image.generation_parameters["prompt"] == prompt2
+ assert image.generation_parameters["seed"] == seed
+ assert image.generation_parameters["guidance_scale"] == guidance_scale
+ assert image.generation_parameters["index_of_image_in_batch"] == idx
+ assert image.generation_parameters["edit_mode"] == edit_mode
+ assert image.generation_parameters["mask_mode"] == mask_mode
+ assert image.generation_parameters["mask_dilation"] == mask_dilation
+ assert image.generation_parameters["product_position"] == product_position
+ assert image.generation_parameters["mime_type"] == output_mime_type
+ assert (
+ image.generation_parameters["compression_quality"]
+ == compression_quality
+ )
+ assert image.generation_parameters["language"] == language
+ assert "base_image_hash" in image.generation_parameters
+ assert "mask_hash" in image.generation_parameters
+
+ prompt3 = "Chocolate chip cookies"
+ edit_mode = "inpainting-insert"
+ mask_mode = "semantic"
+ segmentation_classes = [1, 13, 17, 9, 18]
+ product_position = "fixed"
+ output_mime_type = "image/png"
+
+ image_response3 = model.edit_image(
+ prompt=prompt3,
+ number_of_images=number_of_images,
+ seed=seed,
+ guidance_scale=guidance_scale,
+ base_image=image1,
+ mask=mask_image,
+ edit_mode=edit_mode,
+ mask_mode=mask_mode,
+ segmentation_classes=segmentation_classes,
+ product_position=product_position,
+ output_mime_type=output_mime_type,
+ language=language,
+ )
+
+ assert len(image_response3.images) == number_of_images
+ for idx, image in enumerate(image_response3):
+ assert image.generation_parameters
+ assert image.generation_parameters["prompt"] == prompt3
+ assert image.generation_parameters["seed"] == seed
+ assert image.generation_parameters["guidance_scale"] == guidance_scale
+ assert image.generation_parameters["index_of_image_in_batch"] == idx
+ assert image.generation_parameters["edit_mode"] == edit_mode
+ assert image.generation_parameters["mask_mode"] == mask_mode
+ assert (
+ image.generation_parameters["segmentation_classes"]
+ == segmentation_classes
+ )
+ assert image.generation_parameters["product_position"] == product_position
+ assert image.generation_parameters["mime_type"] == output_mime_type
+ assert image.generation_parameters["language"] == language
+ assert "base_image_hash" in image.generation_parameters
+ assert "mask_hash" in image.generation_parameters
+
+ def test_image_edit_model_capability_mode(self):
+ """Tests the image edit model capability mode."""
+ model = vision_models.ImageGenerationModel.from_pretrained(
+ "imagen-3.0-capability-preview-0930"
+ )
+ gen_model = vision_models.ImageGenerationModel.from_pretrained(
+ "imagen-3.0-generate-001"
+ )
+ imagen_gen_prompt = "A street lit up on a rainy night"
+ number_of_images = 1
+ image_response = gen_model.generate_images(
+ prompt=imagen_gen_prompt,
+ number_of_images=number_of_images,
+ )
+ assert len(image_response.images) == number_of_images
+ for image in enumerate(image_response):
+ assert image.generation_parameters
+ assert image.generation_parameters["prompt"] == imagen_gen_prompt
+ with tempfile.TemporaryDirectory() as temp_dir:
+ image_path = os.path.join(temp_dir, "image.png")
+ image_response[0].save(location=image_path)
+ raw_image1 = vision_models.GeneratedImage.load_from_file(image_path)
+
+ raw_ref_image = vision_models.RawReferenceImage(
+ image=raw_image1, reference_id=0
+ )
+ mask_mode = "MASK_MODE_BACKGROUND"
+ mask_dilation = 0.06
+ mask_config = vision_models.MaskConfig(
+ mask_mode=mask_mode,
+ mask_dilation=mask_dilation,
+ )
+ mask_ref_image = vision_models.MaskReferenceImage(
+ reference_id=1,
+ mask_mode="background",
+ dilation=mask_dilation,
+ )
+ capability_mode = "EDIT_MODE_INPAINT_INSERTION"
+ edit_prompt = "Sunlight and clear weather"
+ edit_response = model.edit_images(
+ prompt=edit_prompt,
+ number_of_images=number_of_images,
+ reference_images=[raw_ref_image, mask_ref_image],
+ edit_mode="inpainting-insert",
+ )
+
+ assert len(edit_response.images) == number_of_images
+ for image in enumerate(edit_response):
+ assert image.generation_parameters
+ assert image.generation_parameters["prompt"] == edit_prompt
+ assert image.generation_parameters["edit_mode"] == capability_mode
+ assert (
+ image.generation_parameters[
+ f"reference_type_{raw_ref_image.reference_id}"
+ ]
+ == raw_ref_image.reference_type
+ )
+ assert (
+ image.generation_parameters[
+ f"reference_type_{mask_ref_image.reference_id}"
+ ]
+ == mask_ref_image.reference_type
+ )
+ assert image.generation_parameters[
+ f"reference_image_mask_config_{mask_ref_image.reference_id}"
+ ] == str(mask_config)
+
+ image_gen_prompt2 = "A dog playing with a ball in the San Fransisco Presidio"
+ image_response2 = gen_model.generate_images(
+ prompt=image_gen_prompt2,
+ number_of_images=number_of_images,
+ )
+ assert len(image_response2.images) == number_of_images
+ for image in enumerate(image_response2):
+ assert image.generation_parameters
+ assert image.generation_parameters["prompt"] == image_gen_prompt2
+ with tempfile.TemporaryDirectory() as temp_dir:
+ image_path = os.path.join(temp_dir, "subject_image.png")
+ image_response2[0].save(location=image_path)
+ raw_image2 = vision_models.GeneratedImage.load_from_file(image_path)
+ subject_config = vision_models.SubjectImageConfig(
+ subject_type="SUBJECT_TYPE_ANIMAL", subject_description="dog"
+ )
+ subject_ref_image2 = vision_models.SubjectReferenceImage(
+ image=raw_image2,
+ reference_id=0,
+ subject_type="animal",
+ subject_description="dog",
+ )
+ edit_prompt2 = "Change the dog to a cat"
+ capability_mode2 = "EDIT_MODE_DEFAULT"
+ edit_response2 = model.edit_images(
+ prompt=edit_prompt2,
+ number_of_images=number_of_images,
+ reference_images=[subject_ref_image2],
+ capability_mode=capability_mode2,
+ )
+ assert len(edit_response2.images) == number_of_images
+ for image in enumerate(edit_response2):
+ assert image.generation_parameters
+ assert image.generation_parameters["prompt"] == edit_prompt2
+ assert image.generation_parameters["edit_mode"] == capability_mode
+ assert (
+ image.generation_parameters[
+ f"reference_type_{subject_ref_image2.reference_id}"
+ ]
+ == "REFERENCE_TYPE_SUBJECT"
+ )
+ assert image.generation_parameters[
+ f"reference_image_subject_config_{subject_ref_image2.reference_id}"
+ ] == str(subject_config)
+
+ def test_image_verification_model_verify_image(self):
+ """Tests the image verification model verifying watermark presence in an image."""
+ verification_model = vision_models.ImageVerificationModel.from_pretrained(
+ "imageverification@001"
+ )
+ model = vision_models.ImageGenerationModel.from_pretrained(
+ "imagegeneration@005"
+ )
+ seed = 1
+ guidance_scale = 15
+ language = "en"
+ image_verification_response = verification_model.verify_image(
+ image=_create_blank_image()
+ )
+ assert image_verification_response["decision"] == "REJECT"
+
+ prompt = "A street lit up on a rainy night"
+ image_response = model.generate_images(
+ prompt=prompt,
+ number_of_images=1,
+ seed=seed,
+ guidance_scale=guidance_scale,
+ language=language,
+ add_watermark=True,
+ )
+ assert len(image_response.images) == 1
+
+ image_with_watermark = vision_models.Image(image_response.images[0].image_bytes)
+
+ image_verification_response = verification_model.verify_image(
+ image_with_watermark
+ )
+ assert image_verification_response["decision"] == "ACCEPT"
+
+ def test_image_upscaling_model_upscale_image(self):
+ """Tests the image upscaling model upscaling an image."""
+ arbitrary_width = 1111
+ arbitrary_height = 2000
+ target_max_size = 4096
+ image_generation_model = vision_models.ImageGenerationModel.from_pretrained(
+ "imagen-3.0-generate-001"
+ )
+ blank_image = _create_blank_image(
+ width=arbitrary_width, height=arbitrary_height
+ )
+ assert blank_image.size == (arbitrary_width, arbitrary_height)
+ upscaled_image = image_generation_model.upscale_image(
+ blank_image, new_size=target_max_size
+ )
+ new_size = (
+ int(arbitrary_width / arbitrary_height * target_max_size),
+ target_max_size,
+ )
+ assert upscaled_image._size == new_size
+
+ def test_image_upscaling_model_upscaling_factor(self):
+ """Tests the image upscaling model upscaling an image."""
+ arbitrary_width = 1111
+ arbitrary_height = 2222
+ target_upscaling_factor = "x2"
+ image_generation_model = vision_models.ImageGenerationModel.from_pretrained(
+ "imagen-3.0-generate-001"
+ )
+ blank_image = _create_blank_image(
+ width=arbitrary_width, height=arbitrary_height
+ )
+ assert blank_image.size == (arbitrary_width, arbitrary_height)
+ upscaled_image = image_generation_model.upscale_image(
+ blank_image, upscale_factor=target_upscaling_factor
+ )
+ new_size = (arbitrary_width * 2, arbitrary_height * 2)
+ assert upscaled_image._size == new_size
+
+ def test_image_upscaling_model_save_as_jpeg(self):
+ """Tests the image upscaling model upscaling an image."""
+ arbitrary_width = 1111
+ arbitrary_height = 2222
+ target_upscaling_factor = "x2"
+ image_generation_model = vision_models.ImageGenerationModel.from_pretrained(
+ "imagen-3.0-generate-001"
+ )
+ blank_image = _create_blank_image(
+ width=arbitrary_width, height=arbitrary_height
+ )
+ assert blank_image.size == (arbitrary_width, arbitrary_height)
+ upscaled_image = image_generation_model.upscale_image(
+ blank_image,
+ upscale_factor=target_upscaling_factor,
+ output_mime_type="image/jpeg",
+ output_compression_quality=90,
+ )
+ assert upscaled_image._mime_type == "image/jpeg"
+ assert upscaled_image._size == (arbitrary_width * 2, arbitrary_height * 2)
diff --git a/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_vizier.py b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_vizier.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f592cc7cec7f6edd835d7e3ffe2a45d4e981bdb
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/aiplatform/test_vizier.py
@@ -0,0 +1,106 @@
+import pytest
+
+from google.api_core import exceptions
+from google.cloud import aiplatform
+from google.cloud.aiplatform.vizier import Study
+from google.cloud.aiplatform.vizier import Trial
+from tests.system.aiplatform import e2e_base
+from google.cloud.aiplatform.vizier import pyvizier
+
+_TEST_STUDY_ID = 123
+
+
+@pytest.mark.usefixtures("tear_down_resources")
+class TestVizier(e2e_base.TestEndToEnd):
+ _temp_prefix = "temp_vertex_sdk_e2e_vizier_test"
+
+ def test_vizier_lifecycle(self, shared_state):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name="pr-auc", goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ "learning_rate", 0.00001, 1.0, scale_type=pyvizier.ScaleType.LINEAR
+ )
+ root.add_categorical_param("optimizer", ["adagrad", "adam", "experimental"])
+ sc.automated_stopping_config = (
+ pyvizier.AutomatedStoppingConfig.decay_curve_stopping_config(use_steps=True)
+ )
+
+ study = Study.create_or_load(display_name=self._temp_prefix, problem=sc)
+ shared_state["resources"] = [study]
+ trials = study.suggest(count=3, worker="halio_test_worker")
+ for trial in trials:
+ if not trial.should_stop():
+ measurement = pyvizier.Measurement()
+ measurement.metrics["pr-auc"] = 0.4
+ trial.add_measurement(measurement=measurement)
+ trial.complete(measurement=measurement)
+ optimal_trials = study.optimal_trials()
+
+ for trial in study.trials():
+ assert trial.status == pyvizier.TrialStatus.COMPLETED
+ assert optimal_trials[0].status == pyvizier.TrialStatus.COMPLETED
+
+ def test_vizier_study_deletion(self, shared_state):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name="pr-auc", goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ "learning_rate", 0.00001, 1.0, scale_type=pyvizier.ScaleType.LINEAR
+ )
+ root.add_categorical_param("optimizer", ["adagrad", "adam", "experimental"])
+ sc.automated_stopping_config = (
+ pyvizier.AutomatedStoppingConfig.decay_curve_stopping_config(use_steps=True)
+ )
+
+ study = Study.create_or_load(display_name=self._temp_prefix, problem=sc)
+ study.delete()
+
+ with pytest.raises(exceptions.NotFound):
+ study = Study(study_id=study.name)
+
+ def test_vizier_trial_deletion(self, shared_state):
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name="pr-auc", goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ "learning_rate", 0.00001, 1.0, scale_type=pyvizier.ScaleType.LINEAR
+ )
+ root.add_categorical_param("optimizer", ["adagrad", "adam", "experimental"])
+ sc.automated_stopping_config = (
+ pyvizier.AutomatedStoppingConfig.decay_curve_stopping_config(use_steps=True)
+ )
+
+ study = Study.create_or_load(display_name=self._temp_prefix, problem=sc)
+ trials = study.suggest(count=1, worker="halio_test_worker")
+ trials[0].delete()
+
+ with pytest.raises(exceptions.NotFound):
+ study = Trial(study_id=study.name, trial_name=trials[0].name)
diff --git a/testbed/googleapis__python-aiplatform/tests/system/vertex_ray/test_cluster_management.py b/testbed/googleapis__python-aiplatform/tests/system/vertex_ray/test_cluster_management.py
new file mode 100644
index 0000000000000000000000000000000000000000..f87bb0b65c0e373417e0c4290a13361f7a0804af
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/vertex_ray/test_cluster_management.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import vertex_ray
+from tests.system.aiplatform import e2e_base
+import datetime
+import pytest
+import ray
+
+# Local ray version will always be 2.4.0 regardless of cluster version due to
+# depenency conflicts. Remote job execution's Ray version is 2.9.
+RAY_VERSION = "2.4.0"
+PROJECT_ID = "ucaip-sample-tests"
+
+
+class TestClusterManagement(e2e_base.TestEndToEnd):
+ _temp_prefix = "temp-rov-cluster-management"
+
+ @pytest.mark.parametrize("cluster_ray_version", ["2.9", "2.33"])
+ def test_cluster_management(self, cluster_ray_version):
+ assert ray.__version__ == RAY_VERSION
+ aiplatform.init(project=PROJECT_ID, location="us-central1")
+
+ # CPU default cluster
+ head_node_type = vertex_ray.Resources()
+ worker_node_types = [vertex_ray.Resources()]
+
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
+
+ cluster_resource_name = vertex_ray.create_ray_cluster(
+ head_node_type=head_node_type,
+ worker_node_types=worker_node_types,
+ cluster_name=f"ray-cluster-{timestamp}-test-cluster-management",
+ ray_version=cluster_ray_version,
+ )
+
+ cluster_details = vertex_ray.get_ray_cluster(cluster_resource_name)
+ assert cluster_details.ray_version == cluster_ray_version
+ assert cluster_details.state == "RUNNING"
+
+ found_cluster = False
+ for cluster in vertex_ray.list_ray_clusters():
+ if cluster.cluster_resource_name == cluster_resource_name:
+ assert cluster.ray_version == cluster_ray_version
+ assert cluster.state == "RUNNING"
+ found_cluster = True
+
+ if not found_cluster:
+ raise ValueError(
+ f"Cluster {cluster_resource_name} not found in list_ray_clusters"
+ )
+
+ vertex_ray.delete_ray_cluster(cluster_resource_name)
+ # Ensure cluster was deleted
+ for cluster in vertex_ray.list_ray_clusters():
+ assert cluster.cluster_resource_name != cluster_resource_name
diff --git a/testbed/googleapis__python-aiplatform/tests/system/vertex_ray/test_job_submission_dashboard.py b/testbed/googleapis__python-aiplatform/tests/system/vertex_ray/test_job_submission_dashboard.py
new file mode 100644
index 0000000000000000000000000000000000000000..84d95bff1dddc6bd17840b96cdbb9b660e1b79ca
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/vertex_ray/test_job_submission_dashboard.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import vertex_ray
+from ray.job_submission import JobSubmissionClient
+from tests.system.aiplatform import e2e_base
+import datetime
+import os
+import pytest
+import ray
+import time
+import tempfile
+
+# Local ray version will always be 2.4 regardless of cluster version due to
+# depenency conflicts. Remote job execution's Ray version is 2.9.
+RAY_VERSION = "2.4.0"
+PROJECT_ID = "ucaip-sample-tests"
+
+
+class TestJobSubmissionDashboard(e2e_base.TestEndToEnd):
+ _temp_prefix = "temp-job-submission-dashboard"
+
+ @pytest.mark.parametrize("cluster_ray_version", ["2.9", "2.33"])
+ def test_job_submission_dashboard(self, cluster_ray_version):
+ assert ray.__version__ == RAY_VERSION
+ aiplatform.init(project=PROJECT_ID, location="us-central1")
+
+ head_node_type = vertex_ray.Resources()
+ worker_node_types = [vertex_ray.Resources()]
+
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
+
+ # Create cluster, get dashboard address
+ cluster_resource_name = vertex_ray.create_ray_cluster(
+ head_node_type=head_node_type,
+ worker_node_types=worker_node_types,
+ cluster_name=f"ray-cluster-{timestamp}-test-job-submission-dashboard",
+ ray_version=cluster_ray_version,
+ )
+
+ cluster_details = vertex_ray.get_ray_cluster(cluster_resource_name)
+
+ # Need to use the full path since the installation is editable, not from a release
+ client = JobSubmissionClient(
+ "google.cloud.aiplatform.vertex_ray://{}".format(
+ cluster_details.dashboard_address
+ )
+ )
+
+ my_script = """
+import ray
+import time
+
+@ray.remote
+def hello_world():
+ return "hello world"
+
+@ray.remote
+def square(x):
+ print(x)
+ time.sleep(100)
+ return x * x
+
+ray.init() # No need to specify address="vertex_ray://...."
+print(ray.get(hello_world.remote()))
+print(ray.get([square.remote(i) for i in range(4)]))
+"""
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # Create my_script.py file
+ fp = os.path.join(temp_dir, "my_script.py")
+ f = open(fp, "w")
+ f.write(my_script)
+ f.close()
+
+ job_id = client.submit_job(
+ # Entrypoint shell command to execute
+ entrypoint="python my_script.py",
+ # Path to the local directory that contains the my_script.py file
+ runtime_env={"working_dir": temp_dir},
+ )
+
+ job_status = None
+ while job_status != ray.job_submission.JobStatus.SUCCEEDED:
+ job_status = client.get_job_info(job_id).status
+ print(job_id, "has status:", job_status)
+ if (
+ job_status == ray.job_submission.JobStatus.PENDING
+ or job_status == ray.job_submission.JobStatus.RUNNING
+ ):
+ time.sleep(10)
+ elif (
+ job_status == ray.job_submission.JobStatus.FAILED
+ or job_status == ray.job_submission.JobStatus.STOPPED
+ ):
+ print(job_id, "job logs:")
+ print(client.get_job_info(job_id).message)
+ raise RuntimeError("The Ray Job encountered an error and failed")
+
+ vertex_ray.delete_ray_cluster(cluster_resource_name)
+ # Ensure cluster was deleted
+ for cluster in vertex_ray.list_ray_clusters():
+ assert cluster.cluster_resource_name != cluster_resource_name
diff --git a/testbed/googleapis__python-aiplatform/tests/system/vertex_ray/test_ray_data.py b/testbed/googleapis__python-aiplatform/tests/system/vertex_ray/test_ray_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..76e647698a61e1b66d7266331ad4641e0479117f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/vertex_ray/test_ray_data.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import vertex_ray
+from ray.job_submission import JobSubmissionClient
+from tests.system.aiplatform import e2e_base
+import datetime
+import os
+import pytest
+import ray
+import time
+import tempfile
+
+# Local ray version will always be 2.4 regardless of cluster version due to
+# depenency conflicts. Remote job execution's Ray version is 2.9.
+RAY_VERSION = "2.4.0"
+SDK_VERSION = aiplatform.__version__
+PROJECT_ID = "ucaip-sample-tests"
+
+my_script_ray29 = """
+import ray
+import vertex_ray
+
+parallelism = 10
+query = "SELECT * FROM `bigquery-public-data.ml_datasets.ulb_fraud_detection` LIMIT 10000000"
+
+ds = vertex_ray.data.read_bigquery(
+ parallelism=parallelism,
+ query=query
+)
+
+# The reads are lazy, so the end time cannot be captured until ds.materialize() is called
+ds.materialize()
+
+# Write
+vertex_ray.data.write_bigquery(
+ ds,
+ dataset="bugbashbq1.system_test_ray29_write",
+)
+"""
+
+my_script_ray233 = """
+import ray
+import vertex_ray
+
+override_num_blocks = 10
+query = "SELECT * FROM `bigquery-public-data.ml_datasets.ulb_fraud_detection` LIMIT 10000000"
+
+ds = vertex_ray.data.read_bigquery(
+ override_num_blocks=override_num_blocks,
+ query=query,
+)
+
+# The reads are lazy, so the end time cannot be captured until ds.materialize() is called
+ds.materialize()
+
+# Write
+vertex_ray.data.write_bigquery(
+ ds,
+ dataset="bugbashbq1.system_test_ray29_write",
+)
+"""
+
+my_script = {"2.9": my_script_ray29, "2.33": my_script_ray233}
+
+
+class TestRayData(e2e_base.TestEndToEnd):
+ _temp_prefix = "temp-ray-data"
+
+ @pytest.mark.parametrize("cluster_ray_version", ["2.9", "2.33"])
+ def test_ray_data(self, cluster_ray_version):
+ head_node_type = vertex_ray.Resources()
+ worker_node_types = [
+ vertex_ray.Resources(),
+ vertex_ray.Resources(),
+ vertex_ray.Resources(),
+ ]
+
+ assert ray.__version__ == RAY_VERSION
+ aiplatform.init(project=PROJECT_ID, location="us-central1")
+
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
+
+ # Create cluster, get dashboard address
+ cluster_resource_name = vertex_ray.create_ray_cluster(
+ head_node_type=head_node_type,
+ worker_node_types=worker_node_types,
+ cluster_name=f"ray-cluster-{timestamp}-test-ray-data",
+ ray_version=cluster_ray_version,
+ )
+
+ cluster_details = vertex_ray.get_ray_cluster(cluster_resource_name)
+
+ # Connect to cluster
+ client = JobSubmissionClient(
+ "google.cloud.aiplatform.vertex_ray://{}".format(
+ cluster_details.dashboard_address
+ )
+ )
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ fp = os.path.join(temp_dir, "my_script.py")
+ f = open(fp, "w")
+ f.write(my_script[cluster_ray_version])
+ f.close()
+
+ job_id = client.submit_job(
+ # Entrypoint shell command to execute
+ entrypoint="python my_script.py",
+ # Path to the local directory that contains the my_script.py file
+ runtime_env={
+ "working_dir": temp_dir,
+ "pip": [
+ "pandas==2.1.4",
+ "google-cloud-aiplatform[ray]==" + SDK_VERSION,
+ ],
+ },
+ )
+
+ job_status = None
+ while job_status != ray.job_submission.JobStatus.SUCCEEDED:
+ job_status = client.get_job_info(job_id).status
+ print(job_id, "has status:", job_status)
+ if (
+ job_status == ray.job_submission.JobStatus.PENDING
+ or job_status == ray.job_submission.JobStatus.RUNNING
+ ):
+ time.sleep(10)
+ elif (
+ job_status == ray.job_submission.JobStatus.FAILED
+ or job_status == ray.job_submission.JobStatus.STOPPED
+ ):
+ print(job_id, "job logs:")
+ print(client.get_job_info(job_id).message)
+ raise RuntimeError("The Ray Job encountered an error and failed")
+
+ vertex_ray.delete_ray_cluster(cluster_resource_name)
+ # Ensure cluster was deleted
+ for cluster in vertex_ray.list_ray_clusters():
+ assert cluster.cluster_resource_name != cluster_resource_name
diff --git a/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_batch_prediction.py b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_batch_prediction.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a6c175586c5c7777218a6122343944778aacf1a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_batch_prediction.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=protected-access, g-multiple-import
+"""System tests for GenAI batch prediction."""
+
+import time
+import pytest
+
+import vertexai
+from tests.system.aiplatform import e2e_base
+from vertexai.generative_models import GenerativeModel
+from vertexai.preview import batch_prediction
+
+
+_GEMINI_MODEL_NAME = "gemini-1.0-pro"
+_GEMINI_MODEL_RESOURCE_NAME = f"publishers/google/models/{_GEMINI_MODEL_NAME}"
+_GCS_INPUT_URI = "gs://ucaip-samples-us-central1/model/llm/batch_prediction/gemini_batch_prediction_input.jsonl"
+_BQ_INPUT_URI = (
+ "bq://ucaip-sample-tests.ucaip_test_us_central1.gemini_batch_prediction_input"
+)
+
+
+@pytest.mark.usefixtures(
+ "prepare_staging_bucket",
+ "delete_staging_bucket",
+ "prepare_bigquery_dataset",
+ "delete_bigquery_dataset",
+ "tear_down_resources",
+)
+class TestBatchPrediction(e2e_base.TestEndToEnd):
+ """System tests for GenAI batch prediction."""
+
+ _temp_prefix = "temp-genai-batch-prediction"
+
+ def test_batch_prediction_with_gcs_input(self, shared_state):
+ shared_state["resources"] = []
+
+ vertexai.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=f"gs://{shared_state['staging_bucket_name']}",
+ )
+
+ # Pass the model name and do not specify the output prefix.
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=_GEMINI_MODEL_NAME,
+ input_dataset=_GCS_INPUT_URI,
+ )
+ shared_state["resources"].append(job)
+
+ assert (
+ job.model_name == _GEMINI_MODEL_RESOURCE_NAME
+ ), f"Unexpected model name {job.model_name} in the job."
+
+ # Refresh the job until complete
+ while not job.has_ended:
+ time.sleep(10)
+ job.refresh()
+
+ assert job.has_succeeded, f"The job has failed with error: {job.error.message}."
+ assert job.output_location.startswith(
+ f"gs://{shared_state['staging_bucket_name']}/gen-ai-batch-prediction/"
+ ), f"Unexpected output location: {job.output_location}."
+
+ def test_batch_prediction_with_bq_input(self, shared_state):
+ vertexai.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ )
+
+ model = GenerativeModel(_GEMINI_MODEL_NAME)
+
+ # Pass the model object and specify the output prefix.
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=model,
+ input_dataset=_BQ_INPUT_URI,
+ output_uri_prefix=f"bq://{shared_state['bigquery_dataset_id']}",
+ )
+ shared_state["resources"].append(job)
+
+ assert (
+ job.model_name == _GEMINI_MODEL_RESOURCE_NAME
+ ), f"Unexpected model name {job.model_name} in the job."
+
+ # Refresh the job until complete
+ while not job.has_ended:
+ time.sleep(10)
+ job.refresh()
+
+ assert job.has_succeeded, f"The job has failed with error: {job.error.message}."
+ assert job.output_location.startswith(
+ f"bq://{shared_state['bigquery_dataset_id']}.predictions"
+ ), f"Unexpected output location: {job.output_location}."
diff --git a/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_evaluation.py b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..c65f2417f404dfed9a1995a032dbd84b1f4f5031
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_evaluation.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=protected-access, g-multiple-import
+"""System tests for Gen AI evaluation."""
+
+from google import auth
+from google.cloud import aiplatform
+from tests.system.aiplatform import e2e_base
+from vertexai.generative_models import GenerativeModel
+from vertexai.preview.evaluation import EvalTask
+from vertexai.preview.evaluation import (
+ MetricPromptTemplateExamples,
+)
+import pandas as pd
+import pytest
+
+
+_METRICS = [
+ "rouge_l_sum",
+ MetricPromptTemplateExamples.Pointwise.SAFETY,
+]
+_GEMINI_MODEL_NAME = "gemini-1.0-pro"
+_EXPERIMENT_NAME = "test_experiment"
+_EXPERIMENT__RUN_NAME = "test_experiment_run"
+
+
+@pytest.mark.usefixtures(
+ "tear_down_resources",
+)
+class TestEvaluation(e2e_base.TestEndToEnd):
+ """System tests for Gen AI evaluation."""
+
+ def setup_method(self):
+ super().setup_method()
+ credentials, _ = auth.default(
+ scopes=["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ credentials=credentials,
+ )
+
+ def test_run_eval_task(self):
+ test_eval_task = EvalTask(
+ dataset=pd.DataFrame(
+ {
+ "prompt": ["Why is sky blue?"],
+ "reference": [
+ "The sky appears blue due to a phenomenon called "
+ "Rayleigh scattering."
+ ],
+ }
+ ),
+ metrics=_METRICS,
+ experiment=_EXPERIMENT_NAME,
+ )
+
+ eval_result = test_eval_task.evaluate(
+ model=GenerativeModel(_GEMINI_MODEL_NAME),
+ experiment_run_name=_EXPERIMENT__RUN_NAME,
+ )
+
+ assert eval_result.summary_metrics["row_count"] == 1
+ assert set(eval_result.summary_metrics.keys()) == set(
+ [
+ "row_count",
+ "rouge_l_sum/mean",
+ "rouge_l_sum/std",
+ "safety/mean",
+ "safety/std",
+ ]
+ )
+ assert set(eval_result.metrics_table.columns.values) == set(
+ [
+ "prompt",
+ "reference",
+ "response",
+ "rouge_l_sum/score",
+ "safety/score",
+ "safety/explanation",
+ ]
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_generative_models.py b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_generative_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b69e8b7b01c893c897662e7ac30c8b4f8e13485
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_generative_models.py
@@ -0,0 +1,702 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=protected-access, g-multiple-import
+"""System tests for generative models."""
+
+from typing import Any
+
+import json
+import os
+import pytest
+
+# Google imports
+from google import auth
+from google.auth.aio.credentials import StaticCredentials
+from google.cloud import aiplatform
+from tests.system.aiplatform import e2e_base
+from vertexai import generative_models
+from vertexai.generative_models import Content
+from vertexai.preview import (
+ generative_models as preview_generative_models,
+)
+from vertexai.preview import caching
+
+
+GEMINI_MODEL_NAME = "gemini-1.0-pro-002"
+GEMINI_VISION_MODEL_NAME = "gemini-1.0-pro-vision"
+GEMINI_15_MODEL_NAME = "gemini-1.5-pro-preview-0409"
+GEMINI_15_PRO_MODEL_NAME = "gemini-1.5-pro-001"
+SMART_ROUTER_NAME = "smart-router-001"
+GEMINI_15_PRO_2_MODEL_NAME = "gemini-1.5-pro-002"
+
+STAGING_API_ENDPOINT = "STAGING_ENDPOINT"
+PROD_API_ENDPOINT = "PROD_ENDPOINT"
+
+
+# A dummy function for function calling
+def get_current_weather(location: str, unit: str = "centigrade"):
+ """Gets weather in the specified location.
+
+ Args:
+ location: The location for which to get the weather.
+ unit: Optional. Temperature unit. Can be Centigrade or Fahrenheit. Defaults to Centigrade.
+
+ Returns:
+ The weather information as a dict.
+ """
+ return dict(
+ location=location,
+ unit=unit,
+ weather="Super nice, but maybe a bit hot.",
+ )
+
+
+def get_client_api_transport(client: Any):
+ return client._transport.__class__.__name__.lower()
+
+
+_REQUEST_FUNCTION_PARAMETER_SCHEMA_STRUCT = {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA",
+ },
+ "unit": {
+ "type": "string",
+ "enum": [
+ "celsius",
+ "fahrenheit",
+ ],
+ },
+ },
+ "required": ["location"],
+}
+
+_RESPONSE_SCHEMA_STRUCT = {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ },
+ },
+ "required": ["location"],
+}
+
+
+@pytest.mark.parametrize("api_endpoint_env_name", [PROD_API_ENDPOINT])
+@pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+class TestGenerativeModels(e2e_base.TestEndToEnd):
+ """System tests for generative models."""
+
+ _temp_prefix = "temp_generative_models_test_"
+
+ @pytest.fixture(scope="function", autouse=True)
+ def setup_method(self, api_endpoint_env_name, api_transport):
+ super().setup_method()
+ credentials, _ = auth.default(
+ scopes=["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ if api_endpoint_env_name == STAGING_API_ENDPOINT:
+ api_endpoint = os.getenv(api_endpoint_env_name) or None
+ else:
+ api_endpoint = None
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ credentials=credentials,
+ api_endpoint=api_endpoint,
+ api_transport=api_transport,
+ )
+
+ def test_generate_content_with_cached_content_from_text(
+ self, api_endpoint_env_name
+ ):
+ cached_content = caching.CachedContent.create(
+ model_name=GEMINI_15_PRO_MODEL_NAME,
+ system_instruction="Please answer all the questions like a pirate.",
+ contents=[
+ Content.from_dict(
+ {
+ "role": "user",
+ "parts": [
+ {
+ "file_data": {
+ "mime_type": "application/pdf",
+ "file_uri": "gs://ucaip-samples-us-central1/sdk_system_test_resources/megatro-llm.pdf",
+ }
+ }
+ for _ in range(10)
+ ]
+ + [
+ {"text": "Please try to summarize the previous contents."},
+ ],
+ }
+ )
+ ],
+ )
+
+ model = preview_generative_models.GenerativeModel.from_cached_content(
+ cached_content=cached_content
+ )
+
+ response = model.generate_content(
+ "Why is sky blue?",
+ generation_config=preview_generative_models.GenerationConfig(temperature=0),
+ )
+ try:
+ assert response.text
+ finally:
+ cached_content.delete()
+
+ def test_generate_content_from_text(self, api_endpoint_env_name, api_transport):
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ response = model.generate_content(
+ "Why is sky blue?",
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert response.text
+ assert api_transport in get_client_api_transport(model._prediction_client)
+
+ def test_generate_content_latency(self, api_endpoint_env_name):
+ import time
+ from unittest import mock
+ from vertexai.generative_models._generative_models import (
+ prediction_service_v1 as prediction_service,
+ )
+
+ gapic_response_time = None
+ gapic_generate_content = (
+ prediction_service.PredictionServiceClient.generate_content
+ )
+
+ def generate_content_patch(self, *args, **kwargs):
+ nonlocal gapic_response_time
+ gapic_start_time = time.time()
+ response = gapic_generate_content(self, *args, **kwargs)
+ gapic_response_time = time.time() - gapic_start_time
+ return response
+
+ with mock.patch.object(
+ prediction_service.PredictionServiceClient,
+ "generate_content",
+ generate_content_patch,
+ ):
+ sdk_start_time = time.time()
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ model.generate_content(
+ "Why is sky blue?",
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ sdk_response_time = time.time() - sdk_start_time
+
+ sdk_latency = sdk_response_time - gapic_response_time
+
+ percent_latency = (sdk_response_time - gapic_response_time) / sdk_response_time
+
+ # Assert SDK adds <= 0.01 seconds of latency and <=.01% of the overall latency
+ assert sdk_latency <= 0.01
+ assert percent_latency <= 0.01
+
+ @pytest.mark.asyncio
+ async def test_generate_content_async(self, api_endpoint_env_name, api_transport):
+ # Retrieve access token from ADC required to construct
+ # google.auth.aio.credentials.StaticCredentials for async REST transport.
+ # TODO: Update this when google.auth.aio.default is supported for async.
+ if api_transport == "rest":
+ # Construct google.auth.aio.credentials.StaticCredentials
+ # using the access token from ADC for async REST transport.
+ default_credentials, _ = auth.default(
+ scopes=["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ auth_req = auth.transport.requests.Request()
+ default_credentials.refresh(auth_req)
+
+ async_credentials = StaticCredentials(token=default_credentials.token)
+ aiplatform.initializer._set_async_rest_credentials(async_credentials)
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ response = await model.generate_content_async(
+ "Why is sky blue?",
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert response.text
+ assert api_transport in get_client_api_transport(
+ model._prediction_async_client._client
+ )
+ await model._close_async_client()
+
+ def test_generate_content_streaming(self, api_endpoint_env_name, api_transport):
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ stream = model.generate_content(
+ "Why is sky blue?",
+ stream=True,
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ for chunk in stream:
+ assert (
+ chunk.text
+ or chunk.candidates[0].finish_reason
+ is generative_models.FinishReason.STOP
+ )
+ assert api_transport in get_client_api_transport(model._prediction_client)
+
+ @pytest.mark.asyncio
+ async def test_generate_content_streaming_async(
+ self, api_endpoint_env_name, api_transport
+ ):
+ # Retrieve access token from ADC required to construct
+ # google.auth.aio.credentials.StaticCredentials for async REST transport.
+ # TODO: Update this when google.auth.aio.default is supported for async.
+ if api_transport == "rest":
+ # Construct google.auth.aio.credentials.StaticCredentials
+ # using the access token from ADC for async REST transport.
+ default_credentials, _ = auth.default(
+ scopes=["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ auth_req = auth.transport.requests.Request()
+ default_credentials.refresh(auth_req)
+
+ async_credentials = StaticCredentials(token=default_credentials.token)
+ aiplatform.initializer._set_async_rest_credentials(async_credentials)
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ async_stream = await model.generate_content_async(
+ "Why is sky blue?",
+ stream=True,
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ async for chunk in async_stream:
+ assert (
+ chunk.text
+ or chunk.candidates[0].finish_reason
+ is generative_models.FinishReason.STOP
+ )
+ assert api_transport in get_client_api_transport(
+ model._prediction_async_client._client
+ )
+ await model._close_async_client()
+
+ def test_generate_content_with_parameters(
+ self, api_endpoint_env_name, api_transport
+ ):
+ model = generative_models.GenerativeModel(
+ GEMINI_MODEL_NAME,
+ system_instruction=[
+ "Talk like a pirate.",
+ "Don't use rude words.",
+ ],
+ )
+ response = model.generate_content(
+ contents="Why is sky blue?",
+ generation_config=generative_models.GenerationConfig(
+ temperature=0,
+ top_p=0.95,
+ top_k=20,
+ candidate_count=1,
+ max_output_tokens=100,
+ stop_sequences=["STOP!"],
+ response_logprobs=True,
+ logprobs=3,
+ ),
+ safety_settings={
+ generative_models.HarmCategory.HARM_CATEGORY_HATE_SPEECH: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+ generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_ONLY_HIGH,
+ generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+ generative_models.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: generative_models.HarmBlockThreshold.BLOCK_NONE,
+ },
+ )
+ assert response.text
+ assert api_transport in get_client_api_transport(model._prediction_client)
+
+ def test_generate_content_with_gemini_15_parameters(self, api_endpoint_env_name):
+ model = generative_models.GenerativeModel(GEMINI_15_MODEL_NAME)
+ response = model.generate_content(
+ contents="Why is sky blue? Respond in JSON Format.",
+ generation_config=generative_models.GenerationConfig(
+ temperature=0,
+ top_p=0.95,
+ top_k=20,
+ candidate_count=1,
+ seed=5,
+ max_output_tokens=100,
+ stop_sequences=["STOP!"],
+ presence_penalty=0.0,
+ frequency_penalty=0.0,
+ response_mime_type="application/json",
+ response_schema=_RESPONSE_SCHEMA_STRUCT,
+ ),
+ safety_settings={
+ generative_models.HarmCategory.HARM_CATEGORY_HATE_SPEECH: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+ generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_ONLY_HIGH,
+ generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+ generative_models.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: generative_models.HarmBlockThreshold.BLOCK_NONE,
+ },
+ )
+ assert response.text
+ assert json.loads(response.text)
+
+ def test_generate_content_from_list_of_content_dict(
+ self, api_endpoint_env_name, api_transport
+ ):
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ response = model.generate_content(
+ contents=[{"role": "user", "parts": [{"text": "Why is sky blue?"}]}],
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert response.text
+ assert api_transport in get_client_api_transport(model._prediction_client)
+
+ @pytest.mark.skip(
+ reason="Breaking change in the gemini-pro-vision model. See b/315803556#comment3"
+ )
+ def test_generate_content_from_remote_image(self, api_endpoint_env_name):
+ vision_model = generative_models.GenerativeModel(GEMINI_VISION_MODEL_NAME)
+ image_part = generative_models.Part.from_uri(
+ uri="gs://download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg",
+ mime_type="image/jpeg",
+ )
+ response = vision_model.generate_content(
+ image_part,
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert response.text
+ assert "cat" in response.text
+
+ def test_generate_content_from_text_and_remote_image(
+ self, api_endpoint_env_name, api_transport
+ ):
+ vision_model = generative_models.GenerativeModel(GEMINI_VISION_MODEL_NAME)
+ image_part = generative_models.Part.from_uri(
+ uri="gs://download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg",
+ mime_type="image/jpeg",
+ )
+ response = vision_model.generate_content(
+ contents=["What is shown in this image?", image_part],
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert response.text
+ assert "cat" in response.text
+ assert api_transport in get_client_api_transport(
+ vision_model._prediction_client
+ )
+
+ def test_generate_content_from_text_and_remote_video(
+ self, api_endpoint_env_name, api_transport
+ ):
+ vision_model = generative_models.GenerativeModel(GEMINI_VISION_MODEL_NAME)
+ video_part = generative_models.Part.from_uri(
+ uri="gs://cloud-samples-data/video/animals.mp4",
+ mime_type="video/mp4",
+ )
+ response = vision_model.generate_content(
+ contents=["What is in the video?", video_part],
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert response.text
+ assert "Zootopia" in response.text
+ assert api_transport in get_client_api_transport(
+ vision_model._prediction_client
+ )
+
+ def test_generate_content_from_text_and_remote_audio(
+ self, api_endpoint_env_name, api_transport
+ ):
+ pro_model = generative_models.GenerativeModel(GEMINI_15_PRO_2_MODEL_NAME)
+ audio_part = generative_models.Part.from_uri(
+ uri="gs://cloud-samples-data/generative-ai/audio/pixel.mp3",
+ mime_type="audio/mp3",
+ )
+ response = pro_model.generate_content(
+ contents=["What is in the audio?", audio_part],
+ generation_config=generative_models.GenerationConfig(audio_timestamp=True),
+ )
+ assert response.text
+ assert api_transport in get_client_api_transport(pro_model._prediction_client)
+
+ def test_grounding_google_search_retriever(self, api_endpoint_env_name):
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ google_search_retriever_tool = (
+ generative_models.Tool.from_google_search_retrieval(
+ generative_models.grounding.GoogleSearchRetrieval()
+ )
+ )
+ response = model.generate_content(
+ "Why is sky blue?",
+ tools=[google_search_retriever_tool],
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert (
+ response.candidates[0].finish_reason
+ is generative_models.FinishReason.RECITATION
+ or response.text
+ )
+
+ def test_grounding_google_search_retriever_with_dynamic_retrieval(
+ self, api_endpoint_env_name
+ ):
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ google_search_retriever_tool = generative_models.Tool.from_google_search_retrieval(
+ generative_models.grounding.GoogleSearchRetrieval(
+ generative_models.grounding.DynamicRetrievalConfig(
+ mode=generative_models.grounding.DynamicRetrievalConfig.Mode.MODE_DYNAMIC,
+ dynamic_threshold=0.05,
+ )
+ )
+ )
+ response = model.generate_content(
+ "Why is sky blue?",
+ tools=[google_search_retriever_tool],
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert (
+ response.candidates[0].finish_reason
+ is generative_models.FinishReason.RECITATION
+ or response.text
+ )
+
+ # Chat
+
+ def test_send_message_from_text(self, api_endpoint_env_name, api_transport):
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ chat = model.start_chat()
+ response1 = chat.send_message(
+ "I really like fantasy movies.",
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert response1.text
+ assert len(chat.history) == 2
+
+ response2 = chat.send_message(
+ "What things do I like?.",
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert response2.text
+ assert len(chat.history) == 4
+ assert api_transport in get_client_api_transport(model._prediction_client)
+
+ def test_chat_function_calling(self, api_endpoint_env_name):
+ get_current_weather_func = generative_models.FunctionDeclaration(
+ name="get_current_weather",
+ description="Get the current weather in a given location",
+ parameters=_REQUEST_FUNCTION_PARAMETER_SCHEMA_STRUCT,
+ )
+
+ weather_tool = generative_models.Tool(
+ function_declarations=[get_current_weather_func],
+ )
+
+ model = generative_models.GenerativeModel(
+ GEMINI_MODEL_NAME,
+ # Specifying the tools once to avoid specifying them in every request
+ tools=[weather_tool],
+ )
+
+ chat = model.start_chat()
+
+ response1 = chat.send_message("What is the weather like in Boston?")
+ assert (
+ response1.candidates[0].content.parts[0].function_call.name
+ == "get_current_weather"
+ )
+ response2 = chat.send_message(
+ generative_models.Part.from_function_response(
+ name="get_current_weather",
+ response={
+ "content": {"weather": "super nice"},
+ },
+ ),
+ )
+ assert response2.text
+
+ def test_generate_content_function_calling(self, api_endpoint_env_name):
+ get_current_weather_func = generative_models.FunctionDeclaration(
+ name="get_current_weather",
+ description="Get the current weather in a given location",
+ parameters=_REQUEST_FUNCTION_PARAMETER_SCHEMA_STRUCT,
+ )
+
+ weather_tool = generative_models.Tool(
+ function_declarations=[get_current_weather_func],
+ )
+
+ model = generative_models.GenerativeModel(
+ GEMINI_MODEL_NAME,
+ # Specifying the tools once to avoid specifying them in every request
+ tools=[weather_tool],
+ )
+
+ # Define the user's prompt in a Content object that we can reuse in model calls
+ prompt = "What is the weather like in Boston?"
+ user_prompt_content = generative_models.Content(
+ role="user",
+ parts=[
+ generative_models.Part.from_text(prompt),
+ ],
+ )
+
+ # Send the prompt and instruct the model to generate content using the Tool
+ response = model.generate_content(
+ user_prompt_content,
+ generation_config={"temperature": 0},
+ tools=[weather_tool],
+ )
+ response_function_call_content = response.candidates[0].content
+
+ assert (
+ response.candidates[0].content.parts[0].function_call.name
+ == "get_current_weather"
+ )
+
+ assert response.candidates[0].function_calls[0].args["location"]
+ assert len(response.candidates[0].function_calls) == 1
+ assert (
+ response.candidates[0].function_calls[0]
+ == response.candidates[0].content.parts[0].function_call
+ )
+
+ # fake api_response data
+ api_response = {
+ "location": "Boston, MA",
+ "temperature": 38,
+ "description": "Partly Cloudy",
+ "icon": "partly-cloudy",
+ "humidity": 65,
+ "wind": {"speed": 10, "direction": "NW"},
+ }
+
+ response = model.generate_content(
+ [
+ user_prompt_content,
+ response_function_call_content,
+ generative_models.Content(
+ role="user",
+ parts=[
+ generative_models.Part.from_function_response(
+ name="get_current_weather",
+ response=api_response,
+ )
+ ],
+ ),
+ ],
+ tools=[weather_tool],
+ )
+ assert response
+ assert len(response.candidates[0].function_calls) == 0
+
+ # Get the model summary response
+ summary = response.candidates[0].content.parts[0].text
+
+ assert summary
+
+ def test_generate_content_model_router(self, api_endpoint_env_name):
+ model = generative_models.GenerativeModel(SMART_ROUTER_NAME)
+ response = model.generate_content(
+ contents="Why is sky blue?",
+ generation_config=generative_models.GenerationConfig(
+ temperature=0,
+ routing_config=generative_models.GenerationConfig.RoutingConfig(
+ routing_config=generative_models.GenerationConfig.RoutingConfig.AutoRoutingMode(
+ model_routing_preference=generative_models.GenerationConfig.RoutingConfig.AutoRoutingMode.ModelRoutingPreference.BALANCED,
+ ),
+ ),
+ ),
+ )
+ assert response.text
+
+ def test_chat_automatic_function_calling(self, api_endpoint_env_name):
+ get_current_weather_func = generative_models.FunctionDeclaration.from_func(
+ get_current_weather
+ )
+
+ weather_tool = generative_models.Tool(
+ function_declarations=[get_current_weather_func],
+ )
+
+ model = preview_generative_models.GenerativeModel(
+ GEMINI_MODEL_NAME,
+ # Specifying the tools once to avoid specifying them in every request
+ tools=[weather_tool],
+ )
+
+ chat = model.start_chat(
+ responder=preview_generative_models.AutomaticFunctionCallingResponder(
+ max_automatic_function_calls=1,
+ )
+ )
+
+ response = chat.send_message("What is the weather like in Boston?")
+
+ assert response.text
+ assert "nice" in response.text
+ assert len(chat.history) == 4
+ assert chat.history[-3].parts[0].function_call
+ assert chat.history[-3].parts[0].function_call.name == "get_current_weather"
+ assert chat.history[-2].parts[0].function_response
+ assert chat.history[-2].parts[0].function_response.name == "get_current_weather"
+
+ def test_additional_request_metadata(self, api_endpoint_env_name):
+ aiplatform.init(request_metadata=[("foo", "bar")])
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ response = model.generate_content(
+ "Why is sky blue?",
+ generation_config=generative_models.GenerationConfig(temperature=0),
+ )
+ assert response
+
+ def test_compute_tokens_from_text(self, api_endpoint_env_name):
+ model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ response = model.compute_tokens(["Why is sky blue?", "Explain it like I'm 5."])
+ assert len(response.tokens_info) == 2
+ for token_info in response.tokens_info:
+ assert token_info.tokens
+ assert token_info.token_ids
+ assert len(token_info.token_ids) == len(token_info.tokens)
+ assert token_info.role
+ # Lightly validate that the tokens are not Base64 encoded
+ assert b"=" not in token_info.tokens
+
+ def test_count_tokens_from_text(self):
+ plain_model = generative_models.GenerativeModel(GEMINI_MODEL_NAME)
+ model = generative_models.GenerativeModel(
+ GEMINI_MODEL_NAME, system_instruction=["You are a chatbot."]
+ )
+ get_current_weather_func = generative_models.FunctionDeclaration.from_func(
+ get_current_weather
+ )
+ weather_tool = generative_models.Tool(
+ function_declarations=[get_current_weather_func],
+ )
+ content = ["Why is sky blue?", "Explain it like I'm 5."]
+
+ response_without_si = plain_model.count_tokens(content)
+ response_with_si = model.count_tokens(content)
+ response_with_si_and_tool = model.count_tokens(
+ content,
+ tools=[weather_tool],
+ )
+
+ # system instruction + user prompt
+ assert response_with_si.total_tokens > response_without_si.total_tokens
+ assert (
+ response_with_si.total_billable_characters
+ > response_without_si.total_billable_characters
+ )
+ # system instruction + user prompt + tool
+ assert response_with_si_and_tool.total_tokens > response_with_si.total_tokens
+ assert (
+ response_with_si_and_tool.total_billable_characters
+ > response_with_si.total_billable_characters
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_prompts.py b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_prompts.py
new file mode 100644
index 0000000000000000000000000000000000000000..62064318262993049086a510d11b0e78d399850f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_prompts.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=protected-access, g-multiple-import
+"""System tests for GenAI prompts."""
+
+from google.cloud import aiplatform
+from vertexai import generative_models
+from vertexai.generative_models import (
+ GenerationConfig,
+ SafetySetting,
+ ToolConfig,
+)
+from vertexai.preview import prompts
+from vertexai.preview.prompts import Prompt
+
+from tests.system.aiplatform import e2e_base
+from google import auth
+
+_REQUEST_FUNCTION_PARAMETER_SCHEMA_STRUCT = {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA",
+ },
+ "unit": {
+ "type": "string",
+ "enum": [
+ "celsius",
+ "fahrenheit",
+ ],
+ },
+ },
+ "required": ["location"],
+}
+
+
+class TestPrompts(e2e_base.TestEndToEnd):
+ """System tests for prompts."""
+
+ _temp_prefix = "temp_prompts_test_"
+
+ def setup_method(self):
+ super().setup_method()
+ credentials, _ = auth.default(
+ scopes=["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ credentials=credentials,
+ )
+
+ def test_create_prompt_with_variables(self):
+ # Create local Prompt
+ prompt = Prompt(
+ prompt_data="Hello, {name}! Today is {day}. How are you?",
+ variables=[
+ {"name": "Alice", "day": "Monday"},
+ {"name": "Bob", "day": "Tuesday"},
+ ],
+ generation_config=GenerationConfig(temperature=0.1),
+ model_name="gemini-1.0-pro-002",
+ safety_settings=[
+ SafetySetting(
+ category=SafetySetting.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold=SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+ method=SafetySetting.HarmBlockMethod.SEVERITY,
+ )
+ ],
+ system_instruction="Please answer in a short sentence.",
+ )
+
+ # Generate content using the assembled prompt for each variable set.
+ for i in range(len(prompt.variables)):
+ prompt.generate_content(
+ contents=prompt.assemble_contents(**prompt.variables[i])
+ )
+
+ # Save Prompt to online resource. Returns a new Prompt object associated with the online resource
+ prompt1 = prompts.create_version(prompt=prompt)
+
+ # Only new prompt should be associated with a prompt resource
+ assert prompt1.prompt_id
+ assert not prompt.prompt_id
+
+ # Update prompt and save a new version
+ prompt1.prompt_data = "Hi, {name}! How are you? Today is {day}."
+ prompt2 = prompts.create_version(prompt=prompt1, version_name="v2")
+ assert prompt2.prompt_id == prompt1.prompt_id
+ assert prompt2.version_id != prompt1.version_id
+
+ # Restore previous version
+ metadata = prompts.restore_version(
+ prompt_id=prompt2.prompt_id, version_id=prompt1.version_id
+ )
+ assert metadata.prompt_id == prompt2.prompt_id
+ assert metadata.version_id != prompt2.version_id
+
+ # List prompt versions
+ versions_metadata = prompts.list_versions(prompt_id=metadata.prompt_id)
+ assert len(versions_metadata) == 3
+
+ # Delete the prompt resource
+ prompts.delete(prompt_id=prompt2.prompt_id)
+
+ def test_create_prompt_with_function_calling(self):
+ # Create local Prompt
+ get_current_weather_func = generative_models.FunctionDeclaration(
+ name="get_current_weather",
+ description="Get the current weather in a given location",
+ parameters=_REQUEST_FUNCTION_PARAMETER_SCHEMA_STRUCT,
+ )
+ weather_tool = generative_models.Tool(
+ function_declarations=[get_current_weather_func],
+ )
+
+ tool_config = ToolConfig(
+ function_calling_config=ToolConfig.FunctionCallingConfig(
+ mode=ToolConfig.FunctionCallingConfig.Mode.ANY,
+ allowed_function_names=["get_current_weather"],
+ )
+ )
+
+ prompt = Prompt(
+ prompt_data="What is the weather like in Boston?",
+ tools=[weather_tool],
+ tool_config=tool_config,
+ model_name="gemini-1.0-pro-002",
+ )
+
+ # (Optional) Create a separate prompt resource to save the version to
+ prompt_temp = Prompt(model_name="gemini-1.0-pro-002")
+ prompt_temp1 = prompts.create_version(prompt=prompt_temp, version_name="empty")
+
+ # Create a new version to an existing prompt
+ prompt1 = prompts.create_version(
+ prompt=prompt, prompt_id=prompt_temp1.prompt_id, version_name="fc"
+ )
+
+ # Delete the prompt resource
+ prompts.delete(prompt_id=prompt1.prompt_id)
+
+ def test_get_prompt_with_variables(self):
+ # List prompts
+ prompts_list = prompts.list()
+ assert prompts_list
+
+ # Get prompt created in UI
+ prompt_id = "3217694940163211264"
+ prompt = prompts.get(prompt_id=prompt_id)
+ assert prompt.prompt_id == prompt_id
+ assert prompt.prompt_data
+ assert prompt.generation_config
+ assert prompt.system_instruction
+ # UI has a bug where safety settings are not saved
+ # assert prompt.safety_settings
+
+ # Generate content using the assembled prompt for each variable set.
+ for i in range(len(prompt.variables)):
+ response = prompt.generate_content(
+ contents=prompt.assemble_contents(**prompt.variables[i])
+ )
+ assert response.text
+
+ def test_get_prompt_with_function_calling(self):
+ # List prompts
+ prompts_list = prompts.list()
+ assert prompts_list
+
+ # Get prompt created in UI
+ prompt_id = "1173060709337006080"
+ prompt = prompts.get(prompt_id=prompt_id)
+ assert prompt.prompt_id == prompt_id
+ assert prompt.tools
+
+ # Generate content using the prompt
+ response = prompt.generate_content(contents=prompt.assemble_contents())
+ assert response
diff --git a/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_reasoning_engines.py b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_reasoning_engines.py
new file mode 100644
index 0000000000000000000000000000000000000000..74f5e344674278884894bb3829009c508471a032
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_reasoning_engines.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""System tests for reasoning engines."""
+import traceback
+import pytest
+from google import auth
+from google.api_core import exceptions
+from google.cloud import storage
+import vertexai
+from tests.system.aiplatform import e2e_base
+from vertexai.preview import reasoning_engines
+
+
+_BLOB_FILENAME = vertexai.reasoning_engines._reasoning_engines._BLOB_FILENAME
+
+
+@pytest.mark.usefixtures(
+ "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources"
+)
+class TestReasoningEngines(e2e_base.TestEndToEnd):
+ """System tests for reasoning engines."""
+
+ _temp_prefix = "test-reasoning-engine"
+
+ def test_langchain_template(self, shared_state):
+ super().setup_method()
+ credentials, _ = auth.default(
+ scopes=["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ vertexai.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ staging_bucket=f"gs://{shared_state['staging_bucket_name']}",
+ credentials=credentials,
+ )
+
+ # System tests are currently affected by contamination in the Gemini
+ # model and ToolConfig test fixture.
+ # To eliminate false positives, we are mocking the runnnable builder to
+ # make the system tests hermetic.
+ # This change will be reverted once the the test fixture is corrected.
+ class LangchainAgentNoDependencies:
+ """LangChain Agent with no dependencies."""
+
+ def invoke(self, input, **kwargs) -> str:
+ return "Testing langchain agent with no dependencies."
+
+ def runnable_builder(**kwargs):
+ """Creates a LangChain Runnable."""
+ return LangchainAgentNoDependencies()
+
+ # Test prebuilt langchain_template
+ created_app = reasoning_engines.ReasoningEngine.create(
+ reasoning_engines.LangchainAgent(
+ model="gemini-1.5-pro-preview-0409",
+ runnable_builder=runnable_builder,
+ ),
+ requirements=["google-cloud-aiplatform[reasoningengine,langchain]"],
+ display_name="test-display-name",
+ description="test-description",
+ gcs_dir_name="test-gcs-dir-name",
+ )
+ shared_state.setdefault("resources", [])
+ shared_state["resources"].append(created_app) # Deletion at teardown.
+ got_app = reasoning_engines.ReasoningEngine(created_app.resource_name)
+
+ # Test resource attributes
+ assert isinstance(created_app.resource_name, str)
+ assert got_app.resource_name == created_app.resource_name
+ assert got_app.gca_resource.name == got_app.resource_name
+ assert got_app.gca_resource.display_name == "test-display-name"
+ assert got_app.gca_resource.description == "test-description"
+
+ # Test operation schemas
+ assert got_app.operation_schemas() == created_app.operation_schemas()
+
+ # Test query response
+ # (Wrap in a try-except block because of non-determinism from Gemini.)
+ try:
+ response = created_app.query(input="hello")
+ assert response.get("input") == "hello"
+ response = got_app.query(input="hello")
+ assert response.get("input") == "hello"
+ except exceptions.FailedPrecondition:
+ traceback.print_exc()
+ except Exception:
+ traceback.print_exc()
+
+ # Test GCS Bucket subdirectory creation
+ # Original: https://github.com/googleapis/python-aiplatform/issues/3650
+ client = storage.Client(project=e2e_base._PROJECT)
+ bucket = client.bucket(shared_state["staging_bucket_name"])
+ assert bucket.exists()
+ assert bucket.get_blob(f"test-gcs-dir-name/{_BLOB_FILENAME}").exists()
diff --git a/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_tokenization.py b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_tokenization.py
new file mode 100644
index 0000000000000000000000000000000000000000..105bcf0a2d9e425ba9e0199a288f7951005cf277
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/system/vertexai/test_tokenization.py
@@ -0,0 +1,287 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import pytest
+import nltk
+
+from nltk.corpus import udhr
+from google.cloud import aiplatform
+from vertexai.preview.tokenization import (
+ get_tokenizer_for_model as tokenizer_preview,
+)
+from vertexai.tokenization._tokenizers import (
+ get_tokenizer_for_model as tokenizer_ga,
+)
+from vertexai.generative_models import (
+ GenerativeModel,
+ Part,
+ Tool,
+)
+from tests.system.aiplatform import e2e_base
+from google import auth
+from google.cloud.aiplatform_v1beta1.types import (
+ content as gapic_content_types,
+ tool as gapic_tool_types,
+ openapi,
+)
+from google.protobuf import struct_pb2
+
+
+_MODELS = [
+ "gemini-1.0-pro",
+ "gemini-1.5-pro",
+ "gemini-1.5-flash",
+ "gemini-1.5-flash-002",
+ "gemini-1.5-pro-002",
+]
+_CORPUS = [
+ "udhr",
+]
+_CORPUS_LIB = [
+ udhr,
+]
+_VERSIONED_TOKENIZER = [tokenizer_preview, tokenizer_ga]
+_MODEL_CORPUS_PARAMS = [
+ (get_tokenizer_for_model, model_name, corpus_name, corpus_lib)
+ for get_tokenizer_for_model in _VERSIONED_TOKENIZER
+ for model_name in _MODELS
+ for (corpus_name, corpus_lib) in zip(_CORPUS, _CORPUS_LIB)
+]
+_STRUCT = struct_pb2.Struct(
+ fields={
+ "string_key": struct_pb2.Value(string_value="value"),
+ }
+)
+_FUNCTION_CALL = gapic_tool_types.FunctionCall(name="test_function_call", args=_STRUCT)
+_FUNCTION_RESPONSE = gapic_tool_types.FunctionResponse(
+ name="function_response",
+ response=_STRUCT,
+)
+
+
+_SCHEMA_1 = openapi.Schema(format="schema1_format", description="schema1_description")
+_SCHEMA_2 = openapi.Schema(format="schema2_format", description="schema2_description")
+_EXAMPLE = struct_pb2.Value(string_value="value1")
+
+_FUNCTION_DECLARATION_1 = gapic_tool_types.FunctionDeclaration(
+ name="function_declaration_name",
+ description="function_declaration_description",
+ parameters=openapi.Schema(
+ format="schema_format",
+ description="schema_description",
+ enum=["schema_enum1", "schema_enum2"],
+ required=["schema_required1", "schema_required2"],
+ items=_SCHEMA_2,
+ properties={"property_key": _SCHEMA_1},
+ example=_EXAMPLE,
+ ),
+)
+_FUNCTION_DECLARATION_2 = gapic_tool_types.FunctionDeclaration(
+ parameters=openapi.Schema(
+ nullable=True,
+ default=struct_pb2.Value(string_value="value1"),
+ min_items=0,
+ max_items=0,
+ min_properties=0,
+ max_properties=0,
+ minimum=0,
+ maximum=0,
+ min_length=0,
+ max_length=0,
+ pattern="pattern",
+ ),
+ response=_SCHEMA_1,
+)
+
+STAGING_API_ENDPOINT = "STAGING_ENDPOINT"
+PROD_API_ENDPOINT = "PROD_ENDPOINT"
+
+
+@pytest.mark.parametrize("api_endpoint_env_name", [PROD_API_ENDPOINT])
+class TestTokenization(e2e_base.TestEndToEnd):
+ """System tests for tokenization."""
+
+ _temp_prefix = "temp_tokenization_test_"
+
+ @pytest.fixture(scope="function", autouse=True)
+ def setup_method(self, api_endpoint_env_name):
+ super().setup_method()
+ credentials, _ = auth.default(
+ scopes=["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ if api_endpoint_env_name == STAGING_API_ENDPOINT:
+ api_endpoint = os.getenv(api_endpoint_env_name)
+ else:
+ api_endpoint = None
+ aiplatform.init(
+ project=e2e_base._PROJECT,
+ location=e2e_base._LOCATION,
+ credentials=credentials,
+ api_endpoint=api_endpoint,
+ )
+
+ @pytest.mark.parametrize(
+ "get_tokenizer_for_model, model_name, corpus_name, corpus_lib",
+ _MODEL_CORPUS_PARAMS,
+ )
+ def test_count_tokens_local(
+ self,
+ get_tokenizer_for_model,
+ model_name,
+ corpus_name,
+ corpus_lib,
+ api_endpoint_env_name,
+ ):
+ # The Gemini 1.5 flash model requires the model version
+ # number suffix (001) in staging only
+ if api_endpoint_env_name == STAGING_API_ENDPOINT and "-00" not in model_name:
+ model_name = model_name + "-001"
+ tokenizer = get_tokenizer_for_model(model_name)
+ model = GenerativeModel(model_name)
+ nltk.download(corpus_name, quiet=True)
+ for id, book in enumerate(corpus_lib.fileids()):
+ text = corpus_lib.raw(book)
+ service_result = model.count_tokens(text)
+ local_result = tokenizer.count_tokens(text)
+ assert service_result.total_tokens == local_result.total_tokens
+
+ @pytest.mark.parametrize(
+ "get_tokenizer_for_model, model_name, corpus_name, corpus_lib",
+ _MODEL_CORPUS_PARAMS,
+ )
+ def test_compute_tokens(
+ self,
+ get_tokenizer_for_model,
+ model_name,
+ corpus_name,
+ corpus_lib,
+ api_endpoint_env_name,
+ ):
+ # The Gemini 1.5 flash model requires the model version
+ # number suffix (001) in staging only
+ if api_endpoint_env_name == STAGING_API_ENDPOINT:
+ model_name = model_name + "-001"
+ tokenizer = get_tokenizer_for_model(model_name)
+ model = GenerativeModel(model_name)
+ nltk.download(corpus_name, quiet=True)
+ for id, book in enumerate(corpus_lib.fileids()):
+ text = corpus_lib.raw(book)
+ response = model.compute_tokens(text)
+ local_result = tokenizer.compute_tokens(text)
+ for local, service in zip(local_result.tokens_info, response.tokens_info):
+ assert local.tokens == service.tokens
+ assert local.token_ids == service.token_ids
+
+ @pytest.mark.parametrize(
+ "model_name",
+ _MODELS,
+ )
+ def test_count_tokens_system_instruction(self, model_name):
+ tokenizer = tokenizer_preview(model_name)
+ model = GenerativeModel(model_name, system_instruction=["You are a chatbot."])
+
+ assert (
+ tokenizer.count_tokens(
+ "hello", system_instruction=["You are a chatbot."]
+ ).total_tokens
+ == model.count_tokens("hello").total_tokens
+ )
+
+ @pytest.mark.parametrize(
+ "model_name",
+ _MODELS,
+ )
+ def test_count_tokens_system_instruction_is_function_call(self, model_name):
+ part = Part._from_gapic(gapic_content_types.Part(function_call=_FUNCTION_CALL))
+
+ tokenizer = tokenizer_preview(model_name)
+ model = GenerativeModel(model_name, system_instruction=[part])
+
+ assert (
+ tokenizer.count_tokens("hello", system_instruction=[part]).total_tokens
+ == model.count_tokens("hello").total_tokens
+ )
+
+ @pytest.mark.parametrize(
+ "model_name",
+ _MODELS,
+ )
+ def test_count_tokens_system_instruction_is_function_response(self, model_name):
+ part = Part._from_gapic(
+ gapic_content_types.Part(function_response=_FUNCTION_RESPONSE)
+ )
+ tokenizer = tokenizer_preview(model_name)
+ model = GenerativeModel(model_name, system_instruction=[part])
+
+ assert tokenizer.count_tokens(part, system_instruction=[part]).total_tokens
+ assert (
+ tokenizer.count_tokens("hello", system_instruction=[part]).total_tokens
+ == model.count_tokens("hello").total_tokens
+ )
+
+ @pytest.mark.parametrize(
+ "model_name",
+ _MODELS,
+ )
+ def test_count_tokens_tool_is_function_declaration(self, model_name):
+ tokenizer = tokenizer_preview(model_name)
+ model = GenerativeModel(model_name)
+ tool1 = Tool._from_gapic(
+ gapic_tool_types.Tool(function_declarations=[_FUNCTION_DECLARATION_1])
+ )
+ tool2 = Tool._from_gapic(
+ gapic_tool_types.Tool(function_declarations=[_FUNCTION_DECLARATION_2])
+ )
+
+ assert tokenizer.count_tokens("hello", tools=[tool1]).total_tokens
+ with pytest.raises(ValueError):
+ tokenizer.count_tokens("hello", tools=[tool2]).total_tokens
+ assert (
+ tokenizer.count_tokens("hello", tools=[tool1]).total_tokens
+ == model.count_tokens("hello", tools=[tool1]).total_tokens
+ )
+
+ @pytest.mark.parametrize(
+ "model_name",
+ _MODELS,
+ )
+ def test_count_tokens_content_is_function_call(self, model_name):
+ part = Part._from_gapic(gapic_content_types.Part(function_call=_FUNCTION_CALL))
+ tokenizer = tokenizer_preview(model_name)
+ model = GenerativeModel(model_name)
+
+ assert tokenizer.count_tokens(part).total_tokens
+ assert (
+ tokenizer.count_tokens(part).total_tokens
+ == model.count_tokens(part).total_tokens
+ )
+
+ @pytest.mark.parametrize(
+ "model_name",
+ _MODELS,
+ )
+ def test_count_tokens_content_is_function_response(self, model_name):
+ part = Part._from_gapic(
+ gapic_content_types.Part(function_response=_FUNCTION_RESPONSE)
+ )
+ tokenizer = tokenizer_preview(model_name)
+ model = GenerativeModel(model_name)
+
+ assert tokenizer.count_tokens(part).total_tokens
+ assert (
+ tokenizer.count_tokens(part).total_tokens
+ == model.count_tokens(part).total_tokens
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/conftest.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..7976049498fe28bd9e726df0673a223dddc24711
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/conftest.py
@@ -0,0 +1,515 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from google import auth
+from google.api_core import operation
+from google.auth import credentials as auth_credentials
+
+from unittest import mock
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform.utils import source_utils
+import constants as test_constants
+from google.cloud.aiplatform.metadata import constants as metadata_constants
+from google.cloud.aiplatform.compat.services import (
+ metadata_service_client_v1,
+ model_service_client,
+ tensorboard_service_client,
+ pipeline_service_client,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ context,
+ endpoint,
+ metadata_store,
+ endpoint_service,
+ model,
+ model_service,
+ pipeline_job,
+ pipeline_state,
+ tensorboard,
+ tensorboard_service,
+ dataset,
+ prediction_service,
+ training_pipeline,
+)
+
+
+from google.cloud.aiplatform.compat.services import (
+ dataset_service_client,
+ endpoint_service_client,
+ prediction_service_client,
+)
+
+
+# Module-scoped fixtures
+@pytest.fixture(scope="module")
+def google_auth_mock():
+ with mock.patch.object(auth, "default") as google_auth_mock:
+ google_auth_mock.return_value = (
+ auth_credentials.AnonymousCredentials(),
+ "test-project",
+ )
+ yield google_auth_mock
+
+
+# Training job fixtures
+@pytest.fixture
+def mock_python_package_to_gcs():
+ with mock.patch.object(
+ source_utils._TrainingScriptPythonPackager, "package_and_copy_to_gcs"
+ ) as mock_package_to_copy_gcs:
+ mock_package_to_copy_gcs.return_value = (
+ test_constants.TrainingJobConstants._TEST_OUTPUT_PYTHON_PACKAGE_PATH
+ )
+ yield mock_package_to_copy_gcs
+
+
+# Model fixtures
+@pytest.fixture
+def upload_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "upload_model"
+ ) as upload_model_mock:
+ mock_lro = mock.Mock(operation.Operation)
+ mock_lro.result.return_value = model_service.UploadModelResponse(
+ model=test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+ upload_model_mock.return_value = mock_lro
+ yield upload_model_mock
+
+
+@pytest.fixture
+def get_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = model.Model(
+ display_name=test_constants.ModelConstants._TEST_MODEL_NAME,
+ name=test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_version_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = (
+ test_constants.ModelConstants._TEST_MODEL_OBJ_WITH_VERSION
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def deploy_model_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "deploy_model"
+ ) as deploy_model_mock:
+ deployed_model = endpoint.DeployedModel(
+ model=test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME,
+ display_name=test_constants.ModelConstants._TEST_MODEL_NAME,
+ )
+ deploy_model_lro_mock = mock.Mock(operation.Operation)
+ deploy_model_lro_mock.result.return_value = (
+ endpoint_service.DeployModelResponse(
+ deployed_model=deployed_model,
+ )
+ )
+ deploy_model_mock.return_value = deploy_model_lro_mock
+ yield deploy_model_mock
+
+
+# Tensorboard fixtures
+@pytest.fixture
+def get_tensorboard_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient, "get_tensorboard"
+ ) as get_tensorboard_mock:
+ get_tensorboard_mock.return_value = tensorboard.Tensorboard(
+ name=test_constants.TensorboardConstants._TEST_TENSORBOARD_NAME,
+ display_name=test_constants.TensorboardConstants._TEST_DISPLAY_NAME,
+ encryption_spec=test_constants.ProjectConstants._TEST_ENCRYPTION_SPEC,
+ )
+ yield get_tensorboard_mock
+
+
+@pytest.fixture
+def create_tensorboard_experiment_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "create_tensorboard_experiment",
+ ) as create_tensorboard_experiment_mock:
+ create_tensorboard_experiment_mock.return_value = (
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_EXPERIMENT
+ )
+ yield create_tensorboard_experiment_mock
+
+
+@pytest.fixture
+def create_tensorboard_run_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "create_tensorboard_run",
+ ) as create_tensorboard_run_mock:
+ create_tensorboard_run_mock.return_value = (
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_RUN
+ )
+ yield create_tensorboard_run_mock
+
+
+@pytest.fixture
+def write_tensorboard_run_data_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "write_tensorboard_run_data",
+ ) as write_tensorboard_run_data_mock:
+ yield write_tensorboard_run_data_mock
+
+
+@pytest.fixture
+def create_tensorboard_time_series_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "create_tensorboard_time_series",
+ ) as create_tensorboard_time_series_mock:
+ create_tensorboard_time_series_mock.return_value = (
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_TIME_SERIES
+ )
+ yield create_tensorboard_time_series_mock
+
+
+@pytest.fixture
+def get_tensorboard_run_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "get_tensorboard_run",
+ ) as get_tensorboard_run_mock:
+ get_tensorboard_run_mock.return_value = (
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_RUN
+ )
+ yield get_tensorboard_run_mock
+
+
+@pytest.fixture
+def list_tensorboard_time_series_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "list_tensorboard_time_series",
+ ) as list_tensorboard_time_series_mock:
+ list_tensorboard_time_series_mock.return_value = [
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_TIME_SERIES
+ ]
+ yield list_tensorboard_time_series_mock
+
+
+@pytest.fixture
+def batch_read_tensorboard_time_series_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "batch_read_tensorboard_time_series_data",
+ ) as batch_read_tensorboard_time_series_data_mock:
+ batch_read_tensorboard_time_series_data_mock.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse(
+ time_series_data=[
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_TIME_SERIES_DATA
+ ]
+ )
+ yield batch_read_tensorboard_time_series_data_mock
+
+
+# Endpoint mocks
+@pytest.fixture
+def create_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "create_endpoint"
+ ) as create_endpoint_mock:
+ create_endpoint_lro_mock = mock.Mock(operation.Operation)
+ create_endpoint_lro_mock.result.return_value = endpoint.Endpoint(
+ name=test_constants.EndpointConstants._TEST_ENDPOINT_NAME,
+ display_name=test_constants.EndpointConstants._TEST_DISPLAY_NAME,
+ encryption_spec=test_constants.ProjectConstants._TEST_ENCRYPTION_SPEC,
+ )
+ create_endpoint_mock.return_value = create_endpoint_lro_mock
+ yield create_endpoint_mock
+
+
+@pytest.fixture
+def get_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = endpoint.Endpoint(
+ display_name=test_constants.EndpointConstants._TEST_DISPLAY_NAME,
+ name=test_constants.EndpointConstants._TEST_ENDPOINT_NAME,
+ encryption_spec=test_constants.ProjectConstants._TEST_ENCRYPTION_SPEC,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_endpoint_with_models_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = endpoint.Endpoint(
+ display_name=test_constants.EndpointConstants._TEST_DISPLAY_NAME,
+ name=test_constants.EndpointConstants._TEST_ENDPOINT_NAME,
+ deployed_models=test_constants.EndpointConstants._TEST_DEPLOYED_MODELS,
+ traffic_split=test_constants.EndpointConstants._TEST_TRAFFIC_SPLIT,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def predict_client_predict_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "predict"
+ ) as predict_mock:
+ predict_mock.return_value = prediction_service.PredictResponse(
+ deployed_model_id=test_constants.EndpointConstants._TEST_MODEL_ID,
+ model_version_id=test_constants.EndpointConstants._TEST_VERSION_ID,
+ model=test_constants.EndpointConstants._TEST_MODEL_NAME,
+ )
+ predict_mock.return_value.predictions.extend(
+ test_constants.EndpointConstants._TEST_PREDICTION
+ )
+ yield predict_mock
+
+
+# PipelineJob fixtures
+def make_pipeline_job(state):
+ return pipeline_job.PipelineJob(
+ name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_NAME,
+ state=state,
+ create_time=test_constants.PipelineJobConstants._TEST_PIPELINE_CREATE_TIME,
+ service_account=test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT,
+ network=test_constants.TrainingJobConstants._TEST_NETWORK,
+ job_detail=pipeline_job.PipelineJobDetail(
+ pipeline_run_context=context.Context(
+ name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_NAME,
+ )
+ ),
+ )
+
+
+@pytest.fixture
+def get_pipeline_job_mock():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_pipeline_job(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_pipeline_job(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_pipeline_job(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_pipeline_job(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_pipeline_job(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_pipeline_job(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_pipeline_job(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+# Dataset mocks
+@pytest.fixture
+def create_dataset_mock():
+ with mock.patch.object(
+ dataset_service_client.DatasetServiceClient, "create_dataset"
+ ) as create_dataset_mock:
+ create_dataset_lro_mock = mock.Mock(operation.Operation)
+ create_dataset_lro_mock.result.return_value = dataset.Dataset(
+ name=test_constants.DatasetConstants._TEST_NAME,
+ display_name=test_constants.DatasetConstants._TEST_DISPLAY_NAME,
+ metadata_schema_uri=test_constants.DatasetConstants._TEST_METADATA_SCHEMA_URI_TEXT,
+ encryption_spec=test_constants.DatasetConstants._TEST_ENCRYPTION_SPEC,
+ )
+ create_dataset_mock.return_value = create_dataset_lro_mock
+ yield create_dataset_mock
+
+
+@pytest.fixture
+def get_dataset_mock():
+ with mock.patch.object(
+ dataset_service_client.DatasetServiceClient, "get_dataset"
+ ) as get_dataset_mock:
+ get_dataset_mock.return_value = dataset.Dataset(
+ display_name=test_constants.DatasetConstants._TEST_DISPLAY_NAME,
+ metadata_schema_uri=test_constants.DatasetConstants._TEST_METADATA_SCHEMA_URI_NONTABULAR,
+ name=test_constants.DatasetConstants._TEST_NAME,
+ metadata=test_constants.DatasetConstants._TEST_NONTABULAR_DATASET_METADATA,
+ encryption_spec=test_constants.DatasetConstants._TEST_ENCRYPTION_SPEC,
+ )
+ yield get_dataset_mock
+
+
+@pytest.fixture
+def import_data_mock():
+ with mock.patch.object(
+ dataset_service_client.DatasetServiceClient, "import_data"
+ ) as import_data_mock:
+ import_data_mock.return_value = mock.Mock(operation.Operation)
+ yield import_data_mock
+
+
+# TrainingJob mocks
+@pytest.fixture
+def mock_model_service_get():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as mock_get_model:
+ mock_get_model.return_value = model.Model(
+ name=test_constants.TrainingJobConstants._TEST_MODEL_NAME
+ )
+ mock_get_model.return_value.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ mock_get_model.return_value.version_id = "1"
+ yield mock_get_model
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = training_pipeline.TrainingPipeline(
+ name=test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME,
+ state=pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=model.Model(
+ name=test_constants.TrainingJobConstants._TEST_MODEL_NAME
+ ),
+ )
+ yield mock_create_training_pipeline
+
+
+def make_training_pipeline(state, add_training_task_metadata=True):
+ return training_pipeline.TrainingPipeline(
+ name=test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ model_to_upload=model.Model(
+ name=test_constants.TrainingJobConstants._TEST_MODEL_NAME
+ ),
+ training_task_inputs={
+ "tensorboard": test_constants.TrainingJobConstants._TEST_TENSORBOARD_RESOURCE_NAME
+ },
+ training_task_metadata={
+ "backingCustomJob": test_constants.TrainingJobConstants._TEST_CUSTOM_JOB_RESOURCE_NAME
+ }
+ if add_training_task_metadata
+ else None,
+ )
+
+
+@pytest.fixture
+def mock_pipeline_service_get(make_call=make_training_pipeline):
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.side_effect = [
+ make_call(
+ pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ add_training_task_metadata=False,
+ ),
+ make_call(
+ pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_call(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ ]
+
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_and_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = training_pipeline.TrainingPipeline(
+ name=test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME,
+ state=pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ )
+
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = training_pipeline.TrainingPipeline(
+ name=test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME,
+ state=pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ )
+
+ yield mock_create_training_pipeline, mock_get_training_pipeline
+
+
+# Experiment fixtures
+@pytest.fixture
+def get_experiment_mock():
+ with mock.patch.object(
+ metadata_service_client_v1.MetadataServiceClient, "get_context"
+ ) as get_context_mock:
+ get_context_mock.return_value = (
+ test_constants.ExperimentConstants._EXPERIMENT_MOCK
+ )
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_metadata_store_mock():
+ with mock.patch.object(
+ metadata_service_client_v1.MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.return_value = metadata_store.MetadataStore(
+ name=test_constants.ExperimentConstants._TEST_METADATASTORE,
+ )
+ yield get_metadata_store_mock
+
+
+@pytest.fixture
+def get_context_mock():
+ with mock.patch.object(
+ metadata_service_client_v1.MetadataServiceClient, "get_context"
+ ) as get_context_mock:
+ get_context_mock.return_value = context.Context(
+ name=test_constants.ExperimentConstants._TEST_CONTEXT_NAME,
+ display_name=test_constants.ExperimentConstants._TEST_EXPERIMENT,
+ description=test_constants.ExperimentConstants._TEST_EXPERIMENT_DESCRIPTION,
+ schema_title=metadata_constants.SYSTEM_EXPERIMENT,
+ schema_version=metadata_constants.SCHEMA_VERSIONS[
+ metadata_constants.SYSTEM_EXPERIMENT
+ ],
+ metadata=metadata_constants.EXPERIMENT_METADATA,
+ )
+ yield get_context_mock
+
+
+@pytest.fixture
+def add_context_children_mock():
+ with mock.patch.object(
+ metadata_service_client_v1.MetadataServiceClient, "add_context_children"
+ ) as add_context_children_mock:
+ yield add_context_children_mock
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/constants.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c3897141be79c11abc983061146c53033c2e140
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/constants.py
@@ -0,0 +1,463 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Use this file to store global variables that will be shared across multiple tests
+
+import dataclasses
+from datetime import datetime
+from unittest import mock
+from google.auth import credentials as auth_credentials
+from google.protobuf import timestamp_pb2, duration_pb2
+
+from google.cloud.aiplatform.utils import source_utils
+from google.cloud.aiplatform import explain
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform import schema
+from google.cloud.aiplatform.metadata import constants as metadata_constants
+
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ context,
+ custom_job,
+ encryption_spec,
+ endpoint,
+ io,
+ model,
+ tensorboard_data,
+ tensorboard_experiment,
+ tensorboard_run,
+ tensorboard_time_series,
+)
+
+
+@dataclasses.dataclass(frozen=True)
+class ProjectConstants:
+ """Defines project-specific constants used by tests."""
+
+ _TEST_PROJECT = "test-project"
+ _TEST_LOCATION = "us-central1"
+ _TEST_ENCRYPTION_KEY_NAME = "key_1234"
+ _TEST_ENCRYPTION_SPEC = encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_KEY_NAME
+ )
+ _TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+ _TEST_SERVICE_ACCOUNT = "vinnys@my-project.iam.gserviceaccount.com"
+ _TEST_LABELS = {"my_key": "my_value"}
+
+
+@dataclasses.dataclass(frozen=True)
+class TrainingJobConstants:
+ """Defines constants used by tests that create training jobs."""
+
+ _TEST_OUTPUT_PYTHON_PACKAGE_PATH = "gs://test-staging-bucket/trainer.tar.gz"
+ _TEST_MODULE_NAME = (
+ f"{source_utils._TrainingScriptPythonPackager._ROOT_MODULE}.task"
+ )
+ _TEST_LOCAL_SCRIPT_FILE_NAME = "____test____script.py"
+ _TEST_REQUIREMENTS = ["pandas", "numpy", "tensorflow"]
+ _TEST_ENVIRONMENT_VARIABLES = {
+ "MY_PATH": "/path/to/my_path",
+ }
+ _TEST_REPLICA_COUNT = 1
+ _TEST_MACHINE_TYPE = "n1-standard-4"
+ _TEST_MACHINE_TYPE_TPU = "cloud-tpu"
+ _TEST_MACHINE_TYPE_TPU_V5E = "ct5lp-hightpu-4t"
+ _TEST_ACCELERATOR_TPU_TYPE = "TPU_V3"
+ _TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_K80"
+ _TEST_ACCELERATOR_COUNT = 1
+ _TEST_BOOT_DISK_TYPE = "pd-standard"
+ _TEST_BOOT_DISK_SIZE_GB = 300
+ _TEST_REDUCTION_SERVER_REPLICA_COUNT = 1
+ _TEST_REDUCTION_SERVER_MACHINE_TYPE = "n1-highcpu-16"
+ _TEST_REDUCTION_SERVER_CONTAINER_URI = (
+ "us-docker.pkg.dev/vertex-ai-restricted/training/reductionserver:latest"
+ )
+ _TEST_DATASET_DISPLAY_NAME = "test-dataset-display-name"
+ _TEST_DATASET_NAME = "test-dataset-name"
+ _TEST_DISPLAY_NAME = "test-display-name"
+ _TEST_BUCKET_NAME = "test-bucket"
+ _TEST_GCS_PATH_WITHOUT_BUCKET = "path/to/folder"
+ _TEST_GCS_PATH = f"{_TEST_BUCKET_NAME}/{_TEST_GCS_PATH_WITHOUT_BUCKET}"
+ _TEST_GCS_PATH_WITH_TRAILING_SLASH = f"{_TEST_GCS_PATH}/"
+ _TEST_MODEL_DISPLAY_NAME = "model-display-name"
+ _TEST_MODEL_LABELS = {"model_key": "model_value"}
+ _TEST_STAGING_BUCKET = "gs://test-staging-bucket"
+ _TEST_DISPLAY_NAME = "my_job_1234"
+ _TEST_BASE_OUTPUT_DIR = f"{_TEST_STAGING_BUCKET}/{_TEST_DISPLAY_NAME}"
+ _TEST_ENABLE_WEB_ACCESS = True
+ _TEST_WEB_ACCESS_URIS = {"workerpool0-0": "uri"}
+ _TEST_TRAINING_CONTAINER_IMAGE = "gcr.io/test-training/container:image"
+
+ _TEST_RUN_ARGS = ["-v", "0.1", "--test=arg"]
+
+ _TEST_MACHINE_SPEC = {
+ "machine_type": "n1-standard-4",
+ "accelerator_type": "NVIDIA_TESLA_K80",
+ "accelerator_count": 1,
+ }
+ _TEST_WORKER_POOL_SPEC = [
+ {
+ "machine_spec": _TEST_MACHINE_SPEC,
+ "replica_count": 1,
+ "disk_spec": {"boot_disk_type": "pd-ssd", "boot_disk_size_gb": 100},
+ "container_spec": {
+ "image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": [],
+ "args": _TEST_RUN_ARGS,
+ },
+ }
+ ]
+ _TEST_TPU_V5E_WORKER_POOL_SPEC = [
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE_TPU_V5E,
+ "tpu_topology": "2x2",
+ },
+ "replica_count": 1,
+ "disk_spec": {"boot_disk_type": "pd-ssd", "boot_disk_size_gb": 100},
+ "container_spec": {
+ "image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ },
+ }
+ ]
+ _TEST_TPU_V3_WORKER_POOL_SPEC = [
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE_TPU,
+ "accelerator_type": _TEST_ACCELERATOR_TPU_TYPE,
+ "accelerator_count": 32,
+ },
+ "replica_count": 1,
+ "disk_spec": {"boot_disk_type": "pd-ssd", "boot_disk_size_gb": 100},
+ "container_spec": {
+ "image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ },
+ }
+ ]
+ _TEST_RESERVATION_AFFINITY_WORKER_POOL_SPEC = [
+ {
+ "machine_spec": {
+ "machine_type": "n1-standard-4",
+ "accelerator_type": "NVIDIA_TESLA_K80",
+ "accelerator_count": 1,
+ "reservation_affinity": {
+ "reservation_affinity_type": "ANY_RESERVATION"
+ },
+ },
+ "replica_count": 1,
+ "disk_spec": {"boot_disk_type": "pd-ssd", "boot_disk_size_gb": 100},
+ "container_spec": {
+ "image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": [],
+ "args": _TEST_RUN_ARGS,
+ },
+ }
+ ]
+ _TEST_ID = "1028944691210842416"
+ _TEST_NETWORK = (
+ f"projects/{ProjectConstants._TEST_PROJECT}/global/networks/{_TEST_ID}"
+ )
+ _TEST_RESERVED_IP_RANGES = ["example_ip_range"]
+ _TEST_TIMEOUT = 8000
+ _TEST_TIMEOUT_SECONDS = duration_pb2.Duration(seconds=_TEST_TIMEOUT)
+ _TEST_RESTART_JOB_ON_WORKER_RESTART = True
+ _TEST_DISABLE_RETRIES = True
+ _TEST_MAX_WAIT_DURATION = 8000
+
+ _TEST_BASE_CUSTOM_JOB_PROTO = custom_job.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ job_spec=custom_job.CustomJobSpec(
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_directory=io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ scheduling=custom_job.Scheduling(
+ timeout=_TEST_TIMEOUT_SECONDS,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=(
+ duration_pb2.Duration(seconds=_TEST_MAX_WAIT_DURATION)
+ ),
+ ),
+ service_account=ProjectConstants._TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ ),
+ labels=ProjectConstants._TEST_LABELS,
+ encryption_spec=ProjectConstants._TEST_ENCRYPTION_SPEC,
+ )
+ _TEST_PIPELINE_RESOURCE_NAME = f"projects/{ProjectConstants._TEST_PROJECT}/locations/us-central1/trainingPipelines/{_TEST_ID}"
+ _TEST_BUCKET_NAME = "test-bucket"
+ _TEST_TENSORBOARD_RESOURCE_NAME = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/tensorboards/{_TEST_ID}"
+ _TEST_MODEL_NAME = f"projects/{ProjectConstants._TEST_PROJECT}/locations/us-central1/models/{_TEST_ID}"
+ _TEST_CUSTOM_JOB_RESOURCE_NAME = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/customJobs/{_TEST_ID}"
+ _TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials())
+ _TEST_SERVING_CONTAINER_PREDICTION_ROUTE = "predict"
+ _TEST_SERVING_CONTAINER_HEALTH_ROUTE = "metadata"
+ _TEST_MODEL_DISPLAY_NAME = "model-display-name"
+ _TEST_TRAINING_FRACTION_SPLIT = 0.6
+ _TEST_VALIDATION_FRACTION_SPLIT = 0.2
+ _TEST_TEST_FRACTION_SPLIT = 0.2
+ _TEST_BOOT_DISK_TYPE_DEFAULT = "pd-ssd"
+ _TEST_BOOT_DISK_SIZE_GB_DEFAULT = 100
+ # # DUPLICATE: THIS NEEDS TO BE MOVED TO THE TRAINING JOB TEST THAT USES IT
+ _TEST_PIPELINE_RESOURCE_NAME = (
+ "projects/my-project/locations/us-central1/trainingPipelines/12345"
+ )
+ _TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_default"
+ _TEST_SPOT_STRATEGY = custom_job.Scheduling.Strategy.SPOT
+
+ def create_tpu_job_proto(tpu_version):
+ worker_pool_spec = (
+ TrainingJobConstants._TEST_TPU_V5E_WORKER_POOL_SPEC
+ if tpu_version == "v5e"
+ else TrainingJobConstants._TEST_TPU_V3_WORKER_POOL_SPEC
+ )
+ return custom_job.CustomJob(
+ display_name=TrainingJobConstants._TEST_DISPLAY_NAME,
+ job_spec=custom_job.CustomJobSpec(
+ worker_pool_specs=worker_pool_spec,
+ base_output_directory=io.GcsDestination(
+ output_uri_prefix=TrainingJobConstants._TEST_BASE_OUTPUT_DIR
+ ),
+ scheduling=custom_job.Scheduling(
+ timeout=TrainingJobConstants._TEST_TIMEOUT_SECONDS,
+ restart_job_on_worker_restart=TrainingJobConstants._TEST_RESTART_JOB_ON_WORKER_RESTART,
+ ),
+ service_account=ProjectConstants._TEST_SERVICE_ACCOUNT,
+ network=TrainingJobConstants._TEST_NETWORK,
+ ),
+ )
+
+
+@dataclasses.dataclass(frozen=True)
+class ModelConstants:
+ """Defines constants used by tests that create model resources."""
+
+ _TEST_MODEL_NAME = "123"
+ _TEST_ID = "1028944691210842416"
+ _TEST_VERSION_ID = "2"
+ _TEST_MODEL_RESOURCE_NAME = model_service_client.ModelServiceClient.model_path(
+ ProjectConstants._TEST_PROJECT, ProjectConstants._TEST_LOCATION, _TEST_ID
+ )
+ _TEST_MODEL_PARENT = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/models/{_TEST_MODEL_NAME}"
+ _TEST_VERSION_ALIAS_1 = "myalias"
+ _TEST_VERSION_ALIAS_2 = "youralias"
+ _TEST_MODEL_VERSION_DESCRIPTION_2 = "My version 2 description"
+ _TEST_SERVING_CONTAINER_IMAGE = "gcr.io/test-serving/container:image"
+ _TEST_EXPLANATION_PARAMETERS = explain.ExplanationParameters(
+ {"sampled_shapley_attribution": {"path_count": 10}}
+ )
+ _TEST_LABEL = {"team": "experimentation", "trial_id": "x435"}
+ _TEST_MODEL_OBJ_WITH_VERSION = model.Model(
+ version_id=_TEST_VERSION_ID,
+ create_time=timestamp_pb2.Timestamp(),
+ update_time=timestamp_pb2.Timestamp(),
+ display_name=_TEST_MODEL_NAME,
+ name=f"{_TEST_MODEL_PARENT}@{_TEST_VERSION_ID}",
+ version_aliases=[_TEST_VERSION_ALIAS_1, _TEST_VERSION_ALIAS_2],
+ version_description=_TEST_MODEL_VERSION_DESCRIPTION_2,
+ )
+ _TEST_MODEL_EVAL_METRICS = {
+ "auPrc": 0.80592036,
+ "auRoc": 0.8100363,
+ "logLoss": 0.53061414,
+ "confidenceMetrics": [
+ {
+ "confidenceThreshold": -0.01,
+ "recall": 1.0,
+ "precision": 0.5,
+ "falsePositiveRate": 1.0,
+ "f1Score": 0.6666667,
+ "recallAt1": 1.0,
+ "precisionAt1": 0.5,
+ "falsePositiveRateAt1": 1.0,
+ "f1ScoreAt1": 0.6666667,
+ "truePositiveCount": "415",
+ "falsePositiveCount": "415",
+ },
+ {
+ "recall": 1.0,
+ "precision": 0.5,
+ "falsePositiveRate": 1.0,
+ "f1Score": 0.6666667,
+ "recallAt1": 0.74216866,
+ "precisionAt1": 0.74216866,
+ "falsePositiveRateAt1": 0.25783134,
+ "f1ScoreAt1": 0.74216866,
+ "truePositiveCount": "415",
+ "falsePositiveCount": "415",
+ },
+ ],
+ }
+
+
+@dataclasses.dataclass(frozen=True)
+class EndpointConstants:
+ """Defines constants used by tests that create endpoints."""
+
+ _TEST_DISPLAY_NAME = "test-display-name"
+ _TEST_DISPLAY_NAME_2 = "test-display-name-2"
+ _TEST_DISPLAY_NAME_3 = "test-display-name-3"
+ _TEST_ID = "1028944691210842416"
+ _TEST_ID_2 = "4366591682456584192"
+ _TEST_ID_3 = "5820582938582924817"
+ _TEST_ENDPOINT_NAME = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/endpoints/{_TEST_ID}"
+ _TEST_ENDPOINT_NAME_2 = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/endpoints/{_TEST_ID_2}"
+ _TEST_DISPLAY_NAME = "test-display-name"
+ _TEST_DEPLOYED_MODELS = [
+ endpoint.DeployedModel(id=_TEST_ID, display_name=_TEST_DISPLAY_NAME),
+ endpoint.DeployedModel(id=_TEST_ID_2, display_name=_TEST_DISPLAY_NAME_2),
+ endpoint.DeployedModel(id=_TEST_ID_3, display_name=_TEST_DISPLAY_NAME_3),
+ ]
+ _TEST_TRAFFIC_SPLIT = {_TEST_ID: 0, _TEST_ID_2: 100, _TEST_ID_3: 0}
+ _TEST_MODEL_ID = "1028944691210842416"
+ _TEST_PREDICTION = [[1.0, 2.0, 3.0], [3.0, 3.0, 1.0]]
+ _TEST_VERSION_ID = "1"
+ _TEST_MODEL_NAME = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/models/{_TEST_ID}"
+
+
+@dataclasses.dataclass(frozen=True)
+class TensorboardConstants:
+ """Defines constants used by tests that create Tensorboard resources."""
+
+ _TEST_ID = "1028944691210842416"
+ _TEST_DISPLAY_NAME = "my_tensorboard_1234"
+ _TEST_TENSORBOARD_NAME = f"{ProjectConstants._TEST_PARENT}/tensorboards/{_TEST_ID}"
+ _TEST_TENSORBOARD_EXPERIMENT_ID = "test-experiment"
+ _TEST_TENSORBOARD_EXPERIMENT_NAME = (
+ f"{_TEST_TENSORBOARD_NAME}/experiments/{_TEST_TENSORBOARD_EXPERIMENT_ID}"
+ )
+
+ _TEST_TENSORBOARD_RUN_ID = "test-run"
+ _TEST_TENSORBOARD_RUN_NAME = (
+ f"{_TEST_TENSORBOARD_EXPERIMENT_NAME}/runs/{_TEST_TENSORBOARD_RUN_ID}"
+ )
+ _TEST_TENSORBOARD_RUN = tensorboard_run.TensorboardRun(
+ name=_TEST_TENSORBOARD_RUN_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ _TEST_TENSORBOARD_TIME_SERIES_ID = "test-time-series"
+ _TEST_TENSORBOARD_TIME_SERIES_NAME = (
+ f"{_TEST_TENSORBOARD_RUN_NAME}/timeSeries/{_TEST_TENSORBOARD_TIME_SERIES_ID}"
+ )
+ _TEST_TIME_SERIES_DISPLAY_NAME = "accuracy"
+ _TEST_TENSORBOARD_TIME_SERIES = tensorboard_time_series.TensorboardTimeSeries(
+ name=_TEST_TENSORBOARD_TIME_SERIES_NAME,
+ display_name=_TEST_TIME_SERIES_DISPLAY_NAME,
+ value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ )
+ _TEST_TENSORBOARD_TIME_SERIES_DATA = tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id=_TEST_TENSORBOARD_TIME_SERIES_ID,
+ value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ tensorboard_data.TimeSeriesDataPoint(
+ scalar=tensorboard_data.Scalar(value=1.0),
+ step=1,
+ wall_time=utils.get_timestamp_proto(),
+ )
+ ],
+ )
+
+ _TEST_TENSORBOARD_EXPERIMENT = tensorboard_experiment.TensorboardExperiment(
+ name=_TEST_TENSORBOARD_EXPERIMENT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+
+@dataclasses.dataclass(frozen=True)
+class PipelineJobConstants:
+ """Defines constants used by tests that create PipelineJob resources."""
+
+ _TEST_PIPELINE_JOB_ID = "sample-test-pipeline-202111111"
+ _TEST_PIPELINE_JOB_NAME = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/pipelineJobs/{_TEST_PIPELINE_JOB_ID}"
+ _TEST_PIPELINE_CREATE_TIME = datetime.now()
+
+
+@dataclasses.dataclass(frozen=True)
+class DatasetConstants:
+ """Defines constants used by tests that create Dataset resources."""
+
+ _TEST_ID = "1028944691210842416"
+ _TEST_NAME = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/datasets/{_TEST_ID}"
+ _TEST_DISPLAY_NAME = "my_dataset_1234"
+ _TEST_ENCRYPTION_KEY_NAME = "key_1234"
+ _TEST_METADATA_SCHEMA_URI_TEXT = schema.dataset.metadata.text
+ _TEST_ENCRYPTION_SPEC = encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_KEY_NAME
+ )
+ _TEST_METADATA_SCHEMA_URI_NONTABULAR = schema.dataset.metadata.image
+ _TEST_NONTABULAR_DATASET_METADATA = None
+ _TEST_IMPORT_SCHEMA_URI = schema.dataset.ioformat.image.single_label_classification
+ _TEST_IMPORT_SCHEMA_URI_IMAGE = (
+ schema.dataset.ioformat.image.single_label_classification
+ )
+ _TEST_DATA_LABEL_ITEMS = None
+ _TEST_REQUEST_METADATA = ()
+ _TEST_SOURCE_URI_GCS = "gs://my-bucket/my_index_file.jsonl"
+
+
+@dataclasses.dataclass(frozen=True)
+class ExperimentConstants:
+ """Defines constants used by Experiments and Metadata tests."""
+
+ _TEST_EXPERIMENT = "test-experiment"
+ _TEST_CONTEXT_ID = _TEST_EXPERIMENT
+ _TEST_METADATA_PARENT = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/metadataStores/default"
+ _TEST_CONTEXT_NAME = f"{_TEST_METADATA_PARENT}/contexts/{_TEST_CONTEXT_ID}"
+ _TEST_EXPERIMENT_DESCRIPTION = "test-experiment-description"
+
+ _EXPERIMENT_MOCK = context.Context(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ description=_TEST_EXPERIMENT_DESCRIPTION,
+ schema_title=metadata_constants.SYSTEM_EXPERIMENT,
+ schema_version=metadata_constants.SCHEMA_VERSIONS[
+ metadata_constants.SYSTEM_EXPERIMENT
+ ],
+ metadata={**metadata_constants.EXPERIMENT_METADATA},
+ )
+
+ _TEST_METADATASTORE = f"projects/{ProjectConstants._TEST_PROJECT}/locations/{ProjectConstants._TEST_LOCATION}/metadataStores/default"
+
+
+@dataclasses.dataclass(frozen=True)
+class MatchingEngineConstants:
+ """Defines constants used by tests that create MatchingEngine resources."""
+
+ _TEST_INDEX_ID = "index_id"
+ _TEST_INDEX_NAME = f"{ProjectConstants._TEST_PARENT}/indexes/{_TEST_INDEX_ID}"
+ _TEST_INDEX_DISPLAY_NAME = "index_display_name"
+ _TEST_INDEX_DESCRIPTION = "index_description"
+ _TEST_LABELS = {"my_key": "my_value"}
+ _TEST_LABELS_UPDATE = {"my_key_update": "my_value_update"}
+ _TEST_DISPLAY_NAME_UPDATE = "my new display name"
+ _TEST_DESCRIPTION_UPDATE = "my description update"
+ _TEST_REQUEST_METADATA = ()
+
+
+@dataclasses.dataclass(frozen=True)
+class PersistentResourceConstants:
+ """Defines constants used by tests that create PersistentResource resources."""
+
+ _TEST_PERSISTENT_RESOURCE_ID = "test_persistent_resource_id"
+ _TEST_PERSISTENT_RESOURCE_DISPLAY_NAME = "test_display_name"
+ _TEST_RESOURCE_POOL = {
+ "machine_spec": TrainingJobConstants._TEST_MACHINE_SPEC,
+ "replica_count": 1,
+ }
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_autologging.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_autologging.py
new file mode 100644
index 0000000000000000000000000000000000000000..71a5c06190815c64de9598fbaaed3b938ca248fd
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_autologging.py
@@ -0,0 +1,1009 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import copy
+import datetime
+from importlib import reload
+import os
+from unittest import mock
+from unittest.mock import patch
+
+
+from mlflow import entities as mlflow_entities
+from google.cloud.aiplatform._mlflow_plugin import _vertex_mlflow_tracking
+from google.cloud.aiplatform.utils import autologging_utils
+
+import pytest
+from google.api_core import exceptions
+
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform_v1 import (
+ Artifact as GapicArtifact,
+ Context as GapicContext,
+ Execution as GapicExecution,
+ MetadataServiceClient,
+ MetadataStore as GapicMetadataStore,
+ TensorboardServiceClient,
+)
+from google.cloud.aiplatform.compat.types import execution as gca_execution
+from google.cloud.aiplatform.compat.types import (
+ tensorboard_run as gca_tensorboard_run,
+)
+from google.cloud.aiplatform.compat.types import (
+ tensorboard_time_series as gca_tensorboard_time_series,
+)
+from google.cloud.aiplatform.metadata import constants
+import constants as test_constants
+
+from google.cloud.aiplatform.compat.services import (
+ tensorboard_service_client,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ tensorboard as gca_tensorboard,
+)
+from google.cloud.aiplatform.metadata import metadata
+
+
+import test_tensorboard
+import test_metadata
+
+import numpy as np
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_OTHER_PROJECT = "test-project-1"
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_PARENT = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_OTHER_EXPERIMENT = "test-other-experiment"
+_TEST_EXPERIMENT_DESCRIPTION = "test-experiment-description"
+_TEST_OTHER_EXPERIMENT_DESCRIPTION = "test-other-experiment-description"
+_TEST_PIPELINE = _TEST_EXPERIMENT
+_TEST_RUN = "run-1"
+_TEST_OTHER_RUN = "run-2"
+_TEST_DISPLAY_NAME = "test-display-name"
+
+# resource attributes
+_TEST_METADATA = {"test-param1": 1, "test-param2": "test-value", "test-param3": True}
+
+# metadataStore
+_TEST_METADATASTORE = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+
+# context
+_TEST_CONTEXT_ID = _TEST_EXPERIMENT
+_TEST_CONTEXT_NAME = f"{_TEST_PARENT}/contexts/{_TEST_CONTEXT_ID}"
+
+# execution
+_TEST_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_RUN}"
+_TEST_EXECUTION_NAME = f"{_TEST_PARENT}/executions/{_TEST_EXECUTION_ID}"
+_TEST_OTHER_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_OTHER_RUN}"
+_TEST_OTHER_EXECUTION_NAME = f"{_TEST_PARENT}/executions/{_TEST_OTHER_EXECUTION_ID}"
+_TEST_SCHEMA_TITLE = "test.Schema"
+
+_TEST_EXECUTION = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ metadata=_TEST_METADATA,
+ state=GapicExecution.State.RUNNING,
+)
+
+# artifact
+_TEST_ARTIFACT_ID = f"{_TEST_EXPERIMENT}-{_TEST_RUN}-metrics"
+_TEST_ARTIFACT_NAME = f"{_TEST_PARENT}/artifacts/{_TEST_ARTIFACT_ID}"
+_TEST_OTHER_ARTIFACT_ID = f"{_TEST_EXPERIMENT}-{_TEST_OTHER_RUN}-metrics"
+_TEST_OTHER_ARTIFACT_NAME = f"{_TEST_PARENT}/artifacts/{_TEST_OTHER_ARTIFACT_ID}"
+
+# parameters
+_TEST_PARAM_KEY_1 = "learning_rate"
+_TEST_PARAM_KEY_2 = "dropout"
+_TEST_PARAMS = {_TEST_PARAM_KEY_1: 0.01, _TEST_PARAM_KEY_2: 0.2}
+_TEST_OTHER_PARAMS = {_TEST_PARAM_KEY_1: 0.02, _TEST_PARAM_KEY_2: 0.3}
+
+# metrics
+_TEST_METRIC_KEY_1 = "rmse"
+_TEST_METRIC_KEY_2 = "accuracy"
+_TEST_METRICS = {_TEST_METRIC_KEY_1: 222, _TEST_METRIC_KEY_2: 1}
+_TEST_OTHER_METRICS = {_TEST_METRIC_KEY_2: 0.9}
+
+# classification_metrics
+_TEST_CLASSIFICATION_METRICS = {
+ "display_name": "my-classification-metrics",
+ "labels": ["cat", "dog"],
+ "matrix": [[9, 1], [1, 9]],
+ "fpr": [0.1, 0.5, 0.9],
+ "tpr": [0.1, 0.7, 0.9],
+ "threshold": [0.9, 0.5, 0.1],
+}
+
+# schema
+_TEST_WRONG_SCHEMA_TITLE = "system.WrongSchema"
+
+# tf model autologging
+_TEST_TF_EXPERIMENT_RUN_PARAMS = {
+ "batch_size": "None",
+ "class_weight": "None",
+ "epochs": "5",
+ "initial_epoch": "0",
+ "max_queue_size": "10",
+ "sample_weight": "None",
+ "shuffle": "True",
+ "steps_per_epoch": "None",
+ "use_multiprocessing": "False",
+ "validation_batch_size": "None",
+ "validation_freq": "1",
+ "validation_split": "0.0",
+ "validation_steps": "None",
+ "workers": "1",
+}
+_TEST_TF_EXPERIMENT_RUN_METRICS = {
+ "accuracy": 0.0,
+ "loss": 1.013,
+}
+
+# tensorboard
+_TEST_TB_ID = "1028944691210842416"
+_TEST_TENSORBOARD_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/tensorboards/{_TEST_TB_ID}"
+)
+_TEST_TB_DISPLAY_NAME = "my_tensorboard_1234"
+_TEST_ENCRYPTION_KEY_NAME = test_constants.ProjectConstants._TEST_ENCRYPTION_KEY_NAME
+_TEST_ENCRYPTION_SPEC = test_constants.ProjectConstants._TEST_ENCRYPTION_SPEC
+_TEST_TB_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/tensorboards/{_TEST_TB_ID}"
+)
+_TEST_TENSORBOARD_EXPERIMENT_ID = "test-experiment"
+_TEST_TENSORBOARD_EXPERIMENT_NAME = (
+ f"{_TEST_TB_NAME}/experiments/{_TEST_TENSORBOARD_EXPERIMENT_ID}"
+)
+
+_TEST_TENSORBOARD_RUN_ID = "run-1"
+_TEST_TENSORBOARD_RUN_NAME = (
+ f"{_TEST_TENSORBOARD_EXPERIMENT_NAME}/runs/{_TEST_TENSORBOARD_RUN_ID}"
+)
+
+_TEST_TENSORBOARD_RUN = gca_tensorboard_run.TensorboardRun(
+ name=_TEST_TENSORBOARD_RUN_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+)
+_TEST_TIME_SERIES_DISPLAY_NAME = "loss"
+_TEST_TIME_SERIES_DISPLAY_NAME_2 = "accuracy"
+_TEST_TENSORBOARD_TIME_SERIES_ID = "test-time-series"
+_TEST_TENSORBOARD_TIME_SERIES_NAME = (
+ f"{_TEST_TENSORBOARD_RUN_NAME}/timeSeries/{_TEST_TENSORBOARD_TIME_SERIES_ID}"
+)
+
+_TEST_TENSORBOARD_TIME_SERIES_LIST = [
+ gca_tensorboard_time_series.TensorboardTimeSeries(
+ name=_TEST_TENSORBOARD_TIME_SERIES_NAME,
+ display_name=_TEST_TIME_SERIES_DISPLAY_NAME,
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ ),
+ gca_tensorboard_time_series.TensorboardTimeSeries(
+ name=_TEST_TENSORBOARD_TIME_SERIES_NAME,
+ display_name=_TEST_TIME_SERIES_DISPLAY_NAME_2,
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ ),
+]
+
+# mlflow
+_TEST_MLFLOW_TRACKING_URI = "file://my-test-tracking-uri"
+_TEST_MLFLOW_CREATE_RUN_TIMESTAMP = int(datetime.datetime.now().timestamp())
+_TEST_MLFLOW_RUN_ID = f"tensorflow-{_TEST_MLFLOW_CREATE_RUN_TIMESTAMP}"
+
+_MOCK_MLFLOW_RUN_INFO = mlflow_entities.RunInfo(
+ run_uuid=_TEST_MLFLOW_RUN_ID,
+ run_id=_TEST_MLFLOW_RUN_ID,
+ experiment_id=_TEST_EXPERIMENT,
+ user_id="",
+ status=gca_execution.Execution.State.RUNNING,
+ start_time=1,
+ end_time=2,
+ lifecycle_stage=mlflow_entities.LifecycleStage.ACTIVE,
+ artifact_uri="file:///tmp/",
+)
+
+_MOCK_MLFLOW_RUN_INFO_COMPLETE = mlflow_entities.RunInfo(
+ run_uuid=_TEST_MLFLOW_RUN_ID,
+ run_id=_TEST_MLFLOW_RUN_ID,
+ experiment_id=_TEST_EXPERIMENT,
+ user_id="",
+ status=gca_execution.Execution.State.COMPLETE,
+ start_time=1,
+ end_time=2,
+ lifecycle_stage=mlflow_entities.LifecycleStage.ACTIVE,
+ artifact_uri="file:///tmp/",
+)
+
+
+_MOCK_MLFLOW_RUN_DATA = mlflow_entities.RunData(
+ metrics=[
+ mlflow_entities.Metric(key=k, value=v, step=0, timestamp=0)
+ for k, v in _TEST_TF_EXPERIMENT_RUN_METRICS.items()
+ ],
+ params=[
+ mlflow_entities.Param(key=k, value=v)
+ for k, v in _TEST_TF_EXPERIMENT_RUN_PARAMS.items()
+ ],
+ tags={},
+)
+
+
+@pytest.fixture
+def mlflow_plugin_create_run_mock():
+ with patch.object(
+ _vertex_mlflow_tracking._VertexMlflowTracking, "create_run"
+ ) as create_vertex_run_mock:
+ create_vertex_run_mock.return_value = mlflow_entities.Run(
+ run_info=_MOCK_MLFLOW_RUN_INFO, run_data=_MOCK_MLFLOW_RUN_DATA
+ )
+ yield create_vertex_run_mock
+
+
+@pytest.fixture
+def mlflow_plugin_get_run_mock():
+ with patch.object(
+ _vertex_mlflow_tracking._VertexMlflowTracking, "get_run"
+ ) as get_vertex_run_mock:
+ get_vertex_run_mock.return_value = mlflow_entities.Run(
+ run_info=_MOCK_MLFLOW_RUN_INFO, run_data=_MOCK_MLFLOW_RUN_DATA
+ )
+ yield get_vertex_run_mock
+
+
+@pytest.fixture
+def mlflow_plugin_update_run_info_mock():
+ with patch.object(
+ _vertex_mlflow_tracking._VertexMlflowTracking, "update_run_info"
+ ) as update_run_mock:
+ update_run_mock.return_value = _MOCK_MLFLOW_RUN_INFO_COMPLETE
+ yield update_run_mock
+
+
+@pytest.fixture
+def mock_experiment_run():
+ exp_run_mock = mock.MagicMock(aiplatform.ExperimentRun)
+ exp_run_mock.run_name = _TEST_MLFLOW_RUN_ID
+ exp_run_mock.experiment = _EXPERIMENT_MOCK
+ return exp_run_mock
+
+
+@pytest.fixture
+def mlflow_plugin_run_map_mock(mock_experiment_run):
+ with patch.object(
+ _vertex_mlflow_tracking._VertexMlflowTracking,
+ "run_map",
+ new_callable=mock.PropertyMock,
+ ) as run_map_mock:
+ run_map_mock.return_value = {
+ _TEST_MLFLOW_RUN_ID: _vertex_mlflow_tracking._RunTracker(
+ autocreate=True, experiment_run=mock_experiment_run
+ )
+ }
+ yield run_map_mock
+
+
+@pytest.fixture
+def mlflow_plugin_vertex_experiment_mock(mock_experiment_run):
+ with patch.object(
+ _vertex_mlflow_tracking._VertexMlflowTracking,
+ "vertex_experiment",
+ new_callable=mock.PropertyMock,
+ ) as vertex_experiment_mock:
+ vertex_experiment_mock.return_value = _EXPERIMENT_MOCK
+ yield vertex_experiment_mock
+
+
+@pytest.fixture
+def get_tensorboard_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient, "get_tensorboard"
+ ) as get_tensorboard_mock:
+ get_tensorboard_mock.return_value = gca_tensorboard.Tensorboard(
+ name=_TEST_TENSORBOARD_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ yield get_tensorboard_mock
+
+
+@pytest.fixture
+def get_tensorboard_run_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "get_tensorboard_run",
+ ) as get_tensorboard_run_mock:
+ get_tensorboard_run_mock.return_value = _TEST_TENSORBOARD_RUN
+ yield get_tensorboard_run_mock
+
+
+@pytest.fixture
+def list_tensorboard_time_series_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "list_tensorboard_time_series",
+ ) as list_tensorboard_time_series_mock:
+ list_tensorboard_time_series_mock.return_value = (
+ _TEST_TENSORBOARD_TIME_SERIES_LIST
+ )
+ yield list_tensorboard_time_series_mock
+
+
+create_tensorboard_experiment_mock = test_tensorboard.create_tensorboard_experiment_mock
+write_tensorboard_run_data_mock = test_tensorboard.write_tensorboard_run_data_mock
+get_tensorboard_time_series_mock = test_tensorboard.get_tensorboard_time_series_mock
+
+create_tensorboard_run_artifact_mock = (
+ test_metadata.create_tensorboard_run_artifact_mock
+)
+add_context_artifacts_and_executions_mock = (
+ test_metadata.add_context_artifacts_and_executions_mock
+)
+
+
+@pytest.fixture
+def get_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.return_value = GapicMetadataStore(
+ name=_TEST_METADATASTORE,
+ )
+ yield get_metadata_store_mock
+
+
+_TEST_EXPERIMENT_CONTEXT = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ description=_TEST_EXPERIMENT_DESCRIPTION,
+ schema_title=constants.SYSTEM_EXPERIMENT,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata={
+ **constants.EXPERIMENT_METADATA,
+ constants._BACKING_TENSORBOARD_RESOURCE_KEY: test_tensorboard._TEST_NAME,
+ },
+)
+
+
+@pytest.fixture
+def add_context_children_mock():
+ with patch.object(
+ MetadataServiceClient, "add_context_children"
+ ) as add_context_children_mock:
+ yield add_context_children_mock
+
+
+@pytest.fixture
+def get_tensorboard_run_not_found_mock():
+ with patch.object(
+ TensorboardServiceClient, "get_tensorboard_run"
+ ) as get_tensorboard_run_mock:
+ get_tensorboard_run_mock.side_effect = [
+ exceptions.NotFound(""),
+ test_tensorboard._TEST_TENSORBOARD_RUN,
+ ]
+ yield get_tensorboard_run_mock
+
+
+@pytest.fixture
+def get_tensorboard_experiment_not_found_mock():
+ with patch.object(
+ TensorboardServiceClient, "get_tensorboard_experiment"
+ ) as get_tensorboard_experiment_mock:
+ get_tensorboard_experiment_mock.side_effect = [
+ exceptions.NotFound(""),
+ test_tensorboard._TEST_TENSORBOARD_EXPERIMENT,
+ ]
+ yield get_tensorboard_experiment_mock
+
+
+@pytest.fixture
+def get_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_ARTIFACT_ID,
+ schema_title=constants.SYSTEM_METRICS,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_METRICS],
+ )
+ yield get_artifact_mock
+
+
+@pytest.fixture
+def get_artifact_not_found_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.side_effect = exceptions.NotFound("")
+ yield get_artifact_mock
+
+
+@pytest.fixture
+def update_context_mock():
+ with patch.object(MetadataServiceClient, "update_context") as update_context_mock:
+ update_context_mock.return_value = _TEST_EXPERIMENT_CONTEXT
+ yield update_context_mock
+
+
+@pytest.fixture
+def get_or_create_default_tb_none_mock():
+ with patch.object(
+ metadata, "_get_or_create_default_tensorboard"
+ ) as get_or_create_default_tb_none_mock:
+ get_or_create_default_tb_none_mock.return_value = None
+ yield get_or_create_default_tb_none_mock
+
+
+_TEST_EXPERIMENT_RUN_CONTEXT_NAME = f"{_TEST_PARENT}/contexts/{_TEST_EXECUTION_ID}"
+_TEST_OTHER_EXPERIMENT_RUN_CONTEXT_NAME = (
+ f"{_TEST_PARENT}/contexts/{_TEST_OTHER_EXECUTION_ID}"
+)
+
+_EXPERIMENT_MOCK = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ description=_TEST_EXPERIMENT_DESCRIPTION,
+ schema_title=constants.SYSTEM_EXPERIMENT,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata={
+ **constants.EXPERIMENT_METADATA,
+ constants._BACKING_TENSORBOARD_RESOURCE_KEY: _TEST_TENSORBOARD_NAME,
+ },
+)
+_EXPERIMENT_MOCK_WITHOUT_TB_SET = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ description=_TEST_EXPERIMENT_DESCRIPTION,
+ schema_title=constants.SYSTEM_EXPERIMENT,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata={
+ **constants.EXPERIMENT_METADATA,
+ },
+)
+_EXPERIMENT_RUN_MOCK = GapicContext(
+ name=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_EXPERIMENT_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT_RUN],
+ metadata={
+ constants._PARAM_KEY: _TEST_TF_EXPERIMENT_RUN_PARAMS,
+ constants._METRIC_KEY: _TEST_TF_EXPERIMENT_RUN_METRICS,
+ constants._STATE_KEY: gca_execution.Execution.State.RUNNING.name,
+ },
+)
+_EXPERIMENT_RUN_MOCK_WITH_BACKING_TB = GapicContext(
+ name=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_EXPERIMENT_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT_RUN],
+ metadata={
+ constants._PARAM_KEY: _TEST_TF_EXPERIMENT_RUN_PARAMS,
+ constants._METRIC_KEY: _TEST_TF_EXPERIMENT_RUN_METRICS,
+ constants._STATE_KEY: gca_execution.Execution.State.RUNNING.name,
+ **constants.EXPERIMENT_METADATA,
+ constants._BACKING_TENSORBOARD_RESOURCE_KEY: _TEST_TENSORBOARD_NAME,
+ },
+)
+
+_EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT = copy.deepcopy(_EXPERIMENT_RUN_MOCK)
+_EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT.parent_contexts = [_TEST_CONTEXT_NAME]
+
+
+@pytest.fixture
+def get_experiment_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.return_value = _EXPERIMENT_MOCK
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_mock_without_tensorboard():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.return_value = _EXPERIMENT_MOCK_WITHOUT_TB_SET
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_run_run_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ _EXPERIMENT_RUN_MOCK,
+ _EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT,
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_run_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ _EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT,
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def create_experiment_context_mock():
+ with patch.object(MetadataServiceClient, "create_context") as create_context_mock:
+ create_context_mock.side_effect = [_TEST_EXPERIMENT_CONTEXT]
+ yield create_context_mock
+
+
+@pytest.fixture
+def create_experiment_run_context_mock():
+ with patch.object(MetadataServiceClient, "create_context") as create_context_mock:
+ create_context_mock.side_effect = [_EXPERIMENT_RUN_MOCK]
+ yield create_context_mock
+
+
+_TEST_TENSORBOARD_TIME_SERIES = gca_tensorboard_time_series.TensorboardTimeSeries(
+ name=_TEST_TENSORBOARD_TIME_SERIES_NAME,
+ display_name=_TEST_TIME_SERIES_DISPLAY_NAME,
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+)
+
+
+@pytest.fixture
+def list_tensorboard_time_series_mock_empty():
+ with patch.object(
+ TensorboardServiceClient,
+ "list_tensorboard_time_series",
+ ) as list_tensorboard_time_series_mock:
+ list_tensorboard_time_series_mock.side_effect = [
+ [], # initially empty
+ [],
+ [_TEST_TENSORBOARD_TIME_SERIES],
+ ]
+ yield list_tensorboard_time_series_mock
+
+
+def build_and_train_test_tf_model():
+ import tensorflow as tf
+
+ X = np.array(
+ [
+ [1, 1],
+ [1, 2],
+ [2, 2],
+ [2, 3],
+ [1, 1],
+ [1, 2],
+ [2, 2],
+ [2, 3],
+ [1, 1],
+ [1, 2],
+ [2, 2],
+ [2, 3],
+ ]
+ )
+ y = np.dot(X, np.array([1, 2])) + 3
+
+ model = tf.keras.models.Sequential(
+ [
+ tf.keras.layers.Flatten(input_shape=(2,)),
+ tf.keras.layers.Dense(128, activation="relu"),
+ tf.keras.layers.Dropout(0.2),
+ tf.keras.layers.Dense(1),
+ ]
+ )
+
+ model.compile(
+ optimizer="adam",
+ loss=tf.keras.losses.CategoricalCrossentropy(),
+ metrics=["accuracy"],
+ )
+
+ model.fit(X, y, epochs=5)
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestAutologging:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ if autologging_utils._is_autologging_enabled():
+ aiplatform.autolog(disable=True)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures(
+ "update_context_mock",
+ "get_tensorboard_time_series_mock",
+ "get_tensorboard_run_not_found_mock",
+ "get_tensorboard_experiment_not_found_mock",
+ "list_tensorboard_time_series_mock",
+ "list_tensorboard_time_series_mock_empty",
+ )
+ def test_autologging_init(
+ self,
+ get_experiment_mock,
+ get_metadata_store_mock,
+ get_tensorboard_mock,
+ ):
+
+ try:
+ import mlflow # noqa: F401
+ except ImportError:
+ raise ImportError(
+ "MLFlow is not installed and is required to test autologging. "
+ 'Please install the SDK using "pip install google-cloud-aiplatform[autologging]"'
+ )
+ try:
+ import tensorflow as tf # noqa: F401
+ except ImportError:
+ raise ImportError(
+ "TensorFlow is not installed and is required to test autologging."
+ 'Please install it before running autologging tests."'
+ )
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=_TEST_TENSORBOARD_NAME,
+ )
+
+ aiplatform.autolog()
+
+ get_tensorboard_mock.assert_called_with(
+ name=_TEST_TENSORBOARD_NAME,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ assert get_tensorboard_mock.call_count == 1
+
+ get_experiment_mock.assert_called_once_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ get_metadata_store_mock.assert_called_once_with(
+ name=_TEST_METADATASTORE,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_experiment_mock",
+ "get_metadata_store_mock",
+ )
+ def test_autologging_raises_if_experiment_not_set(
+ self,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(ValueError):
+ aiplatform.autolog()
+
+ @pytest.mark.usefixtures(
+ "get_experiment_mock_without_tensorboard",
+ "get_metadata_store_mock",
+ "update_context_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_autologging_raises_if_experiment_tensorboard_not_set(
+ self,
+ ):
+
+ # unset the global tensorboard
+ aiplatform.metadata.metadata._experiment_tracker._global_tensorboard = None
+
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
+ )
+
+ with pytest.raises(ValueError):
+ aiplatform.autolog()
+
+ @pytest.mark.usefixtures(
+ "get_experiment_mock",
+ "update_context_mock",
+ "get_metadata_store_mock",
+ "create_experiment_run_context_mock",
+ "get_tensorboard_mock",
+ "get_tensorboard_time_series_mock",
+ "get_tensorboard_run_not_found_mock",
+ "get_tensorboard_experiment_not_found_mock",
+ "list_tensorboard_time_series_mock",
+ "get_artifact_not_found_mock",
+ "list_tensorboard_time_series_mock_empty",
+ )
+ def test_autologging_sets_and_resets_mlflow_tracking_uri(
+ self,
+ ):
+ import mlflow # noqa: F401
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=_TEST_TENSORBOARD_NAME,
+ )
+ mlflow.set_tracking_uri(_TEST_MLFLOW_TRACKING_URI)
+
+ aiplatform.autolog()
+
+ assert mlflow.get_tracking_uri() == "vertex-mlflow-plugin://"
+
+ aiplatform.autolog(disable=True)
+
+ assert mlflow.get_tracking_uri() == _TEST_MLFLOW_TRACKING_URI
+
+ @pytest.mark.usefixtures(
+ "get_experiment_mock",
+ "update_context_mock",
+ "get_metadata_store_mock",
+ "create_experiment_run_context_mock",
+ "get_tensorboard_mock",
+ "get_tensorboard_time_series_mock",
+ "get_tensorboard_run_not_found_mock",
+ "get_tensorboard_experiment_not_found_mock",
+ "list_tensorboard_time_series_mock",
+ "get_artifact_not_found_mock",
+ "list_tensorboard_time_series_mock_empty",
+ )
+ def test_autologging_enabled_check(
+ self,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=_TEST_TENSORBOARD_NAME,
+ )
+
+ aiplatform.autolog()
+
+ assert aiplatform.utils.autologging_utils._is_autologging_enabled()
+
+ aiplatform.autolog(disable=True)
+
+ assert not aiplatform.utils.autologging_utils._is_autologging_enabled()
+
+ @pytest.mark.usefixtures(
+ "get_experiment_mock",
+ "update_context_mock",
+ "get_metadata_store_mock",
+ "create_experiment_run_context_mock",
+ "get_tensorboard_mock",
+ "get_tensorboard_time_series_mock",
+ "get_tensorboard_run_not_found_mock",
+ "get_tensorboard_experiment_not_found_mock",
+ "list_tensorboard_time_series_mock",
+ "get_artifact_not_found_mock",
+ "list_tensorboard_time_series_mock_empty",
+ )
+ def test_calling_autolog_with_disable_raises_if_not_enabled(
+ self,
+ ):
+
+ import mlflow # noqa: F401
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=_TEST_TENSORBOARD_NAME,
+ )
+
+ with pytest.raises(ValueError):
+ aiplatform.autolog(disable=True)
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "add_context_children_mock",
+ "get_experiment_mock",
+ "get_experiment_run_run_mock",
+ "get_tensorboard_mock",
+ "create_tensorboard_experiment_mock",
+ "write_tensorboard_run_data_mock",
+ "get_tensorboard_experiment_not_found_mock",
+ "get_artifact_not_found_mock",
+ "list_tensorboard_time_series_mock",
+ "create_tensorboard_run_artifact_mock",
+ "get_tensorboard_time_series_mock",
+ "get_tensorboard_run_mock",
+ "update_context_mock",
+ "list_tensorboard_time_series_mock_empty",
+ "add_context_artifacts_and_executions_mock",
+ )
+ def test_autologging_plugin_autocreates_run_id(
+ self,
+ create_experiment_run_context_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=_TEST_TENSORBOARD_NAME,
+ )
+
+ aiplatform.autolog()
+
+ build_and_train_test_tf_model()
+
+ # An ExperimentRun should be created with an auto-generated ID
+ for args, kwargs in create_experiment_run_context_mock.call_args_list:
+ assert kwargs["context"].display_name.startswith("tensorflow-")
+ assert kwargs["context_id"].startswith(f"{_TEST_EXPERIMENT}-tensorflow-")
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "add_context_children_mock",
+ "get_experiment_mock",
+ "create_experiment_context_mock",
+ "get_experiment_run_run_mock",
+ "get_tensorboard_mock",
+ "create_tensorboard_experiment_mock",
+ "write_tensorboard_run_data_mock",
+ "get_tensorboard_experiment_not_found_mock",
+ "get_artifact_not_found_mock",
+ "list_tensorboard_time_series_mock",
+ "create_tensorboard_run_artifact_mock",
+ "get_tensorboard_time_series_mock",
+ "get_tensorboard_run_mock",
+ "update_context_mock",
+ "list_tensorboard_time_series_mock_empty",
+ "add_context_artifacts_and_executions_mock",
+ "mlflow_plugin_get_run_mock",
+ "mlflow_plugin_run_map_mock",
+ "mlflow_plugin_create_run_mock",
+ "mlflow_plugin_vertex_experiment_mock",
+ )
+ def test_autologging_plugin_with_auto_run_creation(
+ self,
+ mlflow_plugin_create_run_mock,
+ mlflow_plugin_get_run_mock,
+ mlflow_plugin_update_run_info_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=_TEST_TENSORBOARD_NAME,
+ )
+
+ aiplatform.autolog()
+
+ build_and_train_test_tf_model()
+
+ assert mlflow_plugin_create_run_mock.call_count == 1
+
+ build_and_train_test_tf_model()
+
+ # a subsequent model.fit() call should create another ExperimentRun
+ assert mlflow_plugin_create_run_mock.call_count == 2
+
+ assert (
+ mlflow_plugin_update_run_info_mock.call_args_list[0][0][0]
+ == _TEST_MLFLOW_RUN_ID
+ )
+
+ # the above model.fit() calls should not result in any data being written locally
+ assert not os.path.isdir("mlruns")
+
+ # training a model after disabling autologging should not create additional ExperimentRuns
+ # and the plugin should not be invoked
+ aiplatform.autolog(disable=True)
+ build_and_train_test_tf_model()
+ assert mlflow_plugin_create_run_mock.call_count == 2
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "add_context_children_mock",
+ "get_experiment_mock",
+ "get_experiment_run_run_mock",
+ "create_experiment_context_mock",
+ "get_tensorboard_mock",
+ "create_tensorboard_experiment_mock",
+ "write_tensorboard_run_data_mock",
+ "get_tensorboard_experiment_not_found_mock",
+ "get_artifact_not_found_mock",
+ "list_tensorboard_time_series_mock",
+ "create_tensorboard_run_artifact_mock",
+ "get_tensorboard_time_series_mock",
+ "get_tensorboard_run_mock",
+ "update_context_mock",
+ "list_tensorboard_time_series_mock_empty",
+ "add_context_artifacts_and_executions_mock",
+ )
+ def test_autologging_with_manual_run_creation(
+ self,
+ create_experiment_run_context_mock,
+ caplog,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=_TEST_TENSORBOARD_NAME,
+ )
+
+ aiplatform.autolog()
+
+ aiplatform.start_run(_TEST_RUN)
+ build_and_train_test_tf_model()
+ assert create_experiment_run_context_mock.call_count == 1
+
+ # metrics and params from additional training calls will not be logged
+ # and no new ExperimentRun will be created
+ # a warning will be logged with details
+ build_and_train_test_tf_model()
+ assert create_experiment_run_context_mock.call_count == 1
+ assert (
+ "Metrics and parameters have already been logged to this run" in caplog.text
+ )
+
+ # ending the run and training a new model should result in an auto-created run
+ aiplatform.end_run()
+
+ build_and_train_test_tf_model()
+ assert create_experiment_run_context_mock.call_count == 2
+
+ caplog.clear()
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "add_context_children_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "get_experiment_run_run_mock",
+ "get_tensorboard_mock",
+ "create_tensorboard_experiment_mock",
+ "write_tensorboard_run_data_mock",
+ "get_tensorboard_experiment_not_found_mock",
+ "get_artifact_not_found_mock",
+ "list_tensorboard_time_series_mock",
+ "create_tensorboard_run_artifact_mock",
+ "get_tensorboard_time_series_mock",
+ "get_tensorboard_run_mock",
+ "update_context_mock",
+ "list_tensorboard_time_series_mock_empty",
+ "add_context_artifacts_and_executions_mock",
+ )
+ def test_mlflow_log_filter_only_shows_framework_warning_logs(
+ self,
+ caplog,
+ ):
+
+ import tensorflow # noqa: F401
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=_TEST_TENSORBOARD_NAME,
+ )
+
+ aiplatform.autolog()
+
+ # Tests that no INFO logs are being surfaced from MLFlow
+ # We can't test for the unsupported version warning log since
+ # MLFlow changes supported versions regularly
+ assert "INFO mlflow" not in caplog.text
+
+ caplog.clear()
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..c08c70381a1d27882c5ea5c1f484b06daf9b139b
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
@@ -0,0 +1,1415 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import importlib
+import pytest
+from unittest import mock
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import datasets
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import schema
+from google.cloud.aiplatform import training_jobs
+
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ dataset as gca_dataset,
+ model as gca_model,
+ pipeline_state as gca_pipeline_state,
+ training_pipeline as gca_training_pipeline,
+ encryption_spec as gca_encryption_spec,
+)
+from google.protobuf import json_format
+from google.protobuf import struct_pb2
+import constants as test_constants
+
+_TEST_BUCKET_NAME = test_constants.TrainingJobConstants._TEST_BUCKET_NAME
+_TEST_GCS_PATH_WITHOUT_BUCKET = (
+ test_constants.TrainingJobConstants._TEST_GCS_PATH_WITHOUT_BUCKET
+)
+_TEST_GCS_PATH = test_constants.TrainingJobConstants._TEST_GCS_PATH
+_TEST_GCS_PATH_WITH_TRAILING_SLASH = (
+ test_constants.TrainingJobConstants._TEST_GCS_PATH_WITH_TRAILING_SLASH
+)
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+
+_TEST_DATASET_DISPLAY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DATASET_DISPLAY_NAME
+)
+_TEST_DATASET_NAME = test_constants.TrainingJobConstants._TEST_DATASET_NAME
+_TEST_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_DISPLAY_NAME
+_TEST_TRAINING_CONTAINER_IMAGE = (
+ test_constants.TrainingJobConstants._TEST_TRAINING_CONTAINER_IMAGE
+)
+_TEST_METADATA_SCHEMA_URI_TIMESERIES = schema.dataset.metadata.time_series
+_TEST_METADATA_SCHEMA_URI_NONTIMESERIES = schema.dataset.metadata.image
+
+_TEST_TRAINING_COLUMN_TRANSFORMATIONS = [
+ {"auto": {"column_name": "time"}},
+ {"auto": {"column_name": "time_series_identifier"}},
+ {"auto": {"column_name": "target"}},
+ {"auto": {"column_name": "weight"}},
+]
+_TEST_TRAINING_TARGET_COLUMN = "target"
+_TEST_TRAINING_TIME_COLUMN = "time"
+_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN = "time_series_identifier"
+_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS = []
+_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS = []
+_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS = []
+_TEST_TRAINING_FORECAST_HORIZON = 10
+_TEST_TRAINING_DATA_GRANULARITY_UNIT = "day"
+_TEST_TRAINING_DATA_GRANULARITY_COUNT = 1
+_TEST_TRAINING_CONTEXT_WINDOW = None
+_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS = True
+_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI = (
+ "bq://path.to.table"
+)
+_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION = False
+_TEST_TRAINING_QUANTILES = None
+_TEST_TRAINING_VALIDATION_OPTIONS = None
+_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS = 1000
+_TEST_TRAINING_WEIGHT_COLUMN = "weight"
+_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME = "minimize-rmse"
+_TEST_ADDITIONAL_EXPERIMENTS = ["exp1", "exp2"]
+_TEST_HIERARCHY_GROUP_COLUMNS = []
+_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT = 1
+_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT = None
+_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT = None
+_TEST_WINDOW_COLUMN = None
+_TEST_WINDOW_STRIDE_LENGTH = 1
+_TEST_WINDOW_MAX_COUNT = None
+_TEST_TRAINING_HOLIDAY_REGIONS = ["GLOBAL"]
+_TEST_ENABLE_PROBABILISTIC_INFERENCE = True
+_TEST_ADDITIONAL_EXPERIMENTS_PROBABILISTIC_INFERENCE = [
+ "exp1",
+ "exp2",
+ "enable_probabilistic_inference",
+]
+_TEST_TRAINING_TASK_INPUTS_DICT = {
+ # required inputs
+ "targetColumn": _TEST_TRAINING_TARGET_COLUMN,
+ "timeColumn": _TEST_TRAINING_TIME_COLUMN,
+ "timeSeriesIdentifierColumn": _TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ "timeSeriesAttributeColumns": _TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ "unavailableAtForecastColumns": _TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ "availableAtForecastColumns": _TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ "forecastHorizon": _TEST_TRAINING_FORECAST_HORIZON,
+ "dataGranularity": {
+ "unit": _TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ "quantity": _TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ },
+ "transformations": _TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ "trainBudgetMilliNodeHours": _TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ # optional inputs
+ "weightColumn": _TEST_TRAINING_WEIGHT_COLUMN,
+ "contextWindow": _TEST_TRAINING_CONTEXT_WINDOW,
+ "exportEvaluatedDataItemsConfig": {
+ "destinationBigqueryUri": _TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ "overrideExistingTable": _TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ },
+ "quantiles": _TEST_TRAINING_QUANTILES,
+ "validationOptions": _TEST_TRAINING_VALIDATION_OPTIONS,
+ "optimizationObjective": _TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ "hierarchyConfig": {
+ "groupColumns": _TEST_HIERARCHY_GROUP_COLUMNS,
+ "groupTotalWeight": _TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ "temporalTotalWeight": _TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ "groupTemporalTotalWeight": _TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ },
+ "windowConfig": {
+ "strideLength": _TEST_WINDOW_STRIDE_LENGTH,
+ },
+ "holidayRegions": _TEST_TRAINING_HOLIDAY_REGIONS,
+}
+
+_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS = json_format.ParseDict(
+ {
+ **_TEST_TRAINING_TASK_INPUTS_DICT,
+ "additionalExperiments": _TEST_ADDITIONAL_EXPERIMENTS,
+ },
+ struct_pb2.Value(),
+)
+
+_TEST_TRAINING_TASK_INPUTS_WITH_PROBABILISTIC_INFERENCE = json_format.ParseDict(
+ {
+ **_TEST_TRAINING_TASK_INPUTS_DICT,
+ "additionalExperiments": _TEST_ADDITIONAL_EXPERIMENTS,
+ "enableProbabilisticInference": True,
+ },
+ struct_pb2.Value(),
+)
+
+_TEST_TRAINING_TASK_INPUTS = json_format.ParseDict(
+ _TEST_TRAINING_TASK_INPUTS_DICT,
+ struct_pb2.Value(),
+)
+
+_TEST_DATASET_NAME = test_constants.TrainingJobConstants._TEST_DATASET_NAME
+
+_TEST_MODEL_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_MODEL_DISPLAY_NAME
+
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+_TEST_MODEL_LABELS = test_constants.TrainingJobConstants._TEST_MODEL_LABELS
+
+_TEST_PREDEFINED_SPLIT_COLUMN_NAME = "split"
+
+_TEST_MODEL_NAME = "projects/my-project/locations/us-central1/models/12345"
+
+_TEST_PIPELINE_RESOURCE_NAME = (
+ test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME
+)
+
+# CMEK encryption
+_TEST_DEFAULT_ENCRYPTION_KEY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+
+_TEST_FRACTION_SPLIT_TRAINING = 0.6
+_TEST_FRACTION_SPLIT_VALIDATION = 0.2
+_TEST_FRACTION_SPLIT_TEST = 0.2
+
+_TEST_SPLIT_PREDEFINED_COLUMN_NAME = "split"
+_TEST_SPLIT_TIMESTAMP_COLUMN_NAME = "timestamp"
+
+_FORECASTING_JOB_MODEL_TYPES = [
+ training_jobs.AutoMLForecastingTrainingJob,
+ training_jobs.SequenceToSequencePlusForecastingTrainingJob,
+ training_jobs.TemporalFusionTransformerForecastingTrainingJob,
+ training_jobs.TimeSeriesDenseEncoderForecastingTrainingJob,
+]
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ training_task_metadata={
+ "evaluatedDataItemsBigqueryUri": _TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI
+ },
+ )
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_and_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ )
+ )
+
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ )
+ )
+
+ yield mock_create_training_pipeline, mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_model_service_get():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as mock_get_model:
+ mock_get_model.return_value = gca_model.Model(name=_TEST_MODEL_NAME)
+ yield mock_get_model
+
+
+@pytest.fixture
+def mock_dataset_time_series():
+ ds = mock.MagicMock(datasets.TimeSeriesDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TIMESERIES,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ return ds
+
+
+@pytest.fixture
+def mock_dataset_nontimeseries():
+ ds = mock.MagicMock(datasets.ImageDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTIMESERIES,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ return ds
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestForecastingTrainingJob:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_run_call_pipeline_service_create(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ labels=_TEST_LABELS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ predefined_split=gca_training_pipeline.PredefinedSplit(
+ key=_TEST_PREDEFINED_SPLIT_COLUMN_NAME
+ ),
+ dataset_id=mock_dataset_time_series.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_run_call_pipeline_service_create_with_timeout(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ labels=_TEST_LABELS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=180.0,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ predefined_split=gca_training_pipeline.PredefinedSplit(
+ key=_TEST_PREDEFINED_SPLIT_COLUMN_NAME
+ ),
+ dataset_id=mock_dataset_time_series.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=180.0,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels(
+ self,
+ mock_pipeline_service_create,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ labels=_TEST_LABELS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ # Test that if defaults to the job display name
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_time_series.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_run_call_pipeline_if_set_additional_experiments(
+ self,
+ mock_pipeline_service_create,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ job._add_additional_experiments(_TEST_ADDITIONAL_EXPERIMENTS)
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ # Test that if defaults to the job display name
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_time_series.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_run_called_twice_raises(
+ self,
+ mock_dataset_time_series,
+ sync,
+ training_job,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_run_raises_if_pipeline_fails(
+ self,
+ mock_pipeline_service_create_and_get_with_fail,
+ mock_dataset_time_series,
+ sync,
+ training_job,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_raises_before_run_is_called(
+ self,
+ mock_pipeline_service_create,
+ training_job,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ with pytest.raises(RuntimeError):
+ job.has_failed
+
+ with pytest.raises(RuntimeError):
+ job.state
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_splits_fraction(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an Forecasting training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction=_TEST_FRACTION_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_dataset_time_series.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_splits_timestamp(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ """Initiate aiplatform with encryption key name.
+
+ Create and run an Forecasting training job, verify calls and
+ return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ timestamp_split_column_name=_TEST_SPLIT_TIMESTAMP_COLUMN_NAME,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_split = gca_training_pipeline.TimestampSplit(
+ training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction=_TEST_FRACTION_SPLIT_TEST,
+ key=_TEST_SPLIT_TIMESTAMP_COLUMN_NAME,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ timestamp_split=true_split, dataset_id=mock_dataset_time_series.name
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_splits_predefined(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an Forecasting training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_split = gca_training_pipeline.PredefinedSplit(
+ key=_TEST_SPLIT_PREDEFINED_COLUMN_NAME
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ predefined_split=true_split,
+ dataset_id=mock_dataset_time_series.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_splits_default(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an Forecasting training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_time_series.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_run_call_pipeline_if_set_additional_experiments_probabilistic_inference(
+ self,
+ mock_pipeline_service_create,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ job._add_additional_experiments(
+ _TEST_ADDITIONAL_EXPERIMENTS_PROBABILISTIC_INFERENCE
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ # Test that if defaults to the job display name
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_time_series.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_PROBABILISTIC_INFERENCE,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize("training_job", _FORECASTING_JOB_MODEL_TYPES)
+ def test_run_call_pipeline_if_set_enable_probabilistic_inference(
+ self,
+ mock_pipeline_service_create,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ sync,
+ training_job,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_job(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ )
+
+ job._add_additional_experiments(_TEST_ADDITIONAL_EXPERIMENTS)
+
+ model_from_job = job.run(
+ dataset=mock_dataset_time_series,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ quantiles=_TEST_TRAINING_QUANTILES,
+ validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ hierarchy_group_columns=_TEST_HIERARCHY_GROUP_COLUMNS,
+ hierarchy_group_total_weight=_TEST_HIERARCHY_GROUP_TOTAL_WEIGHT,
+ hierarchy_temporal_total_weight=_TEST_HIERARCHY_TEMPORAL_TOTAL_WEIGHT,
+ hierarchy_group_temporal_total_weight=_TEST_HIERARCHY_GROUP_TEMPORAL_TOTAL_WEIGHT,
+ window_column=_TEST_WINDOW_COLUMN,
+ window_stride_length=_TEST_WINDOW_STRIDE_LENGTH,
+ window_max_count=_TEST_WINDOW_MAX_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ holiday_regions=_TEST_TRAINING_HOLIDAY_REGIONS,
+ enable_probabilistic_inference=_TEST_ENABLE_PROBABILISTIC_INFERENCE,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ # Test that if defaults to the job display name
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_time_series.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=training_job._training_task_definition,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_PROBABILISTIC_INFERENCE,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ def test_automl_forecasting_with_no_transformations(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_time_series,
+ mock_model_service_get,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ job = training_jobs.AutoMLForecastingTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ )
+ mock_dataset_time_series.column_names = [
+ "a",
+ "b",
+ _TEST_TRAINING_TARGET_COLUMN,
+ ]
+ job.run(
+ dataset=mock_dataset_time_series,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ time_column=_TEST_TRAINING_TIME_COLUMN,
+ time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
+ unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
+ available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
+ forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
+ data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
+ data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
+ context_window=_TEST_TRAINING_CONTEXT_WINDOW,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ )
+ assert job._column_transformations == [
+ {"auto": {"column_name": "a"}},
+ {"auto": {"column_name": "b"}},
+ ]
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_image_training_jobs.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_image_training_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..60f7d8774e6d3f802788a26a6e0a91e6ca3d3787
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_image_training_jobs.py
@@ -0,0 +1,1143 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import importlib
+from unittest import mock
+
+from google.protobuf import json_format
+from google.protobuf import struct_pb2
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import datasets
+from google.cloud.aiplatform import hyperparameter_tuning as hpt
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform import schema
+from google.cloud.aiplatform import training_jobs
+
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
+)
+from google.cloud.aiplatform.compat.types import (
+ dataset as gca_dataset,
+ encryption_spec as gca_encryption_spec,
+ model as gca_model,
+ pipeline_state as gca_pipeline_state,
+ study as gca_study_compat,
+ training_pipeline as gca_training_pipeline,
+)
+import constants as test_constants
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_DATASET_DISPLAY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DATASET_DISPLAY_NAME
+)
+_TEST_DATASET_NAME = test_constants.TrainingJobConstants._TEST_DATASET_NAME
+_TEST_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_DISPLAY_NAME
+_TEST_METADATA_SCHEMA_URI_IMAGE = schema.dataset.metadata.image
+
+_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS = 7500
+_TEST_TRAINING_DISABLE_EARLY_STOPPING = True
+_TEST_MODEL_TYPE_ICN = "CLOUD" # Image Classification default
+_TEST_MODEL_TYPE_IOD = "CLOUD_HIGH_ACCURACY_1" # Image Object Detection default
+_TEST_MODEL_TYPE_MOBILE = "MOBILE_TF_LOW_LATENCY_1"
+_TEST_PREDICTION_TYPE_ICN = "classification"
+_TEST_PREDICTION_TYPE_IOD = "object_detection"
+
+_TEST_MODEL_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_MODEL_DISPLAY_NAME
+_TEST_MODEL_ID = "98777645321"
+
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+_TEST_MODEL_LABELS = test_constants.TrainingJobConstants._TEST_MODEL_LABELS
+
+_TEST_TRAINING_TASK_INPUTS = json_format.ParseDict(
+ {
+ "modelType": "CLOUD",
+ "budgetMilliNodeHours": _TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ "multiLabel": False,
+ "disableEarlyStopping": _TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ },
+ struct_pb2.Value(),
+)
+
+_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL = json_format.ParseDict(
+ {
+ "modelType": "CLOUD",
+ "budgetMilliNodeHours": _TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ "multiLabel": False,
+ "disableEarlyStopping": _TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ "baseModelId": _TEST_MODEL_ID,
+ },
+ struct_pb2.Value(),
+)
+
+_TEST_TRAINING_TASK_INPUTS_WITH_UPTRAIN_BASE_MODEL = json_format.ParseDict(
+ {
+ "modelType": "CLOUD",
+ "budgetMilliNodeHours": _TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ "multiLabel": False,
+ "disableEarlyStopping": _TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ "uptrainBaseModelId": _TEST_MODEL_ID,
+ },
+ struct_pb2.Value(),
+)
+
+_TEST_CHECKPOINT_NAME = "gs://coca_ckpt_uri/saved_model"
+_TEST_TRAINER_CONFIG = {
+ "config_key_1": "config_value_1",
+ "config_key_2": "config_value_2",
+}
+_TEST_METRIC_SPEC_KEY = "metric"
+_TEST_METRIC_SPEC_VALUE = "MAXIMIZE"
+_TEST_SEARCH_ALGORITHM = "random"
+_TEST_MEASUREMENT_SELECTION = "best"
+_TEST_CONDITIONAL_PARAMETER_DECAY = hpt.DoubleParameterSpec(
+ min=1e-07, max=1, scale="linear", parent_values=[32, 64]
+)
+_TEST_CONDITIONAL_PARAMETER_LR = hpt.DoubleParameterSpec(
+ min=1e-07, max=1, scale="linear", parent_values=[4, 8, 16]
+)
+_TEST_STUDY_SPEC = gca_study_compat.StudySpec(
+ metrics=[
+ gca_study_compat.StudySpec.MetricSpec(
+ metric_id=_TEST_METRIC_SPEC_KEY, goal=_TEST_METRIC_SPEC_VALUE.upper()
+ )
+ ],
+ parameters=[
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="lr",
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
+ double_value_spec=gca_study_compat.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=0.001, max_value=0.1
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="units",
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ integer_value_spec=gca_study_compat.StudySpec.ParameterSpec.IntegerValueSpec(
+ min_value=4, max_value=1028
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="activation",
+ categorical_value_spec=gca_study_compat.StudySpec.ParameterSpec.CategoricalValueSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="batch_size",
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ discrete_value_spec=gca_study_compat.StudySpec.ParameterSpec.DiscreteValueSpec(
+ values=[4, 8, 16, 32, 64]
+ ),
+ conditional_parameter_specs=[
+ gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec(
+ parent_discrete_values=gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition(
+ values=[32, 64]
+ ),
+ parameter_spec=gca_study_compat.StudySpec.ParameterSpec(
+ double_value_spec=gca_study_compat.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=1e-07, max_value=1
+ ),
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ parameter_id="decay",
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec(
+ parent_discrete_values=gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition(
+ values=[4, 8, 16]
+ ),
+ parameter_spec=gca_study_compat.StudySpec.ParameterSpec(
+ double_value_spec=gca_study_compat.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=1e-07, max_value=1
+ ),
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ parameter_id="learning_rate",
+ ),
+ ),
+ ],
+ ),
+ ],
+ algorithm=gca_study_compat.StudySpec.Algorithm.RANDOM_SEARCH,
+ measurement_selection_type=gca_study_compat.StudySpec.MeasurementSelectionType.BEST_MEASUREMENT,
+)
+
+_TEST_TRAINING_TASK_INPUTS_WITH_TUNABLE_PARAMETERS = json_format.ParseDict(
+ {
+ "modelType": "COCA",
+ "budgetMilliNodeHours": _TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ "multiLabel": False,
+ "disableEarlyStopping": _TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ "uptrainBaseModelId": _TEST_MODEL_ID,
+ "tunableParameter": {
+ "checkpointName": _TEST_CHECKPOINT_NAME,
+ "trainerConfig": _TEST_TRAINER_CONFIG,
+ "studySpec": json_format.MessageToDict(_TEST_STUDY_SPEC._pb),
+ },
+ },
+ struct_pb2.Value(),
+)
+
+_TEST_FRACTION_SPLIT_TRAINING = 0.6
+_TEST_FRACTION_SPLIT_VALIDATION = 0.2
+_TEST_FRACTION_SPLIT_TEST = 0.2
+
+_TEST_FILTER_SPLIT_TRAINING = "train"
+_TEST_FILTER_SPLIT_VALIDATION = "validate"
+_TEST_FILTER_SPLIT_TEST = "test"
+
+_TEST_MODEL_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_MODEL_ID}"
+)
+
+_TEST_PIPELINE_RESOURCE_NAME = (
+ test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME
+)
+
+# CMEK encryption
+_TEST_DEFAULT_ENCRYPTION_KEY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+
+_TEST_PIPELINE_ENCRYPTION_KEY_NAME = "key_pipeline"
+_TEST_PIPELINE_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME
+)
+
+_TEST_MODEL_ENCRYPTION_KEY_NAME = "key_model"
+_TEST_MODEL_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME
+)
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_and_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ )
+ )
+
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ )
+ )
+
+ yield mock_create_training_pipeline, mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_model_service_get():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as mock_get_model:
+ mock_get_model.return_value = gca_model.Model(name=_TEST_MODEL_NAME)
+ yield mock_get_model
+
+
+@pytest.fixture
+def mock_dataset_image():
+ ds = mock.MagicMock(datasets.ImageDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds.metadata_schema_uri = _TEST_METADATA_SCHEMA_URI_IMAGE
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_IMAGE,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ return ds
+
+
+@pytest.fixture
+def mock_model():
+ model = mock.MagicMock(models.Model)
+ model.name = _TEST_MODEL_ID
+ model._latest_future = None
+ model._exception = None
+ model._gca_resource = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description="This is the mock Model's description",
+ name=_TEST_MODEL_NAME,
+ )
+ yield model
+
+
+@pytest.fixture
+def mock_uptrain_base_model():
+ model = mock.MagicMock(models.Model)
+ model.name = _TEST_MODEL_ID
+ model._latest_future = None
+ model._exception = None
+ model._gca_resource = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description="This is the mock uptrain base Model's description",
+ name=_TEST_MODEL_NAME,
+ )
+ yield model
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestAutoMLImageTrainingJob:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_all_parameters(self, mock_model):
+ """Ensure all private members are set correctly at initialization."""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_ICN,
+ model_type=_TEST_MODEL_TYPE_MOBILE,
+ base_model=mock_model,
+ multi_label=True,
+ )
+
+ assert job._display_name == _TEST_DISPLAY_NAME
+ assert job._model_type == _TEST_MODEL_TYPE_MOBILE
+ assert job._prediction_type == _TEST_PREDICTION_TYPE_ICN
+ assert job._multi_label is True
+ assert job._base_model == mock_model
+
+ def test_init_job_with_tunable_parameters(self, mock_model):
+ """Ensure all private members are set correctly at initialization."""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_ICN,
+ model_type="COCA",
+ base_model=mock_model,
+ multi_label=True,
+ checkpoint_name=_TEST_CHECKPOINT_NAME,
+ trainer_config=_TEST_TRAINER_CONFIG,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ )
+
+ assert job._display_name == _TEST_DISPLAY_NAME
+ assert job._model_type == "COCA"
+ assert job._prediction_type == _TEST_PREDICTION_TYPE_ICN
+ assert job._multi_label is True
+ assert job._base_model == mock_model
+ assert job._checkpoint_name == _TEST_CHECKPOINT_NAME
+ assert job._trainer_config == _TEST_TRAINER_CONFIG
+ assert job._study_spec == gca_study_compat.StudySpec(
+ metrics=[
+ gca_study_compat.StudySpec.MetricSpec(
+ metric_id=_TEST_METRIC_SPEC_KEY,
+ goal=_TEST_METRIC_SPEC_VALUE.upper(),
+ ),
+ ],
+ parameters=[
+ hpt.DoubleParameterSpec(
+ min=0.001, max=0.1, scale="log"
+ )._to_parameter_spec(parameter_id="lr"),
+ hpt.IntegerParameterSpec(
+ min=4, max=1028, scale="linear"
+ )._to_parameter_spec(parameter_id="units"),
+ hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ )._to_parameter_spec(parameter_id="activation"),
+ hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ )._to_parameter_spec(parameter_id="batch_size"),
+ ],
+ algorithm=gca_study_compat.StudySpec.Algorithm.RANDOM_SEARCH,
+ measurement_selection_type=gca_study_compat.StudySpec.MeasurementSelectionType.BEST_MEASUREMENT,
+ )
+
+ def test_init_wrong_parameters(self, mock_model):
+ """Ensure correct exceptions are raised when initializing with invalid args"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ with pytest.raises(ValueError, match=r"not a supported prediction type"):
+ training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type="abcdefg",
+ )
+
+ with pytest.raises(ValueError, match=r"not a supported model_type for"):
+ training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type="classification",
+ model_type=_TEST_MODEL_TYPE_IOD,
+ )
+
+ with pytest.raises(ValueError, match=r"`base_model` is only supported"):
+ training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_IOD,
+ base_model=mock_model,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_image,
+ mock_model_service_get,
+ mock_uptrain_base_model,
+ sync,
+ ):
+ """Create and run an AutoML ICN training job, verify calls and return value"""
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ incremental_train_base_model=mock_uptrain_base_model,
+ labels=_TEST_LABELS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_image.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_image_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_UPTRAIN_BASE_MODEL,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+ assert not job.has_failed
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tunable_parameters(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_image,
+ mock_model_service_get,
+ mock_uptrain_base_model,
+ sync,
+ ):
+ """Create and run an AutoML ICN training job, verify calls and return value"""
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ model_type="COCA",
+ incremental_train_base_model=mock_uptrain_base_model,
+ labels=_TEST_LABELS,
+ checkpoint_name=_TEST_CHECKPOINT_NAME,
+ trainer_config=_TEST_TRAINER_CONFIG,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_image.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_image_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_TUNABLE_PARAMETERS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+ assert not job.has_failed
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_timeout(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_image,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """Create and run an AutoML ICN training job, verify calls and return value"""
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ base_model=mock_model,
+ labels=_TEST_LABELS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=mock_model._gca_resource.labels,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_image.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_image_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=180.0,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels(
+ self,
+ mock_pipeline_service_create,
+ mock_dataset_image,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
+ model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_image,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ # Test that if defaults to the job display name
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_image.name
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_image_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_called_twice_raises(self, mock_dataset_image, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+ job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_two_split_raises(
+ self,
+ mock_dataset_image,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+ with pytest.raises(ValueError):
+ model_from_job = job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ )
+ if not sync:
+ model_from_job.wait()
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_raises_if_pipeline_fails(
+ self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_image, sync
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ dataset=mock_dataset_image,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ def test_raises_before_run_is_called(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ with pytest.raises(RuntimeError):
+ job.has_failed
+
+ with pytest.raises(RuntimeError):
+ job.state
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_fraction(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_image,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME, base_model=mock_model
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction=_TEST_FRACTION_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_dataset_image.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_image_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_filter(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_image,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME, base_model=mock_model
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_image.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_image_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_default(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_image,
+ mock_model_service_get,
+ mock_uptrain_base_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ incremental_train_base_model=mock_uptrain_base_model,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_image.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_image_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_UPTRAIN_BASE_MODEL,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ def test_splits_filter_incomplete(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_image,
+ mock_model_service_get,
+ mock_model,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLImageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME, base_model=mock_model
+ )
+
+ with pytest.raises(ValueError):
+ job.run(
+ dataset=mock_dataset_image,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_fraction_split=None,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5eac2d6d23884793ba8f0aea12ed6e701df966b
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
@@ -0,0 +1,1521 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import importlib
+import pytest
+from unittest import mock
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import datasets
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import schema
+from google.cloud.aiplatform import training_jobs
+
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ dataset as gca_dataset,
+ encryption_spec as gca_encryption_spec,
+ model as gca_model,
+ pipeline_state as gca_pipeline_state,
+ training_pipeline as gca_training_pipeline,
+)
+from google.protobuf import json_format
+from google.protobuf import struct_pb2
+import constants as test_constants
+
+_TEST_BUCKET_NAME = test_constants.TrainingJobConstants._TEST_BUCKET_NAME
+_TEST_GCS_PATH_WITHOUT_BUCKET = (
+ test_constants.TrainingJobConstants._TEST_GCS_PATH_WITHOUT_BUCKET
+)
+_TEST_GCS_PATH = test_constants.TrainingJobConstants._TEST_GCS_PATH
+_TEST_GCS_PATH_WITH_TRAILING_SLASH = (
+ test_constants.TrainingJobConstants._TEST_GCS_PATH_WITH_TRAILING_SLASH
+)
+_TEST_PROJECT = "test-project"
+
+_TEST_DATASET_DISPLAY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DATASET_DISPLAY_NAME
+)
+_TEST_DATASET_NAME = test_constants.TrainingJobConstants._TEST_DATASET_NAME
+_TEST_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_DISPLAY_NAME
+_TEST_METADATA_SCHEMA_URI_TABULAR = schema.dataset.metadata.tabular
+_TEST_METADATA_SCHEMA_URI_NONTABULAR = schema.dataset.metadata.image
+
+_TEST_TRAINING_COLUMN_NAMES = [
+ "sepal_width",
+ "sepal_length",
+ "petal_length",
+ "petal_width",
+ "target",
+]
+
+_TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE = [
+ "apple",
+ "banana",
+ "coconut",
+ "target",
+]
+
+_TEST_TRAINING_COLUMN_TRANSFORMATIONS = [
+ {"auto": {"column_name": "sepal_width"}},
+ {"auto": {"column_name": "sepal_length"}},
+ {"auto": {"column_name": "petal_length"}},
+ {"auto": {"column_name": "petal_width"}},
+]
+_TEST_TRAINING_COLUMN_SPECS = {
+ "apple": "auto",
+ "banana": "auto",
+ "coconut": "auto",
+}
+_TEST_TRAINING_COLUMN_TRANSFORMATIONS_ALTERNATIVE = [
+ {"auto": {"column_name": "apple"}},
+ {"auto": {"column_name": "banana"}},
+ {"auto": {"column_name": "coconut"}},
+]
+_TEST_TRAINING_COLUMN_TRANSFORMATIONS_ALTERNATIVE_NOT_AUTO = [
+ {"numeric": {"column_name": "apple"}},
+ {"categorical": {"column_name": "banana"}},
+ {"text": {"column_name": "coconut"}},
+]
+_TEST_TRAINING_TARGET_COLUMN = "target"
+_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS = 1000
+_TEST_TRAINING_WEIGHT_COLUMN = "weight"
+_TEST_TRAINING_DISABLE_EARLY_STOPPING = True
+_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME = "minimize-log-loss"
+_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE = "classification"
+_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS = True
+_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI = (
+ "bq://path.to.table"
+)
+_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION = False
+_TEST_ADDITIONAL_EXPERIMENTS = ["exp1", "exp2"]
+_TEST_TRAINING_TASK_INPUTS_DICT = {
+ # required inputs
+ "targetColumn": _TEST_TRAINING_TARGET_COLUMN,
+ "transformations": _TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ "trainBudgetMilliNodeHours": _TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ # optional inputs
+ "weightColumnName": _TEST_TRAINING_WEIGHT_COLUMN,
+ "disableEarlyStopping": _TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ "predictionType": _TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ "optimizationObjective": _TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ "optimizationObjectiveRecallValue": None,
+ "optimizationObjectivePrecisionValue": None,
+}
+_TEST_TRAINING_TASK_INPUTS = json_format.ParseDict(
+ _TEST_TRAINING_TASK_INPUTS_DICT,
+ struct_pb2.Value(),
+)
+_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS = json_format.ParseDict(
+ {
+ **_TEST_TRAINING_TASK_INPUTS_DICT,
+ "additionalExperiments": _TEST_ADDITIONAL_EXPERIMENTS,
+ },
+ struct_pb2.Value(),
+)
+_TEST_TRAINING_TASK_INPUTS_ALTERNATIVE = json_format.ParseDict(
+ {
+ **_TEST_TRAINING_TASK_INPUTS_DICT,
+ "transformations": _TEST_TRAINING_COLUMN_TRANSFORMATIONS_ALTERNATIVE,
+ },
+ struct_pb2.Value(),
+)
+_TEST_TRAINING_TASK_INPUTS_ALTERNATIVE_NOT_AUTO = json_format.ParseDict(
+ {
+ **_TEST_TRAINING_TASK_INPUTS_DICT,
+ "transformations": _TEST_TRAINING_COLUMN_TRANSFORMATIONS_ALTERNATIVE_NOT_AUTO,
+ },
+ struct_pb2.Value(),
+)
+_TEST_TRAINING_TASK_INPUTS_WITH_EXPORT_EVAL_DATA_ITEMS = json_format.ParseDict(
+ {
+ **_TEST_TRAINING_TASK_INPUTS_DICT,
+ "exportEvaluatedDataItemsConfig": {
+ "destinationBigqueryUri": _TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ "overrideExistingTable": _TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ },
+ },
+ struct_pb2.Value(),
+)
+
+_TEST_DATASET_NAME = test_constants.TrainingJobConstants._TEST_DATASET_NAME
+
+_TEST_MODEL_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_MODEL_DISPLAY_NAME
+
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+_TEST_MODEL_LABELS = test_constants.TrainingJobConstants._TEST_MODEL_LABELS
+
+_TEST_FRACTION_SPLIT_TRAINING = 0.6
+_TEST_FRACTION_SPLIT_VALIDATION = 0.2
+_TEST_FRACTION_SPLIT_TEST = 0.2
+
+_TEST_SPLIT_PREDEFINED_COLUMN_NAME = "split"
+_TEST_SPLIT_TIMESTAMP_COLUMN_NAME = "timestamp"
+
+_TEST_OUTPUT_PYTHON_PACKAGE_PATH = "gs://test/ouput/python/trainer.tar.gz"
+
+_TEST_MODEL_NAME = "projects/my-project/locations/us-central1/models/12345"
+
+_TEST_PIPELINE_RESOURCE_NAME = (
+ test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME
+)
+
+# CMEK encryption
+_TEST_DEFAULT_ENCRYPTION_KEY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+
+_TEST_PIPELINE_ENCRYPTION_KEY_NAME = "key_pipeline"
+_TEST_PIPELINE_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME
+)
+
+_TEST_MODEL_ENCRYPTION_KEY_NAME = "key_model"
+_TEST_MODEL_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME
+)
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_fail():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.side_effect = RuntimeError("Mock fail")
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_and_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ )
+ )
+
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ )
+ )
+
+ yield mock_create_training_pipeline, mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_model_service_get():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as mock_get_model:
+ mock_get_model.return_value = gca_model.Model(name=_TEST_MODEL_NAME)
+ yield mock_get_model
+
+
+@pytest.fixture
+def mock_dataset_tabular():
+ ds = mock.MagicMock(datasets.TabularDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ ds.column_names = _TEST_TRAINING_COLUMN_NAMES
+
+ yield ds
+
+
+@pytest.fixture
+def mock_dataset_tabular_alternative():
+ ds = mock.MagicMock(datasets.TabularDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ ds.column_names = _TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE
+
+ yield ds
+
+
+@pytest.fixture
+def mock_dataset_nontabular():
+ ds = mock.MagicMock(datasets.ImageDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ return ds
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestAutoMLTabularTrainingJob:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_timeout(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ job.wait_for_resource_creation()
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=180.0,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_export_eval_data_items(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
+ export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
+ export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_EXPORT_EVAL_DATA_ITEMS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels(
+ self,
+ mock_pipeline_service_create,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
+ model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ create_request_timeout=None,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
+
+ if not sync:
+ model_from_job.wait()
+
+ # Test that if defaults to the job display name
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ # This test checks that default transformations are used if no columns transformations are provided
+ def test_run_call_pipeline_service_create_if_no_column_transformations(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ column_transformations=None,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ # This test checks that default transformations are used if no columns transformations are provided
+ def test_run_call_pipeline_service_create_if_set_additional_experiments(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ column_transformations=None,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ job._add_additional_experiments(_TEST_ADDITIONAL_EXPERIMENTS)
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_column_specs(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular_alternative,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ column_specs = training_jobs.AutoMLTabularTrainingJob.get_auto_column_specs(
+ dataset=mock_dataset_tabular_alternative,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ )
+
+ assert column_specs == _TEST_TRAINING_COLUMN_SPECS
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ column_specs=column_specs,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular_alternative,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_tabular_alternative.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_ALTERNATIVE,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_call_pipeline_service_create_with_column_specs_and_transformations_raises(
+ self,
+ mock_dataset_tabular_alternative,
+ sync,
+ ):
+ aiplatform.init()
+
+ column_specs = training_jobs.AutoMLTabularTrainingJob.get_auto_column_specs(
+ dataset=mock_dataset_tabular_alternative,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ )
+
+ assert column_specs == _TEST_TRAINING_COLUMN_SPECS
+
+ with pytest.raises(ValueError):
+ training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ column_specs=column_specs,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_get_column_specs_no_target_raises(
+ self,
+ mock_dataset_tabular_alternative,
+ sync,
+ ):
+ aiplatform.init()
+
+ with pytest.raises(TypeError):
+ training_jobs.AutoMLTabularTrainingJob.get_auto_column_specs(
+ dataset=mock_dataset_tabular_alternative
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_column_specs_not_auto(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular_alternative,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ column_specs = training_jobs.AutoMLTabularTrainingJob.get_auto_column_specs(
+ dataset=mock_dataset_tabular_alternative,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ )
+ column_specs[
+ _TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE[0]
+ ] = training_jobs.AutoMLTabularTrainingJob.column_data_types.NUMERIC
+ column_specs[
+ _TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE[1]
+ ] = training_jobs.AutoMLTabularTrainingJob.column_data_types.CATEGORICAL
+ column_specs[
+ _TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE[2]
+ ] = training_jobs.AutoMLTabularTrainingJob.column_data_types.TEXT
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ column_specs=column_specs,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular_alternative,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_tabular_alternative.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_ALTERNATIVE_NOT_AUTO,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ # Also acts as a custom column_transformations test as it should not error during first call
+ def test_run_called_twice_raises(self, mock_dataset_tabular, sync):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_raises_if_pipeline_fails(
+ self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_tabular, sync
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ def test_wait_for_resource_creation_does_not_fail_if_creation_does_not_fail(
+ self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_tabular
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ job.run(
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ sync=False,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
+
+ with pytest.raises(RuntimeError):
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ @pytest.mark.usefixtures("mock_pipeline_service_create_fail")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_fails(self, mock_dataset_tabular, sync):
+
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ if sync:
+ with pytest.raises(RuntimeError) as e:
+ job.run(
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ sync=sync,
+ )
+ assert e.match("Mock fail")
+
+ with pytest.raises(RuntimeError) as e:
+ job.wait_for_resource_creation()
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created."
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource has not been created."
+ )
+
+ job.wait()
+
+ with pytest.raises(RuntimeError) as e:
+ job.get_model()
+ e.match(
+ regexp="TrainingPipeline has not been launched. You must run this TrainingPipeline using TrainingPipeline.run."
+ )
+
+ else:
+ job.run(
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ sync=sync,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.wait_for_resource_creation()
+ assert e.match(regexp=r"Mock fail")
+
+ with pytest.raises(RuntimeError) as e:
+ assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ with pytest.raises(RuntimeError):
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ def test_raises_before_run_is_called(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ with pytest.raises(RuntimeError):
+ job.has_failed
+
+ with pytest.raises(RuntimeError):
+ job.state
+
+ with pytest.raises(RuntimeError) as e:
+ job.wait_for_resource_creation()
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created."
+ )
+
+ def test_properties_throw_if_not_available(self):
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.name
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource has not been created"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.resource_name
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource has not been created"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.display_name
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource has not been created"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.create_time
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource has not been created"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.encryption_spec
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource has not been created"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.labels
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource has not been created"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.gca_resource
+ assert e.match(
+ regexp=r"AutoMLTabularTrainingJob resource has not been created"
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_fraction(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction=_TEST_FRACTION_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_timestamp(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ timestamp_split_column_name=_TEST_SPLIT_TIMESTAMP_COLUMN_NAME,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_split = gca_training_pipeline.TimestampSplit(
+ training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction=_TEST_FRACTION_SPLIT_TEST,
+ key=_TEST_SPLIT_TIMESTAMP_COLUMN_NAME,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ timestamp_split=true_split,
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_predefined(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ predefined_split_column_name=_TEST_SPLIT_PREDEFINED_COLUMN_NAME,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_split = gca_training_pipeline.PredefinedSplit(
+ key=_TEST_SPLIT_PREDEFINED_COLUMN_NAME
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ predefined_split=true_split,
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_default(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_tabular,
+ mock_model_service_get,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTabularTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
+ optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
+ column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
+ optimization_objective_recall_value=None,
+ optimization_objective_precision_value=None,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_tabular,
+ target_column=_TEST_TRAINING_TARGET_COLUMN,
+ weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_tabular.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_text_training_jobs.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_text_training_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..72a4d4f5b0a1fa533b6ac79c20a0140bd1970783
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_text_training_jobs.py
@@ -0,0 +1,1017 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import importlib
+from unittest import mock
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import datasets
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform import schema
+from google.cloud.aiplatform import training_jobs
+
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
+)
+from google.cloud.aiplatform.compat.types import (
+ dataset as gca_dataset,
+ encryption_spec as gca_encryption_spec,
+ model as gca_model,
+ pipeline_state as gca_pipeline_state,
+ training_pipeline as gca_training_pipeline,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob import (
+ definition_v1 as training_job_inputs,
+)
+import constants as test_constants
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_DATASET_DISPLAY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DATASET_DISPLAY_NAME
+)
+_TEST_DATASET_NAME = test_constants.TrainingJobConstants._TEST_DATASET_NAME
+_TEST_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_DISPLAY_NAME
+_TEST_METADATA_SCHEMA_URI_TEXT = schema.dataset.metadata.text
+
+_TEST_PREDICTION_TYPE_CLASSIFICATION = "classification"
+_TEST_CLASSIFICATION_MULTILABEL = True
+_TEST_PREDICTION_TYPE_EXTRACTION = "extraction"
+_TEST_PREDICTION_TYPE_SENTIMENT = "sentiment"
+_TEST_SENTIMENT_MAX = 10
+
+_TEST_DATASET_NAME = test_constants.TrainingJobConstants._TEST_DATASET_NAME
+_TEST_MODEL_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_MODEL_DISPLAY_NAME
+
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+_TEST_MODEL_LABELS = test_constants.TrainingJobConstants._TEST_MODEL_LABELS
+
+_TEST_MODEL_ID = "98777645321"
+
+_TEST_TRAINING_TASK_INPUTS_CLASSIFICATION = (
+ training_job_inputs.AutoMlTextClassificationInputs(
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL
+ )
+)
+_TEST_TRAINING_TASK_INPUTS_EXTRACTION = training_job_inputs.AutoMlTextExtractionInputs()
+_TEST_TRAINING_TASK_INPUTS_SENTIMENT = training_job_inputs.AutoMlTextSentimentInputs(
+ sentiment_max=_TEST_SENTIMENT_MAX
+)
+
+_TEST_FRACTION_SPLIT_TRAINING = 0.6
+_TEST_FRACTION_SPLIT_VALIDATION = 0.2
+_TEST_FRACTION_SPLIT_TEST = 0.2
+_TEST_FILTER_SPLIT_TRAINING = "train"
+_TEST_FILTER_SPLIT_VALIDATION = "validate"
+_TEST_FILTER_SPLIT_TEST = "test"
+_TEST_PREDEFINED_SPLIT_COLUMN_NAME = "predefined_column"
+
+_TEST_MODEL_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_MODEL_ID}"
+)
+
+_TEST_PIPELINE_RESOURCE_NAME = (
+ test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME
+)
+
+# CMEK encryption
+_TEST_DEFAULT_ENCRYPTION_KEY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+
+_TEST_PIPELINE_ENCRYPTION_KEY_NAME = "key_pipeline"
+_TEST_PIPELINE_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME
+)
+
+_TEST_MODEL_ENCRYPTION_KEY_NAME = "key_model"
+_TEST_MODEL_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME
+)
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_and_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ )
+ )
+
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ )
+ )
+
+ yield mock_create_training_pipeline, mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_model_service_get():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as mock_get_model:
+ mock_get_model.return_value = gca_model.Model(name=_TEST_MODEL_NAME)
+ yield mock_get_model
+
+
+@pytest.fixture
+def mock_dataset_text():
+ ds = mock.MagicMock(datasets.TextDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds.metadata_schema_uri = _TEST_METADATA_SCHEMA_URI_TEXT
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TEXT,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ return ds
+
+
+@pytest.fixture
+def mock_model():
+ model = mock.MagicMock(models.Model)
+ model.name = _TEST_MODEL_ID
+ model._latest_future = None
+ model._gca_resource = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ name=_TEST_MODEL_NAME,
+ )
+ yield model
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestAutoMLTextTrainingJob:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_all_parameters_classification(self):
+ """Ensure all private members are set correctly at initialization"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_CLASSIFICATION,
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL,
+ )
+
+ assert job._display_name == _TEST_DISPLAY_NAME
+ assert (
+ job._training_task_definition
+ == schema.training_job.definition.automl_text_classification
+ )
+ assert (
+ job._training_task_inputs_dict
+ == training_job_inputs.AutoMlTextClassificationInputs(
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL
+ )
+ )
+
+ def test_init_all_parameters_extraction(self):
+ """Ensure all private members are set correctly at initialization"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_EXTRACTION,
+ )
+
+ assert job._display_name == _TEST_DISPLAY_NAME
+ assert (
+ job._training_task_definition
+ == schema.training_job.definition.automl_text_extraction
+ )
+ assert (
+ job._training_task_inputs_dict
+ == training_job_inputs.AutoMlTextExtractionInputs()
+ )
+
+ def test_init_all_parameters_sentiment(self):
+ """Ensure all private members are set correctly at initialization"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_SENTIMENT,
+ sentiment_max=_TEST_SENTIMENT_MAX,
+ )
+
+ assert job._display_name == _TEST_DISPLAY_NAME
+ assert (
+ job._training_task_definition
+ == schema.training_job.definition.automl_text_sentiment
+ )
+ assert (
+ job._training_task_inputs_dict
+ == training_job_inputs.AutoMlTextSentimentInputs(
+ sentiment_max=_TEST_SENTIMENT_MAX
+ )
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_init_aiplatform_with_encryption_key_name_and_create_training_job(
+ self,
+ mock_pipeline_service_create,
+ mock_dataset_text,
+ mock_model_service_get,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Text Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_CLASSIFICATION,
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_text.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_text_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_CLASSIFICATION,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_classification(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_text,
+ mock_model_service_get,
+ sync,
+ ):
+ """Create and run an AutoML Text Classification training job, verify calls and return value"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ prediction_type=_TEST_PREDICTION_TYPE_CLASSIFICATION,
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL,
+ training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
+ model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_text.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_text_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_CLASSIFICATION,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+ assert not job.has_failed
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_classification_with_timeout(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_text,
+ mock_model_service_get,
+ sync,
+ ):
+ """Create and run an AutoML Text Classification training job, verify calls and return value"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ prediction_type=_TEST_PREDICTION_TYPE_CLASSIFICATION,
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL,
+ training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
+ model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_text.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_text_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_CLASSIFICATION,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=180.0,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_extraction(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_text,
+ mock_model_service_get,
+ sync,
+ ):
+ """Create and run an AutoML Text Extraction training job, verify calls and return value"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ prediction_type=_TEST_PREDICTION_TYPE_EXTRACTION,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction=_TEST_FRACTION_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_dataset_text.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_text_extraction,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_EXTRACTION,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+ assert not job.has_failed
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_sentiment(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_text,
+ mock_model_service_get,
+ sync,
+ ):
+ """Create and run an AutoML Text Sentiment training job, verify calls and return value"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ prediction_type=_TEST_PREDICTION_TYPE_SENTIMENT,
+ sentiment_max=10,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_text.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_text_sentiment,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_SENTIMENT,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+ assert not job.has_failed
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels(
+ self,
+ mock_pipeline_service_create,
+ mock_dataset_text,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type="classification",
+ multi_label=True,
+ labels=_TEST_LABELS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=None, # Omit model_display_name
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ # Test that if defaults to the job display name
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_text.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_text_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_CLASSIFICATION,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_called_twice_raises(self, mock_dataset_text, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type="classification",
+ multi_label=True,
+ )
+
+ job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ sync=sync,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_two_split_raises(
+ self,
+ mock_dataset_text,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type="classification",
+ multi_label=True,
+ )
+
+ with pytest.raises(ValueError):
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ )
+ if not sync:
+ model_from_job.wait()
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_raises_if_pipeline_fails(
+ self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_text, sync
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_CLASSIFICATION,
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ dataset=mock_dataset_text,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_fraction(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_text,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_CLASSIFICATION,
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction=_TEST_FRACTION_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_dataset_text.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_text_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_CLASSIFICATION,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_filter(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_text,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_CLASSIFICATION,
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_text.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_text_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_CLASSIFICATION,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_default(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_text,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLTextTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_CLASSIFICATION,
+ multi_label=_TEST_CLASSIFICATION_MULTILABEL,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_text,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_text.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_text_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_CLASSIFICATION,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_video_training_jobs.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_video_training_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..e05b1d8709e686a421a87cc92a4bd91bbb3ec3d5
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_automl_video_training_jobs.py
@@ -0,0 +1,854 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import importlib
+from unittest import mock
+
+from google.protobuf import json_format
+from google.protobuf import struct_pb2
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import datasets
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform import schema
+from google.cloud.aiplatform import training_jobs
+
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
+)
+from google.cloud.aiplatform.compat.types import (
+ dataset as gca_dataset,
+ encryption_spec as gca_encryption_spec,
+ model as gca_model,
+ pipeline_state as gca_pipeline_state,
+ training_pipeline as gca_training_pipeline,
+)
+import constants as test_constants
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_DATASET_DISPLAY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DATASET_DISPLAY_NAME
+)
+_TEST_DATASET_NAME = test_constants.TrainingJobConstants._TEST_DATASET_NAME
+_TEST_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_DISPLAY_NAME
+_TEST_METADATA_SCHEMA_URI_VIDEO = schema.dataset.metadata.video
+
+_TEST_MODEL_TYPE_CLOUD = "CLOUD"
+_TEST_MODEL_TYPE_MOBILE = "MOBILE_VERSATILE_1"
+
+_TEST_PREDICTION_TYPE_VAR = "action_recognition"
+_TEST_PREDICTION_TYPE_VCN = "classification"
+_TEST_PREDICTION_TYPE_VOR = "object_tracking"
+
+_TEST_DATASET_NAME = test_constants.TrainingJobConstants._TEST_DATASET_NAME
+_TEST_MODEL_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_MODEL_DISPLAY_NAME
+
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+_TEST_MODEL_LABELS = test_constants.TrainingJobConstants._TEST_MODEL_LABELS
+
+_TEST_MODEL_ID = "98777645321" # TODO
+
+_TEST_TRAINING_TASK_INPUTS = json_format.ParseDict(
+ {"modelType": "CLOUD"},
+ struct_pb2.Value(),
+)
+
+_TEST_FRACTION_SPLIT_TRAINING = 0.8
+_TEST_FRACTION_SPLIT_VALIDATION = 0.0
+_TEST_FRACTION_SPLIT_TEST = 0.2
+_TEST_ALTERNATE_FRACTION_SPLIT_TRAINING = 0.7
+_TEST_ALTERNATE_FRACTION_SPLIT_TEST = 0.3
+_TEST_FILTER_SPLIT_TRAINING = "train"
+_TEST_FILTER_SPLIT_VALIDATION = "-"
+_TEST_FILTER_SPLIT_TEST = "test"
+
+_TEST_MODEL_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_MODEL_ID}"
+)
+
+_TEST_PIPELINE_RESOURCE_NAME = (
+ test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME
+)
+
+# CMEK encryption
+_TEST_DEFAULT_ENCRYPTION_KEY_NAME = (
+ test_constants.TrainingJobConstants._TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+
+_TEST_PIPELINE_ENCRYPTION_KEY_NAME = "key_pipeline"
+_TEST_PIPELINE_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME
+)
+
+_TEST_MODEL_ENCRYPTION_KEY_NAME = "key_model"
+_TEST_MODEL_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME
+)
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_and_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ )
+ )
+
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ )
+ )
+
+ yield mock_create_training_pipeline, mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_model_service_get():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as mock_get_model:
+ mock_get_model.return_value = gca_model.Model(name=_TEST_MODEL_NAME)
+ yield mock_get_model
+
+
+@pytest.fixture
+def mock_dataset_video():
+ ds = mock.MagicMock(datasets.VideoDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds.metadata_schema_uri = _TEST_METADATA_SCHEMA_URI_VIDEO
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_VIDEO,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ return ds
+
+
+@pytest.fixture
+def mock_model():
+ model = mock.MagicMock(models.Model)
+ model.name = _TEST_MODEL_ID
+ model._latest_future = None
+ model._exception = None
+ model._gca_resource = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ name=_TEST_MODEL_NAME,
+ )
+ yield model
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestAutoMLVideoTrainingJob:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_all_parameters(self):
+ """Ensure all private members are set correctly at initialization"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_VCN,
+ model_type=_TEST_MODEL_TYPE_CLOUD,
+ )
+
+ assert job._display_name == _TEST_DISPLAY_NAME
+ assert job._model_type == _TEST_MODEL_TYPE_CLOUD
+ assert job._prediction_type == _TEST_PREDICTION_TYPE_VCN
+
+ def test_init_wrong_parameters(self):
+ """Ensure correct exceptions are raised when initializing with invalid args"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ with pytest.raises(ValueError, match=r"not a supported prediction type"):
+ training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type="abcdefg",
+ )
+
+ with pytest.raises(ValueError, match=r"not a supported model_type for"):
+ training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_VCN,
+ model_type="abcdefg",
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_init_aiplatform_with_encryption_key_name_and_create_training_job(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_video,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_VCN,
+ model_type=_TEST_MODEL_TYPE_CLOUD,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_video,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_video.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_video_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+ assert not job.has_failed
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_fraction(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_video,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_VCN,
+ model_type=_TEST_MODEL_TYPE_CLOUD,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_video,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
+ validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction=_TEST_FRACTION_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_dataset_video.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_video_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_filter(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_video,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_VCN,
+ model_type=_TEST_MODEL_TYPE_CLOUD,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_video,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_video.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_video_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_splits_default(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_video,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """
+ Initiate aiplatform with encryption key name.
+ Create and run an AutoML Video Classification training job, verify calls and return value
+ """
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ prediction_type=_TEST_PREDICTION_TYPE_VCN,
+ model_type=_TEST_MODEL_TYPE_CLOUD,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_video,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_dataset_video.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.automl_video_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_video,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """Create and run an AutoML ICN training job, verify calls and return value"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ prediction_type=_TEST_PREDICTION_TYPE_VCN,
+ model_type=_TEST_MODEL_TYPE_CLOUD,
+ training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
+ model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_video,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_video.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_video_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert job._gca_resource is mock_pipeline_service_get.return_value
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+ assert not job.has_failed
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_timeout(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_dataset_video,
+ mock_model_service_get,
+ mock_model,
+ sync,
+ ):
+ """Create and run an AutoML ICN training job, verify calls and return value"""
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ prediction_type=_TEST_PREDICTION_TYPE_VCN,
+ model_type=_TEST_MODEL_TYPE_CLOUD,
+ training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
+ model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_video,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_FILTER_SPLIT_TRAINING,
+ validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
+ test_filter=_TEST_FILTER_SPLIT_TEST,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=mock_model._gca_resource.description,
+ encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_dataset_video.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_video_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=180.0,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels(
+ self,
+ mock_pipeline_service_create,
+ mock_dataset_video,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ prediction_type=_TEST_PREDICTION_TYPE_VCN,
+ model_type=_TEST_MODEL_TYPE_CLOUD,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_dataset_video,
+ training_fraction_split=_TEST_ALTERNATE_FRACTION_SPLIT_TRAINING,
+ test_fraction_split=_TEST_ALTERNATE_FRACTION_SPLIT_TEST,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_ALTERNATE_FRACTION_SPLIT_TRAINING,
+ validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
+ test_fraction=_TEST_ALTERNATE_FRACTION_SPLIT_TEST,
+ )
+
+ # Test that if defaults to the job display name
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_dataset_video.name,
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.automl_video_classification,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_called_twice_raises(
+ self,
+ mock_dataset_video,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+ job.run(
+ dataset=mock_dataset_video,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_dataset_video,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_two_split_raises(
+ self,
+ mock_dataset_video,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+ with pytest.raises(ValueError):
+ model_from_job = job.run(
+ dataset=mock_dataset_video,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ training_filter_split=_TEST_FILTER_SPLIT_TEST,
+ test_filter_split=_TEST_FILTER_SPLIT_TEST,
+ sync=sync,
+ )
+ if not sync:
+ model_from_job.wait()
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_raises_if_pipeline_fails(
+ self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_video, sync
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ dataset=mock_dataset_video,
+ training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
+ test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ def test_raises_before_run_is_called(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.AutoMLVideoTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ with pytest.raises(RuntimeError):
+ job.has_failed
+
+ with pytest.raises(RuntimeError):
+ job.state
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_base.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0b9b524644100109e05ca1b8a3c75cc9f60fab7
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_base.py
@@ -0,0 +1,214 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from importlib import reload
+import logging
+import pytest
+import time
+from typing import Optional
+
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+
+
+_TEST_LOGGER_NAME = "test_logger"
+
+
+class _TestClass(base.FutureManager):
+ def __init__(self, x):
+ self.x = x
+ super().__init__()
+
+ @classmethod
+ def _empty_constructor(cls):
+ self = cls.__new__(cls)
+ base.FutureManager.__init__(self)
+ self.x = None
+ return self
+
+ def _sync_object_with_future_result(self, result):
+ self.x = result.x
+
+ @classmethod
+ @base.optional_sync()
+ def create(cls, x: int, sync=True) -> "_TestClass":
+ time.sleep(1)
+ return cls(x)
+
+ @base.optional_sync()
+ def add(self, a: "_TestClass", sync=True) -> None:
+ time.sleep(1)
+ return self._add(a=a, sync=sync)
+
+ def _add(self, a: "_TestClass", sync=True) -> None:
+ self.x = self.x + a.x
+
+
+class _TestClassDownStream(_TestClass):
+ @base.optional_sync(construct_object_on_arg="a")
+ def add_and_create_new(
+ self, a: Optional["_TestClass"] = None, sync=True
+ ) -> _TestClass:
+ time.sleep(1)
+ if a:
+ return _TestClass(self.x + a.x)
+ return None
+
+ @base.optional_sync(return_input_arg="a", bind_future_to_self=False)
+ def add_to_input_arg(self, a: "_TestClass", sync=True) -> _TestClass:
+ time.sleep(1)
+ a._add(self)
+ return a
+
+
+class TestFutureManager:
+ def setup_method(self):
+ reload(initializer)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_task(self, sync):
+ a = _TestClass.create(10, sync=sync)
+ if not sync:
+ assert a.x is None
+ assert a._latest_future is not None
+ a.wait()
+ assert a._latest_future is None
+ assert a.x == 10
+ assert isinstance(a, _TestClass)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_and_add_task(self, sync):
+ _latest_future = None
+
+ a = _TestClass.create(10, sync=sync)
+ b = _TestClass.create(7, sync=sync)
+ if not sync:
+ assert a.x is None
+ assert a._latest_future is not None
+ assert b.x is None
+ assert b._latest_future is not None
+ _latest_future = b._latest_future
+
+ b.add(a, sync=sync)
+
+ if not sync:
+ assert b._latest_future is not _latest_future
+ b.wait()
+
+ assert a._latest_future is None
+ assert a.x == 10
+ assert b._latest_future is None
+ assert b.x == 17
+ assert isinstance(a, _TestClass)
+ assert isinstance(b, _TestClass)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_and_add_and_create_new_task(self, sync):
+ _latest_future = None
+
+ a = _TestClass.create(10, sync=sync)
+ b = _TestClassDownStream.create(7, sync=sync)
+ if not sync:
+ assert a.x is None
+ assert a._latest_future is not None
+ assert b.x is None
+ assert b._latest_future is not None
+ _latest_future = b._latest_future
+
+ c = b.add_and_create_new(a, sync=sync)
+
+ if not sync:
+ assert b._latest_future is not _latest_future
+ assert c.x is None
+ assert c._latest_future is not None
+ c.wait()
+
+ assert a._latest_future is None
+ assert a.x == 10
+ assert b._latest_future is None
+ assert b.x == 7
+ assert c._latest_future is None
+ assert c.x == 17
+ assert isinstance(a, _TestClass)
+ assert isinstance(b, _TestClassDownStream)
+ assert isinstance(c, _TestClass)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_and_add_and_not_create_new_task(self, sync):
+ _latest_future = None
+
+ b = _TestClassDownStream.create(7, sync=sync)
+ if not sync:
+ assert b.x is None
+ assert b._latest_future is not None
+ _latest_future = b._latest_future
+
+ c = b.add_and_create_new(None, sync=sync)
+
+ if not sync:
+ assert b._latest_future is not _latest_future
+ b.wait()
+
+ assert c is None
+
+ assert b._latest_future is None
+ assert b.x == 7
+ assert isinstance(b, _TestClassDownStream)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_and_add_return_arg(self, sync):
+ _latest_future = None
+
+ a = _TestClass.create(10, sync=sync)
+ b = _TestClassDownStream.create(7, sync=sync)
+ if not sync:
+ assert a.x is None
+ assert a._latest_future is not None
+ assert b.x is None
+ assert b._latest_future is not None
+ _latest_future = b._latest_future
+
+ c = b.add_to_input_arg(a, sync=sync)
+
+ if not sync:
+ assert b._latest_future is _latest_future
+ assert c.x is None
+ assert c._latest_future is not None
+ assert c is a
+ c.wait()
+
+ assert a._latest_future is None
+ assert a.x == 17
+ assert b._latest_future is None
+ assert b.x == 7
+ assert c._latest_future is None
+ assert c.x == 17
+ assert isinstance(a, _TestClass)
+ assert isinstance(b, _TestClassDownStream)
+ assert isinstance(c, _TestClass)
+
+
+class TestLogger:
+ def test_logger_handler(self):
+ logger = base.Logger(_TEST_LOGGER_NAME)
+
+ assert logger.level == logging.INFO
+ # the logger won't have a StreamHandler because root logger already has one
+ assert not logger.handlers
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_cloud_profiler.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_cloud_profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cd0adea53a3958b6788279000d2e9ce9fb5de35
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_cloud_profiler.py
@@ -0,0 +1,458 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import importlib.util
+import json
+import sys
+import threading
+from typing import List, Optional
+
+import pytest
+import unittest
+
+from unittest import mock
+from werkzeug import wrappers
+from werkzeug.test import EnvironBuilder
+
+from google.api_core import exceptions
+from google.cloud import aiplatform
+from google.cloud.aiplatform import training_utils
+from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import (
+ profile_uploader,
+)
+from google.cloud.aiplatform.training_utils.cloud_profiler.plugins import (
+ base_plugin,
+)
+from google.cloud.aiplatform.training_utils.cloud_profiler.plugins.tensorflow import (
+ tf_profiler,
+)
+from google.cloud.aiplatform.training_utils.cloud_profiler.plugins.tensorflow.tf_profiler import (
+ TFProfiler,
+)
+from google.cloud.aiplatform.training_utils.cloud_profiler.plugins.tensorflow import (
+ tensorboard_api,
+)
+from google.cloud.aiplatform.training_utils.cloud_profiler import webserver
+from google.cloud.aiplatform.training_utils.cloud_profiler import initializer
+
+
+# Mock cluster specs from the training environment.
+_CLUSTER_SPEC_VM = {
+ "cluster": {"chief": ["localhost:1234"]},
+ "environment": "cloud",
+ "task": {"type": "chief", "index": 0},
+}
+
+
+def _create_mock_plugin(
+ plugin_name: str = "test_plugin", routes: Optional[List] = ["/route1"]
+):
+ mock_plugin = mock.Mock(spec=base_plugin.BasePlugin)
+ mock_plugin.can_initialize.return_value = True
+ mock_plugin.post_setup_check.return_value = True
+ mock_plugin.PLUGIN_NAME = plugin_name
+
+ # Some mock routes to test number of times each has been called.
+ mock_routes = {}
+ for route in routes:
+ mock_routes[route] = mock.Mock()
+
+ mock_plugin.get_routes.return_value = mock_routes
+
+ # A call should just return the mock object itself.
+ mock_plugin.return_value = mock_plugin
+
+ return mock_plugin
+
+
+def _find_child_modules(root_module):
+ return [module for module in sys.modules.keys() if module.startswith(root_module)]
+
+
+@pytest.fixture
+def tf_profile_plugin_mock():
+ """Mock the tensorboard profile plugin"""
+ import tensorboard_plugin_profile.profile_plugin
+
+ with mock.patch.object(
+ tensorboard_plugin_profile.profile_plugin.ProfilePlugin, "capture_route"
+ ) as profile_mock:
+ profile_mock.return_value = (
+ wrappers.Response(
+ json.dumps({"error": "some error"}),
+ content_type="application/json",
+ status=200,
+ ),
+ )
+ yield profile_mock
+
+
+@pytest.fixture
+def tensorboard_api_mock():
+ with mock.patch.object(
+ tensorboard_api,
+ "create_profile_request_sender",
+ ) as sender_mock:
+ sender_mock.return_value = mock.Mock()
+ yield sender_mock
+
+
+@pytest.fixture
+def mock_api_environment_variables():
+ with mock.patch.object(training_utils, "environment_variables") as mock_env:
+ mock_env.tensorboard_api_uri = "testuri"
+ mock_env.tensorboard_resource_name = (
+ "projects/testproj/locations/us-central1/tensorboards/123"
+ )
+ mock_env.cloud_ml_job_id = "test_job_id"
+ mock_env.tensorboard_log_dir = "gs://my_log_dir"
+
+ yield mock_env
+
+
+def setupProfilerEnvVars():
+ tf_profiler.environment_variables.tf_profiler_port = "6009"
+ tf_profiler.environment_variables.tensorboard_log_dir = "tmp/"
+ tf_profiler.environment_variables.tensorboard_api_uri = "test_api_uri"
+ tf_profiler.environment_variables.tensorboard_resource_name = (
+ "projects/123/region/us-central1/tensorboards/mytb"
+ )
+ tf_profiler.environment_variables.cluster_spec = _CLUSTER_SPEC_VM
+ tf_profiler.environment_variables.cloud_ml_job_id = "myjob"
+
+
+class TestProfilerPlugin(unittest.TestCase):
+ def setUp(self):
+ setupProfilerEnvVars()
+
+ # Environment variable tests
+ def testCanInitializeProfilerPortUnset(self):
+ tf_profiler.environment_variables.tf_profiler_port = None
+ assert not TFProfiler.can_initialize()
+
+ def testCanInitializeTBLogDirUnset(self):
+ tf_profiler.environment_variables.tensorboard_log_dir = None
+ assert not TFProfiler.can_initialize()
+
+ def testCanInitializeTBAPIuriUnset(self):
+ tf_profiler.environment_variables.tensorboard_api_uri = None
+ assert not TFProfiler.can_initialize()
+
+ def testCanInitializeTBResourceNameUnset(self):
+ tf_profiler.environment_variables.tensorboard_resource_name = None
+ assert not TFProfiler.can_initialize()
+
+ def testCanInitializeJobIdUnset(self):
+ tf_profiler.environment_variables.cloud_ml_job_id = None
+ assert not TFProfiler.can_initialize()
+
+ def testCanInitializeNoClusterSpec(self):
+ tf_profiler.environment_variables.cluster_spec = None
+ assert not TFProfiler.can_initialize()
+
+ # Check tensorflow dependencies
+ def testCanInitializeTFInstalled(self):
+ orig_find_spec = importlib.util.find_spec
+
+ def tf_import_mock(name, *args, **kwargs):
+ if name == "tensorflow":
+ return None
+ return orig_find_spec(name, *args, **kwargs)
+
+ with mock.patch("importlib.util.find_spec", side_effect=tf_import_mock):
+ assert not TFProfiler.can_initialize()
+
+ def testCanInitializeTFVersion(self):
+ import tensorflow
+
+ with mock.patch.object(tensorflow, "__version__", "1.2.3.4"):
+ assert not TFProfiler.can_initialize()
+
+ def testCanInitializeOldTFVersion(self):
+ import tensorflow
+
+ with mock.patch.object(tensorflow, "__version__", "2.3.0"):
+ assert not TFProfiler.can_initialize()
+
+ def testCanInitializeRcTFVersion(self):
+ import tensorflow as tf
+
+ with mock.patch.object(tf, "__version__", "2.4.0-rc2"):
+ assert TFProfiler.can_initialize()
+
+ def testCanInitializeNoProfilePlugin(self):
+ orig_find_spec = importlib.util.find_spec
+
+ def plugin_import_mock(name, *args, **kwargs):
+ if name == "tensorboard_plugin_profile":
+ return None
+ return orig_find_spec(name, *args, **kwargs)
+
+ with mock.patch("importlib.util.find_spec", side_effect=plugin_import_mock):
+ assert not TFProfiler.can_initialize()
+
+ def testCanInitialize(self):
+ assert TFProfiler.can_initialize()
+
+ def testSetup(self):
+ import tensorflow
+
+ with mock.patch.object(
+ tensorflow.profiler.experimental.server, "start", return_value=None
+ ) as server_mock:
+ TFProfiler.setup()
+
+ assert server_mock.call_count == 1
+
+ def testPostSetupChecksFail(self):
+ tf_profiler.environment_variables.cluster_spec = {}
+ assert not TFProfiler.post_setup_check()
+
+ def testPostSetupChecks(self):
+ assert TFProfiler.post_setup_check()
+
+ # Tests for plugin
+ @pytest.mark.usefixtures("tf_profile_plugin_mock")
+ @pytest.mark.usefixtures("tensorboard_api_mock")
+ def testCaptureProfile(self):
+ profiler = TFProfiler()
+ environ = dict(QUERY_STRING="?service_addr=myhost1,myhost2&someotherdata=5")
+ start_response = None
+
+ resp = profiler.capture_profile_wrapper(environ, start_response)
+ assert resp[0].status_code == 200
+
+ @pytest.mark.usefixtures("tf_profile_plugin_mock")
+ @pytest.mark.usefixtures("tensorboard_api_mock")
+ def testCaptureProfileNoClusterSpec(self):
+ profiler = TFProfiler()
+
+ environ = dict(QUERY_STRING="?service_addr=myhost1,myhost2&someotherdata=5")
+ start_response = None
+
+ tf_profiler.environment_variables.cluster_spec = None
+ resp = profiler.capture_profile_wrapper(environ, start_response)
+
+ assert resp.status_code == 500
+
+ @pytest.mark.usefixtures("tf_profile_plugin_mock")
+ @pytest.mark.usefixtures("tensorboard_api_mock")
+ def testCaptureProfileNoCluster(self):
+ profiler = TFProfiler()
+
+ environ = dict(QUERY_STRING="?service_addr=myhost1,myhost2&someotherdata=5")
+ start_response = None
+ tf_profiler.environment_variables.cluster_spec = {"cluster": {}}
+
+ resp = profiler.capture_profile_wrapper(environ, start_response)
+
+ assert resp.status_code == 500
+
+ @pytest.mark.usefixtures("tf_profile_plugin_mock")
+ @pytest.mark.usefixtures("tensorboard_api_mock")
+ def testGetRoutes(self):
+ profiler = TFProfiler()
+
+ routes = profiler.get_routes()
+ assert isinstance(routes, dict)
+
+
+# Tensorboard API tests
+class TestTensorboardAPIBuilder(unittest.TestCase):
+ @pytest.mark.usefixtures("mock_api_environment_variables")
+ def test_get_api_client(self):
+ with mock.patch.object(aiplatform, "initializer") as mock_initializer:
+ tensorboard_api._get_api_client()
+ mock_initializer.global_config.create_client.assert_called_once()
+
+ def test_get_project_id_fail(self):
+ with mock.patch.object(training_utils, "environment_variables") as mock_env:
+ mock_env.tensorboard_resource_name = "bad_resource"
+ self.assertRaises(ValueError, tensorboard_api._get_project_id)
+
+ @pytest.mark.usefixtures("mock_api_environment_variables")
+ def test_get_project_id(self):
+ project_id = tensorboard_api._get_project_id()
+ assert project_id == "testproj"
+
+ @pytest.mark.usefixtures("mock_api_environment_variables")
+ def test_get_or_create_experiment(self):
+ api = mock.Mock()
+ api.create_tensorboard_experiment.side_effect = exceptions.AlreadyExists("test")
+ tensorboard_api._get_or_create_experiment(api, "test")
+ api.get_tensorboard_experiment.assert_called_once()
+
+ @pytest.mark.usefixtures("mock_api_environment_variables")
+ def test_create_profile_request_sender(self):
+ tensorboard_api.storage = mock.Mock()
+ tensorboard_api.uploader_utils = mock.Mock()
+
+ with mock.patch.object(profile_uploader, "ProfileRequestSender") as mock_sender:
+ with mock.patch.object(aiplatform, "initializer"):
+ tensorboard_api.create_profile_request_sender()
+ mock_sender.assert_called_once()
+
+
+# Webserver tests
+class TestWebServer(unittest.TestCase):
+ def test_create_webserver_bad_route(self):
+ plugin = _create_mock_plugin()
+ plugin.get_routes.return_value = {"my_route": "some_handler"}
+
+ self.assertRaises(ValueError, webserver.WebServer, [plugin])
+
+ def test_dispatch_bad_request(self):
+ plugin = _create_mock_plugin()
+ plugin.get_routes.return_value = {"/test_route": "test_handler"}
+
+ ws = webserver.WebServer([plugin])
+
+ builder = EnvironBuilder(method="GET", path="/")
+
+ env = builder.get_environ()
+
+ # Mock a start response callable
+ response = []
+ buff = []
+
+ def start_response(status, headers):
+ response[:] = [status, headers]
+ return buff.append
+
+ ws(env, start_response)
+
+ assert response[0] == "404 NOT FOUND"
+
+ def test_correct_response(self):
+ res_dict = {"response": "OK"}
+
+ def my_callable(var1, var2):
+ return wrappers.Response(
+ json.dumps(res_dict), content_type="application/json", status=200
+ )
+
+ plugin = _create_mock_plugin()
+ plugin.get_routes.return_value = {"/my_route": my_callable}
+ ws = webserver.WebServer([plugin])
+
+ builder = EnvironBuilder(method="GET", path="/test_plugin/my_route")
+
+ env = builder.get_environ()
+
+ # Mock a start response callable
+ response = []
+ buff = []
+
+ def start_response(status, headers):
+ response[:] = [status, headers]
+ return buff.append
+
+ res = ws(env, start_response)
+
+ final_response = json.loads(res.response[0].decode("utf-8"))
+
+ assert final_response == res_dict
+
+
+# Initializer tests
+class TestInitializer(unittest.TestCase):
+ def testImportError(self):
+ # Unloads any of the cloud profiler sub-modules
+ for mod in _find_child_modules(
+ "google.cloud.aiplatform.training_utils.cloud_profiler"
+ ):
+ del sys.modules[mod]
+
+ # Modules to be mocked out
+ for mock_module in [
+ "tensorflow",
+ "tensorboard_plugin_profile.profile_plugin",
+ "werkzeug",
+ ]:
+ with self.subTest():
+ with mock.patch.dict("sys.modules", {mock_module: None}):
+ with self.assertRaises(ImportError) as cm:
+ importlib.import_module(
+ "google.cloud.aiplatform.training_utils.cloud_profiler"
+ )
+ assert "Could not load the cloud profiler" in cm.exception.msg
+
+ def test_build_plugin_fail_initialize(self):
+ plugin = _create_mock_plugin()
+ plugin.can_initialize.return_value = False
+
+ assert not initializer._build_plugin(plugin)
+
+ def test_build_plugin_fail_setup_check(self):
+ plugin = _create_mock_plugin()
+ plugin.can_initialize.return_value = True
+ plugin.post_setup_check.return_value = False
+
+ assert not initializer._build_plugin(plugin)
+
+ def test_build_plugin_success(self):
+ plugin = _create_mock_plugin()
+ plugin.can_initialize.return_value = True
+ plugin.post_setup_check.return_value = True
+
+ initializer._build_plugin(plugin)
+
+ assert plugin.called
+
+ # Testing the initialize function
+ def test_initialize_bad_plugin(self):
+ with mock.patch.object(initializer, "_AVAILABLE_PLUGINS", {}):
+ self.assertRaises(ValueError, initializer.initialize, "bad_plugin")
+
+ def test_initialize_build_plugin_fail(self):
+ plugin = _create_mock_plugin()
+ with mock.patch.object(initializer, "_AVAILABLE_PLUGINS", {"test": plugin}):
+ with mock.patch.object(initializer, "_build_plugin") as build_mock:
+ with mock.patch.object(
+ initializer, "_run_app_thread"
+ ) as app_thread_mock:
+ build_mock.return_value = None
+ initializer.initialize("test")
+
+ assert not app_thread_mock.call_count
+
+ def test_initialize_no_http_handler(self):
+ plugin = _create_mock_plugin()
+ initializer.environment_variables.http_handler_port = None
+
+ with mock.patch.object(initializer, "_AVAILABLE_PLUGINS", {"test": plugin}):
+ with pytest.raises(initializer.MissingEnvironmentVariableException):
+ initializer.initialize("test")
+
+ def test_initialize_build_plugin_success(self):
+ plugin = _create_mock_plugin()
+ initializer.environment_variables.http_handler_port = "1234"
+
+ with mock.patch.object(initializer, "_AVAILABLE_PLUGINS", {"test": plugin}):
+ with mock.patch.object(initializer, "_run_app_thread") as app_thread_mock:
+ initializer.initialize("test")
+
+ assert app_thread_mock.call_count == 1
+
+ def test_run_app_thread(self):
+ with mock.patch.object(threading, "Thread") as mock_thread:
+ daemon_mock = mock.Mock()
+ mock_thread.return_value = daemon_mock
+
+ initializer._run_app_thread(None, 1234)
+
+ assert daemon_mock.start.call_count == 1
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_custom_job.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_custom_job.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bd1bca9e74930782d6be62d0631f228cf070a2d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_custom_job.py
@@ -0,0 +1,1669 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import logging
+
+import copy
+from importlib import reload
+from unittest import mock
+from unittest.mock import patch, mock_open
+
+from google.api_core import exceptions
+import constants as test_constants
+
+from google.rpc import status_pb2
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import jobs
+from google.cloud.aiplatform.compat.types import (
+ custom_job as gca_custom_job_compat,
+ tensorboard_run as gca_tensorboard_run,
+ io,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ job_state as gca_job_state_compat,
+ encryption_spec as gca_encryption_spec_compat,
+ execution as gca_execution,
+)
+from google.cloud.aiplatform.compat.services import job_service_client
+from google.cloud.aiplatform_v1 import (
+ MetadataServiceClient,
+ Context as GapicContext,
+)
+from google.cloud.aiplatform.metadata import constants
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_ID = "1028944691210842416"
+_TEST_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_DISPLAY_NAME
+
+_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT
+
+_TEST_CUSTOM_JOB_NAME = f"{_TEST_PARENT}/customJobs/{_TEST_ID}"
+_TEST_TENSORBOARD_ID = "987654321"
+_TEST_TENSORBOARD_NAME = f"{_TEST_PARENT}/tensorboards/{_TEST_TENSORBOARD_ID}"
+_TEST_ENABLE_WEB_ACCESS = test_constants.TrainingJobConstants._TEST_ENABLE_WEB_ACCESS
+_TEST_WEB_ACCESS_URIS = test_constants.TrainingJobConstants._TEST_WEB_ACCESS_URIS
+_TEST_TRAINING_CONTAINER_IMAGE = (
+ test_constants.TrainingJobConstants._TEST_TRAINING_CONTAINER_IMAGE
+)
+_TEST_PREBUILT_CONTAINER_IMAGE = "gcr.io/cloud-aiplatform/container:image"
+_TEST_SPOT_STRATEGY = test_constants.TrainingJobConstants._TEST_SPOT_STRATEGY
+
+_TEST_RUN_ARGS = test_constants.TrainingJobConstants._TEST_RUN_ARGS
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_EXPERIMENT_RUN = "test-experiment-run"
+_TEST_TIMEOUT_SECONDS = test_constants.TrainingJobConstants._TEST_TIMEOUT_SECONDS
+
+_TEST_WORKER_POOL_SPEC = test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC
+
+_TEST_WORKER_POOL_SPEC_WITH_EXPERIMENTS = [
+ {
+ "machine_spec": {
+ "machine_type": "n1-standard-4",
+ "accelerator_type": "NVIDIA_TESLA_K80",
+ "accelerator_count": 1,
+ },
+ "replica_count": 1,
+ "disk_spec": {"boot_disk_type": "pd-ssd", "boot_disk_size_gb": 100},
+ "container_spec": {
+ "image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": [],
+ "args": _TEST_RUN_ARGS,
+ },
+ }
+]
+
+_TEST_WORKER_POOL_SPEC_WITH_TPU_V5E = (
+ test_constants.TrainingJobConstants._TEST_TPU_V5E_WORKER_POOL_SPEC
+)
+_TEST_WORKER_POOL_SPEC_WITH_TPU_V3 = (
+ test_constants.TrainingJobConstants._TEST_TPU_V3_WORKER_POOL_SPEC
+)
+
+_TEST_PYTHON_PACKAGE_SPEC = gca_custom_job_compat.PythonPackageSpec(
+ executor_image_uri=_TEST_PREBUILT_CONTAINER_IMAGE,
+ package_uris=[test_constants.TrainingJobConstants._TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ python_module=test_constants.TrainingJobConstants._TEST_MODULE_NAME,
+)
+
+_TEST_CONTAINER_SPEC = gca_custom_job_compat.ContainerSpec(
+ image_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=[
+ "sh",
+ "-c",
+ "pip install --upgrade pip && "
+ + f"pip3 install -q --user {test_constants.TrainingJobConstants._TEST_OUTPUT_PYTHON_PACKAGE_PATH} && ".replace(
+ "gs://", "/gcs/"
+ )
+ + f"python3 -m {test_constants.TrainingJobConstants._TEST_MODULE_NAME}",
+ ],
+)
+
+_TEST_STAGING_BUCKET = test_constants.TrainingJobConstants._TEST_STAGING_BUCKET
+_TEST_BASE_OUTPUT_DIR = test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR
+
+# CMEK encryption
+_TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_1234"
+_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec_compat.EncryptionSpec(
+ kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+
+_TEST_SERVICE_ACCOUNT = test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT
+
+
+_TEST_NETWORK = test_constants.TrainingJobConstants._TEST_NETWORK
+
+_TEST_TIMEOUT = test_constants.TrainingJobConstants._TEST_TIMEOUT
+_TEST_RESTART_JOB_ON_WORKER_RESTART = (
+ test_constants.TrainingJobConstants._TEST_RESTART_JOB_ON_WORKER_RESTART
+)
+_TEST_DISABLE_RETRIES = test_constants.TrainingJobConstants._TEST_DISABLE_RETRIES
+_TEST_MAX_WAIT_DURATION = test_constants.TrainingJobConstants._TEST_MAX_WAIT_DURATION
+
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+
+_TEST_BASE_CUSTOM_JOB_PROTO = (
+ test_constants.TrainingJobConstants._TEST_BASE_CUSTOM_JOB_PROTO
+)
+
+_TEST_TPU_V5E_CUSTOM_JOB_PROTO = (
+ test_constants.TrainingJobConstants.create_tpu_job_proto(tpu_version="v5e")
+)
+
+_TEST_TPU_V3_CUSTOM_JOB_PROTO = (
+ test_constants.TrainingJobConstants.create_tpu_job_proto(tpu_version="v3")
+)
+
+# Experiment args
+_TEST_PARENT_METADATA = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+_TEST_CONTEXT_ID = "test-experiment"
+_TEST_CONTEXT_NAME = f"{_TEST_PARENT_METADATA}/contexts/{_TEST_CONTEXT_ID}"
+_TEST_EXPERIMENT_DESCRIPTION = "test-experiment-description"
+_TEST_RUN = "run-1"
+_TEST_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_RUN}"
+_TEST_EXPERIMENT_CONTEXT_NAME = f"{_TEST_PARENT_METADATA}/contexts/{_TEST_EXPERIMENT}"
+_TEST_EXPERIMENT_RUN_CONTEXT_NAME = (
+ f"{_TEST_PARENT_METADATA}/contexts/{_TEST_EXECUTION_ID}"
+)
+_TEST_TENSORBOARD_RUN_NAME = f"{_TEST_PARENT}/tensorboards/{_TEST_TENSORBOARD_ID}/experiments/{_TEST_ID}/runs/{_TEST_RUN}"
+_TEST_TENSORBOARD_RUN_CONTEXT_NAME = f"{_TEST_ID}-{_TEST_RUN}"
+
+_EXPERIMENT_MOCK = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ description=_TEST_EXPERIMENT_DESCRIPTION,
+ schema_title=constants.SYSTEM_EXPERIMENT,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata={**constants.EXPERIMENT_METADATA},
+)
+
+
+_EXPERIMENT_RUN_MOCK = GapicContext(
+ name=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_EXPERIMENT_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT_RUN],
+ metadata={
+ constants._PARAM_KEY: {},
+ constants._METRIC_KEY: {},
+ constants._STATE_KEY: gca_execution.Execution.State.RUNNING.name,
+ },
+)
+
+
+def _get_custom_job_proto(state=None, name=None, error=None):
+ custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO)
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+ custom_job_proto.error = error
+ return custom_job_proto
+
+
+def _get_custom_job_proto_with_experiments(state=None, name=None, error=None):
+ custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO)
+ custom_job_proto.job_spec.worker_pool_specs = (
+ _TEST_WORKER_POOL_SPEC_WITH_EXPERIMENTS
+ )
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+ custom_job_proto.error = error
+ custom_job_proto.job_spec.experiment = _TEST_EXPERIMENT_CONTEXT_NAME
+ custom_job_proto.job_spec.experiment_run = _TEST_EXPERIMENT_RUN_CONTEXT_NAME
+ return custom_job_proto
+
+
+def _get_custom_job_proto_with_tensorboard(state=None, name=None, error=None):
+ custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO)
+ custom_job_proto.job_spec.worker_pool_specs = _TEST_WORKER_POOL_SPEC
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+ custom_job_proto.error = error
+ custom_job_proto.job_spec.tensorboard = _TEST_TENSORBOARD_NAME
+ return custom_job_proto
+
+
+def _get_custom_job_proto_with_enable_web_access(state=None, name=None, error=None):
+ custom_job_proto = _get_custom_job_proto(state=state, name=name, error=error)
+ custom_job_proto.job_spec.enable_web_access = _TEST_ENABLE_WEB_ACCESS
+ if state == gca_job_state_compat.JobState.JOB_STATE_RUNNING:
+ custom_job_proto.web_access_uris = _TEST_WEB_ACCESS_URIS
+ return custom_job_proto
+
+
+def _get_custom_tpu_job_proto(state=None, name=None, error=None, tpu_version=None):
+ custom_job_proto = (
+ copy.deepcopy(_TEST_TPU_V5E_CUSTOM_JOB_PROTO)
+ if tpu_version == "v5e"
+ else copy.deepcopy(_TEST_TPU_V3_CUSTOM_JOB_PROTO)
+ )
+
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+ custom_job_proto.error = error
+ return custom_job_proto
+
+
+def _get_custom_job_proto_with_spot_strategy(state=None, name=None, error=None):
+ custom_job_proto = _get_custom_job_proto(state=state, name=name, error=error)
+ custom_job_proto.job_spec.scheduling.strategy = _TEST_SPOT_STRATEGY
+ return custom_job_proto
+
+
+@pytest.fixture
+def mock_builtin_open():
+ with patch("builtins.open", mock_open(read_data="data")) as mock_file:
+ yield mock_file
+
+
+@pytest.fixture
+def get_custom_job_mock():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def get_custom_job_with_experiments_mock():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_experiments(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def get_custom_job_with_tensorboard_mock():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_tensorboard(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def get_custom_tpu_v5e_job_mock():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_tpu_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ tpu_version="v5e",
+ ),
+ _get_custom_tpu_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ tpu_version="v5e",
+ ),
+ _get_custom_tpu_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ tpu_version="v5e",
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def get_custom_tpu_v3_job_mock():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_tpu_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ tpu_version="v3",
+ ),
+ _get_custom_tpu_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ tpu_version="v3",
+ ),
+ _get_custom_tpu_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ tpu_version="v3",
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def get_custom_job_mock_with_fail():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_FAILED,
+ error=status_pb2.Status(message="Test Error"),
+ ),
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_FAILED,
+ error=status_pb2.Status(message="Test Error"),
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def get_custom_job_mock_with_enable_web_access():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def get_custom_job_mock_with_enable_web_access_succeeded():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.return_value = _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ )
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def get_custom_job_mock_with_spot_strategy():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto_with_spot_strategy(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_custom_job_proto_with_spot_strategy(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_spot_strategy(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def create_custom_job_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_custom_job"
+ ) as create_custom_job_mock:
+ create_custom_job_mock.return_value = _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ )
+ yield create_custom_job_mock
+
+
+@pytest.fixture
+def create_custom_job_mock_with_enable_web_access():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_custom_job"
+ ) as create_custom_job_mock:
+ create_custom_job_mock.return_value = (
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ )
+ )
+ yield create_custom_job_mock
+
+
+@pytest.fixture
+def create_custom_job_mock_with_tensorboard():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_custom_job"
+ ) as create_custom_job_mock:
+ custom_job_proto = _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ )
+ custom_job_proto.job_spec.tensorboard = _TEST_TENSORBOARD_NAME
+ create_custom_job_mock.return_value = custom_job_proto
+ yield create_custom_job_mock
+
+
+@pytest.fixture
+def create_custom_job_mock_fail():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_custom_job"
+ ) as create_custom_job_mock:
+ create_custom_job_mock.side_effect = RuntimeError("Mock fail")
+ yield create_custom_job_mock
+
+
+@pytest.fixture
+def create_custom_job_mock_with_spot_strategy():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_custom_job"
+ ) as create_custom_job_mock:
+ create_custom_job_mock.return_value = _get_custom_job_proto_with_spot_strategy(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ )
+ yield create_custom_job_mock
+
+
+_EXPERIMENT_MOCK = copy.deepcopy(_EXPERIMENT_MOCK)
+_EXPERIMENT_MOCK.metadata[
+ constants._BACKING_TENSORBOARD_RESOURCE_KEY
+] = _TEST_TENSORBOARD_NAME
+
+_EXPERIMENT_RUN_MOCK = copy.deepcopy(_EXPERIMENT_RUN_MOCK)
+
+
+@pytest.fixture
+def get_experiment_run_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ _EXPERIMENT_RUN_MOCK,
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_run_run_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ _EXPERIMENT_RUN_MOCK,
+ _EXPERIMENT_RUN_MOCK,
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_run_not_found_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ _EXPERIMENT_RUN_MOCK,
+ _EXPERIMENT_MOCK,
+ exceptions.NotFound(""),
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def update_context_mock():
+ with patch.object(MetadataServiceClient, "update_context") as update_context_mock:
+ update_context_mock.return_value = _EXPERIMENT_RUN_MOCK
+ yield update_context_mock
+
+
+@pytest.fixture
+def get_tensorboard_run_artifact_not_found_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.side_effect = exceptions.NotFound("")
+ yield get_artifact_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestCustomJob:
+ def setup_method(self):
+ reload(aiplatform.initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sync):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ network=_TEST_NETWORK,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_CUSTOM_JOB_NAME
+
+ job.wait()
+
+ expected_custom_job = _get_custom_job_proto()
+
+ create_custom_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ custom_job=expected_custom_job,
+ timeout=None,
+ )
+
+ assert job.job_spec == expected_custom_job.job_spec
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ )
+ assert job.network == _TEST_NETWORK
+
+ def test_submit_custom_job(self, create_custom_job_mock, get_custom_job_mock):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ job.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_CUSTOM_JOB_NAME
+
+ job.wait()
+
+ expected_custom_job = _get_custom_job_proto()
+
+ create_custom_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ custom_job=expected_custom_job,
+ timeout=None,
+ )
+
+ assert job.job_spec == expected_custom_job.job_spec
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_PENDING
+ )
+ assert job.network == _TEST_NETWORK
+
+ @pytest.mark.usefixtures(
+ "get_experiment_run_mock", "get_tensorboard_run_artifact_not_found_mock"
+ )
+ def test_submit_custom_job_with_experiments(
+ self, create_custom_job_mock, get_custom_job_mock, update_context_mock
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ job.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ create_request_timeout=None,
+ experiment=_TEST_EXPERIMENT,
+ experiment_run=_TEST_RUN,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_CUSTOM_JOB_NAME
+
+ job.wait()
+
+ expected_custom_job = _get_custom_job_proto_with_experiments()
+
+ create_custom_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ custom_job=expected_custom_job,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ def test_create_custom_job_with_timeout(
+ self, create_custom_job_mock, get_custom_job_mock, sync
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=180.0,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_CUSTOM_JOB_NAME
+
+ job.wait()
+
+ expected_custom_job = _get_custom_job_proto()
+
+ create_custom_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ custom_job=expected_custom_job,
+ timeout=180.0,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_custom_job_with_timeout_not_explicitly_set(
+ self, create_custom_job_mock, get_custom_job_mock, sync
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait_for_resource_creation()
+
+ assert job.resource_name == _TEST_CUSTOM_JOB_NAME
+
+ job.wait()
+
+ expected_custom_job = _get_custom_job_proto()
+
+ create_custom_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ custom_job=expected_custom_job,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "create_custom_job_mock",
+ "get_custom_job_with_experiments_mock",
+ "get_experiment_run_not_found_mock",
+ "get_tensorboard_run_artifact_not_found_mock",
+ )
+ def test_run_custom_job_with_experiment_run_warning(self, caplog):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ create_request_timeout=None,
+ experiment=_TEST_EXPERIMENT,
+ experiment_run=_TEST_RUN,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ )
+
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert (
+ # f"Failed to end experiment run {_TEST_EXPERIMENT_RUN_CONTEXT_NAME} due to:"
+ # in caplog.text
+ # )
+
+ @pytest.mark.usefixtures(
+ "get_experiment_run_not_found_mock",
+ "get_tensorboard_run_artifact_not_found_mock",
+ )
+ def test_run_custom_job_with_tensorboard_cannot_list_experiment_runs(
+ self,
+ create_custom_job_mock_with_tensorboard,
+ get_custom_job_with_tensorboard_mock,
+ caplog,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ tensorboard=_TEST_TENSORBOARD_NAME,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert "Failed to list experiment runs for tensorboard" in caplog.text
+
+ @pytest.mark.usefixtures(
+ "get_experiment_run_not_found_mock",
+ "get_tensorboard_run_artifact_not_found_mock",
+ )
+ def test_run_custom_job_with_tensorboard_cannot_end_experiment_run(
+ self,
+ create_custom_job_mock_with_tensorboard,
+ get_custom_job_with_tensorboard_mock,
+ caplog,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ with mock.patch.object(
+ aiplatform.TensorboardRun, "list"
+ ) as list_tensorboard_runs_mock:
+ tb_run = gca_tensorboard_run.TensorboardRun(
+ name=_TEST_TENSORBOARD_RUN_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ list_tensorboard_runs_mock.return_value = [tb_run]
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ tensorboard=_TEST_TENSORBOARD_NAME,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert (
+ # f"Failed to end experiment run {_TEST_TENSORBOARD_RUN_CONTEXT_NAME} due to:"
+ # in caplog.text
+ # )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_custom_job_with_fail_raises(
+ self, create_custom_job_mock, get_custom_job_mock_with_fail, sync
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.wait_for_resource_creation()
+ assert e.match(r"CustomJob resource is not scheduled to be created.")
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+
+ # shouldn't fail
+ job.wait_for_resource_creation()
+ assert job.resource_name == _TEST_CUSTOM_JOB_NAME
+
+ expected_custom_job = _get_custom_job_proto()
+
+ create_custom_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ custom_job=expected_custom_job,
+ timeout=None,
+ )
+
+ assert job.job_spec == expected_custom_job.job_spec
+ assert job.state == gca_job_state_compat.JobState.JOB_STATE_FAILED
+
+ @pytest.mark.usefixtures("create_custom_job_mock_fail")
+ def test_run_custom_job_with_fail_at_creation(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=False,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.wait_for_resource_creation()
+ assert e.match("Mock fail")
+
+ with pytest.raises(RuntimeError) as e:
+ job.resource_name
+ assert e.match(
+ "CustomJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.network
+ assert e.match(
+ "CustomJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ def test_custom_job_get_state_raises_without_run(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ )
+
+ with pytest.raises(RuntimeError):
+ print(job.state)
+
+ def test_no_staging_bucket_raises(self):
+
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(RuntimeError):
+ job = aiplatform.CustomJob( # noqa: F841
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ )
+
+ def test_get_custom_job(self, get_custom_job_mock):
+
+ job = aiplatform.CustomJob.get(_TEST_CUSTOM_JOB_NAME)
+
+ get_custom_job_mock.assert_called_once_with(
+ name=_TEST_CUSTOM_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_PENDING
+ )
+ assert job.job_spec == _TEST_BASE_CUSTOM_JOB_PROTO.job_spec
+
+ @pytest.mark.usefixtures("mock_python_package_to_gcs")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_from_local_script_prebuilt_container(
+ self, get_custom_job_mock, create_custom_job_mock, sync
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ # configuration on this is tested in test_training_jobs.py
+ job = aiplatform.CustomJob.from_local_script(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_PREBUILT_CONTAINER_IMAGE,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ assert (
+ job.job_spec.worker_pool_specs[0].python_package_spec
+ == _TEST_PYTHON_PACKAGE_SPEC
+ )
+
+ job.run(sync=sync)
+
+ job.wait()
+
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.usefixtures("mock_python_package_to_gcs")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_from_local_script_custom_container(
+ self, get_custom_job_mock, create_custom_job_mock, sync
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ # configuration on this is tested in test_training_jobs.py
+ job = aiplatform.CustomJob.from_local_script(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ assert job.job_spec.worker_pool_specs[0].container_spec == _TEST_CONTAINER_SPEC
+
+ job.run(sync=sync)
+
+ job.wait()
+
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.usefixtures("mock_python_package_to_gcs")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_from_local_script_raises_with_no_staging_bucket(
+ self, get_custom_job_mock, create_custom_job_mock, sync
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ with pytest.raises(RuntimeError):
+ job = aiplatform.CustomJob.from_local_script( # noqa: F841
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_builtin_open",
+ "mock_python_package_to_gcs",
+ "get_experiment_run_run_mock",
+ "get_tensorboard_run_artifact_not_found_mock",
+ "update_context_mock",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ def test_create_from_local_script_prebuilt_container_with_all_args(
+ self, get_custom_job_mock, create_custom_job_mock, sync
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob.from_local_script(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_PREBUILT_CONTAINER_IMAGE,
+ args=_TEST_RUN_ARGS,
+ requirements=test_constants.TrainingJobConstants._TEST_REQUIREMENTS,
+ environment_variables=test_constants.TrainingJobConstants._TEST_ENVIRONMENT_VARIABLES,
+ replica_count=test_constants.TrainingJobConstants._TEST_REPLICA_COUNT,
+ machine_type=test_constants.TrainingJobConstants._TEST_MACHINE_TYPE,
+ accelerator_type=test_constants.TrainingJobConstants._TEST_ACCELERATOR_TYPE,
+ accelerator_count=test_constants.TrainingJobConstants._TEST_ACCELERATOR_COUNT,
+ boot_disk_type=test_constants.TrainingJobConstants._TEST_BOOT_DISK_TYPE,
+ boot_disk_size_gb=test_constants.TrainingJobConstants._TEST_BOOT_DISK_SIZE_GB,
+ reduction_server_replica_count=test_constants.TrainingJobConstants._TEST_REDUCTION_SERVER_REPLICA_COUNT,
+ reduction_server_machine_type=test_constants.TrainingJobConstants._TEST_REDUCTION_SERVER_MACHINE_TYPE,
+ reduction_server_container_uri=test_constants.TrainingJobConstants._TEST_REDUCTION_SERVER_CONTAINER_URI,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ enable_autolog=True,
+ )
+
+ expected_python_package_spec = _TEST_PYTHON_PACKAGE_SPEC
+ expected_python_package_spec.args = _TEST_RUN_ARGS
+ expected_python_package_spec.env = [
+ {"name": key, "value": value}
+ for key, value in test_constants.TrainingJobConstants._TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ assert (
+ job.job_spec.worker_pool_specs[0].python_package_spec
+ == expected_python_package_spec
+ )
+ assert job._enable_autolog is True
+
+ job.run(
+ experiment=_TEST_EXPERIMENT, experiment_run=_TEST_EXPERIMENT_RUN, sync=sync
+ )
+
+ job.wait()
+
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_builtin_open",
+ "mock_python_package_to_gcs",
+ "get_experiment_run_run_mock",
+ "get_tensorboard_run_artifact_not_found_mock",
+ "update_context_mock",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ def test_create_from_local_script_custom_container_with_all_args(
+ self, get_custom_job_mock, create_custom_job_mock, sync
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob.from_local_script(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ args=_TEST_RUN_ARGS,
+ requirements=test_constants.TrainingJobConstants._TEST_REQUIREMENTS,
+ environment_variables=test_constants.TrainingJobConstants._TEST_ENVIRONMENT_VARIABLES,
+ replica_count=test_constants.TrainingJobConstants._TEST_REPLICA_COUNT,
+ machine_type=test_constants.TrainingJobConstants._TEST_MACHINE_TYPE,
+ accelerator_type=test_constants.TrainingJobConstants._TEST_ACCELERATOR_TYPE,
+ accelerator_count=test_constants.TrainingJobConstants._TEST_ACCELERATOR_COUNT,
+ boot_disk_type=test_constants.TrainingJobConstants._TEST_BOOT_DISK_TYPE,
+ boot_disk_size_gb=test_constants.TrainingJobConstants._TEST_BOOT_DISK_SIZE_GB,
+ reduction_server_replica_count=test_constants.TrainingJobConstants._TEST_REDUCTION_SERVER_REPLICA_COUNT,
+ reduction_server_machine_type=test_constants.TrainingJobConstants._TEST_REDUCTION_SERVER_MACHINE_TYPE,
+ reduction_server_container_uri=test_constants.TrainingJobConstants._TEST_REDUCTION_SERVER_CONTAINER_URI,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ enable_autolog=True,
+ )
+
+ expected_container_spec = copy.deepcopy(_TEST_CONTAINER_SPEC)
+ expected_container_spec.command[-1] += " " + " ".join(_TEST_RUN_ARGS)
+ expected_container_spec.env = [
+ {"name": key, "value": value}
+ for key, value in test_constants.TrainingJobConstants._TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ assert (
+ job.job_spec.worker_pool_specs[0].container_spec == expected_container_spec
+ )
+ assert job._enable_autolog is True
+
+ job.run(
+ experiment=_TEST_EXPERIMENT, experiment_run=_TEST_EXPERIMENT_RUN, sync=sync
+ )
+
+ job.wait()
+
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.usefixtures("mock_builtin_open", "mock_python_package_to_gcs")
+ def test_create_from_local_script_enable_autolog_no_experiment_error(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob.from_local_script(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ enable_autolog=True,
+ )
+
+ with pytest.raises(ValueError):
+ job.run()
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ def test_create_custom_job_with_enable_web_access(
+ self,
+ create_custom_job_mock_with_enable_web_access,
+ get_custom_job_mock_with_enable_web_access,
+ sync,
+ caplog,
+ ):
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ enable_web_access=_TEST_ENABLE_WEB_ACCESS,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait_for_resource_creation()
+
+ job.wait()
+
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert "workerpool0-0" in caplog.text
+
+ assert job.resource_name == _TEST_CUSTOM_JOB_NAME
+
+ expected_custom_job = _get_custom_job_proto_with_enable_web_access()
+
+ create_custom_job_mock_with_enable_web_access.assert_called_once_with(
+ parent=_TEST_PARENT,
+ custom_job=expected_custom_job,
+ timeout=None,
+ )
+
+ assert job.job_spec == expected_custom_job.job_spec
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ )
+ caplog.clear()
+
+ def test_get_web_access_uris(self, get_custom_job_mock_with_enable_web_access):
+ job = aiplatform.CustomJob.get(_TEST_CUSTOM_JOB_NAME)
+ while True:
+ if job.web_access_uris:
+ assert job.web_access_uris == _TEST_WEB_ACCESS_URIS
+ break
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ def test_log_access_web_uris_after_get(
+ self, get_custom_job_mock_with_enable_web_access
+ ):
+ job = aiplatform.CustomJob.get(_TEST_CUSTOM_JOB_NAME)
+ job._block_until_complete()
+ assert job._logged_web_access_uris == set(_TEST_WEB_ACCESS_URIS.values())
+
+ def test_get_web_access_uris_job_succeeded(
+ self, get_custom_job_mock_with_enable_web_access_succeeded
+ ):
+ job = aiplatform.CustomJob.get(_TEST_CUSTOM_JOB_NAME)
+ assert not job.web_access_uris
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_custom_job_with_tensorboard(
+ self, create_custom_job_mock_with_tensorboard, get_custom_job_mock, sync
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ tensorboard=_TEST_TENSORBOARD_NAME,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+
+ expected_custom_job = _get_custom_job_proto()
+ expected_custom_job.job_spec.tensorboard = _TEST_TENSORBOARD_NAME
+
+ create_custom_job_mock_with_tensorboard.assert_called_once_with(
+ parent=_TEST_PARENT,
+ custom_job=expected_custom_job,
+ timeout=None,
+ )
+
+ expected_custom_job = _get_custom_job_proto()
+
+ assert job.job_spec == expected_custom_job.job_spec
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ )
+
+ def test_create_custom_job_without_base_output_dir(
+ self,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ )
+
+ assert job.job_spec.base_output_directory.output_uri_prefix.startswith(
+ f"{_TEST_STAGING_BUCKET}/aiplatform-custom-job"
+ )
+
+ @pytest.mark.usefixtures("get_custom_job_mock", "create_custom_job_mock")
+ def test_check_custom_job_availability(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = aiplatform.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ worker_pool_specs=_TEST_WORKER_POOL_SPEC,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ labels=_TEST_LABELS,
+ )
+
+ assert not job._resource_is_available
+ assert job.__repr__().startswith(
+ "", "tmp.log"]
+ serving_container_environment_variables = {"custom_key": "custom_value"}
+ serving_container_ports = [5555]
+ credential_path = tmp_path / "key.json"
+ credential_path.write_text("")
+ host_port = 6666
+ environment = {k: v for k, v in serving_container_environment_variables.items()}
+ environment[prediction.AIP_HTTP_PORT] = serving_container_ports[0]
+ environment[prediction.AIP_HEALTH_ROUTE] = serving_container_health_route
+ environment[prediction.AIP_PREDICT_ROUTE] = serving_container_predict_route
+ environment[prediction.AIP_STORAGE_URI] = artifact_uri
+ environment[
+ run._ADC_ENVIRONMENT_VARIABLE
+ ] = run._DEFAULT_CONTAINER_CRED_KEY_PATH
+ volumes = [f"{credential_path}:{run._DEFAULT_CONTAINER_CRED_KEY_PATH}"]
+
+ run.run_prediction_container(
+ self.IMAGE_URI,
+ artifact_uri=artifact_uri,
+ serving_container_predict_route=serving_container_predict_route,
+ serving_container_health_route=serving_container_health_route,
+ serving_container_command=serving_container_command,
+ serving_container_args=serving_container_args,
+ serving_container_environment_variables=serving_container_environment_variables,
+ serving_container_ports=serving_container_ports,
+ credential_path=credential_path,
+ host_port=host_port,
+ )
+
+ docker_client_mock.containers.run.assert_called_once_with(
+ self.IMAGE_URI,
+ command=serving_container_args,
+ entrypoint=serving_container_command,
+ ports={serving_container_ports[0]: host_port},
+ environment=environment,
+ volumes=volumes,
+ device_requests=None,
+ detach=True,
+ )
+
+ @mock.patch.dict(os.environ, {}, clear=True)
+ def test_run_prediction_container_with_envs_replaced_by_envs(
+ self, tmp_path, docker_client_mock
+ ):
+ serving_container_environment_variables = {
+ "VAR_1": "foo",
+ "VAR_2": "$(VAR_3) bar",
+ "VAR_3": "$(VAR_1) bar",
+ "VAR_4": "$$(VAR_1)",
+ }
+ environment = {k: v for k, v in serving_container_environment_variables.items()}
+ environment[prediction.AIP_HTTP_PORT] = prediction.DEFAULT_AIP_HTTP_PORT
+ environment[prediction.AIP_HEALTH_ROUTE] = None
+ environment[prediction.AIP_PREDICT_ROUTE] = None
+ environment[prediction.AIP_STORAGE_URI] = ""
+ # Envs referencing earlier entries will be changed. Those envs referencing later
+ # entries won't be changed.
+ environment["VAR_3"] = "foo bar"
+ # Double $$ will be replaced with a single $.
+ environment["VAR_4"] = "$(VAR_1)"
+
+ run.run_prediction_container(
+ self.IMAGE_URI,
+ serving_container_environment_variables=serving_container_environment_variables,
+ )
+
+ docker_client_mock.containers.run.assert_called_once_with(
+ self.IMAGE_URI,
+ command=None,
+ entrypoint=None,
+ ports={prediction.DEFAULT_AIP_HTTP_PORT: None},
+ environment=environment,
+ volumes=[],
+ device_requests=None,
+ detach=True,
+ )
+
+ @mock.patch.dict(os.environ, {}, clear=True)
+ def test_run_prediction_container_with_command_replaced_by_envs(
+ self, tmp_path, docker_client_mock
+ ):
+ serving_container_command = ["$(VAR_1)", "$$(VAR_1)", "$(VAR_2)"]
+ serving_container_environment_variables = {
+ "VAR_1": "foo",
+ }
+ environment = {k: v for k, v in serving_container_environment_variables.items()}
+ environment[prediction.AIP_HTTP_PORT] = prediction.DEFAULT_AIP_HTTP_PORT
+ environment[prediction.AIP_HEALTH_ROUTE] = None
+ environment[prediction.AIP_PREDICT_ROUTE] = None
+ environment[prediction.AIP_STORAGE_URI] = ""
+ # Command references existing environment variables.
+ expected_entrypoint = ["foo", "$(VAR_1)", "$(VAR_2)"]
+
+ run.run_prediction_container(
+ self.IMAGE_URI,
+ serving_container_command=serving_container_command,
+ serving_container_environment_variables=serving_container_environment_variables,
+ )
+
+ docker_client_mock.containers.run.assert_called_once_with(
+ self.IMAGE_URI,
+ command=None,
+ entrypoint=expected_entrypoint,
+ ports={prediction.DEFAULT_AIP_HTTP_PORT: None},
+ environment=environment,
+ volumes=[],
+ device_requests=None,
+ detach=True,
+ )
+
+ @mock.patch.dict(os.environ, {}, clear=True)
+ def test_run_prediction_container_with_args_replaced_by_envs(
+ self, tmp_path, docker_client_mock
+ ):
+ serving_container_args = ["$(VAR_1)", "$$(VAR_1)", "$(VAR_2)"]
+ serving_container_environment_variables = {
+ "VAR_1": "foo",
+ }
+ environment = {k: v for k, v in serving_container_environment_variables.items()}
+ environment[prediction.AIP_HTTP_PORT] = prediction.DEFAULT_AIP_HTTP_PORT
+ environment[prediction.AIP_HEALTH_ROUTE] = None
+ environment[prediction.AIP_PREDICT_ROUTE] = None
+ environment[prediction.AIP_STORAGE_URI] = ""
+ # Args references existing environment variables.
+ expected_command = ["foo", "$(VAR_1)", "$(VAR_2)"]
+
+ run.run_prediction_container(
+ self.IMAGE_URI,
+ serving_container_args=serving_container_args,
+ serving_container_environment_variables=serving_container_environment_variables,
+ )
+
+ docker_client_mock.containers.run.assert_called_once_with(
+ self.IMAGE_URI,
+ command=expected_command,
+ entrypoint=None,
+ ports={prediction.DEFAULT_AIP_HTTP_PORT: None},
+ environment=environment,
+ volumes=[],
+ device_requests=None,
+ detach=True,
+ )
+
+ def test_run_prediction_container_credential_from_adc(
+ self, tmp_path, docker_client_mock
+ ):
+ credential_path = tmp_path / "key.json"
+ credential_path.write_text("")
+ volumes = [f"{credential_path}:{run._DEFAULT_CONTAINER_CRED_KEY_PATH}"]
+
+ with mock.patch.dict(
+ os.environ, {run._ADC_ENVIRONMENT_VARIABLE: credential_path.as_posix()}
+ ):
+ run.run_prediction_container(self.IMAGE_URI)
+
+ docker_client_mock.containers.run.assert_called_once_with(
+ self.IMAGE_URI,
+ command=None,
+ entrypoint=None,
+ ports={prediction.DEFAULT_AIP_HTTP_PORT: None},
+ environment={
+ prediction.AIP_HTTP_PORT: prediction.DEFAULT_AIP_HTTP_PORT,
+ prediction.AIP_HEALTH_ROUTE: None,
+ prediction.AIP_PREDICT_ROUTE: None,
+ prediction.AIP_STORAGE_URI: "",
+ run._ADC_ENVIRONMENT_VARIABLE: run._DEFAULT_CONTAINER_CRED_KEY_PATH,
+ },
+ volumes=volumes,
+ device_requests=None,
+ detach=True,
+ )
+
+ @mock.patch.dict(os.environ, {}, clear=True)
+ def test_run_prediction_container_gpu_count(self, docker_client_mock):
+ gpu_count = 1
+ gpu_capabilities = [["gpu"]]
+
+ run.run_prediction_container(
+ self.IMAGE_URI,
+ gpu_count=gpu_count,
+ gpu_capabilities=gpu_capabilities,
+ )
+
+ docker_client_mock.containers.run.assert_called_once_with(
+ self.IMAGE_URI,
+ command=None,
+ entrypoint=None,
+ ports={prediction.DEFAULT_AIP_HTTP_PORT: None},
+ environment={
+ prediction.AIP_HTTP_PORT: prediction.DEFAULT_AIP_HTTP_PORT,
+ prediction.AIP_HEALTH_ROUTE: None,
+ prediction.AIP_PREDICT_ROUTE: None,
+ prediction.AIP_STORAGE_URI: "",
+ },
+ volumes=[],
+ device_requests=[
+ docker.types.DeviceRequest(
+ count=gpu_count, capabilities=gpu_capabilities
+ )
+ ],
+ detach=True,
+ )
+
+ @mock.patch.dict(os.environ, {}, clear=True)
+ def test_run_prediction_container_gpu_device_ids(self, docker_client_mock):
+ gpu_device_ids = ["1"]
+ gpu_capabilities = [["gpu"]]
+
+ run.run_prediction_container(
+ self.IMAGE_URI,
+ gpu_device_ids=gpu_device_ids,
+ gpu_capabilities=gpu_capabilities,
+ )
+
+ docker_client_mock.containers.run.assert_called_once_with(
+ self.IMAGE_URI,
+ command=None,
+ entrypoint=None,
+ ports={prediction.DEFAULT_AIP_HTTP_PORT: None},
+ environment={
+ prediction.AIP_HTTP_PORT: prediction.DEFAULT_AIP_HTTP_PORT,
+ prediction.AIP_HEALTH_ROUTE: None,
+ prediction.AIP_PREDICT_ROUTE: None,
+ prediction.AIP_STORAGE_URI: "",
+ },
+ volumes=[],
+ device_requests=[
+ docker.types.DeviceRequest(
+ device_ids=gpu_device_ids, capabilities=gpu_capabilities
+ )
+ ],
+ detach=True,
+ )
+
+ def test_run_prediction_container_artifact_uri_is_local_path_default_workdir(
+ self, tmp_path, docker_client_mock
+ ):
+ artifact_uri = tmp_path / "models"
+ artifact_uri.mkdir()
+ fake_model_artifact = artifact_uri / "model.pb"
+ fake_model_artifact.write_text("")
+ environment = {}
+ environment[prediction.AIP_HTTP_PORT] = prediction.DEFAULT_AIP_HTTP_PORT
+ environment[prediction.AIP_HEALTH_ROUTE] = None
+ environment[prediction.AIP_PREDICT_ROUTE] = None
+ environment[prediction.AIP_STORAGE_URI] = utils.DEFAULT_MOUNTED_MODEL_DIRECTORY
+ environment[
+ run._ADC_ENVIRONMENT_VARIABLE
+ ] = run._DEFAULT_CONTAINER_CRED_KEY_PATH
+ credential_path = tmp_path / "key.json"
+ credential_path.write_text("")
+ volumes = [
+ f"{fake_model_artifact.as_posix()}:{utils.DEFAULT_MOUNTED_MODEL_DIRECTORY + '/model.pb'}",
+ f"{credential_path}:{run._DEFAULT_CONTAINER_CRED_KEY_PATH}",
+ ]
+
+ with mock.patch.dict(
+ os.environ, {run._ADC_ENVIRONMENT_VARIABLE: credential_path.as_posix()}
+ ):
+ run.run_prediction_container(
+ self.IMAGE_URI,
+ artifact_uri=artifact_uri.as_posix(),
+ )
+
+ docker_client_mock.containers.run.assert_called_once_with(
+ self.IMAGE_URI,
+ command=None,
+ entrypoint=None,
+ ports={prediction.DEFAULT_AIP_HTTP_PORT: None},
+ environment=environment,
+ volumes=volumes,
+ device_requests=None,
+ detach=True,
+ )
+
+ def test_run_prediction_container_artifact_uri_is_local_path_but_not_exists(
+ self, tmp_path, docker_client_mock
+ ):
+ artifact_uri = tmp_path / "models"
+ environment = {}
+ environment[prediction.AIP_HTTP_PORT] = prediction.DEFAULT_AIP_HTTP_PORT
+ environment[prediction.AIP_HEALTH_ROUTE] = None
+ environment[prediction.AIP_PREDICT_ROUTE] = None
+ environment[prediction.AIP_STORAGE_URI] = utils.DEFAULT_WORKDIR
+ environment[
+ run._ADC_ENVIRONMENT_VARIABLE
+ ] = run._DEFAULT_CONTAINER_CRED_KEY_PATH
+ credential_path = tmp_path / "key.json"
+ credential_path.write_text("")
+ expected_message = (
+ "artifact_uri should be specified as either a GCS uri which starts with "
+ f"`{prediction_utils.GCS_URI_PREFIX}` or a path to a local directory. "
+ f'However, "{artifact_uri}" does not exist.'
+ )
+
+ with pytest.raises(ValueError) as exception:
+ with mock.patch.dict(
+ os.environ, {run._ADC_ENVIRONMENT_VARIABLE: credential_path.as_posix()}
+ ):
+ run.run_prediction_container(
+ self.IMAGE_URI,
+ artifact_uri=artifact_uri.as_posix(),
+ )
+
+ assert str(exception.value) == expected_message
+
+ def test_run_prediction_container_credential_path_not_exists(
+ self, docker_client_mock
+ ):
+ credential_path = "key.json"
+ expected_message = f'credential_path does not exist: "{credential_path}".'
+
+ with pytest.raises(ValueError) as exception:
+ run.run_prediction_container(
+ self.IMAGE_URI, credential_path=credential_path
+ )
+
+ assert str(exception.value) == expected_message
+
+ @mock.patch.dict(os.environ, {run._ADC_ENVIRONMENT_VARIABLE: "key.json"})
+ def test_run_prediction_container_adc_value_not_exists(self, docker_client_mock):
+ expected_message = (
+ f"The file from the environment variable {run._ADC_ENVIRONMENT_VARIABLE} does "
+ f'not exist: "key.json".'
+ )
+
+ with pytest.raises(ValueError) as exception:
+ run.run_prediction_container(self.IMAGE_URI)
+
+ assert str(exception.value) == expected_message
+
+ def test_print_container_logs(self, docker_container_mock):
+ with mock.patch(
+ "google.cloud.aiplatform.docker_utils.run._logger"
+ ) as logger_mock:
+ logs_len = run.print_container_logs(docker_container_mock)
+
+ assert logs_len == _TEST_CONTAINER_LOGS_LEN
+ assert docker_container_mock.logs.called
+ assert logger_mock.info.call_count == _TEST_CONTAINER_LOGS_LEN
+
+ def test_print_container_logs_with_start_index(self, docker_container_mock):
+ start_index = 1
+ with mock.patch(
+ "google.cloud.aiplatform.docker_utils.run._logger"
+ ) as logger_mock:
+ logs_len = run.print_container_logs(
+ docker_container_mock, start_index=start_index
+ )
+
+ assert logs_len == _TEST_CONTAINER_LOGS_LEN
+ assert docker_container_mock.logs.called
+ assert logger_mock.info.call_count == (_TEST_CONTAINER_LOGS_LEN - start_index)
+
+ def test_print_container_logs_with_message(self, docker_container_mock):
+ with mock.patch(
+ "google.cloud.aiplatform.docker_utils.run._logger"
+ ) as logger_mock:
+ logs_len = run.print_container_logs(
+ docker_container_mock, message="Test message:"
+ )
+
+ assert logs_len == _TEST_CONTAINER_LOGS_LEN
+ assert docker_container_mock.logs.called
+ assert logger_mock.info.call_count == _TEST_CONTAINER_LOGS_LEN + 1
+
+
+class TestBuild:
+ BASE_IMAGE = "python:3.7"
+ SOURCE_DIR = "src"
+ HOST_WORKDIR_BASENAME = "user_code"
+ HOST_WORKDIR = f"./{SOURCE_DIR}/{HOST_WORKDIR_BASENAME}"
+ HOME = utils.DEFAULT_HOME
+ WORKDIR = utils.DEFAULT_WORKDIR
+ SCRIPT = "./user_code/entrypoint.py"
+ SCRIPT_PACKAGE_PATH = "user_code/entrypoint.py"
+ PYTHON_MODULE = "custom.python.module"
+ PACKAGE = utils.Package(
+ script=SCRIPT, package_path=HOST_WORKDIR, python_module=None
+ )
+ PACKAGE_WITH_PYTHON_MODULE = utils.Package(
+ script=SCRIPT, package_path=HOST_WORKDIR, python_module=PYTHON_MODULE
+ )
+ PACKAGE_NO_SCRIPT_AND_MODULE = utils.Package(
+ script=None, package_path=HOST_WORKDIR, python_module=None
+ )
+ OUTPUT_IMAGE_NAME = "test_image:latest"
+ REQUIREMENTS_FILE = "requirements.txt"
+ EXTRA_PACKAGE = "custom_package.tar.gz"
+ SETUP_FILE = "setup.py"
+ PIP = "pip"
+ PYTHON = "python"
+
+ def test_make_dockerfile(self):
+ result = build.make_dockerfile(
+ self.BASE_IMAGE, self.PACKAGE, self.WORKDIR, self.HOME
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert f'ENTRYPOINT ["python", "{self.SCRIPT}"]' in result
+
+ def test_make_dockerfile_with_python_module(self):
+ result = build.make_dockerfile(
+ self.BASE_IMAGE, self.PACKAGE_WITH_PYTHON_MODULE, self.WORKDIR, self.HOME
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert f'ENTRYPOINT ["python", "-m", "{self.PYTHON_MODULE}"]' in result
+
+ def test_make_dockerfile_no_script_and_module(self):
+ result = build.make_dockerfile(
+ self.BASE_IMAGE, self.PACKAGE_NO_SCRIPT_AND_MODULE, self.WORKDIR, self.HOME
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert "ENTRYPOINT" not in result
+
+ def test_make_dockerfile_with_requirements_path(self):
+ requirements_path = "./requirements.txt"
+
+ result = build.make_dockerfile(
+ self.BASE_IMAGE,
+ self.PACKAGE,
+ self.WORKDIR,
+ self.HOME,
+ requirements_path=requirements_path,
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert f'ENTRYPOINT ["python", "{self.SCRIPT}"]' in result
+ assert (
+ f"RUN pip install --no-cache-dir --force-reinstall -r {requirements_path}\n"
+ in result
+ )
+
+ def test_make_dockerfile_with_setup_path(self):
+ setup_path = "./custom_setup.py"
+
+ result = build.make_dockerfile(
+ self.BASE_IMAGE,
+ self.PACKAGE,
+ self.WORKDIR,
+ self.HOME,
+ setup_path=setup_path,
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert f'ENTRYPOINT ["python", "{self.SCRIPT}"]' in result
+ assert f'COPY ["{setup_path}", "./setup.py"]\n' in result
+ assert "RUN pip install --no-cache-dir --force-reinstall .\n" in result
+
+ def test_make_dockerfile_with_extra_requirements(self):
+ extra_requirement = "custom_package==1.0"
+
+ result = build.make_dockerfile(
+ self.BASE_IMAGE,
+ self.PACKAGE,
+ self.WORKDIR,
+ self.HOME,
+ extra_requirements=[extra_requirement],
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert f'ENTRYPOINT ["python", "{self.SCRIPT}"]' in result
+ assert (
+ f"RUN pip install --no-cache-dir --force-reinstall {extra_requirement}\n"
+ in result
+ )
+
+ def test_make_dockerfile_with_extra_packages(self):
+ extra_package_basename = "custom_package"
+ extra_package = f"./{extra_package_basename}"
+
+ result = build.make_dockerfile(
+ self.BASE_IMAGE,
+ self.PACKAGE,
+ self.WORKDIR,
+ self.HOME,
+ extra_packages=[extra_package],
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert f'ENTRYPOINT ["python", "{self.SCRIPT}"]' in result
+ assert (
+ f"RUN pip install --no-cache-dir --force-reinstall {extra_package}\n"
+ in result
+ )
+
+ def test_make_dockerfile_with_extra_dirs(self):
+ extra_dir = "./subdir"
+
+ result = build.make_dockerfile(
+ self.BASE_IMAGE,
+ self.PACKAGE,
+ self.WORKDIR,
+ self.HOME,
+ extra_dirs=[extra_dir],
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert f'ENTRYPOINT ["python", "{self.SCRIPT}"]' in result
+ assert f'COPY ["{extra_dir}", "{extra_dir}"]\n' in result
+
+ def test_make_dockerfile_with_exposed_ports(self):
+ exposed_port = 8080
+
+ result = build.make_dockerfile(
+ self.BASE_IMAGE,
+ self.PACKAGE,
+ self.WORKDIR,
+ self.HOME,
+ exposed_ports=[exposed_port],
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert f'ENTRYPOINT ["python", "{self.SCRIPT}"]' in result
+ assert f"EXPOSE {exposed_port}\n" in result
+
+ def test_make_dockerfile_with_environment_variables(self):
+ environment_variables = {
+ "FAKE_ENV1": "FAKE_VALUE1",
+ "FAKE_ENV2": "FAKE_VALUE2",
+ }
+
+ result = build.make_dockerfile(
+ self.BASE_IMAGE,
+ self.PACKAGE,
+ self.WORKDIR,
+ self.HOME,
+ environment_variables=environment_variables,
+ )
+
+ assert f"FROM {self.BASE_IMAGE}\n" in result
+ assert f"WORKDIR {self.WORKDIR}\n" in result
+ assert f"ENV HOME={self.HOME}\n" in result
+ assert 'COPY [".", "."]\n' in result
+ assert f'ENTRYPOINT ["python", "{self.SCRIPT}"]' in result
+ assert "ENV FAKE_ENV1=FAKE_VALUE1\n" in result
+ assert "ENV FAKE_ENV2=FAKE_VALUE2\n" in result
+
+ def test_build_image(self, make_dockerfile_mock, execute_command_mock):
+ image = build.build_image(
+ self.BASE_IMAGE, self.HOST_WORKDIR, self.OUTPUT_IMAGE_NAME
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=self.HOST_WORKDIR,
+ python_module=None,
+ ),
+ utils.DEFAULT_WORKDIR,
+ utils.DEFAULT_HOME,
+ requirements_path=None,
+ setup_path=None,
+ extra_requirements=None,
+ extra_packages=None,
+ extra_dirs=None,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ self.HOST_WORKDIR,
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == self.HOME
+ assert image.default_workdir == self.WORKDIR
+
+ def test_build_image_with_python_module(
+ self, make_dockerfile_mock, execute_command_mock
+ ):
+ image = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ python_module=self.PYTHON_MODULE,
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=self.HOST_WORKDIR,
+ python_module=self.PYTHON_MODULE,
+ ),
+ utils.DEFAULT_WORKDIR,
+ utils.DEFAULT_HOME,
+ requirements_path=None,
+ setup_path=None,
+ extra_requirements=None,
+ extra_packages=None,
+ extra_dirs=None,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ self.HOST_WORKDIR,
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == self.HOME
+ assert image.default_workdir == self.WORKDIR
+
+ def test_build_image_with_extra_requirements(
+ self, make_dockerfile_mock, execute_command_mock
+ ):
+ extra_requirements = ["custom_package==1.0"]
+ image = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ extra_requirements=extra_requirements,
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=self.HOST_WORKDIR,
+ python_module=None,
+ ),
+ utils.DEFAULT_WORKDIR,
+ utils.DEFAULT_HOME,
+ requirements_path=None,
+ setup_path=None,
+ extra_requirements=extra_requirements,
+ extra_packages=None,
+ extra_dirs=None,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ self.HOST_WORKDIR,
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == self.HOME
+ assert image.default_workdir == self.WORKDIR
+
+ def test_build_image_with_requirements_path(
+ self, tmp_path, make_dockerfile_mock, execute_command_mock
+ ):
+ source_dir = tmp_path / self.SOURCE_DIR
+ source_dir.mkdir()
+ host_workdir = tmp_path / self.HOST_WORKDIR
+ host_workdir.mkdir()
+ requirements_file = host_workdir / self.REQUIREMENTS_FILE
+ requirements_file.write_text("")
+
+ image = build.build_image(
+ self.BASE_IMAGE,
+ host_workdir.as_posix(),
+ self.OUTPUT_IMAGE_NAME,
+ requirements_path=requirements_file.as_posix(),
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=host_workdir.as_posix(),
+ python_module=None,
+ ),
+ utils.DEFAULT_WORKDIR,
+ utils.DEFAULT_HOME,
+ requirements_path=self.REQUIREMENTS_FILE,
+ setup_path=None,
+ extra_requirements=None,
+ extra_packages=None,
+ extra_dirs=None,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ host_workdir.as_posix(),
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == self.HOME
+ assert image.default_workdir == self.WORKDIR
+
+ def test_build_image_not_found_requirements_path(self, make_dockerfile_mock):
+ requirements_path = f"./another_src/{self.REQUIREMENTS_FILE}"
+ expected_message = f'The requirements_path "{requirements_path}" must exist.'
+
+ with pytest.raises(ValueError) as exception:
+ _ = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ requirements_path=requirements_path,
+ )
+
+ assert not make_dockerfile_mock.called
+ assert str(exception.value) == expected_message
+
+ def test_build_image_invalid_requirements_path(
+ self, tmp_path, make_dockerfile_mock
+ ):
+ source_dir = tmp_path / self.SOURCE_DIR
+ source_dir.mkdir()
+ host_workdir = tmp_path / self.HOST_WORKDIR
+ host_workdir.mkdir()
+ another_dir = tmp_path / "another_dir"
+ another_dir.mkdir()
+ requirements_file = another_dir / self.REQUIREMENTS_FILE
+ requirements_file.write_text("")
+ expected_message = (
+ f'The requirements_path "{requirements_file}" must be in "{host_workdir}".'
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = build.build_image(
+ self.BASE_IMAGE,
+ host_workdir.as_posix(),
+ self.OUTPUT_IMAGE_NAME,
+ requirements_path=requirements_file.as_posix(),
+ )
+
+ assert not make_dockerfile_mock.called
+ assert str(exception.value) == expected_message
+
+ def test_build_image_with_setup_path(
+ self, tmp_path, make_dockerfile_mock, execute_command_mock
+ ):
+ source_dir = tmp_path / self.SOURCE_DIR
+ source_dir.mkdir()
+ host_workdir = tmp_path / self.HOST_WORKDIR
+ host_workdir.mkdir()
+ setup_file = host_workdir / self.SETUP_FILE
+ setup_file.write_text("")
+
+ image = build.build_image(
+ self.BASE_IMAGE,
+ host_workdir.as_posix(),
+ self.OUTPUT_IMAGE_NAME,
+ setup_path=setup_file.as_posix(),
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=host_workdir.as_posix(),
+ python_module=None,
+ ),
+ utils.DEFAULT_WORKDIR,
+ utils.DEFAULT_HOME,
+ requirements_path=None,
+ setup_path=self.SETUP_FILE,
+ extra_requirements=None,
+ extra_packages=None,
+ extra_dirs=None,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ host_workdir.as_posix(),
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == self.HOME
+ assert image.default_workdir == self.WORKDIR
+
+ def test_build_image_not_found_setup_path(self, make_dockerfile_mock):
+ setup_path = f"./another_src/{self.SETUP_FILE}"
+ expected_message = f'The setup_path "{setup_path}" must exist.'
+
+ with pytest.raises(ValueError) as exception:
+ _ = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ setup_path=setup_path,
+ )
+
+ assert not make_dockerfile_mock.called
+ assert str(exception.value) == expected_message
+
+ def test_build_image_invalid_setup_path(self, tmp_path, make_dockerfile_mock):
+ source_dir = tmp_path / self.SOURCE_DIR
+ source_dir.mkdir()
+ host_workdir = tmp_path / self.HOST_WORKDIR
+ host_workdir.mkdir()
+ another_dir = tmp_path / "another_dir"
+ another_dir.mkdir()
+ setup_file = another_dir / self.SETUP_FILE
+ setup_file.write_text("")
+ expected_message = f'The setup_path "{setup_file}" must be in "{host_workdir}".'
+
+ with pytest.raises(ValueError) as exception:
+ _ = build.build_image(
+ self.BASE_IMAGE,
+ host_workdir.as_posix(),
+ self.OUTPUT_IMAGE_NAME,
+ setup_path=setup_file.as_posix(),
+ )
+
+ assert not make_dockerfile_mock.called
+ assert str(exception.value) == expected_message
+
+ def test_build_image_with_extra_packages(
+ self, tmp_path, make_dockerfile_mock, execute_command_mock
+ ):
+ source_dir = tmp_path / self.SOURCE_DIR
+ source_dir.mkdir()
+ host_workdir = tmp_path / self.HOST_WORKDIR
+ host_workdir.mkdir()
+ extra_package = host_workdir / self.EXTRA_PACKAGE
+ extra_package.write_text("")
+
+ image = build.build_image(
+ self.BASE_IMAGE,
+ host_workdir.as_posix(),
+ self.OUTPUT_IMAGE_NAME,
+ extra_packages=[extra_package.as_posix()],
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=host_workdir.as_posix(),
+ python_module=None,
+ ),
+ utils.DEFAULT_WORKDIR,
+ utils.DEFAULT_HOME,
+ requirements_path=None,
+ setup_path=None,
+ extra_requirements=None,
+ extra_packages=[self.EXTRA_PACKAGE],
+ extra_dirs=None,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ host_workdir.as_posix(),
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == self.HOME
+ assert image.default_workdir == self.WORKDIR
+
+ def test_build_image_not_found_extra_packages(self, make_dockerfile_mock):
+ extra_package = f"./another_src/{self.EXTRA_PACKAGE}"
+ expected_message = f'The extra_packages "{extra_package}" must exist.'
+
+ with pytest.raises(ValueError) as exception:
+ _ = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ extra_packages=[extra_package],
+ )
+
+ assert not make_dockerfile_mock.called
+ assert str(exception.value) == expected_message
+
+ def test_build_image_invalid_extra_packages(
+ self, tmp_path, make_dockerfile_mock, execute_command_mock
+ ):
+ source_dir = tmp_path / self.SOURCE_DIR
+ source_dir.mkdir()
+ host_workdir = tmp_path / self.HOST_WORKDIR
+ host_workdir.mkdir()
+ another_dir = tmp_path / "another_dir"
+ another_dir.mkdir()
+ extra_package = another_dir / self.EXTRA_PACKAGE
+ extra_package.write_text("")
+ expected_message = (
+ f'The extra_packages "{extra_package}" must be in "{host_workdir}".'
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = build.build_image(
+ self.BASE_IMAGE,
+ host_workdir.as_posix(),
+ self.OUTPUT_IMAGE_NAME,
+ extra_packages=[extra_package.as_posix()],
+ )
+
+ assert not make_dockerfile_mock.called
+ assert str(exception.value) == expected_message
+
+ def test_build_image_with_container_workdir(
+ self, make_dockerfile_mock, execute_command_mock
+ ):
+ container_workdir = "/custom_workdir"
+
+ image = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ container_workdir=container_workdir,
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=self.HOST_WORKDIR,
+ python_module=None,
+ ),
+ container_workdir,
+ utils.DEFAULT_HOME,
+ requirements_path=None,
+ setup_path=None,
+ extra_requirements=None,
+ extra_packages=None,
+ extra_dirs=None,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ self.HOST_WORKDIR,
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == self.HOME
+ assert image.default_workdir == container_workdir
+
+ def test_build_image_with_container_home(
+ self, make_dockerfile_mock, execute_command_mock
+ ):
+ container_home = "/custom_home"
+
+ image = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ container_home=container_home,
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=self.HOST_WORKDIR,
+ python_module=None,
+ ),
+ utils.DEFAULT_WORKDIR,
+ container_home,
+ requirements_path=None,
+ setup_path=None,
+ extra_requirements=None,
+ extra_packages=None,
+ extra_dirs=None,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ self.HOST_WORKDIR,
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == container_home
+ assert image.default_workdir == self.WORKDIR
+
+ def test_build_image_with_extra_dirs(
+ self, make_dockerfile_mock, execute_command_mock
+ ):
+ extra_dirs = ["./another_src"]
+ image = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ extra_dirs=extra_dirs,
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=self.HOST_WORKDIR,
+ python_module=None,
+ ),
+ utils.DEFAULT_WORKDIR,
+ utils.DEFAULT_HOME,
+ requirements_path=None,
+ setup_path=None,
+ extra_requirements=None,
+ extra_packages=None,
+ extra_dirs=extra_dirs,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ self.HOST_WORKDIR,
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == self.HOME
+ assert image.default_workdir == self.WORKDIR
+
+ def test_build_image_with_exposed_ports(
+ self, make_dockerfile_mock, execute_command_mock
+ ):
+ exposed_ports = [8080]
+ image = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ exposed_ports=exposed_ports,
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=self.HOST_WORKDIR,
+ python_module=None,
+ ),
+ utils.DEFAULT_WORKDIR,
+ utils.DEFAULT_HOME,
+ requirements_path=None,
+ setup_path=None,
+ extra_requirements=None,
+ extra_packages=None,
+ extra_dirs=None,
+ exposed_ports=exposed_ports,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_mock.assert_called_once_with(
+ [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ self.HOST_WORKDIR,
+ ],
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert image.name == self.OUTPUT_IMAGE_NAME
+ assert image.default_home == self.HOME
+ assert image.default_workdir == self.WORKDIR
+
+ def test_build_image_fail(
+ self, make_dockerfile_mock, execute_command_return_code_1_mock
+ ):
+ command = [
+ "docker",
+ "build",
+ "--no-cache",
+ "-t",
+ self.OUTPUT_IMAGE_NAME,
+ "--rm",
+ "-f-",
+ self.HOST_WORKDIR,
+ ]
+ return_code = 1
+ expected_message = textwrap.dedent(
+ """
+ Docker failed with error code {return_code}.
+ Command: {command}
+ """.format(
+ return_code=return_code, command=" ".join(command)
+ )
+ )
+
+ with pytest.raises(errors.DockerError) as exception:
+ _ = build.build_image(
+ self.BASE_IMAGE,
+ self.HOST_WORKDIR,
+ self.OUTPUT_IMAGE_NAME,
+ )
+
+ make_dockerfile_mock.assert_called_once_with(
+ self.BASE_IMAGE,
+ utils.Package(
+ script=None,
+ package_path=self.HOST_WORKDIR,
+ python_module=None,
+ ),
+ utils.DEFAULT_WORKDIR,
+ utils.DEFAULT_HOME,
+ requirements_path=None,
+ setup_path=None,
+ extra_requirements=None,
+ extra_packages=None,
+ extra_dirs=None,
+ exposed_ports=None,
+ pip_command=self.PIP,
+ python_command=self.PYTHON,
+ )
+ execute_command_return_code_1_mock.assert_called_once_with(
+ command,
+ input_str=make_dockerfile_mock.return_value,
+ )
+ assert exception.value.message == expected_message
+ assert exception.value.cmd == command
+ assert exception.value.exit_code == return_code
+
+
+class TestLocalUtil:
+ @mock.patch("subprocess.Popen", MockedPopen)
+ def test_execute_command(self, textiowrapper_mock):
+ return_code = local_util.execute_command(["ls"])
+
+ assert return_code == 0
+
+ @mock.patch("subprocess.Popen", MockedPopen)
+ def test_execute_command_with_input_str(self, textiowrapper_mock):
+ return_code = local_util.execute_command(["ls"], "Test string.")
+
+ assert return_code == 0
+
+
+class TestErrors:
+ def test_raise_docker_error_with_command(self):
+ command = ["ls", "-l"]
+ return_code = 1
+ expected_message = textwrap.dedent(
+ """
+ Docker failed with error code {return_code}.
+ Command: {command}
+ """.format(
+ return_code=return_code, command=" ".join(command)
+ )
+ )
+
+ with pytest.raises(errors.DockerError) as exception:
+ errors.raise_docker_error_with_command(command, return_code)
+
+ assert exception.value.message == expected_message
+ assert exception.value.cmd == command
+ assert exception.value.exit_code == return_code
+
+
+class TestUtils:
+ IMAGE_URI = "test_image:latest"
+
+ def test_check_image_exists_locally(self, docker_client_mock):
+ assert utils.check_image_exists_locally(self.IMAGE_URI) is True
+
+ def test_check_image_exists_locally_image_not_found(
+ self, docker_client_mock_image_get_not_found
+ ):
+ assert utils.check_image_exists_locally(self.IMAGE_URI) is False
+
+ def test_check_image_exists_locally_image_api_error(
+ self, docker_client_mock_image_get_api_error
+ ):
+ assert utils.check_image_exists_locally(self.IMAGE_URI) is False
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_end_to_end.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_end_to_end.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a5ccef3476c0c03a1c8e3c2e42f8fdc0a231764
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_end_to_end.py
@@ -0,0 +1,547 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from importlib import reload
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform import schema
+
+from google.cloud.aiplatform.compat.types import (
+ dataset as gca_dataset,
+ io as gca_io,
+ model as gca_model,
+ pipeline_state as gca_pipeline_state,
+ training_pipeline as gca_training_pipeline,
+)
+
+import constants as test_constants
+
+from google.protobuf import json_format
+from google.protobuf import struct_pb2
+
+
+# Training job test variables
+_TEST_CREDENTIALS = test_constants.TrainingJobConstants._TEST_CREDENTIALS
+_TEST_JOB_DISPLAY_NAME = "test-display-name"
+_TEST_SERVING_CONTAINER_IMAGE = (
+ test_constants.TrainingJobConstants._TEST_TRAINING_CONTAINER_IMAGE
+)
+_TEST_SERVING_CONTAINER_PREDICTION_ROUTE = (
+ test_constants.TrainingJobConstants._TEST_SERVING_CONTAINER_PREDICTION_ROUTE
+)
+_TEST_SERVING_CONTAINER_HEALTH_ROUTE = (
+ test_constants.TrainingJobConstants._TEST_SERVING_CONTAINER_HEALTH_ROUTE
+)
+_TEST_BASE_OUTPUT_DIR = "gs://test-base-output-dir"
+_TEST_MACHINE_TYPE = test_constants.TrainingJobConstants._TEST_MACHINE_TYPE
+_TEST_ACCELERATOR_TYPE = test_constants.TrainingJobConstants._TEST_ACCELERATOR_TYPE
+_TEST_MODEL_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_MODEL_DISPLAY_NAME
+_TEST_TRAINING_FRACTION_SPLIT = (
+ test_constants.TrainingJobConstants._TEST_TRAINING_FRACTION_SPLIT
+)
+_TEST_VALIDATION_FRACTION_SPLIT = (
+ test_constants.TrainingJobConstants._TEST_VALIDATION_FRACTION_SPLIT
+)
+_TEST_TEST_FRACTION_SPLIT = (
+ test_constants.TrainingJobConstants._TEST_TEST_FRACTION_SPLIT
+)
+_TEST_BOOT_DISK_TYPE_DEFAULT = (
+ test_constants.TrainingJobConstants._TEST_BOOT_DISK_TYPE_DEFAULT
+)
+_TEST_BOOT_DISK_SIZE_GB_DEFAULT = (
+ test_constants.TrainingJobConstants._TEST_BOOT_DISK_SIZE_GB_DEFAULT
+)
+
+
+# Dataset test variables
+_TEST_DATA_LABEL_ITEMS = test_constants.DatasetConstants._TEST_DATA_LABEL_ITEMS
+_TEST_IMPORT_SCHEMA_URI = test_constants.DatasetConstants._TEST_IMPORT_SCHEMA_URI
+_TEST_IMPORT_SCHEMA_URI_IMAGE = (
+ test_constants.DatasetConstants._TEST_IMPORT_SCHEMA_URI_IMAGE
+)
+_TEST_REQUEST_METADATA = test_constants.DatasetConstants._TEST_REQUEST_METADATA
+_TEST_NAME = test_constants.DatasetConstants._TEST_NAME
+_TEST_SOURCE_URI_GCS = test_constants.DatasetConstants._TEST_SOURCE_URI_GCS
+_TEST_ENCRYPTION_KEY_NAME = test_constants.ProjectConstants._TEST_ENCRYPTION_KEY_NAME
+_TEST_ENCRYPTION_SPEC = test_constants.ProjectConstants._TEST_ENCRYPTION_SPEC
+
+
+def make_training_pipeline(state, add_training_task_metadata=True):
+ return gca_training_pipeline.TrainingPipeline(
+ name=test_constants.TrainingJobConstants._TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ model_to_upload=gca_model.Model(
+ name=test_constants.TrainingJobConstants._TEST_MODEL_NAME
+ ),
+ training_task_inputs={
+ "tensorboard": test_constants.TrainingJobConstants._TEST_TENSORBOARD_RESOURCE_NAME
+ },
+ training_task_metadata={
+ "backingCustomJob": test_constants.TrainingJobConstants._TEST_CUSTOM_JOB_RESOURCE_NAME
+ }
+ if add_training_task_metadata
+ else None,
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestEndToEnd:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures(
+ "get_dataset_mock",
+ "create_endpoint_mock",
+ "get_endpoint_mock",
+ "deploy_model_mock",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_dataset_create_to_model_predict(
+ self,
+ create_dataset_mock, # noqa: F811
+ import_data_mock, # noqa: F811
+ predict_client_predict_mock, # noqa: F811
+ mock_python_package_to_gcs, # noqa: F811
+ mock_pipeline_service_create, # noqa: F811
+ mock_model_service_get, # noqa: F811
+ mock_pipeline_service_get, # noqa: F811
+ sync,
+ ):
+
+ aiplatform.init(
+ project=test_constants.ProjectConstants._TEST_PROJECT,
+ staging_bucket=test_constants.TrainingJobConstants._TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ my_dataset = aiplatform.ImageDataset.create(
+ display_name=test_constants.DatasetConstants._TEST_DISPLAY_NAME,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ my_dataset.import_data(
+ gcs_source=_TEST_SOURCE_URI_GCS,
+ import_schema_uri=_TEST_IMPORT_SCHEMA_URI,
+ data_item_labels=_TEST_DATA_LABEL_ITEMS,
+ sync=sync,
+ import_request_timeout=None,
+ )
+
+ job = aiplatform.CustomTrainingJob(
+ display_name=_TEST_JOB_DISPLAY_NAME,
+ script_path=test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=test_constants.TrainingJobConstants._TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ model_from_job = job.run(
+ dataset=my_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=test_constants.TrainingJobConstants._TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=test_constants.TrainingJobConstants._TEST_ACCELERATOR_TYPE,
+ accelerator_count=test_constants.TrainingJobConstants._TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ created_endpoint = models.Endpoint.create(
+ display_name=test_constants.EndpointConstants._TEST_DISPLAY_NAME,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ my_endpoint = model_from_job.deploy(
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ endpoint_deploy_return = created_endpoint.deploy(model_from_job, sync=sync)
+
+ assert endpoint_deploy_return is None
+
+ if not sync:
+ # Accessing attribute in Endpoint that has not been created raises informatively
+ with pytest.raises(
+ RuntimeError, match=r"Endpoint resource has not been created."
+ ):
+ my_endpoint.network
+
+ my_endpoint.wait()
+ created_endpoint.wait()
+
+ test_prediction = created_endpoint.predict(
+ instances=[[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]], parameters={"param": 3.0}
+ )
+
+ true_prediction = models.Prediction(
+ predictions=test_constants.EndpointConstants._TEST_PREDICTION,
+ deployed_model_id=test_constants.EndpointConstants._TEST_ID,
+ model_resource_name=model_from_job.resource_name,
+ model_version_id=model_from_job.version_id,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_predict_mock.assert_called_once_with(
+ endpoint=test_constants.EndpointConstants._TEST_ENDPOINT_NAME,
+ instances=[[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]],
+ parameters={"param": 3.0},
+ timeout=None,
+ )
+
+ expected_dataset = gca_dataset.Dataset(
+ display_name=test_constants.DatasetConstants._TEST_DISPLAY_NAME,
+ metadata_schema_uri=test_constants.DatasetConstants._TEST_METADATA_SCHEMA_URI_NONTABULAR,
+ metadata=test_constants.DatasetConstants._TEST_NONTABULAR_DATASET_METADATA,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ expected_import_config = gca_dataset.ImportDataConfig(
+ gcs_source=gca_io.GcsSource(uris=[_TEST_SOURCE_URI_GCS]),
+ import_schema_uri=_TEST_IMPORT_SCHEMA_URI,
+ data_item_labels=_TEST_DATA_LABEL_ITEMS,
+ )
+
+ create_dataset_mock.assert_called_once_with(
+ parent=test_constants.ProjectConstants._TEST_PARENT,
+ dataset=expected_dataset,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ import_data_mock.assert_called_once_with(
+ name=_TEST_NAME,
+ import_configs=[expected_import_config],
+ timeout=None,
+ )
+
+ expected_dataset.name = _TEST_NAME
+ assert my_dataset._gca_resource == expected_dataset
+
+ mock_python_package_to_gcs.assert_called_once_with(
+ gcs_staging_dir=test_constants.TrainingJobConstants._TEST_BUCKET_NAME,
+ project=test_constants.ProjectConstants._TEST_PROJECT,
+ credentials=initializer.global_config.credentials,
+ )
+
+ true_args = test_constants.TrainingJobConstants._TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": test_constants.TrainingJobConstants._TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": test_constants.TrainingJobConstants._TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": test_constants.TrainingJobConstants._TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": test_constants.TrainingJobConstants._TEST_MODULE_NAME,
+ "package_uris": [
+ test_constants.TrainingJobConstants._TEST_OUTPUT_PYTHON_PACKAGE_PATH
+ ],
+ "args": true_args,
+ },
+ }
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ container_spec=true_container_spec,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=my_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_JOB_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=test_constants.TrainingJobConstants._TEST_MODEL_NAME,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @pytest.mark.usefixtures(
+ "get_dataset_mock",
+ "create_endpoint_mock",
+ "get_endpoint_mock",
+ "deploy_model_mock",
+ )
+ def test_dataset_create_to_model_predict_with_pipeline_fail(
+ self,
+ create_dataset_mock, # noqa: F811
+ import_data_mock, # noqa: F811
+ mock_python_package_to_gcs, # noqa: F811
+ mock_pipeline_service_create_and_get_with_fail, # noqa: F811
+ mock_model_service_get, # noqa: F811
+ ):
+
+ sync = False
+
+ aiplatform.init(
+ project=test_constants.ProjectConstants._TEST_PROJECT,
+ staging_bucket=test_constants.TrainingJobConstants._TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+
+ my_dataset = aiplatform.ImageDataset.create(
+ display_name=test_constants.DatasetConstants._TEST_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ my_dataset.import_data(
+ gcs_source=_TEST_SOURCE_URI_GCS,
+ import_schema_uri=_TEST_IMPORT_SCHEMA_URI,
+ data_item_labels=_TEST_DATA_LABEL_ITEMS,
+ sync=sync,
+ import_request_timeout=None,
+ )
+
+ job = aiplatform.CustomTrainingJob(
+ display_name=_TEST_JOB_DISPLAY_NAME,
+ script_path=test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=test_constants.TrainingJobConstants._TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ created_endpoint = models.Endpoint.create(
+ display_name=test_constants.EndpointConstants._TEST_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ model_from_job = job.run(
+ dataset=my_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=test_constants.TrainingJobConstants._TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=test_constants.TrainingJobConstants._TEST_MACHINE_TYPE,
+ accelerator_type=test_constants.TrainingJobConstants._TEST_ACCELERATOR_TYPE,
+ accelerator_count=test_constants.TrainingJobConstants._TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ with pytest.raises(RuntimeError):
+ my_endpoint = model_from_job.deploy(sync=sync)
+ my_endpoint.wait()
+
+ with pytest.raises(RuntimeError):
+ endpoint_deploy_return = created_endpoint.deploy(model_from_job, sync=sync)
+ assert endpoint_deploy_return is None
+ created_endpoint.wait()
+
+ expected_dataset = gca_dataset.Dataset(
+ display_name=test_constants.DatasetConstants._TEST_DISPLAY_NAME,
+ metadata_schema_uri=test_constants.DatasetConstants._TEST_METADATA_SCHEMA_URI_NONTABULAR,
+ metadata=test_constants.DatasetConstants._TEST_NONTABULAR_DATASET_METADATA,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ expected_import_config = gca_dataset.ImportDataConfig(
+ gcs_source=gca_io.GcsSource(uris=[_TEST_SOURCE_URI_GCS]),
+ import_schema_uri=_TEST_IMPORT_SCHEMA_URI,
+ data_item_labels=_TEST_DATA_LABEL_ITEMS,
+ )
+
+ create_dataset_mock.assert_called_once_with(
+ parent=test_constants.ProjectConstants._TEST_PARENT,
+ dataset=expected_dataset,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ import_data_mock.assert_called_once_with(
+ name=_TEST_NAME,
+ import_configs=[expected_import_config],
+ timeout=None,
+ )
+
+ expected_dataset.name = _TEST_NAME
+ assert my_dataset._gca_resource == expected_dataset
+
+ mock_python_package_to_gcs.assert_called_once_with(
+ gcs_staging_dir=test_constants.TrainingJobConstants._TEST_BUCKET_NAME,
+ project=test_constants.ProjectConstants._TEST_PROJECT,
+ credentials=initializer.global_config.credentials,
+ )
+
+ true_args = test_constants.TrainingJobConstants._TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": test_constants.TrainingJobConstants._TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": test_constants.TrainingJobConstants._TEST_MACHINE_TYPE,
+ "accelerator_type": test_constants.TrainingJobConstants._TEST_ACCELERATOR_TYPE,
+ "accelerator_count": test_constants.TrainingJobConstants._TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": test_constants.TrainingJobConstants._TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": test_constants.TrainingJobConstants._TEST_MODULE_NAME,
+ "package_uris": [
+ test_constants.TrainingJobConstants._TEST_OUTPUT_PYTHON_PACKAGE_PATH
+ ],
+ "args": true_args,
+ },
+ }
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ container_spec=true_container_spec,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=my_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_JOB_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create_and_get_with_fail[0].assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert (
+ job._gca_resource
+ is mock_pipeline_service_create_and_get_with_fail[1].return_value
+ )
+
+ mock_model_service_get.assert_not_called()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ assert job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_endpoints.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_endpoints.py
new file mode 100644
index 0000000000000000000000000000000000000000..82bdaa3f52de6483611420730b89c01f5ff82967
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_endpoints.py
@@ -0,0 +1,3984 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import copy
+from datetime import datetime, timedelta
+from importlib import reload
+import json
+import requests
+from unittest import mock
+from google.protobuf import duration_pb2
+
+from google.api_core import operation as ga_operation
+from google.auth import credentials as auth_credentials
+from google.auth.transport import requests as google_auth_requests
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import explain
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform.compat.services import (
+ deployment_resource_pool_service_client_v1,
+ deployment_resource_pool_service_client_v1beta1,
+ endpoint_service_client,
+ endpoint_service_client_v1beta1,
+ model_service_client,
+ prediction_service_async_client,
+ prediction_service_async_client_v1beta1,
+ prediction_service_client,
+ prediction_service_client_v1beta1,
+)
+from google.cloud.aiplatform.compat.types import (
+ deployment_resource_pool_v1 as gca_deployment_resource_pool_v1,
+ deployment_resource_pool_v1beta1 as gca_deployment_resource_pool_v1beta1,
+ encryption_spec as gca_encryption_spec,
+ endpoint_service_v1beta1 as gca_endpoint_service_v1beta1,
+ endpoint_service as gca_endpoint_service,
+ endpoint_v1beta1 as gca_endpoint_v1beta1,
+ endpoint as gca_endpoint,
+ io as gca_io,
+ machine_resources_v1beta1 as gca_machine_resources_v1beta1,
+ machine_resources as gca_machine_resources,
+ model as gca_model,
+ prediction_service_v1beta1 as gca_prediction_service_v1beta1,
+ prediction_service as gca_prediction_service,
+ service_networking as gca_service_networking,
+)
+from google.cloud.aiplatform.preview import models as preview_models
+import constants as test_constants
+import pytest
+import urllib3
+
+from google.protobuf import field_mask_pb2
+
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_PROJECT_2 = "test-project-2"
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_LOCATION_2 = "europe-west4"
+
+_TEST_DISPLAY_NAME = test_constants.EndpointConstants._TEST_DISPLAY_NAME
+_TEST_DISPLAY_NAME_2 = test_constants.EndpointConstants._TEST_DISPLAY_NAME_2
+_TEST_DISPLAY_NAME_3 = test_constants.EndpointConstants._TEST_DISPLAY_NAME_3
+_TEST_ID = test_constants.EndpointConstants._TEST_ID
+_TEST_ID_2 = test_constants.EndpointConstants._TEST_ID_2
+_TEST_ID_3 = test_constants.EndpointConstants._TEST_ID_3
+_TEST_DESCRIPTION = "test-description"
+_TEST_REQUEST_METADATA = ()
+_TEST_TIMEOUT = None
+_TEST_PREDICT_TIMEOUT = 100
+
+_TEST_ENDPOINT_NAME = test_constants.EndpointConstants._TEST_ENDPOINT_NAME
+_TEST_ENDPOINT_NAME_2 = test_constants.EndpointConstants._TEST_ENDPOINT_NAME_2
+_TEST_ENDPOINT_NAME_ALT_LOCATION = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION_2}/endpoints/{_TEST_ID}"
+)
+_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT
+_TEST_MODEL_NAME = test_constants.EndpointConstants._TEST_MODEL_NAME
+_TEST_DRP_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/deploymentResourcePools/{_TEST_ID}"
+_TEST_VERSION_ID = test_constants.EndpointConstants._TEST_VERSION_ID
+
+_TEST_NETWORK = f"projects/{_TEST_PROJECT}/global/networks/{_TEST_ID}"
+_TEST_PROJECT_ALLOWLIST = [_TEST_PROJECT]
+_TEST_ENDPOINT_OVERRIDE = "endpoint-override.aiplatform.vertex.goog"
+
+_TEST_DEDICATED_ENDPOINT_DNS = (
+ f"{_TEST_ID}.{_TEST_PROJECT}.{_TEST_LOCATION}-aiplatform.vertex.goog"
+)
+
+_TEST_MODEL_ID = test_constants.EndpointConstants._TEST_MODEL_ID
+_TEST_METADATA = {"foo": "bar"}
+_TEST_PREDICTION = test_constants.EndpointConstants._TEST_PREDICTION
+_TEST_INSTANCES = [[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]]
+_TEST_RAW_INPUTS = b"input bytes"
+_TEST_RAW_OUTPUTS = b"output bytes"
+_TEST_INPUTS = [{"dtype": "BOOL"}]
+_TEST_OUTPUTS = [{"dtype": "STRING"}]
+_TEST_METHOD_NAME = "test-method-name"
+_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials())
+_TEST_SERVICE_ACCOUNT = test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT
+
+_TEST_DEPLOYED_MODELS = test_constants.EndpointConstants._TEST_DEPLOYED_MODELS
+
+_TEST_TRAFFIC_SPLIT = test_constants.EndpointConstants._TEST_TRAFFIC_SPLIT
+
+_TEST_LONG_TRAFFIC_SPLIT = {
+ "m1": 40,
+ "m2": 10,
+ "m3": 30,
+ "m4": 0,
+ "m5": 20,
+}
+_TEST_LONG_TRAFFIC_SPLIT_SORTED_IDS = ["m4", "m2", "m5", "m3", "m1"]
+_TEST_LONG_DEPLOYED_MODELS = [
+ gca_endpoint.DeployedModel(id=id, display_name=f"{id}_display_name")
+ for id in ["m1", "m2", "m3", "m4", "m5", "m6", "m7"]
+]
+
+_TEST_MACHINE_TYPE = "n1-standard-32"
+_TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_P100"
+_TEST_ACCELERATOR_COUNT = 2
+
+_TEST_METRIC_NAME_CPU_UTILIZATION = (
+ "aiplatform.googleapis.com/prediction/online/cpu/utilization"
+)
+_TEST_METRIC_NAME_GPU_UTILIZATION = (
+ "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle"
+)
+
+_TEST_EXPLANATIONS = [gca_prediction_service.explanation.Explanation(attributions=[])]
+_TEST_V1BETA1_EXPLANATIONS = [
+ gca_prediction_service_v1beta1.explanation.Explanation(attributions=[])
+]
+
+_TEST_ATTRIBUTIONS = [
+ gca_prediction_service.explanation.Attribution(
+ baseline_output_value=1.0,
+ instance_output_value=2.0,
+ feature_attributions=3.0,
+ output_index=[1, 2, 3],
+ output_display_name="abc",
+ approximation_error=6.0,
+ output_name="xyz",
+ )
+]
+
+_TEST_V1BETA1_ATTRIBUTIONS = [
+ gca_prediction_service_v1beta1.explanation.Attribution(
+ baseline_output_value=1.0,
+ instance_output_value=2.0,
+ feature_attributions=3.0,
+ output_index=[1, 2, 3],
+ output_display_name="abc",
+ approximation_error=6.0,
+ output_name="xyz",
+ )
+]
+
+_TEST_EXPLANATION_METADATA = explain.ExplanationMetadata(
+ inputs={
+ "features": explain.ExplanationMetadata.InputMetadata(
+ {
+ "input_tensor_name": "dense_input",
+ "encoding": "BAG_OF_FEATURES",
+ "modality": "numeric",
+ "index_feature_mapping": ["abc", "def", "ghj"],
+ }
+ )
+ },
+ outputs={
+ "medv": explain.ExplanationMetadata.OutputMetadata(
+ {"output_tensor_name": "dense_2"}
+ )
+ },
+)
+_TEST_EXPLANATION_PARAMETERS = explain.ExplanationParameters(
+ {"sampled_shapley_attribution": {"path_count": 10}}
+)
+
+_TEST_SHAPLEY_EXPLANATION_SPEC_OVERRIDE = {
+ "parameters": {"sampled_shapley_attribution": {"path_count": 10}}
+}
+
+_TEST_XRAI_EXPLANATION_SPEC_OVERRIDE = {
+ "parameters": {"xrai_attribution": {"step_count": 50}}
+}
+
+_TEST_CONCURRENT_EXPLANATION_SPEC_OVERRIDE = {
+ "shapley": _TEST_SHAPLEY_EXPLANATION_SPEC_OVERRIDE,
+ "xrai": _TEST_XRAI_EXPLANATION_SPEC_OVERRIDE,
+}
+
+# CMEK encryption
+_TEST_ENCRYPTION_KEY_NAME = "key_1234"
+_TEST_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_KEY_NAME
+)
+
+_TEST_ENDPOINT_GAPIC = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME, name=_TEST_ENDPOINT_NAME
+)
+
+_TEST_ENDPOINT_LIST = [
+ gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name="aac",
+ create_time=datetime.now() - timedelta(minutes=15),
+ ),
+ gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name="aab",
+ create_time=datetime.now() - timedelta(minutes=5),
+ ),
+ gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name="aaa",
+ create_time=datetime.now() - timedelta(minutes=10),
+ ),
+]
+
+_TEST_PRIVATE_ENDPOINT_LIST = [
+ gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name="aac",
+ create_time=datetime.now() - timedelta(minutes=15),
+ network=_TEST_NETWORK,
+ ),
+ gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME_2,
+ display_name="psc",
+ create_time=datetime.now() - timedelta(minutes=15),
+ private_service_connect_config=gca_service_networking.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ ),
+ ),
+]
+
+_TEST_LIST_FILTER = 'display_name="abc"'
+_TEST_LIST_ORDER_BY_CREATE_TIME = "create_time desc"
+_TEST_LIST_ORDER_BY_DISPLAY_NAME = "display_name"
+
+_TEST_LABELS = {"my_key": "my_value"}
+
+_TEST_REQUEST_RESPONSE_LOGGING_SAMPLING_RATE = 0.1
+_TEST_REQUEST_RESPONSE_LOGGING_BQ_DEST = (
+ output_uri
+) = f"bq://{_TEST_PROJECT}/test_dataset/test_table"
+_TEST_REQUEST_RESPONSE_LOGGING_CONFIG = (
+ gca_endpoint.PredictRequestResponseLoggingConfig(
+ enabled=True,
+ sampling_rate=_TEST_REQUEST_RESPONSE_LOGGING_SAMPLING_RATE,
+ bigquery_destination=gca_io.BigQueryDestination(
+ output_uri=_TEST_REQUEST_RESPONSE_LOGGING_BQ_DEST
+ ),
+ )
+)
+
+_TEST_INFERENCE_TIMEOUT = 100
+_TEST_CLIENT_CONNECTION_CONFIG = gca_endpoint.ClientConnectionConfig(
+ inference_timeout=duration_pb2.Duration(seconds=_TEST_INFERENCE_TIMEOUT)
+)
+
+"""
+----------------------------------------------------------------------------
+Endpoint Fixtures
+----------------------------------------------------------------------------
+"""
+
+
+@pytest.fixture
+def get_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_empty_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(name=_TEST_ENDPOINT_NAME)
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_endpoint_alt_location_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME_ALT_LOCATION,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_endpoint_with_models_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ deployed_models=_TEST_DEPLOYED_MODELS,
+ traffic_split=_TEST_TRAFFIC_SPLIT,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_endpoint_with_many_models_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ deployed_models=_TEST_LONG_DEPLOYED_MODELS,
+ traffic_split=_TEST_LONG_TRAFFIC_SPLIT,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_dedicated_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ dedicated_endpoint_enabled=True,
+ dedicated_endpoint_dns=_TEST_DEDICATED_ENDPOINT_DNS,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_MODEL_NAME,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def create_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "create_endpoint"
+ ) as create_endpoint_mock:
+ create_endpoint_lro_mock = mock.Mock(ga_operation.Operation)
+ create_endpoint_lro_mock.result.return_value = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ create_endpoint_mock.return_value = create_endpoint_lro_mock
+ yield create_endpoint_mock
+
+
+@pytest.fixture
+def create_dedicated_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "create_endpoint"
+ ) as create_dedicated_endpoint_mock:
+ create_dedicated_endpoint_lro_mock = mock.Mock(ga_operation.Operation)
+ create_dedicated_endpoint_lro_mock.result.return_value = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ dedicated_endpoint_enabled=True,
+ )
+ create_dedicated_endpoint_mock.return_value = create_dedicated_endpoint_lro_mock
+ yield create_dedicated_endpoint_mock
+
+
+@pytest.fixture
+def update_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "update_endpoint"
+ ) as update_endpoint_mock:
+ update_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ yield update_endpoint_mock
+
+
+@pytest.fixture
+def deploy_model_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "deploy_model"
+ ) as deploy_model_mock:
+ deployed_model = gca_endpoint.DeployedModel(
+ model=_TEST_MODEL_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ deploy_model_lro_mock = mock.Mock(ga_operation.Operation)
+ deploy_model_lro_mock.result.return_value = (
+ gca_endpoint_service.DeployModelResponse(
+ deployed_model=deployed_model,
+ )
+ )
+ deploy_model_mock.return_value = deploy_model_lro_mock
+ yield deploy_model_mock
+
+
+@pytest.fixture
+def preview_deploy_model_mock():
+ with mock.patch.object(
+ endpoint_service_client_v1beta1.EndpointServiceClient, "deploy_model"
+ ) as preview_deploy_model_mock:
+ deployed_model = gca_endpoint_v1beta1.DeployedModel(
+ model=_TEST_MODEL_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ deploy_model_lro_mock = mock.Mock(ga_operation.Operation)
+ deploy_model_lro_mock.result.return_value = (
+ gca_endpoint_service_v1beta1.DeployModelResponse(
+ deployed_model=deployed_model,
+ )
+ )
+ preview_deploy_model_mock.return_value = deploy_model_lro_mock
+ yield preview_deploy_model_mock
+
+
+@pytest.fixture
+def deploy_model_with_explanations_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "deploy_model"
+ ) as deploy_model_mock:
+ deployed_model = gca_endpoint.DeployedModel(
+ model=_TEST_MODEL_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ deploy_model_lro_mock = mock.Mock(ga_operation.Operation)
+ deploy_model_lro_mock.result.return_value = (
+ gca_endpoint_service.DeployModelResponse(
+ deployed_model=deployed_model,
+ )
+ )
+ deploy_model_mock.return_value = deploy_model_lro_mock
+ yield deploy_model_mock
+
+
+@pytest.fixture
+def undeploy_model_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "undeploy_model"
+ ) as undeploy_model_mock:
+ undeploy_model_lro_mock = mock.Mock(ga_operation.Operation)
+ undeploy_model_lro_mock.result.return_value = (
+ gca_endpoint_service.UndeployModelResponse()
+ )
+ undeploy_model_mock.return_value = undeploy_model_lro_mock
+ yield undeploy_model_mock
+
+
+@pytest.fixture
+def delete_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "delete_endpoint"
+ ) as delete_endpoint_mock:
+ delete_endpoint_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_endpoint_lro_mock.result.return_value = (
+ gca_endpoint_service.DeleteEndpointRequest()
+ )
+ delete_endpoint_mock.return_value = delete_endpoint_lro_mock
+ yield delete_endpoint_mock
+
+
+@pytest.fixture
+def sdk_private_undeploy_mock():
+ """Mocks the high-level Endpoint._undeploy() SDK private method"""
+ with mock.patch.object(aiplatform.Endpoint, "_undeploy") as sdk_undeploy_mock:
+ sdk_undeploy_mock.return_value = None
+ yield sdk_undeploy_mock
+
+
+@pytest.fixture
+def sdk_undeploy_all_mock():
+ """Mocks the high-level Endpoint.undeploy_all() SDK method"""
+ with mock.patch.object(
+ aiplatform.Endpoint, "undeploy_all"
+ ) as sdk_undeploy_all_mock:
+ sdk_undeploy_all_mock.return_value = None
+ yield sdk_undeploy_all_mock
+
+
+@pytest.fixture
+def list_endpoints_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "list_endpoints"
+ ) as list_endpoints_mock:
+ list_endpoints_mock.return_value = _TEST_ENDPOINT_LIST
+ yield list_endpoints_mock
+
+
+@pytest.fixture
+def create_endpoint_client_mock():
+ with mock.patch.object(
+ initializer.global_config,
+ "create_client",
+ autospec=True,
+ ) as create_endpoint_client_mock:
+ endpoint_client_mock = mock.Mock(
+ spec=endpoint_service_client.EndpointServiceClient
+ )
+ endpoint_client_mock.get_endpoint.return_value = _TEST_ENDPOINT_GAPIC
+ create_endpoint_client_mock.return_value = endpoint_client_mock
+ yield create_endpoint_client_mock
+
+
+@pytest.fixture
+def predict_client_predict_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "predict"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service.PredictResponse(
+ deployed_model_id=_TEST_MODEL_ID,
+ metadata=_TEST_METADATA,
+ model_version_id=_TEST_VERSION_ID,
+ model=_TEST_MODEL_NAME,
+ )
+ predict_mock.return_value.predictions.extend(_TEST_PREDICTION)
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_endpoint_http_mock():
+ resp = requests.Response()
+ resp.status_code = 200
+ resp._content = json.dumps(
+ {
+ "predictions": _TEST_PREDICTION,
+ "metadata": _TEST_METADATA,
+ "deployedModelId": _TEST_DEPLOYED_MODELS[0].id,
+ "model": _TEST_MODEL_NAME,
+ "modelVersionId": "1",
+ }
+ ).encode("utf-8")
+ with mock.patch.object(
+ google_auth_requests.AuthorizedSession, "post"
+ ) as predict_mock:
+ predict_mock.return_value = resp
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_async_client_predict_mock():
+ response = gca_prediction_service.PredictResponse(
+ deployed_model_id=_TEST_MODEL_ID,
+ metadata=_TEST_METADATA,
+ model_version_id=_TEST_VERSION_ID,
+ model=_TEST_MODEL_NAME,
+ )
+ response.predictions.extend(_TEST_PREDICTION)
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="predict",
+ return_value=response,
+ ) as predict_mock:
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_client_direct_predict_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "direct_predict"
+ ) as direct_predict_mock:
+ direct_predict_mock.return_value = gca_prediction_service.DirectPredictResponse(
+ outputs=_TEST_OUTPUTS
+ )
+ yield direct_predict_mock
+
+
+@pytest.fixture
+def predict_client_direct_predict_async_mock():
+ response = gca_prediction_service.DirectPredictResponse(outputs=_TEST_OUTPUTS)
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="direct_predict",
+ return_value=response,
+ ) as direct_predict_mock:
+ yield direct_predict_mock
+
+
+@pytest.fixture
+def predict_client_direct_raw_predict_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "direct_raw_predict"
+ ) as direct_raw_predict_mock:
+ direct_raw_predict_mock.return_value = (
+ gca_prediction_service.DirectRawPredictResponse(output=_TEST_RAW_OUTPUTS)
+ )
+ yield direct_raw_predict_mock
+
+
+@pytest.fixture
+def predict_client_direct_raw_predict_async_mock():
+ response = gca_prediction_service.DirectRawPredictResponse(output=_TEST_RAW_OUTPUTS)
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="direct_raw_predict",
+ return_value=response,
+ ) as direct_raw_predict_mock:
+ yield direct_raw_predict_mock
+
+
+@pytest.fixture
+def predict_client_stream_direct_predict_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "stream_direct_predict"
+ ) as stream_direct_predict_mock:
+ stream_direct_predict_mock.return_value = (
+ gca_prediction_service.StreamDirectPredictResponse(outputs=_TEST_OUTPUTS),
+ gca_prediction_service.StreamDirectPredictResponse(outputs=_TEST_OUTPUTS),
+ )
+ yield stream_direct_predict_mock
+
+
+@pytest.fixture
+def predict_client_stream_direct_raw_predict_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "stream_direct_raw_predict"
+ ) as stream_direct_raw_predict_mock:
+ stream_direct_raw_predict_mock.return_value = (
+ gca_prediction_service.StreamDirectRawPredictResponse(
+ output=_TEST_RAW_OUTPUTS
+ ),
+ gca_prediction_service.StreamDirectRawPredictResponse(
+ output=_TEST_RAW_OUTPUTS
+ ),
+ )
+ yield stream_direct_raw_predict_mock
+
+
+@pytest.fixture
+def predict_client_explain_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "explain"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service.ExplainResponse(
+ deployed_model_id=_TEST_MODEL_ID,
+ )
+ predict_mock.return_value.predictions.extend(_TEST_PREDICTION)
+ predict_mock.return_value.explanations.extend(_TEST_EXPLANATIONS)
+ predict_mock.return_value.explanations[0].attributions.extend(
+ _TEST_ATTRIBUTIONS
+ )
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_client_v1beta1_explain_mock():
+ with mock.patch.object(
+ prediction_service_client_v1beta1.PredictionServiceClient, "explain"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service_v1beta1.ExplainResponse(
+ deployed_model_id=_TEST_MODEL_ID,
+ )
+ predict_mock.return_value.predictions.extend(_TEST_PREDICTION)
+ predict_mock.return_value.explanations.extend(_TEST_V1BETA1_EXPLANATIONS)
+ predict_mock.return_value.explanations[0].attributions.extend(
+ _TEST_V1BETA1_ATTRIBUTIONS
+ )
+ predict_mock.return_value.concurrent_explanations = {
+ "shapley": gca_prediction_service_v1beta1.ExplainResponse.ConcurrentExplanation(
+ explanations=_TEST_V1BETA1_EXPLANATIONS,
+ )
+ }
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_async_client_v1beta1_explain_mock():
+ with mock.patch.object(
+ prediction_service_async_client_v1beta1.PredictionServiceAsyncClient, "explain"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service_v1beta1.ExplainResponse(
+ deployed_model_id=_TEST_MODEL_ID,
+ )
+ predict_mock.return_value.predictions.extend(_TEST_PREDICTION)
+ predict_mock.return_value.explanations.extend(_TEST_V1BETA1_EXPLANATIONS)
+ predict_mock.return_value.explanations[0].attributions.extend(
+ _TEST_V1BETA1_ATTRIBUTIONS
+ )
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_async_client_explain_mock():
+ response = gca_prediction_service.ExplainResponse(
+ deployed_model_id=_TEST_MODEL_ID,
+ )
+ response.predictions.extend(_TEST_PREDICTION)
+ response.explanations.extend(_TEST_EXPLANATIONS)
+ response.explanations[0].attributions.extend(_TEST_ATTRIBUTIONS)
+
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="explain",
+ return_value=response,
+ ) as explain_mock:
+ yield explain_mock
+
+
+@pytest.fixture
+def preview_get_drp_mock():
+ with mock.patch.object(
+ deployment_resource_pool_service_client_v1beta1.DeploymentResourcePoolServiceClient,
+ "get_deployment_resource_pool",
+ ) as get_drp_mock:
+ machine_spec = gca_machine_resources_v1beta1.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+
+ autoscaling_metric_specs = [
+ gca_machine_resources_v1beta1.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_CPU_UTILIZATION, target=70
+ ),
+ gca_machine_resources_v1beta1.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_GPU_UTILIZATION, target=70
+ ),
+ ]
+
+ dedicated_resources = gca_machine_resources_v1beta1.DedicatedResources(
+ machine_spec=machine_spec,
+ min_replica_count=10,
+ max_replica_count=20,
+ autoscaling_metric_specs=autoscaling_metric_specs,
+ )
+
+ get_drp_mock.return_value = (
+ gca_deployment_resource_pool_v1beta1.DeploymentResourcePool(
+ name=_TEST_DRP_NAME,
+ dedicated_resources=dedicated_resources,
+ )
+ )
+ yield get_drp_mock
+
+
+@pytest.fixture
+def get_drp_mock():
+ with mock.patch.object(
+ deployment_resource_pool_service_client_v1.DeploymentResourcePoolServiceClient,
+ "get_deployment_resource_pool",
+ ) as get_drp_mock:
+ machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+
+ autoscaling_metric_specs = [
+ gca_machine_resources.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_CPU_UTILIZATION, target=70
+ ),
+ gca_machine_resources.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_GPU_UTILIZATION, target=70
+ ),
+ ]
+
+ dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=machine_spec,
+ min_replica_count=10,
+ max_replica_count=20,
+ autoscaling_metric_specs=autoscaling_metric_specs,
+ )
+
+ get_drp_mock.return_value = (
+ gca_deployment_resource_pool_v1.DeploymentResourcePool(
+ name=_TEST_DRP_NAME,
+ dedicated_resources=dedicated_resources,
+ )
+ )
+ yield get_drp_mock
+
+
+"""
+----------------------------------------------------------------------------
+Private Endpoint Fixtures
+----------------------------------------------------------------------------
+"""
+
+
+@pytest.fixture
+def create_psa_private_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "create_endpoint"
+ ) as create_private_endpoint_mock:
+ create_private_endpoint_lro_mock = mock.Mock(ga_operation.Operation)
+ create_private_endpoint_lro_mock.result.return_value = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ network=_TEST_NETWORK,
+ )
+ create_private_endpoint_mock.return_value = create_private_endpoint_lro_mock
+ yield create_private_endpoint_mock
+
+
+@pytest.fixture
+def get_psa_private_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ network=_TEST_NETWORK,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_psa_private_endpoint_with_model_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ network=_TEST_NETWORK,
+ deployed_models=[_TEST_DEPLOYED_MODELS[0]],
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def create_psc_private_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "create_endpoint"
+ ) as create_private_endpoint_mock:
+ create_private_endpoint_lro_mock = mock.Mock(ga_operation.Operation)
+ create_private_endpoint_lro_mock.result.return_value = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ private_service_connect_config=gca_service_networking.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ ),
+ )
+ create_private_endpoint_mock.return_value = create_private_endpoint_lro_mock
+ yield create_private_endpoint_mock
+
+
+@pytest.fixture
+def get_psc_private_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ private_service_connect_config=gca_service_networking.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ ),
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_psc_private_endpoint_with_many_model_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ private_service_connect_config=gca_service_networking.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ ),
+ deployed_models=_TEST_LONG_DEPLOYED_MODELS,
+ traffic_split=_TEST_LONG_TRAFFIC_SPLIT,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def predict_private_endpoint_mock():
+ with mock.patch.object(urllib3.PoolManager, "request") as predict_mock:
+ predict_mock.return_value = urllib3.response.HTTPResponse(
+ status=200,
+ body=json.dumps(
+ {
+ "predictions": _TEST_PREDICTION,
+ "metadata": _TEST_METADATA,
+ "deployedModelId": _TEST_DEPLOYED_MODELS[0].id,
+ "model": _TEST_MODEL_NAME,
+ "modelVersionId": "1",
+ }
+ ),
+ )
+ yield predict_mock
+
+
+@pytest.fixture
+def stream_raw_predict_private_endpoint_mock():
+ with mock.patch.object(
+ google_auth_requests.AuthorizedSession, "post"
+ ) as stream_raw_predict_mock:
+ # Create a mock response object
+ mock_response = mock.Mock(spec=requests.Response)
+
+ # Configure the mock to be used as a context manager
+ stream_raw_predict_mock.return_value.__enter__.return_value = mock_response
+
+ # Set the status code to 200 (OK)
+ mock_response.status_code = 200
+
+ # Simulate streaming data with iter_lines
+ mock_response.iter_lines = mock.Mock(
+ return_value=iter(
+ [
+ json.dumps(
+ {
+ "predictions": [1.0, 2.0, 3.0],
+ "metadata": {"key": "value"},
+ "deployedModelId": "model-id-123",
+ "model": "model-name",
+ "modelVersionId": "1",
+ }
+ ).encode("utf-8"),
+ json.dumps(
+ {
+ "predictions": [4.0, 5.0, 6.0],
+ "metadata": {"key": "value"},
+ "deployedModelId": "model-id-123",
+ "model": "model-name",
+ "modelVersionId": "1",
+ }
+ ).encode("utf-8"),
+ ]
+ )
+ )
+
+ yield stream_raw_predict_mock
+
+
+@pytest.fixture
+def health_check_private_endpoint_mock():
+ with mock.patch.object(urllib3.PoolManager, "request") as health_check_mock:
+ health_check_mock.return_value = urllib3.response.HTTPResponse(status=200)
+ yield health_check_mock
+
+
+@pytest.fixture
+def list_private_endpoints_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "list_endpoints"
+ ) as list_endpoints_mock:
+ list_endpoints_mock.return_value = _TEST_PRIVATE_ENDPOINT_LIST
+ yield list_endpoints_mock
+
+
+@pytest.fixture
+def sdk_undeploy_mock():
+ """Mocks the high-level PrivateEndpoint.undeploy() SDK method"""
+ with mock.patch.object(aiplatform.PrivateEndpoint, "undeploy") as sdk_undeploy_mock:
+ sdk_undeploy_mock.return_value = None
+ yield sdk_undeploy_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestEndpoint:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_constructor(self, create_endpoint_client_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ models.Endpoint(_TEST_ENDPOINT_NAME)
+ create_endpoint_client_mock.assert_has_calls(
+ [
+ mock.call(
+ client_class=utils.EndpointClientWithOverride,
+ credentials=initializer.global_config.credentials,
+ location_override=_TEST_LOCATION,
+ appended_user_agent=None,
+ ),
+ ]
+ )
+
+ def test_lazy_constructor_with_endpoint_id(self, get_endpoint_mock):
+ ep = models.Endpoint(_TEST_ID)
+ assert ep._gca_resource.name == _TEST_ENDPOINT_NAME
+ assert ep._skipped_getter_call()
+ assert not get_endpoint_mock.called
+
+ def test_lazy_constructor_with_endpoint_name(self, get_endpoint_mock):
+ ep = models.Endpoint(_TEST_ENDPOINT_NAME)
+ assert ep._gca_resource.name == _TEST_ENDPOINT_NAME
+ assert ep._skipped_getter_call()
+ assert not get_endpoint_mock.called
+
+ def test_lazy_constructor_calls_get_on_property_access(self, get_endpoint_mock):
+ ep = models.Endpoint(_TEST_ENDPOINT_NAME)
+ assert ep._gca_resource.name == _TEST_ENDPOINT_NAME
+ assert ep._skipped_getter_call()
+ assert not get_endpoint_mock.called
+
+ ep.display_name # Retrieve a property that requires a call to Endpoint getter
+ get_endpoint_mock.assert_called_with(
+ name=_TEST_ENDPOINT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_lazy_constructor_with_custom_project(self, get_endpoint_mock):
+ ep = models.Endpoint(endpoint_name=_TEST_ID, project=_TEST_PROJECT_2)
+ test_endpoint_resource_name = (
+ endpoint_service_client.EndpointServiceClient.endpoint_path(
+ _TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID
+ )
+ )
+ assert not get_endpoint_mock.called
+
+ ep.name # Retrieve a property that requires a call to Endpoint getter
+ get_endpoint_mock.assert_called_with(
+ name=test_endpoint_resource_name, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_constructor_with_conflicting_location(self):
+ """Passing a full resource name with `_TEST_LOCATION` and providing `_TEST_LOCATION_2` as location"""
+
+ with pytest.raises(RuntimeError) as err:
+ models.Endpoint(
+ endpoint_name=_TEST_ENDPOINT_NAME, location=_TEST_LOCATION_2
+ )
+
+ assert err.match(
+ regexp=r"is provided, but different from the resource location"
+ )
+
+ def test_lazy_constructor_with_custom_location(
+ self, get_endpoint_alt_location_mock
+ ):
+ ep = models.Endpoint(endpoint_name=_TEST_ID, location=_TEST_LOCATION_2)
+ test_endpoint_resource_name = (
+ endpoint_service_client.EndpointServiceClient.endpoint_path(
+ _TEST_PROJECT, _TEST_LOCATION_2, _TEST_ID
+ )
+ )
+
+ # Get Endpoint not called due to lazy loading
+ assert not get_endpoint_alt_location_mock.called
+
+ ep.network # Accessing a property that requires calling getter
+
+ get_endpoint_alt_location_mock.assert_called_with(
+ name=test_endpoint_resource_name, retry=base._DEFAULT_RETRY
+ )
+
+ def test_constructor_with_custom_credentials(self, create_endpoint_client_mock):
+ creds = auth_credentials.AnonymousCredentials()
+
+ models.Endpoint(_TEST_ENDPOINT_NAME, credentials=creds)
+ create_endpoint_client_mock.assert_has_calls(
+ [
+ mock.call(
+ client_class=utils.EndpointClientWithOverride,
+ credentials=creds,
+ location_override=_TEST_LOCATION,
+ appended_user_agent=None,
+ ),
+ ]
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_init_aiplatform_with_encryption_key_name_and_create_endpoint(
+ self, create_endpoint_mock, sync
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+ my_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ my_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC
+ )
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ endpoint_id=None,
+ metadata=(),
+ timeout=None,
+ )
+
+ expected_endpoint.name = _TEST_ENDPOINT_NAME
+ assert my_endpoint._gca_resource == expected_endpoint
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create(self, create_endpoint_mock, sync):
+ my_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ my_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC
+ )
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ endpoint_id=None,
+ metadata=(),
+ timeout=None,
+ )
+
+ expected_endpoint.name = _TEST_ENDPOINT_NAME
+ assert my_endpoint._gca_resource == expected_endpoint
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_with_endpoint_id(self, create_endpoint_mock, sync):
+ my_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ endpoint_id=_TEST_ID,
+ description=_TEST_DESCRIPTION,
+ sync=sync,
+ create_request_timeout=None,
+ )
+ if not sync:
+ my_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ )
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ endpoint_id=_TEST_ID,
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_with_timeout(self, create_endpoint_mock, sync):
+ my_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ my_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC
+ )
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ endpoint_id=None,
+ metadata=(),
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_with_timeout_not_explicitly_set(self, create_endpoint_mock, sync):
+ my_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ sync=sync,
+ )
+
+ if not sync:
+ my_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC
+ )
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ endpoint_id=None,
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_dedicated_endpoint(self, create_dedicated_endpoint_mock, sync):
+ test_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ dedicated_endpoint_enabled=True,
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ dedicated_endpoint_enabled=True,
+ )
+
+ create_dedicated_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ metadata=(),
+ timeout=None,
+ endpoint_id=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_dedicated_endpoint_with_timeout(
+ self, create_dedicated_endpoint_mock, sync
+ ):
+ my_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ dedicated_endpoint_enabled=True,
+ sync=sync,
+ inference_timeout=_TEST_INFERENCE_TIMEOUT,
+ )
+ if not sync:
+ my_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ dedicated_endpoint_enabled=True,
+ client_connection_config=_TEST_CLIENT_CONNECTION_CONFIG,
+ )
+ create_dedicated_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ metadata=(),
+ timeout=None,
+ endpoint_id=None,
+ )
+
+ @pytest.mark.usefixtures("get_empty_endpoint_mock")
+ def test_accessing_properties_with_no_resource_raises(
+ self,
+ ):
+ """Ensure a descriptive RuntimeError is raised when the
+ GAPIC object has not been populated"""
+
+ my_endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
+
+ # Create a gca_resource without `name` being populated
+ my_endpoint._gca_resource = gca_endpoint.Endpoint(create_time=datetime.now())
+
+ with pytest.raises(RuntimeError) as e:
+ my_endpoint.gca_resource
+ e.match(regexp=r"Endpoint resource has not been created.")
+
+ with pytest.raises(RuntimeError) as e:
+ my_endpoint.network
+ e.match(regexp=r"Endpoint resource has not been created.")
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_with_description(self, create_endpoint_mock, sync):
+ my_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ sync=sync,
+ create_request_timeout=None,
+ )
+ if not sync:
+ my_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ )
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ endpoint_id=None,
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_with_labels(self, create_endpoint_mock, sync):
+ my_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ sync=sync,
+ create_request_timeout=None,
+ )
+ if not sync:
+ my_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ )
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ endpoint_id=None,
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_with_request_response_logging(self, create_endpoint_mock, sync):
+ my_endpoint = models.Endpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ enable_request_response_logging=True,
+ request_response_logging_sampling_rate=_TEST_REQUEST_RESPONSE_LOGGING_SAMPLING_RATE,
+ request_response_logging_bq_destination_table=_TEST_REQUEST_RESPONSE_LOGGING_BQ_DEST,
+ sync=sync,
+ create_request_timeout=None,
+ )
+ if not sync:
+ my_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ predict_request_response_logging_config=_TEST_REQUEST_RESPONSE_LOGGING_CONFIG,
+ )
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ endpoint_id=None,
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_update_endpoint(self, update_endpoint_mock):
+ endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ endpoint.update(
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ expected_update_mask = field_mask_pb2.FieldMask(
+ paths=["display_name", "description", "labels"]
+ )
+
+ update_endpoint_mock.assert_called_once_with(
+ endpoint=expected_endpoint,
+ update_mask=expected_update_mask,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ update_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock")
+ def test_update_traffic_split(self, update_endpoint_mock):
+ endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+
+ endpoint.update(traffic_split={_TEST_ID: 10, _TEST_ID_2: 80, _TEST_ID_3: 10})
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ deployed_models=_TEST_DEPLOYED_MODELS,
+ traffic_split={_TEST_ID: 10, _TEST_ID_2: 80, _TEST_ID_3: 10},
+ )
+ expected_update_mask = field_mask_pb2.FieldMask(paths=["traffic_split"])
+
+ update_endpoint_mock.assert_called_once_with(
+ endpoint=expected_endpoint,
+ update_mask=expected_update_mask,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ update_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ traffic_split={_TEST_ID: 10, _TEST_ID_2: 80, _TEST_ID_3: 10},
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ test_model,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_timeout(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ test_model,
+ sync=sync,
+ deploy_request_timeout=180.0,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_timeout_not_explicitly_set(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ test_model,
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_display_name(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ deployed_model_display_name=_TEST_DISPLAY_NAME,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_raise_error_traffic_80(self, sync):
+ with pytest.raises(ValueError):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(model=test_model, traffic_percentage=80, sync=sync)
+
+ if not sync:
+ test_endpoint.wait()
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_raise_error_traffic_120(self, sync):
+ with pytest.raises(ValueError):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(model=test_model, traffic_percentage=120, sync=sync)
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_raise_error_traffic_negative(self, sync):
+ with pytest.raises(ValueError):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(model=test_model, traffic_percentage=-18, sync=sync)
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_raise_error_min_replica(self, sync):
+ with pytest.raises(ValueError):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(model=test_model, min_replica_count=-1, sync=sync)
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_raise_error_max_replica(self, sync):
+ with pytest.raises(ValueError):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(model=test_model, max_replica_count=-2, sync=sync)
+
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_raise_error_traffic_split(self, sync):
+ with pytest.raises(ValueError):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(model=test_model, traffic_split={"a": 99}, sync=sync)
+
+ @pytest.mark.usefixtures("get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_traffic_percent(self, deploy_model_mock, sync):
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ traffic_split={"model1": 100},
+ )
+
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ traffic_percentage=70,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+ if not sync:
+ test_endpoint.wait()
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"model1": 30, "0": 70},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_traffic_split(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ traffic_split={_TEST_ID: 10, _TEST_ID_2: 40, _TEST_ID_3: 10, "0": 40},
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={_TEST_ID: 10, _TEST_ID_2: 40, _TEST_ID_3: 10, "0": 40},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_dedicated_resources(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_autoscaling_target_cpu_utilization(
+ self, deploy_model_mock, sync
+ ):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ machine_type=_TEST_MACHINE_TYPE,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ autoscaling_target_cpu_utilization=70,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ )
+
+ expected_autoscaling_metric_spec = gca_machine_resources.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_CPU_UTILIZATION,
+ target=70,
+ )
+
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_dedicated_resources.autoscaling_metric_specs.extend(
+ [expected_autoscaling_metric_spec]
+ )
+
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_autoscaling_target_accelerator_duty_cycle(
+ self, deploy_model_mock, sync
+ ):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ autoscaling_target_accelerator_duty_cycle=70,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+
+ expected_autoscaling_metric_spec = gca_machine_resources.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_GPU_UTILIZATION,
+ target=70,
+ )
+
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_dedicated_resources.autoscaling_metric_specs.extend(
+ [expected_autoscaling_metric_spec]
+ )
+
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_autoscaling_target_accelerator_duty_cycle_and_no_accelerator_type_or_count_raises(
+ self, sync
+ ):
+ with pytest.raises(ValueError):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ sync=sync,
+ autoscaling_target_accelerator_duty_cycle=70,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_explanations(self, deploy_model_with_explanations_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ explanation_spec=gca_endpoint.explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ )
+ deploy_model_with_explanations_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_min_replica_count(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ min_replica_count=2,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=2,
+ max_replica_count=2,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_max_replica_count(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ max_replica_count=2,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+ if not sync:
+ test_endpoint.wait()
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=2,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_disable_container_logging(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ test_model,
+ sync=sync,
+ deploy_request_timeout=None,
+ disable_container_logging=True,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ disable_container_logging=True,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "preview_get_drp_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_preview_deploy_with_deployment_resource_pool(
+ self, preview_deploy_model_mock, sync
+ ):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME).preview
+ test_model = models.Model(_TEST_ID).preview
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.SHARED_RESOURCES,
+ )
+ test_drp = preview_models.DeploymentResourcePool(_TEST_DRP_NAME)
+
+ test_endpoint.deploy(
+ model=test_model,
+ deployment_resource_pool=test_drp,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+ if not sync:
+ test_endpoint.wait()
+
+ deployed_model = gca_endpoint_v1beta1.DeployedModel(
+ shared_resources=_TEST_DRP_NAME,
+ model=test_model.resource_name,
+ display_name=None,
+ enable_container_logging=True,
+ )
+ preview_deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_preview_deploy_with_fast_tryout_enabled(
+ self, preview_deploy_model_mock, sync
+ ):
+ test_endpoint = preview_models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = preview_models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES,
+ )
+
+ test_endpoint.deploy(
+ model=test_model,
+ sync=sync,
+ deploy_request_timeout=None,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ fast_tryout_enabled=True,
+ disable_container_logging=True,
+ )
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources_v1beta1.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources_v1beta1.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint_v1beta1.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ faster_deployment_config=gca_endpoint_v1beta1.FasterDeploymentConfig(
+ fast_tryout_enabled=True
+ ),
+ )
+ preview_deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_fast_tryout_enabled(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES,
+ )
+
+ test_endpoint.deploy(
+ model=test_model,
+ sync=sync,
+ deploy_request_timeout=None,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ fast_tryout_enabled=True,
+ disable_container_logging=True,
+ )
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ disable_container_logging=True,
+ faster_deployment_config=gca_endpoint.FasterDeploymentConfig(
+ fast_tryout_enabled=True
+ ),
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_preview_deploy_with_system_labels(self, preview_deploy_model_mock, sync):
+ test_endpoint = preview_models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = preview_models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES,
+ )
+
+ test_endpoint.deploy(
+ model=test_model,
+ sync=sync,
+ deploy_request_timeout=None,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ system_labels=_TEST_LABELS,
+ )
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources_v1beta1.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources_v1beta1.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint_v1beta1.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ faster_deployment_config=gca_endpoint_v1beta1.FasterDeploymentConfig(),
+ enable_container_logging=True,
+ system_labels=_TEST_LABELS,
+ )
+ preview_deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_system_labels(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES,
+ )
+
+ test_endpoint.deploy(
+ model=test_model,
+ sync=sync,
+ deploy_request_timeout=None,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ system_labels=_TEST_LABELS,
+ )
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ system_labels=_TEST_LABELS,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock", "get_drp_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_deployment_resource_pool(self, deploy_model_mock, sync):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.SHARED_RESOURCES,
+ )
+ test_drp = models.DeploymentResourcePool(_TEST_DRP_NAME)
+
+ test_endpoint.deploy(
+ model=test_model,
+ deployment_resource_pool=test_drp,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+ if not sync:
+ test_endpoint.wait()
+
+ deployed_model = gca_endpoint.DeployedModel(
+ shared_resources=_TEST_DRP_NAME,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize(
+ "model1, model2, model3, percent",
+ [
+ (100, None, None, 70),
+ (50, 50, None, 70),
+ (40, 60, None, 75),
+ (40, 60, None, 88),
+ (88, 12, None, 36),
+ (11, 89, None, 18),
+ (1, 99, None, 80),
+ (1, 2, 97, 68),
+ (99, 1, 0, 22),
+ (0, 0, 100, 18),
+ (7, 87, 6, 46),
+ ],
+ )
+ def test_allocate_traffic(self, model1, model2, model3, percent):
+ old_split = {}
+ if model1 is not None:
+ old_split["model1"] = model1
+ if model2 is not None:
+ old_split["model2"] = model2
+ if model3 is not None:
+ old_split["model3"] = model3
+
+ new_split = models.Endpoint._allocate_traffic(old_split, percent)
+ new_split_sum = 0
+ for model in new_split:
+ new_split_sum += new_split[model]
+
+ assert new_split_sum == 100
+ assert new_split["0"] == percent
+
+ @pytest.mark.parametrize(
+ "model1, model2, model3, deployed_model",
+ [
+ (100, None, None, "model1"),
+ (50, 50, None, "model1"),
+ (40, 60, None, "model2"),
+ (40, 60, None, "model1"),
+ (88, 12, None, "model1"),
+ (11, 89, None, "model1"),
+ (1, 99, None, "model2"),
+ (1, 2, 97, "model1"),
+ (99, 1, 0, "model2"),
+ (0, 0, 100, "model3"),
+ (7, 87, 6, "model2"),
+ ],
+ )
+ def test_unallocate_traffic(self, model1, model2, model3, deployed_model):
+ old_split = {}
+ if model1 is not None:
+ old_split["model1"] = model1
+ if model2 is not None:
+ old_split["model2"] = model2
+ if model3 is not None:
+ old_split["model3"] = model3
+
+ new_split = models.Endpoint._unallocate_traffic(old_split, deployed_model)
+ new_split_sum = 0
+ for model in new_split:
+ new_split_sum += new_split[model]
+
+ assert new_split_sum == 100 or new_split_sum == 0
+ assert new_split[deployed_model] == 0
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_undeploy(self, undeploy_model_mock, sync):
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ traffic_split={"model1": 100},
+ )
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ assert dict(test_endpoint.traffic_split) == {"model1": 100}
+ test_endpoint.undeploy("model1", sync=sync)
+ if not sync:
+ test_endpoint.wait()
+ undeploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model_id="model1",
+ traffic_split={},
+ metadata=(),
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_undeploy_with_traffic_split(self, undeploy_model_mock, sync):
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ traffic_split={"model1": 40, "model2": 60},
+ )
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint.undeploy(
+ deployed_model_id="model1",
+ traffic_split={"model1": 0, "model2": 100},
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ undeploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model_id="model1",
+ traffic_split={"model2": 100},
+ metadata=(),
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_undeploy_raise_error_traffic_split_total(self, sync):
+ with pytest.raises(ValueError) as e:
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint.undeploy(
+ deployed_model_id="model1", traffic_split={"model2": 99}, sync=sync
+ )
+
+ assert e.match("Sum of all traffic within traffic split needs to be 100.")
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_undeploy_raise_error_undeployed_model_traffic(self, sync):
+ with pytest.raises(ValueError) as e:
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint.undeploy(
+ deployed_model_id="model1",
+ traffic_split={"model1": 50, "model2": 50},
+ sync=sync,
+ )
+
+ assert e.match("Model being undeployed should have 0 traffic.")
+
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_undeploy_raises_error_on_zero_leftover_traffic(self, sync):
+ """
+ Attempting to undeploy model with 100% traffic on an Endpoint with
+ multiple models deployed without an updated traffic_split should
+ raise an informative error.
+ """
+
+ traffic_remaining = _TEST_TRAFFIC_SPLIT[_TEST_ID_2]
+
+ assert traffic_remaining == 100 # Confirm this model has all traffic
+ assert sum(_TEST_TRAFFIC_SPLIT.values()) == 100 # Mock traffic sums to 100%
+
+ with pytest.raises(ValueError) as e:
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint.undeploy(
+ deployed_model_id=_TEST_ID_2,
+ sync=sync,
+ )
+
+ assert e.match(
+ f"Undeploying deployed model '{_TEST_ID_2}' would leave the remaining "
+ f"traffic split at 0%."
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_undeploy_zero_traffic_model_without_new_traffic_split(
+ self, undeploy_model_mock, sync
+ ):
+ """
+ Attempting to undeploy model with zero traffic without providing
+ a new traffic split should not raise any errors.
+ """
+
+ traffic_remaining = _TEST_TRAFFIC_SPLIT[_TEST_ID_3]
+
+ assert not traffic_remaining # Confirm there is zero traffic
+
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint.undeploy(
+ deployed_model_id=_TEST_ID_3,
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_new_traffic_split = copy.deepcopy(_TEST_TRAFFIC_SPLIT)
+ expected_new_traffic_split.pop(_TEST_ID_3)
+
+ undeploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model_id=_TEST_ID_3,
+ traffic_split=expected_new_traffic_split,
+ metadata=(),
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_predict(self, predict_client_predict_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = test_endpoint.predict(
+ instances=_TEST_INSTANCES, parameters={"param": 3.0}
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_PREDICTION,
+ deployed_model_id=_TEST_ID,
+ metadata=_TEST_METADATA,
+ model_version_id=_TEST_VERSION_ID,
+ model_resource_name=_TEST_MODEL_NAME,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_predict_mock.assert_called_once_with(
+ endpoint=_TEST_ENDPOINT_NAME,
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_dedicated_endpoint_mock")
+ def test_predict_dedicated_endpoint(self, predict_endpoint_http_mock):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+
+ test_prediction = test_endpoint.predict(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ use_dedicated_endpoint=True,
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_PREDICTION,
+ deployed_model_id=_TEST_ID,
+ metadata=_TEST_METADATA,
+ model_version_id=_TEST_VERSION_ID,
+ model_resource_name=_TEST_MODEL_NAME,
+ )
+
+ assert true_prediction == test_prediction
+ predict_endpoint_http_mock.assert_called_once_with(
+ url=f"https://{_TEST_DEDICATED_ENDPOINT_DNS}/v1/projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}:predict",
+ data='{"instances": [[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]], "parameters": {"param": 3.0}}',
+ headers={"Content-Type": "application/json"},
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_dedicated_endpoint_mock")
+ def test_predict_dedicated_endpoint_with_timeout(self, predict_endpoint_http_mock):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+
+ test_prediction = test_endpoint.predict(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ use_dedicated_endpoint=True,
+ timeout=_TEST_PREDICT_TIMEOUT,
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_PREDICTION,
+ deployed_model_id=_TEST_ID,
+ metadata=_TEST_METADATA,
+ model_version_id=_TEST_VERSION_ID,
+ model_resource_name=_TEST_MODEL_NAME,
+ )
+
+ assert true_prediction == test_prediction
+ predict_endpoint_http_mock.assert_called_once_with(
+ url=f"https://{_TEST_DEDICATED_ENDPOINT_DNS}/v1/projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}:predict",
+ data='{"instances": [[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]], "parameters": {"param": 3.0}}',
+ headers={"Content-Type": "application/json"},
+ timeout=_TEST_PREDICT_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_predict_use_dedicated_endpoint_for_regular_endpoint(self):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+
+ with pytest.raises(ValueError) as err:
+ test_endpoint.predict(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ use_dedicated_endpoint=True,
+ )
+ assert err.match(
+ regexp=r"Dedicated endpoint is not enabled or DNS is empty."
+ "Please make sure endpoint has dedicated endpoint enabled"
+ "and model are ready before making a prediction."
+ )
+
+ @pytest.mark.usefixtures("get_dedicated_endpoint_mock")
+ def test_raw_predict_dedicated_endpoint(self, predict_endpoint_http_mock):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+
+ test_prediction = test_endpoint.raw_predict(
+ body=_TEST_RAW_INPUTS,
+ headers={"Content-Type": "application/json"},
+ use_dedicated_endpoint=True,
+ )
+
+ true_prediction = requests.Response()
+ true_prediction.status_code = 200
+ true_prediction._content = json.dumps(
+ {
+ "predictions": _TEST_PREDICTION,
+ "metadata": _TEST_METADATA,
+ "deployedModelId": _TEST_DEPLOYED_MODELS[0].id,
+ "model": _TEST_MODEL_NAME,
+ "modelVersionId": "1",
+ }
+ ).encode("utf-8")
+ assert true_prediction.status_code == test_prediction.status_code
+ assert true_prediction.text == test_prediction.text
+ predict_endpoint_http_mock.assert_called_once_with(
+ url=f"https://{_TEST_DEDICATED_ENDPOINT_DNS}/v1/projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}:rawPredict",
+ data=_TEST_RAW_INPUTS,
+ headers={"Content-Type": "application/json"},
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_dedicated_endpoint_mock")
+ def test_raw_predict_dedicated_endpoint_with_timeout(
+ self, predict_endpoint_http_mock
+ ):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+
+ test_prediction = test_endpoint.raw_predict(
+ body=_TEST_RAW_INPUTS,
+ headers={"Content-Type": "application/json"},
+ use_dedicated_endpoint=True,
+ timeout=_TEST_PREDICT_TIMEOUT,
+ )
+
+ true_prediction = requests.Response()
+ true_prediction.status_code = 200
+ true_prediction._content = json.dumps(
+ {
+ "predictions": _TEST_PREDICTION,
+ "metadata": _TEST_METADATA,
+ "deployedModelId": _TEST_DEPLOYED_MODELS[0].id,
+ "model": _TEST_MODEL_NAME,
+ "modelVersionId": "1",
+ }
+ ).encode("utf-8")
+ assert true_prediction.status_code == test_prediction.status_code
+ assert true_prediction.text == test_prediction.text
+ predict_endpoint_http_mock.assert_called_once_with(
+ url=f"https://{_TEST_DEDICATED_ENDPOINT_DNS}/v1/projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}:rawPredict",
+ data=_TEST_RAW_INPUTS,
+ headers={"Content-Type": "application/json"},
+ timeout=_TEST_PREDICT_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_raw_predict_use_dedicated_endpoint_for_regular_endpoint(self):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+
+ with pytest.raises(ValueError) as err:
+ test_endpoint.raw_predict(
+ body=_TEST_RAW_INPUTS,
+ headers={"Content-Type": "application/json"},
+ use_dedicated_endpoint=True,
+ )
+ assert err.match(
+ regexp=r"Dedicated endpoint is not enabled or DNS is empty."
+ "Please make sure endpoint has dedicated endpoint enabled"
+ "and model are ready before making a prediction."
+ )
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ async def test_predict_async(self, predict_async_client_predict_mock):
+ """Tests the Endpoint.predict_async method."""
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = await test_endpoint.predict_async(
+ instances=_TEST_INSTANCES, parameters={"param": 3.0}
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_PREDICTION,
+ deployed_model_id=_TEST_ID,
+ metadata=_TEST_METADATA,
+ model_version_id=_TEST_VERSION_ID,
+ model_resource_name=_TEST_MODEL_NAME,
+ )
+
+ assert true_prediction == test_prediction
+ predict_async_client_predict_mock.assert_called_once_with(
+ endpoint=_TEST_ENDPOINT_NAME,
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_explain(self, predict_client_explain_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = test_endpoint.explain(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ )
+ expected_explanations = _TEST_EXPLANATIONS
+ expected_explanations[0].attributions.extend(_TEST_ATTRIBUTIONS)
+
+ expected_prediction = models.Prediction(
+ predictions=_TEST_PREDICTION,
+ deployed_model_id=_TEST_ID,
+ explanations=expected_explanations,
+ )
+
+ assert expected_prediction == test_prediction
+ predict_client_explain_mock.assert_called_once_with(
+ endpoint=_TEST_ENDPOINT_NAME,
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ timeout=None,
+ )
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ async def test_explain_async(self, predict_async_client_explain_mock):
+ """Tests the Endpoint.explain_async method."""
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = await test_endpoint.explain_async(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ )
+ expected_explanations = _TEST_EXPLANATIONS
+ expected_explanations[0].attributions.extend(_TEST_ATTRIBUTIONS)
+
+ expected_prediction = models.Prediction(
+ predictions=_TEST_PREDICTION,
+ deployed_model_id=_TEST_ID,
+ explanations=expected_explanations,
+ )
+
+ assert expected_prediction == test_prediction
+ predict_async_client_explain_mock.assert_called_once_with(
+ endpoint=_TEST_ENDPOINT_NAME,
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_predict_with_timeout(self, predict_client_predict_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+
+ test_endpoint.predict(
+ instances=_TEST_INSTANCES, parameters={"param": 3.0}, timeout=10.0
+ )
+
+ predict_client_predict_mock.assert_called_once_with(
+ endpoint=_TEST_ENDPOINT_NAME,
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ timeout=10.0,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_predict_with_timeout_not_explicitly_set(self, predict_client_predict_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+
+ test_endpoint.predict(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ )
+
+ predict_client_predict_mock.assert_called_once_with(
+ endpoint=_TEST_ENDPOINT_NAME,
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_direct_predict(self, predict_client_direct_predict_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = test_endpoint.direct_predict(inputs=_TEST_INPUTS)
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_direct_predict_mock.assert_called_once_with(
+ request={
+ "endpoint": _TEST_ENDPOINT_NAME,
+ "inputs": _TEST_INPUTS,
+ "parameters": None,
+ },
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_direct_predict_with_parameters(self, predict_client_direct_predict_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = test_endpoint.direct_predict(
+ inputs=_TEST_INPUTS, parameters={"param": 3.0}
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_direct_predict_mock.assert_called_once_with(
+ request={
+ "endpoint": _TEST_ENDPOINT_NAME,
+ "inputs": _TEST_INPUTS,
+ "parameters": {"param": 3.0},
+ },
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_direct_predict_with_timeout(self, predict_client_direct_predict_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = test_endpoint.direct_predict(
+ inputs=_TEST_INPUTS, timeout=10.0
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_direct_predict_mock.assert_called_once_with(
+ request={
+ "endpoint": _TEST_ENDPOINT_NAME,
+ "inputs": _TEST_INPUTS,
+ "parameters": None,
+ },
+ timeout=10.0,
+ )
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ async def test_direct_predict_async(self, predict_client_direct_predict_async_mock):
+ """Tests the Endpoint.predict_async method."""
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = await test_endpoint.direct_predict_async(
+ inputs=_TEST_INPUTS, parameters=None
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_direct_predict_async_mock.assert_called_once_with(
+ request={
+ "endpoint": _TEST_ENDPOINT_NAME,
+ "inputs": _TEST_INPUTS,
+ "parameters": None,
+ },
+ timeout=None,
+ )
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ async def test_direct_predict_async_with_parameters(
+ self, predict_client_direct_predict_async_mock
+ ):
+ """Tests the Endpoint.predict_async method."""
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = await test_endpoint.direct_predict_async(
+ inputs=_TEST_INPUTS, parameters={"param": 3.0}
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_direct_predict_async_mock.assert_called_once_with(
+ request={
+ "endpoint": _TEST_ENDPOINT_NAME,
+ "inputs": _TEST_INPUTS,
+ "parameters": {"param": 3.0},
+ },
+ timeout=None,
+ )
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ async def test_direct_predict_async_with_timeout(
+ self, predict_client_direct_predict_async_mock
+ ):
+ """Tests the Endpoint.predict_async method."""
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = await test_endpoint.direct_predict_async(
+ inputs=_TEST_INPUTS, timeout=10.0
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_direct_predict_async_mock.assert_called_once_with(
+ request={
+ "endpoint": _TEST_ENDPOINT_NAME,
+ "inputs": _TEST_INPUTS,
+ "parameters": None,
+ },
+ timeout=10.0,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_direct_raw_predict(self, predict_client_direct_raw_predict_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = test_endpoint.direct_raw_predict(
+ method_name=_TEST_METHOD_NAME, request=_TEST_RAW_INPUTS
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_RAW_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_direct_raw_predict_mock.assert_called_once_with(
+ request={
+ "endpoint": _TEST_ENDPOINT_NAME,
+ "method_name": _TEST_METHOD_NAME,
+ "input": _TEST_RAW_INPUTS,
+ },
+ timeout=None,
+ )
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ async def test_direct_raw_predict_async(
+ self, predict_client_direct_raw_predict_async_mock
+ ):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = await test_endpoint.direct_raw_predict_async(
+ method_name=_TEST_METHOD_NAME, request=_TEST_RAW_INPUTS
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_RAW_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_direct_raw_predict_async_mock.assert_called_once_with(
+ request={
+ "endpoint": _TEST_ENDPOINT_NAME,
+ "method_name": _TEST_METHOD_NAME,
+ "input": _TEST_RAW_INPUTS,
+ },
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_direct_raw_predict_with_timeout(
+ self, predict_client_direct_raw_predict_mock
+ ):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction = test_endpoint.direct_raw_predict(
+ method_name=_TEST_METHOD_NAME, request=_TEST_RAW_INPUTS, timeout=10.0
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_RAW_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ )
+
+ assert true_prediction == test_prediction
+ predict_client_direct_raw_predict_mock.assert_called_once_with(
+ request={
+ "endpoint": _TEST_ENDPOINT_NAME,
+ "method_name": _TEST_METHOD_NAME,
+ "input": _TEST_RAW_INPUTS,
+ },
+ timeout=10.0,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_stream_direct_predict(self, predict_client_stream_direct_predict_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction_iterator = test_endpoint.stream_direct_predict(
+ inputs_iterator=iter([_TEST_INPUTS]), parameters=None
+ )
+ test_prediction = list(test_prediction_iterator)
+
+ true_prediction = [
+ models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ ]
+
+ assert true_prediction == test_prediction
+ predict_client_stream_direct_predict_mock.assert_called_once()
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_stream_direct_predict_with_parameters(
+ self, predict_client_stream_direct_predict_mock
+ ):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction_iterator = test_endpoint.stream_direct_predict(
+ inputs_iterator=iter([_TEST_INPUTS]), parameters={"param": 3.0}
+ )
+ test_prediction = list(test_prediction_iterator)
+
+ true_prediction = [
+ models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ ]
+
+ assert true_prediction == test_prediction
+ predict_client_stream_direct_predict_mock.assert_called_once()
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_stream_direct_predict_with_timeout(
+ self, predict_client_stream_direct_predict_mock
+ ):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction_iterator = test_endpoint.stream_direct_predict(
+ inputs_iterator=iter([_TEST_INPUTS]), parameters=None, timeout=10.0
+ )
+ test_prediction = list(test_prediction_iterator)
+
+ true_prediction = [
+ models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ models.Prediction(
+ predictions=_TEST_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ ]
+
+ assert true_prediction == test_prediction
+ predict_client_stream_direct_predict_mock.assert_called_once()
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_stream_direct_raw_predict(
+ self, predict_client_stream_direct_raw_predict_mock
+ ):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction_iterator = test_endpoint.stream_direct_raw_predict(
+ method_name=_TEST_METHOD_NAME, requests=iter([_TEST_RAW_INPUTS])
+ )
+ test_prediction = list(test_prediction_iterator)
+
+ true_prediction = [
+ models.Prediction(
+ predictions=_TEST_RAW_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ models.Prediction(
+ predictions=_TEST_RAW_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ ]
+
+ assert true_prediction == test_prediction
+ predict_client_stream_direct_raw_predict_mock.assert_called_once()
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_stream_direct_raw_predict_with_timeout(
+ self, predict_client_stream_direct_raw_predict_mock
+ ):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_prediction_iterator = test_endpoint.stream_direct_raw_predict(
+ method_name=_TEST_METHOD_NAME,
+ requests=iter([_TEST_RAW_INPUTS]),
+ timeout=10.0,
+ )
+ test_prediction = list(test_prediction_iterator)
+
+ true_prediction = [
+ models.Prediction(
+ predictions=_TEST_RAW_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ models.Prediction(
+ predictions=_TEST_RAW_OUTPUTS,
+ deployed_model_id=None,
+ metadata=None,
+ model_version_id=None,
+ model_resource_name=None,
+ ),
+ ]
+
+ assert true_prediction == test_prediction
+ predict_client_stream_direct_raw_predict_mock.assert_called_once()
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_explain_with_timeout(self, predict_client_explain_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+
+ test_endpoint.explain(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ timeout=10.0,
+ )
+
+ predict_client_explain_mock.assert_called_once_with(
+ endpoint=_TEST_ENDPOINT_NAME,
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ timeout=10.0,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_explain_with_timeout_not_explicitly_set(self, predict_client_explain_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+
+ test_endpoint.explain(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ )
+
+ predict_client_explain_mock.assert_called_once_with(
+ endpoint=_TEST_ENDPOINT_NAME,
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_explain_with_explaination_spec_override(
+ self, predict_client_v1beta1_explain_mock
+ ):
+ test_endpoint = aiplatform.Endpoint(_TEST_ID).preview
+
+ test_endpoint.explain(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ explanation_spec_override=_TEST_SHAPLEY_EXPLANATION_SPEC_OVERRIDE,
+ )
+
+ predict_client_v1beta1_explain_mock.assert_called_once_with(
+ mock.ANY,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_explain_with_concurrent_explaination_spec_override(
+ self, predict_client_v1beta1_explain_mock
+ ):
+ test_endpoint = aiplatform.Endpoint(_TEST_ID).preview
+
+ test_endpoint.explain(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ concurrent_explanation_spec_override=_TEST_CONCURRENT_EXPLANATION_SPEC_OVERRIDE,
+ )
+
+ predict_client_v1beta1_explain_mock.assert_called_once_with(
+ mock.ANY,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ async def test_explain_async_with_explaination_spec_override(
+ self, predict_async_client_v1beta1_explain_mock
+ ):
+ test_endpoint = aiplatform.Endpoint(_TEST_ID).preview
+
+ await test_endpoint.explain(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ explanation_spec_override=_TEST_SHAPLEY_EXPLANATION_SPEC_OVERRIDE,
+ )
+
+ predict_async_client_v1beta1_explain_mock.assert_called_once_with(
+ mock.ANY,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ async def test_explain_async_with_concurrent_explaination_spec_override(
+ self, predict_async_client_v1beta1_explain_mock
+ ):
+ test_endpoint = aiplatform.Endpoint(_TEST_ID).preview
+
+ await test_endpoint.explain(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ deployed_model_id=_TEST_MODEL_ID,
+ concurrent_explanation_spec_override=_TEST_CONCURRENT_EXPLANATION_SPEC_OVERRIDE,
+ )
+
+ predict_async_client_v1beta1_explain_mock.assert_called_once_with(
+ mock.ANY,
+ timeout=None,
+ )
+
+ def test_list_models(self, get_endpoint_with_models_mock):
+ ept = aiplatform.Endpoint(_TEST_ID)
+ my_models = ept.list_models()
+
+ assert my_models == _TEST_DEPLOYED_MODELS
+
+ @pytest.mark.usefixtures("get_endpoint_with_many_models_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_undeploy_all(self, sdk_private_undeploy_mock, sync):
+ ept = aiplatform.Endpoint(_TEST_ID)
+ ept.undeploy_all(sync=sync)
+
+ if not sync:
+ ept.wait()
+
+ # undeploy_all() results in an undeploy() call for each deployed_model
+ # Models are undeployed in ascending order of traffic percentage
+ expected_models_to_undeploy = ["m6", "m7"] + _TEST_LONG_TRAFFIC_SPLIT_SORTED_IDS
+ sdk_private_undeploy_mock.assert_has_calls(
+ [
+ mock.call(deployed_model_id=deployed_model_id, sync=sync)
+ for deployed_model_id in expected_models_to_undeploy
+ ],
+ )
+
+ @pytest.mark.usefixtures("list_endpoints_mock")
+ def test_list_endpoint_has_prediction_client(self):
+ """Test call to Endpoint.list() and ensure Endpoints have prediction client set"""
+ ep_list = aiplatform.Endpoint.list(order_by=_TEST_LIST_ORDER_BY_CREATE_TIME)
+
+ assert ep_list # Ensure list is not empty
+
+ # Confirm every Endpoint object in the list has a prediction client
+ assert all(
+ [
+ isinstance(
+ e._prediction_client, aiplatform.utils.PredictionClientWithOverride
+ )
+ for e in ep_list
+ ]
+ )
+
+ def test_list_endpoint_order_by_time(self, list_endpoints_mock):
+ """Test call to Endpoint.list() and ensure list is returned in descending order of create_time"""
+
+ ep_list = aiplatform.Endpoint.list(
+ filter=_TEST_LIST_FILTER, order_by=_TEST_LIST_ORDER_BY_CREATE_TIME
+ )
+
+ # `order_by` is not passed to API since it is not an accepted field
+ list_endpoints_mock.assert_called_once_with(
+ request={"parent": _TEST_PARENT, "filter": _TEST_LIST_FILTER}
+ )
+
+ assert len(ep_list) == len(_TEST_ENDPOINT_LIST)
+
+ for ep in ep_list:
+ assert isinstance(ep, aiplatform.Endpoint)
+
+ assert ep_list[0].create_time > ep_list[1].create_time > ep_list[2].create_time
+
+ def test_list_endpoint_order_by_display_name(self, list_endpoints_mock):
+ """Test call to Endpoint.list() and ensure list is returned in order of display_name"""
+
+ ep_list = aiplatform.Endpoint.list(
+ filter=_TEST_LIST_FILTER, order_by=_TEST_LIST_ORDER_BY_DISPLAY_NAME
+ )
+
+ # `order_by` is not passed to API since it is not an accepted field
+ list_endpoints_mock.assert_called_once_with(
+ request={"parent": _TEST_PARENT, "filter": _TEST_LIST_FILTER}
+ )
+
+ assert len(ep_list) == len(_TEST_ENDPOINT_LIST)
+
+ for ep in ep_list:
+ assert isinstance(ep, aiplatform.Endpoint)
+
+ assert (
+ ep_list[0].display_name < ep_list[1].display_name < ep_list[2].display_name
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_delete_endpoint_without_force(
+ self, sdk_undeploy_all_mock, delete_endpoint_mock, sync
+ ):
+ ept = aiplatform.Endpoint(_TEST_ID)
+ ept.delete(sync=sync)
+
+ if not sync:
+ ept.wait()
+
+ # undeploy_all() should not be called unless force is set to True
+ sdk_undeploy_all_mock.assert_not_called()
+
+ delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
+
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_delete_endpoint_with_force(
+ self, sdk_undeploy_all_mock, delete_endpoint_mock, sync
+ ):
+ ept = aiplatform.Endpoint(_TEST_ID)
+ ept.delete(force=True, sync=sync)
+
+ if not sync:
+ ept.wait()
+
+ # undeploy_all() should be called if force is set to True
+ sdk_undeploy_all_mock.assert_called_once()
+
+ delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
+
+
+class TestPrivateEndpoint:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_psa(self, create_psa_private_endpoint_mock, sync):
+ test_endpoint = models.PrivateEndpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ network=_TEST_NETWORK,
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME, network=_TEST_NETWORK
+ )
+
+ create_psa_private_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ metadata=(),
+ timeout=None,
+ endpoint_id=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_psc(self, create_psc_private_endpoint_mock, sync):
+ test_endpoint = models.PrivateEndpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ private_service_connect_config=models.PrivateEndpoint.PrivateServiceConnectConfig(
+ project_allowlist=_TEST_PROJECT_ALLOWLIST
+ ),
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ private_service_connect_config=gca_service_networking.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ ),
+ )
+
+ create_psc_private_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ metadata=(),
+ timeout=None,
+ endpoint_id=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_psc_with_timeout(self, create_psc_private_endpoint_mock, sync):
+ test_endpoint = models.PrivateEndpoint.create(
+ display_name=_TEST_DISPLAY_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ private_service_connect_config=models.PrivateEndpoint.PrivateServiceConnectConfig(
+ project_allowlist=_TEST_PROJECT_ALLOWLIST
+ ),
+ sync=sync,
+ inference_timeout=_TEST_INFERENCE_TIMEOUT,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ private_service_connect_config=gca_service_networking.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ ),
+ client_connection_config=_TEST_CLIENT_CONNECTION_CONFIG,
+ )
+
+ create_psc_private_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ metadata=(),
+ timeout=None,
+ endpoint_id=None,
+ )
+
+ @pytest.mark.usefixtures("get_psa_private_endpoint_with_model_mock")
+ def test_psa_predict(self, predict_private_endpoint_mock):
+ test_endpoint = models.PrivateEndpoint(_TEST_ID)
+ test_prediction = test_endpoint.predict(
+ instances=_TEST_INSTANCES, parameters={"param": 3.0}
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_PREDICTION,
+ deployed_model_id=_TEST_ID,
+ metadata=_TEST_METADATA,
+ )
+
+ assert true_prediction == test_prediction
+ predict_private_endpoint_mock.assert_called_once_with(
+ method="POST",
+ url="",
+ body='{"instances": [[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]]}',
+ headers={"Content-Type": "application/json"},
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ def test_psc_predict(self, predict_private_endpoint_mock):
+ test_endpoint = models.PrivateEndpoint(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, endpoint_name=_TEST_ID
+ )
+ test_prediction = test_endpoint.predict(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ endpoint_override=_TEST_ENDPOINT_OVERRIDE,
+ )
+
+ true_prediction = models.Prediction(
+ predictions=_TEST_PREDICTION,
+ deployed_model_id=_TEST_DEPLOYED_MODELS[0].id,
+ metadata=_TEST_METADATA,
+ model_version_id="1",
+ model_resource_name=_TEST_MODEL_NAME,
+ )
+
+ assert true_prediction == test_prediction
+ predict_private_endpoint_mock.assert_called_once_with(
+ method="POST",
+ url=f"https://{_TEST_ENDPOINT_OVERRIDE}/v1/projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}:predict",
+ body='{"instances": [[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]]}',
+ headers={
+ "Content-Type": "application/json",
+ "Authorization": "Bearer None",
+ },
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ def test_psc_stream_raw_predict(self, stream_raw_predict_private_endpoint_mock):
+ test_endpoint = models.PrivateEndpoint(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, endpoint_name=_TEST_ID
+ )
+
+ test_prediction_iterator = test_endpoint.stream_raw_predict(
+ body='{"instances": [[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]]}',
+ headers={
+ "Content-Type": "application/json",
+ "Authorization": "Bearer None",
+ },
+ endpoint_override=_TEST_ENDPOINT_OVERRIDE,
+ )
+
+ test_prediction = list(test_prediction_iterator)
+
+ stream_raw_predict_private_endpoint_mock.assert_called_once_with(
+ url=f"https://{_TEST_ENDPOINT_OVERRIDE}/v1/projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}:streamRawPredict",
+ data='{"instances": [[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]]}',
+ headers={
+ "Content-Type": "application/json",
+ "Authorization": "Bearer None",
+ },
+ stream=True,
+ verify=False,
+ )
+
+ # Validate the content of the returned predictions
+ expected_predictions = [
+ json.dumps(
+ {
+ "predictions": [1.0, 2.0, 3.0],
+ "metadata": {"key": "value"},
+ "deployedModelId": "model-id-123",
+ "model": "model-name",
+ "modelVersionId": "1",
+ }
+ ).encode("utf-8"),
+ json.dumps(
+ {
+ "predictions": [4.0, 5.0, 6.0],
+ "metadata": {"key": "value"},
+ "deployedModelId": "model-id-123",
+ "model": "model-name",
+ "modelVersionId": "1",
+ }
+ ).encode("utf-8"),
+ ]
+ assert test_prediction == expected_predictions
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ def test_psc_predict_without_endpoint_override(self):
+ test_endpoint = models.PrivateEndpoint(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, endpoint_name=_TEST_ID
+ )
+
+ with pytest.raises(ValueError) as err:
+ test_endpoint.predict(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ )
+ assert err.match(
+ regexp=r"Cannot make a predict request because endpoint override is"
+ "not provided. Please ensure an endpoint override is"
+ "provided."
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ def test_psc_predict_with_invalid_endpoint_override(self):
+ test_endpoint = models.PrivateEndpoint(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, endpoint_name=_TEST_ID
+ )
+
+ with pytest.raises(ValueError) as err:
+ test_endpoint.predict(
+ instances=_TEST_INSTANCES,
+ parameters={"param": 3.0},
+ endpoint_override="invalid@endpoint.override",
+ )
+ assert err.match(
+ regexp=r"Invalid endpoint override provided. Please only use IP"
+ "address or DNS."
+ )
+
+ @pytest.mark.usefixtures("get_psa_private_endpoint_with_model_mock")
+ def test_psa_health_check(self, health_check_private_endpoint_mock):
+ test_endpoint = models.PrivateEndpoint(_TEST_ID)
+ test_health_check = test_endpoint.health_check()
+
+ true_health_check = True
+
+ assert true_health_check == test_health_check
+ health_check_private_endpoint_mock.assert_called_once_with(
+ method="GET", url="", body=None, headers=None
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ def test_psc_health_check(self):
+ test_endpoint = models.PrivateEndpoint(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, endpoint_name=_TEST_ID
+ )
+
+ with pytest.raises(RuntimeError) as err:
+ test_endpoint.health_check()
+ assert err.match(
+ regexp=r"Health check request is not supported on PSC based Private Endpoint."
+ )
+
+ @pytest.mark.usefixtures("get_psa_private_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psa_deploy(self, deploy_model_mock, sync):
+ test_endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ test_model,
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ metadata=(),
+ timeout=None,
+ traffic_split=None,
+ )
+
+ @pytest.mark.usefixtures("get_psa_private_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psa_deploy_traffic_split_not_supported(self, deploy_model_mock, sync):
+ test_endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+
+ with pytest.raises(ValueError) as err:
+ test_endpoint.deploy(
+ test_model, sync=sync, traffic_split=_TEST_TRAFFIC_SPLIT
+ )
+ assert err.match(
+ regexp=r"Traffic split is not supported for PSA based PrivateEndpoint."
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psc_deploy_traffic_split(self, deploy_model_mock, sync):
+ test_endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model, sync=sync, traffic_split=_TEST_TRAFFIC_SPLIT
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ metadata=(),
+ timeout=None,
+ traffic_split=_TEST_TRAFFIC_SPLIT,
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psc_deploy_with_traffic_percentage(self, deploy_model_mock, sync):
+ test_endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_endpoint._gca_resource.traffic_split = {"model1": 100}
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+
+ test_endpoint.deploy(
+ model=test_model,
+ traffic_percentage=70,
+ sync=sync,
+ )
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"model1": 30, "0": 70},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_psa_private_endpoint_with_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psa_undeploy(self, undeploy_model_mock, sync):
+ test_endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint.undeploy("model1", sync=sync)
+
+ if not sync:
+ test_endpoint.wait()
+
+ undeploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model_id="model1",
+ metadata=(),
+ traffic_split={},
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psc_undeploy(self, undeploy_model_mock, sync):
+ test_endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint.undeploy("model1", sync=sync)
+
+ if not sync:
+ test_endpoint.wait()
+
+ undeploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model_id="model1",
+ metadata=(),
+ traffic_split={},
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psc_undeploy_with_traffic_split(self, undeploy_model_mock, sync):
+ test_endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint._gca_resource.traffic_split = {"model1": 40, "model2": 60}
+ test_endpoint.undeploy(
+ deployed_model_id="model1",
+ traffic_split={"model1": 0, "model2": 100},
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ undeploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model_id="model1",
+ traffic_split={"model2": 100},
+ metadata=(),
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ def test_psc_update_traffic_split(self, update_endpoint_mock):
+ endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+
+ endpoint.update(traffic_split={_TEST_ID: 10, _TEST_ID_2: 80, _TEST_ID_3: 10})
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ private_service_connect_config=gca_service_networking.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ ),
+ traffic_split={_TEST_ID: 10, _TEST_ID_2: 80, _TEST_ID_3: 10},
+ )
+ expected_update_mask = field_mask_pb2.FieldMask(paths=["traffic_split"])
+
+ update_endpoint_mock.assert_called_once_with(
+ endpoint=expected_endpoint,
+ update_mask=expected_update_mask,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ update_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ private_service_connect_config=gca_service_networking.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ ),
+ traffic_split={_TEST_ID: 10, _TEST_ID_2: 80, _TEST_ID_3: 10},
+ )
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_with_many_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_undeploy_all(self, sdk_private_undeploy_mock, sync):
+ test_endpoint = aiplatform.Endpoint(_TEST_ID)
+ test_endpoint.undeploy_all(sync=sync)
+
+ if not sync:
+ test_endpoint.wait()
+
+ # undeploy_all() results in an undeploy() call for each deployed_model
+ # Models are undeployed in ascending order of traffic percentage
+ expected_models_to_undeploy = ["m6", "m7"] + _TEST_LONG_TRAFFIC_SPLIT_SORTED_IDS
+ sdk_private_undeploy_mock.assert_has_calls(
+ [
+ mock.call(deployed_model_id=deployed_model_id, sync=sync)
+ for deployed_model_id in expected_models_to_undeploy
+ ],
+ )
+
+ @pytest.mark.usefixtures("get_psa_private_endpoint_with_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psa_delete_without_force(
+ self, sdk_undeploy_mock, delete_endpoint_mock, sync
+ ):
+ test_endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint.delete(sync=sync)
+
+ if not sync:
+ test_endpoint.wait()
+
+ # undeploy() should not be called unless force is set to True
+ sdk_undeploy_mock.assert_not_called()
+
+ delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
+
+ @pytest.mark.usefixtures("get_psa_private_endpoint_with_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psa_delete_with_force(self, sdk_undeploy_mock, delete_endpoint_mock, sync):
+ test_endpoint = models.PrivateEndpoint(_TEST_ENDPOINT_NAME)
+ test_endpoint._gca_resource.deployed_models = [_TEST_DEPLOYED_MODELS[0]]
+ test_endpoint.delete(sync=sync)
+
+ if not sync:
+ test_endpoint.wait()
+
+ # undeploy() should not be called unless force is set to True
+ sdk_undeploy_mock.called_once_with(deployed_model_id=_TEST_ID, sync=sync)
+
+ delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psc_delete_with_force(
+ self, sdk_undeploy_all_mock, delete_endpoint_mock, sync
+ ):
+ test_endpoint = aiplatform.Endpoint(_TEST_ID)
+ test_endpoint.delete(force=True, sync=sync)
+
+ if not sync:
+ test_endpoint.wait()
+
+ # undeploy_all() should be called if force is set to True
+ sdk_undeploy_all_mock.assert_called_once()
+
+ delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
+
+ @pytest.mark.usefixtures("get_psc_private_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_psc_delete_without_force(
+ self, sdk_undeploy_all_mock, delete_endpoint_mock, sync
+ ):
+ test_endpoint = aiplatform.Endpoint(_TEST_ID)
+ test_endpoint.delete(sync=sync)
+
+ if not sync:
+ test_endpoint.wait()
+
+ # undeploy_all() should not be called unless force is set to True
+ sdk_undeploy_all_mock.assert_not_called()
+
+ delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
+
+ @pytest.mark.usefixtures("list_private_endpoints_mock")
+ def test_list(self):
+ ep_list = aiplatform.PrivateEndpoint.list()
+ assert len(ep_list) == 2 # Ensure list include both PSA and PSC endpoints
+
+ def test_construct_sdk_resource_from_gapic_uses_resource_project(self):
+ PROJECT = "my-project"
+ LOCATION = "me-west1"
+ endpoint_name = f"projects/{PROJECT}/locations/{LOCATION}/endpoints/123"
+ endpoint = aiplatform.Endpoint._construct_sdk_resource_from_gapic(
+ models.gca_endpoint_compat.Endpoint(name=endpoint_name)
+ )
+ assert endpoint.project == PROJECT
+ assert endpoint.location == LOCATION
+ assert endpoint.project != _TEST_PROJECT
+ assert endpoint.location != _TEST_LOCATION
+
+ endpoint2 = aiplatform.Endpoint._construct_sdk_resource_from_gapic(
+ models.gca_endpoint_compat.Endpoint(name=endpoint_name),
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ assert endpoint2.project != _TEST_PROJECT
+ assert endpoint2.location != _TEST_LOCATION
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_explain_lit.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_explain_lit.py
new file mode 100644
index 0000000000000000000000000000000000000000..68f8f3e65be0e524524ac82b637d0c29ebfa4ffc
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_explain_lit.py
@@ -0,0 +1,648 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import explainable_ai_sdk
+import os
+import pandas as pd
+import pytest
+import tensorflow as tf
+
+from google.auth import credentials as auth_credentials
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.compat.types import (
+ endpoint as gca_endpoint,
+ prediction_service as gca_prediction_service,
+ explanation as gca_explanation,
+)
+
+# TODO (b/301592787): update testing_extra_require deps to numpy >= 1.22.0 when it doesn't cause conflicts
+try:
+ from lit_nlp import notebook # noqa: F401
+except ImportError:
+ pytest.skip(
+ "Skipping test_explain_list due to dependency conflict with numpy",
+ allow_module_level=True,
+ )
+
+from google.cloud.aiplatform.explain.lit import ( # noqa: E402
+ create_lit_dataset,
+ create_lit_model,
+ create_lit_model_from_endpoint,
+ open_lit,
+ set_up_and_open_lit,
+)
+from google.cloud.aiplatform.compat.services import ( # noqa: E402
+ endpoint_service_client,
+ prediction_service_client,
+)
+from importlib import reload # noqa: E402
+from lit_nlp.api import types as lit_types # noqa: E402
+from lit_nlp import notebook # noqa: E402, F811
+from unittest import mock # noqa: E402
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_ID = "1028944691210842416"
+_TEST_ID_2 = "4366591682456584192"
+_TEST_ID_3 = "5820582938582924817"
+_TEST_DISPLAY_NAME = "test-display-name"
+_TEST_DISPLAY_NAME_2 = "test-display-name-2"
+_TEST_DISPLAY_NAME_3 = "test-display-name-3"
+_TEST_ENDPOINT_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}"
+)
+_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials())
+_TEST_EXPLANATION_METADATA = aiplatform.explain.ExplanationMetadata(
+ inputs={
+ "features": {
+ "input_tensor_name": "dense_input",
+ "encoding": "BAG_OF_FEATURES",
+ "modality": "numeric",
+ "index_feature_mapping": ["abc", "def", "ghj"],
+ }
+ },
+ outputs={"medv": {"output_tensor_name": "dense_2"}},
+)
+_TEST_EXPLANATION_PARAMETERS = aiplatform.explain.ExplanationParameters(
+ {"sampled_shapley_attribution": {"path_count": 10}}
+)
+_TEST_DEPLOYED_MODELS = [
+ gca_endpoint.DeployedModel(id=_TEST_ID, display_name=_TEST_DISPLAY_NAME),
+ gca_endpoint.DeployedModel(id=_TEST_ID_2, display_name=_TEST_DISPLAY_NAME_2),
+ gca_endpoint.DeployedModel(id=_TEST_ID_3, display_name=_TEST_DISPLAY_NAME_3),
+]
+_TEST_DEPLOYED_MODELS_WITH_EXPLANATION = [
+ gca_endpoint.DeployedModel(
+ id=_TEST_ID,
+ display_name=_TEST_DISPLAY_NAME,
+ explanation_spec=gca_explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ ),
+ gca_endpoint.DeployedModel(
+ id=_TEST_ID_2,
+ display_name=_TEST_DISPLAY_NAME_2,
+ explanation_spec=gca_explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ ),
+ gca_endpoint.DeployedModel(
+ id=_TEST_ID_3,
+ display_name=_TEST_DISPLAY_NAME_3,
+ explanation_spec=gca_explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ ),
+]
+_TEST_TRAFFIC_SPLIT = {_TEST_ID: 0, _TEST_ID_2: 100, _TEST_ID_3: 0}
+_TEST_DICT_PREDICTION = [{"label": 1.0}]
+_TEST_LIST_PREDICTION = [[1.0]]
+_TEST_EXPLANATIONS = [gca_prediction_service.explanation.Explanation(attributions=[])]
+_TEST_ATTRIBUTIONS = [
+ gca_prediction_service.explanation.Attribution(
+ baseline_output_value=1.0,
+ instance_output_value=2.0,
+ feature_attributions={"feature_1": 3.0, "feature_2": 2.0},
+ output_index=[1, 2, 3],
+ output_display_name="abc",
+ approximation_error=6.0,
+ output_name="xyz",
+ )
+]
+
+
+@pytest.fixture()
+def lit_widget_mock():
+ widget_mock = mock.MagicMock(notebook.LitWidget)
+ yield widget_mock
+
+
+@pytest.fixture()
+def init_lit_widget_mock(lit_widget_mock):
+ with mock.patch.object(notebook, "LitWidget") as widget_init_mock:
+ widget_init_mock.return_value = lit_widget_mock
+ yield widget_init_mock
+
+
+@pytest.fixture
+def widget_render_mock(lit_widget_mock):
+ with mock.patch.object(lit_widget_mock, "render") as render_mock:
+ yield render_mock
+
+
+@pytest.fixture
+def sampled_shapley_explainer_mock():
+ with mock.patch.object(
+ explainable_ai_sdk, "SampledShapleyConfig", create=True
+ ) as config_mock:
+ yield config_mock
+
+
+@pytest.fixture
+def load_model_from_local_path_mock():
+ with mock.patch.object(
+ explainable_ai_sdk, "load_model_from_local_path", autospec=True
+ ) as explainer_mock:
+ model_mock = mock.Mock()
+ explanation_mock = mock.Mock()
+ explanation_mock.feature_importance.return_value = {
+ "feature_1": 0.01,
+ "feature_2": 0.1,
+ }
+ model_mock.explain.return_value = [explanation_mock]
+ explainer_mock.return_value = model_mock
+ yield explainer_mock
+
+
+@pytest.fixture
+def feature_types():
+ yield collections.OrderedDict(
+ [("feature_1", lit_types.Scalar()), ("feature_2", lit_types.Scalar())]
+ )
+
+
+@pytest.fixture
+def label_types():
+ yield collections.OrderedDict([("label", lit_types.RegressionScore())])
+
+
+@pytest.fixture
+def set_up_sequential(tmpdir, feature_types, label_types):
+ # Set up a sequential model
+ seq_model = tf.keras.models.Sequential()
+ seq_model.add(tf.keras.layers.Dense(32, activation="relu", input_shape=(2,)))
+ seq_model.add(tf.keras.layers.Dense(32, activation="relu"))
+ seq_model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
+ saved_model_path = str(tmpdir.mkdir("tmp"))
+ tf.saved_model.save(seq_model, saved_model_path)
+ yield feature_types, label_types, saved_model_path
+
+
+@pytest.fixture
+def set_up_pandas_dataframe_and_columns():
+ dataframe = pd.DataFrame.from_dict(
+ {"feature_1": [1.0], "feature_2": [3.0], "label": [1.0]}
+ )
+ columns = collections.OrderedDict(
+ [
+ ("feature_1", lit_types.Scalar()),
+ ("feature_2", lit_types.Scalar()),
+ ("label", lit_types.RegressionScore()),
+ ]
+ )
+ yield dataframe, columns
+
+
+@pytest.fixture
+def get_endpoint_with_models_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ deployed_models=_TEST_DEPLOYED_MODELS,
+ traffic_split=_TEST_TRAFFIC_SPLIT,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def get_endpoint_with_models_with_explanation_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_DISPLAY_NAME,
+ name=_TEST_ENDPOINT_NAME,
+ deployed_models=_TEST_DEPLOYED_MODELS_WITH_EXPLANATION,
+ traffic_split=_TEST_TRAFFIC_SPLIT,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def predict_client_predict_dict_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "predict"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service.PredictResponse(
+ deployed_model_id=_TEST_ID
+ )
+ predict_mock.return_value.predictions.extend(_TEST_DICT_PREDICTION)
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_client_explain_dict_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "explain"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service.ExplainResponse(
+ deployed_model_id=_TEST_ID,
+ )
+ predict_mock.return_value.predictions.extend(_TEST_DICT_PREDICTION)
+ predict_mock.return_value.explanations.extend(_TEST_EXPLANATIONS)
+ predict_mock.return_value.explanations[0].attributions.extend(
+ _TEST_ATTRIBUTIONS
+ )
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_client_predict_list_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "predict"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service.PredictResponse(
+ deployed_model_id=_TEST_ID
+ )
+ predict_mock.return_value.predictions.extend(_TEST_LIST_PREDICTION)
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_client_explain_list_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "explain"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service.ExplainResponse(
+ deployed_model_id=_TEST_ID,
+ )
+ predict_mock.return_value.predictions.extend(_TEST_LIST_PREDICTION)
+ predict_mock.return_value.explanations.extend(_TEST_EXPLANATIONS)
+ predict_mock.return_value.explanations[0].attributions.extend(
+ _TEST_ATTRIBUTIONS
+ )
+ yield predict_mock
+
+
+class TestExplainLit:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_create_lit_dataset_from_pandas_returns_dataset(
+ self,
+ set_up_pandas_dataframe_and_columns,
+ ):
+ pd_dataset, lit_columns = set_up_pandas_dataframe_and_columns
+ lit_dataset = create_lit_dataset(pd_dataset, lit_columns)
+ expected_examples = [
+ {"feature_1": 1.0, "feature_2": 3.0, "label": 1.0},
+ ]
+
+ assert lit_dataset.spec() == dict(lit_columns)
+ assert expected_examples == lit_dataset._examples
+
+ def test_create_lit_model_from_tensorflow_returns_model(self, set_up_sequential):
+ feature_types, label_types, saved_model_path = set_up_sequential
+ lit_model = create_lit_model(saved_model_path, feature_types, label_types)
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(label_types)
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label"}
+ assert len(item.values()) == 1
+
+ @mock.patch.dict(os.environ, {"LIT_PROXY_URL": "auto"})
+ @pytest.mark.usefixtures(
+ "sampled_shapley_explainer_mock", "load_model_from_local_path_mock"
+ )
+ def test_create_lit_model_from_tensorflow_with_xai_returns_model(
+ self, set_up_sequential
+ ):
+ feature_types, label_types, saved_model_path = set_up_sequential
+ lit_model = create_lit_model(saved_model_path, feature_types, label_types)
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(
+ {
+ **label_types,
+ "feature_attribution": lit_types.FeatureSalience(signed=True),
+ }
+ )
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label", "feature_attribution"}
+ assert len(item.values()) == 2
+
+ @pytest.mark.usefixtures(
+ "predict_client_predict_dict_mock", "get_endpoint_with_models_mock"
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_dict_endpoint_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
+ lit_model = create_lit_model_from_endpoint(
+ endpoint, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(label_types)
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label"}
+ assert len(item.values()) == 1
+
+ @pytest.mark.usefixtures(
+ "predict_client_explain_dict_mock",
+ "get_endpoint_with_models_with_explanation_mock",
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_dict_endpoint_with_xai_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
+ lit_model = create_lit_model_from_endpoint(
+ endpoint, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(
+ {
+ **label_types,
+ "feature_attribution": lit_types.FeatureSalience(signed=True),
+ }
+ )
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label", "feature_attribution"}
+ assert len(item.values()) == 2
+
+ @pytest.mark.usefixtures(
+ "predict_client_predict_dict_mock", "get_endpoint_with_models_mock"
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_dict_endpoint_name_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ lit_model = create_lit_model_from_endpoint(
+ _TEST_ENDPOINT_NAME, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(label_types)
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label"}
+ assert len(item.values()) == 1
+
+ @pytest.mark.usefixtures(
+ "predict_client_explain_dict_mock",
+ "get_endpoint_with_models_with_explanation_mock",
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_dict_endpoint_name_with_xai_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ lit_model = create_lit_model_from_endpoint(
+ _TEST_ENDPOINT_NAME, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(
+ {
+ **label_types,
+ "feature_attribution": lit_types.FeatureSalience(signed=True),
+ }
+ )
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label", "feature_attribution"}
+ assert len(item.values()) == 2
+
+ @pytest.mark.usefixtures(
+ "predict_client_predict_list_mock", "get_endpoint_with_models_mock"
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_list_endpoint_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
+ lit_model = create_lit_model_from_endpoint(
+ endpoint, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(label_types)
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label"}
+ assert len(item.values()) == 1
+
+ @pytest.mark.usefixtures(
+ "predict_client_explain_list_mock",
+ "get_endpoint_with_models_with_explanation_mock",
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_list_endpoint_with_xai_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
+ lit_model = create_lit_model_from_endpoint(
+ endpoint, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(
+ {
+ **label_types,
+ "feature_attribution": lit_types.FeatureSalience(signed=True),
+ }
+ )
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label", "feature_attribution"}
+ assert len(item.values()) == 2
+
+ @pytest.mark.usefixtures(
+ "predict_client_predict_list_mock", "get_endpoint_with_models_mock"
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_list_endpoint_name_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ lit_model = create_lit_model_from_endpoint(
+ _TEST_ENDPOINT_NAME, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(label_types)
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label"}
+ assert len(item.values()) == 1
+
+ @pytest.mark.usefixtures(
+ "predict_client_explain_list_mock",
+ "get_endpoint_with_models_with_explanation_mock",
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_list_endpoint_name_with_xai_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ lit_model = create_lit_model_from_endpoint(
+ _TEST_ENDPOINT_NAME, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(
+ {
+ **label_types,
+ "feature_attribution": lit_types.FeatureSalience(signed=True),
+ }
+ )
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label", "feature_attribution"}
+ assert len(item.values()) == 2
+
+ @pytest.mark.usefixtures("init_lit_widget_mock")
+ def test_open_lit(
+ self, set_up_sequential, set_up_pandas_dataframe_and_columns, widget_render_mock
+ ):
+ pd_dataset, lit_columns = set_up_pandas_dataframe_and_columns
+ lit_dataset = create_lit_dataset(pd_dataset, lit_columns)
+ feature_types, label_types, saved_model_path = set_up_sequential
+ lit_model = create_lit_model(saved_model_path, feature_types, label_types)
+
+ open_lit({"model": lit_model}, {"dataset": lit_dataset})
+ widget_render_mock.assert_called_once()
+
+ @pytest.mark.usefixtures("init_lit_widget_mock")
+ def test_set_up_and_open_lit(
+ self, set_up_sequential, set_up_pandas_dataframe_and_columns, widget_render_mock
+ ):
+ pd_dataset, lit_columns = set_up_pandas_dataframe_and_columns
+ feature_types, label_types, saved_model_path = set_up_sequential
+ lit_dataset, lit_model = set_up_and_open_lit(
+ pd_dataset, lit_columns, saved_model_path, feature_types, label_types
+ )
+
+ expected_examples = [
+ {"feature_1": 1.0, "feature_2": 3.0, "label": 1.0},
+ ]
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_dataset.spec() == dict(lit_columns)
+ assert expected_examples == lit_dataset._examples
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(label_types)
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label"}
+ assert len(item.values()) == 1
+
+ widget_render_mock.assert_called_once()
+
+ @pytest.mark.usefixtures("init_lit_widget_mock")
+ @mock.patch.dict(os.environ, {"LIT_PROXY_URL": "auto"})
+ @pytest.mark.usefixtures(
+ "sampled_shapley_explainer_mock", "load_model_from_local_path_mock"
+ )
+ def test_set_up_and_open_lit_with_xai(
+ self, set_up_sequential, set_up_pandas_dataframe_and_columns, widget_render_mock
+ ):
+ pd_dataset, lit_columns = set_up_pandas_dataframe_and_columns
+ feature_types, label_types, saved_model_path = set_up_sequential
+ lit_dataset, lit_model = set_up_and_open_lit(
+ pd_dataset, lit_columns, saved_model_path, feature_types, label_types
+ )
+
+ expected_examples = [
+ {"feature_1": 1.0, "feature_2": 3.0, "label": 1.0},
+ ]
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_dataset.spec() == dict(lit_columns)
+ assert expected_examples == lit_dataset._examples
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(
+ {
+ **label_types,
+ "feature_attribution": lit_types.FeatureSalience(signed=True),
+ }
+ )
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label", "feature_attribution"}
+ assert len(item.values()) == 2
+
+ widget_render_mock.assert_called_once()
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_explain_saved_model_metadata_builder_tf1_test.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_explain_saved_model_metadata_builder_tf1_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..d80356642afafe6f8dc96a2ca8f6297623419603
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_explain_saved_model_metadata_builder_tf1_test.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import tensorflow.compat.v1 as tf
+
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform.explain.metadata.tf.v1 import (
+ saved_model_metadata_builder,
+)
+from google.cloud.aiplatform.compat.types import explanation_metadata
+import constants as test_constants
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class SavedModelMetadataBuilderTF1Test(tf.test.TestCase):
+ def _set_up(self):
+ self.sess = tf.Session(graph=tf.Graph())
+ with self.sess.graph.as_default():
+ self.x = tf.placeholder(shape=[None, 10], dtype=tf.float32, name="inp")
+ weights = tf.constant(1.0, shape=(10, 2), name="weights")
+ bias_weight = tf.constant(1.0, shape=(2,), name="bias")
+ self.linear_layer = tf.add(tf.matmul(self.x, weights), bias_weight)
+ self.prediction = tf.nn.relu(self.linear_layer)
+ # save the model
+ self.model_path = self.get_temp_dir()
+ builder = tf.saved_model.builder.SavedModelBuilder(self.model_path)
+ tensor_info_x = tf.saved_model.utils.build_tensor_info(self.x)
+ tensor_info_pred = tf.saved_model.utils.build_tensor_info(self.prediction)
+ tensor_info_lin = tf.saved_model.utils.build_tensor_info(self.linear_layer)
+ prediction_signature = (
+ tf.saved_model.signature_def_utils.build_signature_def(
+ inputs={"x": tensor_info_x},
+ outputs={"y": tensor_info_pred},
+ method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
+ )
+ )
+ double_output_signature = (
+ tf.saved_model.signature_def_utils.build_signature_def(
+ inputs={"x": tensor_info_x},
+ outputs={"y": tensor_info_pred, "lin": tensor_info_lin},
+ method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
+ )
+ )
+
+ builder.add_meta_graph_and_variables(
+ self.sess,
+ [tf.saved_model.tag_constants.SERVING],
+ signature_def_map={
+ tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature,
+ "double": double_output_signature,
+ },
+ )
+ builder.save()
+
+ def test_get_metadata_correct_inputs(self):
+ self._set_up()
+ md_builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
+ self.model_path, tags=[tf.saved_model.tag_constants.SERVING]
+ )
+ expected_md = {
+ "inputs": {"x": {"inputTensorName": "inp:0"}},
+ "outputs": {"y": {"outputTensorName": "Relu:0"}},
+ }
+
+ assert md_builder.get_metadata() == expected_md
+
+ def test_get_metadata_protobuf_correct_inputs(self):
+ self._set_up()
+ md_builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
+ self.model_path, tags=[tf.saved_model.tag_constants.SERVING]
+ )
+ expected_object = explanation_metadata.ExplanationMetadata(
+ inputs={"x": {"input_tensor_name": "inp:0"}},
+ outputs={"y": {"output_tensor_name": "Relu:0"}},
+ )
+
+ assert md_builder.get_metadata_protobuf() == expected_object
+
+ def test_get_metadata_double_output(self):
+ self._set_up()
+ md_builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
+ self.model_path, signature_name="double", outputs_to_explain=["lin"]
+ )
+
+ expected_md = {
+ "inputs": {"x": {"inputTensorName": "inp:0"}},
+ "outputs": {"lin": {"outputTensorName": "Add:0"}},
+ }
+
+ assert md_builder.get_metadata() == expected_md
+
+ def test_get_metadata_protobuf_double_output(self):
+ self._set_up()
+ md_builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
+ self.model_path, signature_name="double", outputs_to_explain=["lin"]
+ )
+
+ expected_object = explanation_metadata.ExplanationMetadata(
+ inputs={"x": {"input_tensor_name": "inp:0"}},
+ outputs={"lin": {"output_tensor_name": "Add:0"}},
+ )
+
+ assert md_builder.get_metadata_protobuf() == expected_object
+
+ @pytest.mark.usefixtures("upload_model_mock", "get_model_mock")
+ def test_model_upload_compatibility(self):
+ self._set_up()
+ md_builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
+ self.model_path, tags=[tf.saved_model.tag_constants.SERVING]
+ )
+
+ generated_md = md_builder.get_metadata_protobuf()
+
+ try:
+ models.Model.upload(
+ display_name=test_constants.ModelConstants._TEST_MODEL_NAME,
+ serving_container_image_uri=test_constants.ModelConstants._TEST_SERVING_CONTAINER_IMAGE,
+ explanation_parameters=test_constants.ModelConstants._TEST_EXPLANATION_PARAMETERS,
+ explanation_metadata=generated_md, # Test metadata from builder
+ labels=test_constants.ModelConstants._TEST_LABEL,
+ )
+ except TypeError as e:
+ if "Parameter to MergeFrom() must be instance of same class" in str(e):
+ pytest.fail(
+ f"Model.upload() expects different proto version, more info: {e}"
+ )
+ else:
+ raise e
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_explain_saved_model_metadata_builder_tf2_test.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_explain_saved_model_metadata_builder_tf2_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f6be1dc3d303010e4da6bff820e1f7861da7365
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_explain_saved_model_metadata_builder_tf2_test.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import tensorflow as tf
+import numpy as np
+
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform.explain.metadata.tf.v2 import (
+ saved_model_metadata_builder,
+)
+from google.cloud.aiplatform.compat.types import explanation_metadata
+import constants as test_constants
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class SavedModelMetadataBuilderTF2Test(tf.test.TestCase):
+ def _set_up_sequential(self):
+ # Set up for the sequential.
+ self.seq_model = tf.keras.models.Sequential()
+ self.seq_model.add(tf.keras.layers.Dense(32, activation="relu", input_dim=10))
+ self.seq_model.add(tf.keras.layers.Dense(32, activation="relu"))
+ self.seq_model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
+ self.saved_model_path = self.get_temp_dir()
+ tf.saved_model.save(self.seq_model, self.saved_model_path)
+
+ def test_get_metadata_sequential(self):
+ self._set_up_sequential()
+
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
+ self.saved_model_path
+ )
+ generated_md = builder.get_metadata()
+ expected_md = {
+ "outputs": {"dense_2": {"outputTensorName": "dense_2"}},
+ "inputs": {"dense_input": {"inputTensorName": "dense_input"}},
+ }
+ assert expected_md == generated_md
+
+ def test_get_metadata_protobuf_sequential(self):
+ self._set_up_sequential()
+
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
+ self.saved_model_path
+ )
+ generated_object = builder.get_metadata_protobuf()
+ expected_object = explanation_metadata.ExplanationMetadata(
+ inputs={"dense_input": {"input_tensor_name": "dense_input"}},
+ outputs={"dense_2": {"output_tensor_name": "dense_2"}},
+ )
+ assert expected_object == generated_object
+
+ def test_get_metadata_functional(self):
+ inputs1 = tf.keras.Input(shape=(10,), name="model_input1")
+ inputs2 = tf.keras.Input(shape=(10,), name="model_input2")
+ x = tf.keras.layers.Dense(32, activation="relu")(inputs1)
+ x = tf.keras.layers.Dense(32, activation="relu")(x)
+ x = tf.keras.layers.concatenate([x, inputs2])
+ outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)
+ fun_model = tf.keras.Model(
+ inputs=[inputs1, inputs2], outputs=outputs, name="fun"
+ )
+ model_dir = self.get_temp_dir()
+ tf.saved_model.save(fun_model, model_dir)
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder(model_dir)
+ generated_md = builder.get_metadata()
+ expected_md = {
+ "inputs": {
+ "model_input1": {"inputTensorName": "model_input1"},
+ "model_input2": {"inputTensorName": "model_input2"},
+ },
+ "outputs": {"dense_2": {"outputTensorName": "dense_2"}},
+ }
+ assert expected_md == generated_md
+
+ def test_get_metadata_subclassed_model(self):
+ class MyModel(tf.keras.Model):
+ def __init__(self, num_classes=2):
+ super(MyModel, self).__init__(name="my_model")
+ self.num_classes = num_classes
+ self.dense_1 = tf.keras.layers.Dense(32, activation="relu")
+ self.dense_2 = tf.keras.layers.Dense(num_classes, activation="sigmoid")
+
+ def call(self, inputs):
+ x = self.dense_1(inputs)
+ return self.dense_2(x)
+
+ subclassed_model = MyModel()
+ subclassed_model.compile(loss="categorical_crossentropy")
+ np.random.seed(0)
+ x_train = np.random.random((1, 100))
+ y_train = np.random.randint(2, size=(1, 2))
+ subclassed_model.fit(x_train, y_train, batch_size=1, epochs=1)
+ model_dir = self.get_temp_dir()
+ tf.saved_model.save(subclassed_model, model_dir)
+
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder(model_dir)
+ generated_md = builder.get_metadata()
+ expected_md = {
+ "inputs": {"input_1": {"inputTensorName": "input_1"}},
+ "outputs": {"output_1": {"outputTensorName": "output_1"}},
+ }
+ assert expected_md == generated_md
+
+ @pytest.mark.skip(reason="Failing for Python 3.11, tracked in b/293506827.")
+ def test_non_keras_model(self):
+ class CustomModuleWithOutputName(tf.Module):
+ def __init__(self):
+ super(CustomModuleWithOutputName, self).__init__()
+ self.v = tf.Variable(1.0)
+
+ @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
+ def __call__(self, x):
+ return {"custom_output_name": x * self.v}
+
+ module_output = CustomModuleWithOutputName()
+ call_output = module_output.__call__.get_concrete_function(
+ tf.TensorSpec(None, tf.float32)
+ )
+ model_dir = self.get_temp_dir()
+ tf.saved_model.save(
+ module_output, model_dir, signatures={"serving_default": call_output}
+ )
+
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder(model_dir)
+ generated_md = builder.get_metadata()
+ expected_md = {
+ "inputs": {"x": {"inputTensorName": "x"}},
+ "outputs": {
+ "custom_output_name": {"outputTensorName": "custom_output_name"}
+ },
+ }
+ assert expected_md == generated_md
+
+ def test_model_with_feature_column(self):
+ feature_columns = [
+ tf.feature_column.embedding_column(
+ tf.feature_column.categorical_column_with_vocabulary_list(
+ "mode", ["fixed", "normal", "reversible"]
+ ),
+ dimension=8,
+ ),
+ tf.feature_column.numeric_column("age"),
+ ]
+ feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
+
+ model = tf.keras.Sequential(
+ [
+ feature_layer,
+ tf.keras.layers.Dense(128, activation="relu"),
+ tf.keras.layers.Dense(1),
+ ]
+ )
+
+ model.compile(
+ optimizer="adam",
+ loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
+ metrics=["accuracy"],
+ )
+
+ model.fit(
+ {"age": np.array([20, 1]), "mode": np.array(["fixed", "normal"])},
+ np.array([0, 1]),
+ )
+ model_dir = self.get_temp_dir()
+ tf.saved_model.save(model, model_dir)
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder(model_dir)
+ generated_md = builder.get_metadata()
+ expected_md = {
+ "inputs": {
+ "age": {"inputTensorName": "age", "modality": "categorical"},
+ "mode": {"inputTensorName": "mode", "modality": "categorical"},
+ },
+ "outputs": {"output_1": {"outputTensorName": "output_1"}},
+ }
+ assert expected_md == generated_md
+
+ @pytest.mark.usefixtures("upload_model_mock", "get_model_mock")
+ def test_model_upload_compatibility(self):
+ self._set_up_sequential()
+
+ builder = saved_model_metadata_builder.SavedModelMetadataBuilder(
+ self.saved_model_path
+ )
+ generated_md = builder.get_metadata_protobuf()
+
+ try:
+ models.Model.upload(
+ display_name=test_constants.ModelConstants._TEST_MODEL_NAME,
+ serving_container_image_uri=test_constants.ModelConstants._TEST_SERVING_CONTAINER_IMAGE,
+ explanation_parameters=test_constants.ModelConstants._TEST_EXPLANATION_PARAMETERS,
+ explanation_metadata=generated_md, # Test metadata from builder
+ labels=test_constants.ModelConstants._TEST_LABEL,
+ )
+ except TypeError as e:
+ if "Parameter to MergeFrom() must be instance of same class" in str(e):
+ pytest.fail(
+ f"Model.upload() expects different proto version, more info: {e}"
+ )
+ else:
+ raise e
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_featurestores.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_featurestores.py
new file mode 100644
index 0000000000000000000000000000000000000000..3049b98135cbea5c11642b1fce75875dabc02ff9
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_featurestores.py
@@ -0,0 +1,3789 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import copy
+import pytest
+import datetime
+import pandas as pd
+import uuid
+
+from unittest import mock
+from importlib import reload
+from unittest.mock import MagicMock, patch
+
+from google.api_core import operation
+from google.protobuf import field_mask_pb2, timestamp_pb2
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform.utils import resource_manager_utils
+
+from google.cloud.aiplatform.utils import featurestore_utils
+from google.cloud.aiplatform.featurestore.feature import Feature
+from google.cloud.aiplatform.compat.services import (
+ featurestore_service_client,
+)
+from google.cloud.aiplatform.compat.services import (
+ featurestore_online_serving_service_client,
+)
+from google.cloud.aiplatform.compat.services import (
+ featurestore_online_serving_service_client_v1beta1,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ encryption_spec as gca_encryption_spec,
+)
+from google.cloud.aiplatform.compat.types import (
+ entity_type as gca_entity_type,
+)
+from google.cloud.aiplatform.compat.types import feature as gca_feature
+from google.cloud.aiplatform.compat.types import (
+ feature_selector as gca_feature_selector,
+)
+from google.cloud.aiplatform.compat.types import (
+ featurestore as gca_featurestore,
+)
+from google.cloud.aiplatform.compat.types import (
+ featurestore_service as gca_featurestore_service,
+)
+from google.cloud.aiplatform.compat.types import (
+ featurestore_online_service as gca_featurestore_online_service,
+)
+from google.cloud.aiplatform.compat.types import io as gca_io
+from google.cloud.aiplatform.compat.types import types as gca_types
+from google.cloud.aiplatform.compat.types import (
+ featurestore_online_service_v1beta1 as gca_featurestore_online_service_v1beta1,
+)
+from google.cloud.aiplatform.compat.types import (
+ types_v1beta1 as gca_types_v1beta1,
+)
+
+from google.cloud import bigquery
+from google.cloud import bigquery_storage
+from google.cloud.bigquery_storage_v1.types import stream as gcbqs_stream
+
+from google.cloud import resourcemanager
+
+# project
+_TEST_PROJECT = "test-project"
+_TEST_PROJECT_DIFF = "test-project-diff"
+_TEST_LOCATION = "us-central1"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+
+_TEST_FEATURE_TIME_DATETIME = datetime.datetime(
+ year=2022, month=1, day=1, hour=11, minute=59, second=59
+)
+
+_TEST_FEATURE_TIME_DATETIME_UTC = datetime.datetime(
+ year=2022,
+ month=1,
+ day=1,
+ hour=11,
+ minute=59,
+ second=59,
+ tzinfo=datetime.timezone.utc,
+)
+_TEST_FEATURE_TIMESTAMP = timestamp_pb2.Timestamp(seconds=1681323171)
+
+
+# featurestore
+_TEST_FEATURESTORE_ID = "featurestore_id"
+_TEST_FEATURESTORE_NAME = f"{_TEST_PARENT}/featurestores/{_TEST_FEATURESTORE_ID}"
+_TEST_FEATURESTORE_INVALID = f"{_TEST_PARENT}/featurestore/{_TEST_FEATURESTORE_ID}"
+
+# featurestore online
+_TEST_ONLINE_SERVING_CONFIG = 1
+_TEST_ONLINE_SERVING_CONFIG_UPDATE = 2
+
+# entity_type
+_TEST_ENTITY_TYPE_ID = "entity_type_id"
+_TEST_ENTITY_TYPE_NAME = f"{_TEST_FEATURESTORE_NAME}/entityTypes/{_TEST_ENTITY_TYPE_ID}"
+_TEST_ENTITY_TYPE_INVALID = (
+ f"{_TEST_FEATURESTORE_NAME}/entityType/{_TEST_ENTITY_TYPE_ID}"
+)
+
+# feature
+_TEST_FEATURE_ID = "feature_id"
+_TEST_FEATURE_NAME = f"{_TEST_ENTITY_TYPE_NAME}/features/{_TEST_FEATURE_ID}"
+_TEST_FEATURE_INVALID = f"{_TEST_ENTITY_TYPE_NAME}/feature/{_TEST_FEATURE_ID}"
+_TEST_FEATURE_VALUE_TYPE_STR = "INT64"
+_TEST_FEATURE_VALUE = 99
+_TEST_FEATURE_VALUE_TYPE_ENUM = 9
+_TEST_FEATURE_ID_INVALID = "1feature_id"
+
+_TEST_BOOL_TYPE = gca_feature.Feature.ValueType.BOOL
+_TEST_BOOL_ARR_TYPE = gca_feature.Feature.ValueType.BOOL_ARRAY
+_TEST_DOUBLE_TYPE = gca_feature.Feature.ValueType.DOUBLE
+_TEST_DOUBLE_ARR_TYPE = gca_feature.Feature.ValueType.DOUBLE_ARRAY
+_TEST_INT_TYPE = gca_feature.Feature.ValueType.INT64
+_TEST_INT_ARR_TYPE = gca_feature.Feature.ValueType.INT64_ARRAY
+_TEST_STR_TYPE = gca_feature.Feature.ValueType.STRING
+_TEST_STR_ARR_TYPE = gca_feature.Feature.ValueType.STRING_ARRAY
+_TEST_BYTES_TYPE = gca_feature.Feature.ValueType.BYTES
+
+_FEATURE_VALUE_TYPE_KEYS = {
+ _TEST_BOOL_TYPE: "bool_value",
+ _TEST_BOOL_ARR_TYPE: "bool_array_value",
+ _TEST_DOUBLE_TYPE: "double_value",
+ _TEST_DOUBLE_ARR_TYPE: "double_array_value",
+ _TEST_INT_TYPE: "int64_value",
+ _TEST_INT_ARR_TYPE: "int64_array_value",
+ _TEST_STR_TYPE: "string_value",
+ _TEST_STR_ARR_TYPE: "string_array_value",
+ _TEST_BYTES_TYPE: "bytes_value",
+}
+
+_TEST_FEATURE_VALUE_TYPE = _TEST_INT_TYPE
+_TEST_FEATURE_VALUE_TYPE_BQ_FIELD_TYPE = "INT64"
+_TEST_FEATURE_VALUE_TYPE_BQ_MODE = "NULLABLE"
+
+_ARRAY_FEATURE_VALUE_TYPE_TO_GCA_TYPE_MAP = {
+ _TEST_BOOL_ARR_TYPE: gca_types.BoolArray,
+ _TEST_DOUBLE_ARR_TYPE: gca_types.DoubleArray,
+ _TEST_INT_ARR_TYPE: gca_types.Int64Array,
+ _TEST_STR_ARR_TYPE: gca_types.StringArray,
+}
+
+_TEST_BOOL_COL = "bool_col"
+_TEST_BOOL_ARR_COL = "bool_array_col"
+_TEST_DOUBLE_COL = "double_col"
+_TEST_DOUBLE_ARR_COL = "double_array_col"
+_TEST_INT_COL = "int64_col"
+_TEST_INT_ARR_COL = "int64_array_col"
+_TEST_STR_COL = "string_col"
+_TEST_STR_ARR_COL = "string_array_col"
+_TEST_BYTES_COL = "bytes_col"
+
+_TEST_FEATURE_IDS_FOR_DF_CONSTRUCTION = [
+ _TEST_BOOL_COL,
+ _TEST_BOOL_ARR_COL,
+ _TEST_DOUBLE_COL,
+ _TEST_DOUBLE_ARR_COL,
+ _TEST_INT_COL,
+ _TEST_INT_ARR_COL,
+ _TEST_STR_COL,
+ _TEST_STR_ARR_COL,
+ _TEST_BYTES_COL,
+]
+
+_TEST_FEATURE_VALUE_TYPES_FOR_DF_CONSTRUCTION = [
+ _TEST_BOOL_TYPE,
+ _TEST_BOOL_ARR_TYPE,
+ _TEST_DOUBLE_TYPE,
+ _TEST_DOUBLE_ARR_TYPE,
+ _TEST_INT_TYPE,
+ _TEST_INT_ARR_TYPE,
+ _TEST_STR_TYPE,
+ _TEST_STR_ARR_TYPE,
+ _TEST_BYTES_TYPE,
+]
+
+# misc
+_TEST_DESCRIPTION = "my description"
+_TEST_LABELS = {"my_key": "my_value"}
+_TEST_DESCRIPTION_UPDATE = "my description update"
+_TEST_LABELS_UPDATE = {"my_key_update": "my_value_update"}
+
+# request_metadata
+_TEST_REQUEST_METADATA = ()
+
+# CMEK encryption
+_TEST_ENCRYPTION_KEY_NAME = "key_1234"
+_TEST_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_KEY_NAME
+)
+
+# Lists
+_TEST_FEATURESTORE_LIST = [
+ gca_featurestore.Featurestore(
+ name=_TEST_FEATURESTORE_NAME,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG
+ ),
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ ),
+ gca_featurestore.Featurestore(
+ name=_TEST_FEATURESTORE_NAME,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG
+ ),
+ ),
+ gca_featurestore.Featurestore(
+ name=_TEST_FEATURESTORE_NAME,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG
+ ),
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ ),
+]
+
+_TEST_ENTITY_TYPE_LIST = [
+ gca_entity_type.EntityType(
+ name=_TEST_ENTITY_TYPE_NAME,
+ ),
+ gca_entity_type.EntityType(
+ name=_TEST_ENTITY_TYPE_NAME,
+ ),
+ gca_entity_type.EntityType(
+ name=_TEST_ENTITY_TYPE_NAME,
+ ),
+]
+
+_TEST_FEATURE_LIST = [
+ gca_feature.Feature(
+ name=_TEST_FEATURE_NAME,
+ ),
+ gca_feature.Feature(
+ name=_TEST_FEATURE_NAME,
+ ),
+ gca_feature.Feature(
+ name=_TEST_FEATURE_NAME,
+ ),
+]
+
+_TEST_FEATURE_CONFIGS = {
+ "my_feature_id_1": {"value_type": _TEST_FEATURE_VALUE_TYPE_STR},
+}
+
+_TEST_IMPORTING_FEATURE_ID = "my_feature_id_1"
+_TEST_IMPORTING_FEATURE_SOURCE_FIELD = "my_feature_id_1_source_field"
+
+_TEST_IMPORTING_FEATURE_IDS = ["my_feature_id_1"]
+
+_TEST_IMPORTING_FEATURE_SOURCE_FIELDS = {
+ "my_feature_id_1": "my_feature_id_1_source_field",
+}
+
+_TEST_SERVING_FEATURE_IDS = {
+ "my_entity_type_id_1": ["my_feature_id_1_1", "my_feature_id_1_2"],
+ "my_entity_type_id_2": ["my_feature_id_2_1", "my_feature_id_2_2"],
+}
+
+_TEST_FEATURE_TIME_FIELD = "feature_time_field"
+_TEST_FEATURE_TIME = datetime.datetime.now()
+
+_TEST_BQ_SOURCE_URI = "bq://project.dataset.table_name"
+_TEST_GCS_AVRO_SOURCE_URIS = [
+ "gs://my_bucket/my_file_1.avro",
+]
+_TEST_GCS_CSV_SOURCE_URI = "gs://my_bucket/my_file_1.csv"
+_TEST_GCS_CSV_SOURCE_URIS = [
+ "gs://my_bucket/my_file_1.csv",
+]
+_TEST_GCS_SOURCE_TYPE_CSV = "csv"
+_TEST_GCS_SOURCE_TYPE_AVRO = "avro"
+_TEST_GCS_SOURCE_TYPE_INVALID = "json"
+
+_TEST_BATCH_SERVE_START_TIME = datetime.datetime.now()
+_TEST_BQ_DESTINATION_URI = "bq://project.dataset.table_name"
+_TEST_GCS_OUTPUT_URI_PREFIX = "gs://my_bucket/path/to_prefix"
+
+_TEST_GCS_DESTINATION_TYPE_CSV = "csv"
+_TEST_GCS_DESTINATION_TYPE_TFRECORD = "tfrecord"
+_TEST_GCS_DESTINATION_TYPE_INVALID = "json"
+
+_TEST_BQ_SOURCE = gca_io.BigQuerySource(input_uri=_TEST_BQ_SOURCE_URI)
+_TEST_AVRO_SOURCE = gca_io.AvroSource(
+ gcs_source=gca_io.GcsSource(uris=_TEST_GCS_AVRO_SOURCE_URIS)
+)
+_TEST_CSV_SOURCE = gca_io.CsvSource(
+ gcs_source=gca_io.GcsSource(uris=_TEST_GCS_CSV_SOURCE_URIS)
+)
+
+_TEST_BQ_DESTINATION = gca_io.BigQueryDestination(output_uri=_TEST_BQ_DESTINATION_URI)
+_TEST_CSV_DESTINATION = gca_io.CsvDestination(
+ gcs_destination=gca_io.GcsDestination(output_uri_prefix=_TEST_GCS_OUTPUT_URI_PREFIX)
+)
+_TEST_TFRECORD_DESTINATION = gca_io.TFRecordDestination(
+ gcs_destination=gca_io.GcsDestination(output_uri_prefix=_TEST_GCS_OUTPUT_URI_PREFIX)
+)
+
+_TEST_READ_ENTITY_ID = "entity_id_1"
+_TEST_READ_ENTITY_IDS = ["entity_id_1"]
+
+_TEST_BASE_HEADER_PROTO = (
+ gca_featurestore_online_service.ReadFeatureValuesResponse.Header()
+)
+_TEST_BASE_ENTITY_VIEW_PROTO = (
+ gca_featurestore_online_service.ReadFeatureValuesResponse.EntityView()
+)
+_TEST_BASE_DATA_PROTO = (
+ gca_featurestore_online_service.ReadFeatureValuesResponse.EntityView.Data()
+)
+
+
+def _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id, feature_ids, feature_destination_fields=None
+):
+ feature_destination_fields = feature_destination_fields or {}
+ entity_type_spec_proto = gca_featurestore_service.BatchReadFeatureValuesRequest.EntityTypeSpec(
+ entity_type_id=entity_type_id,
+ feature_selector=gca_feature_selector.FeatureSelector(
+ id_matcher=gca_feature_selector.IdMatcher(ids=feature_ids)
+ ),
+ settings=[
+ gca_featurestore_service.DestinationFeatureSetting(
+ feature_id=feature_id, destination_field=feature_destination_field
+ )
+ for feature_id, feature_destination_field in feature_destination_fields.items()
+ ]
+ or None,
+ )
+ return entity_type_spec_proto
+
+
+def _get_header_proto(feature_ids):
+ header_proto = copy.deepcopy(_TEST_BASE_HEADER_PROTO)
+ header_proto.feature_descriptors = [
+ gca_featurestore_online_service.ReadFeatureValuesResponse.FeatureDescriptor(
+ id=feature_id
+ )
+ for feature_id in feature_ids
+ ]
+ return header_proto
+
+
+def _get_data_proto(feature_value_type, feature_value):
+ data_proto = copy.deepcopy(_TEST_BASE_DATA_PROTO)
+ if feature_value is not None:
+ if feature_value_type in _ARRAY_FEATURE_VALUE_TYPE_TO_GCA_TYPE_MAP:
+ array_proto = _ARRAY_FEATURE_VALUE_TYPE_TO_GCA_TYPE_MAP[
+ feature_value_type
+ ]()
+ array_proto.values = feature_value
+ feature_value = array_proto
+ data_proto.value = gca_featurestore_online_service.FeatureValue(
+ {_FEATURE_VALUE_TYPE_KEYS[feature_value_type]: feature_value}
+ )
+ return data_proto
+
+
+def _get_entity_view_proto(entity_id, feature_value_types, feature_values):
+ entity_view_proto = copy.deepcopy(_TEST_BASE_ENTITY_VIEW_PROTO)
+ entity_view_proto.entity_id = entity_id
+ entity_view_data = []
+ for feature_value_type, feature_value in zip(feature_value_types, feature_values):
+ data = _get_data_proto(feature_value_type, feature_value)
+ entity_view_data.append(data)
+ entity_view_proto.data = entity_view_data
+ return entity_view_proto
+
+
+def uuid_mock():
+ return uuid.UUID(int=1)
+
+
+# All Resource Manager Mocks
+@pytest.fixture
+def get_project_mock():
+ with patch.object(
+ resourcemanager.ProjectsClient, "get_project"
+ ) as get_project_mock:
+ get_project_mock.return_value = resourcemanager.Project(
+ project_id=_TEST_PROJECT,
+ )
+ yield get_project_mock
+
+
+# All BigQuery Mocks
+@pytest.fixture
+def bq_client_mock():
+ mock = MagicMock(bigquery.client.Client)
+ yield mock
+
+
+@pytest.fixture
+def bq_dataset_mock():
+ mock = MagicMock(bigquery.dataset.Dataset)
+ yield mock
+
+
+@pytest.fixture
+def bq_init_client_mock(bq_client_mock):
+ with patch.object(bigquery, "Client") as bq_init_client_mock:
+ bq_init_client_mock.return_value = bq_client_mock
+ yield bq_init_client_mock
+
+
+@pytest.fixture
+def bq_init_dataset_mock(bq_dataset_mock):
+ with patch.object(bigquery, "Dataset") as bq_init_dataset_mock:
+ bq_init_dataset_mock.return_value = bq_dataset_mock
+ yield bq_init_dataset_mock
+
+
+@pytest.fixture
+def bq_create_dataset_mock(bq_client_mock):
+ with patch.object(bq_client_mock, "create_dataset") as bq_create_dataset_mock:
+ yield bq_create_dataset_mock
+
+
+@pytest.fixture
+def bq_load_table_from_dataframe_mock(bq_client_mock):
+ with patch.object(
+ bq_client_mock, "load_table_from_dataframe"
+ ) as bq_load_table_from_dataframe_mock:
+ yield bq_load_table_from_dataframe_mock
+
+
+@pytest.fixture
+def bq_delete_dataset_mock(bq_client_mock):
+ with patch.object(bq_client_mock, "delete_dataset") as bq_delete_dataset_mock:
+ yield bq_delete_dataset_mock
+
+
+@pytest.fixture
+def bq_delete_table_mock(bq_client_mock):
+ with patch.object(bq_client_mock, "delete_table") as bq_delete_table_mock:
+ yield bq_delete_table_mock
+
+
+@pytest.fixture
+def bqs_client_mock():
+ mock = MagicMock(bigquery_storage.BigQueryReadClient)
+ yield mock
+
+
+@pytest.fixture
+def bqs_init_client_mock(bqs_client_mock):
+ with patch.object(bigquery_storage, "BigQueryReadClient") as bqs_init_client_mock:
+ bqs_init_client_mock.return_value = bqs_client_mock
+ yield bqs_init_client_mock
+
+
+@pytest.fixture
+def bqs_create_read_session(bqs_client_mock):
+ with patch.object(
+ bqs_client_mock, "create_read_session"
+ ) as bqs_create_read_session:
+ read_session_proto = gcbqs_stream.ReadSession()
+ read_session_proto.streams = [gcbqs_stream.ReadStream()]
+ bqs_create_read_session.return_value = read_session_proto
+ yield bqs_create_read_session
+
+
+@pytest.fixture
+def bq_schema_field_mock():
+ mock = MagicMock(bigquery.SchemaField)
+ yield mock
+
+
+@pytest.fixture
+def bq_init_schema_field_mock(bq_schema_field_mock):
+ with patch.object(bigquery, "SchemaField") as bq_init_schema_field_mock:
+ bq_init_schema_field_mock.return_value = bq_schema_field_mock
+ yield bq_init_schema_field_mock
+
+
+# All Featurestore Mocks
+@pytest.fixture
+def get_featurestore_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "get_featurestore"
+ ) as get_featurestore_mock:
+ get_featurestore_mock.return_value = gca_featurestore.Featurestore(
+ name=_TEST_FEATURESTORE_NAME,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG
+ ),
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ yield get_featurestore_mock
+
+
+@pytest.fixture
+def update_featurestore_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "update_featurestore"
+ ) as update_featurestore_mock:
+ update_featurestore_lro_mock = mock.Mock(operation.Operation)
+ update_featurestore_mock.return_value = update_featurestore_lro_mock
+ yield update_featurestore_mock
+
+
+@pytest.fixture
+def list_featurestores_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "list_featurestores"
+ ) as list_featurestores_mock:
+ list_featurestores_mock.return_value = _TEST_FEATURESTORE_LIST
+ yield list_featurestores_mock
+
+
+@pytest.fixture
+def delete_featurestore_mock():
+ with mock.patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "delete_featurestore"
+ ) as delete_featurestore_mock:
+ delete_featurestore_lro_mock = mock.Mock(operation.Operation)
+ delete_featurestore_mock.return_value = delete_featurestore_lro_mock
+ yield delete_featurestore_mock
+
+
+@pytest.fixture
+def search_features_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "search_features"
+ ) as search_features_mock:
+ search_features_mock.return_value = _TEST_FEATURE_LIST
+ yield search_features_mock
+
+
+@pytest.fixture
+def create_featurestore_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "create_featurestore"
+ ) as create_featurestore_mock:
+ create_featurestore_lro_mock = mock.Mock(operation.Operation)
+ create_featurestore_lro_mock.result.return_value = (
+ gca_featurestore.Featurestore(
+ name=_TEST_FEATURESTORE_NAME,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG
+ ),
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ )
+ create_featurestore_mock.return_value = create_featurestore_lro_mock
+ yield create_featurestore_mock
+
+
+@pytest.fixture
+def batch_read_feature_values_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient,
+ "batch_read_feature_values",
+ ) as batch_read_feature_values_mock:
+ batch_read_feature_values_lro_mock = mock.Mock(operation.Operation)
+ batch_read_feature_values_mock.return_value = batch_read_feature_values_lro_mock
+ yield batch_read_feature_values_mock
+
+
+# ALL EntityType Mocks
+@pytest.fixture
+def get_entity_type_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "get_entity_type"
+ ) as get_entity_type_mock:
+ get_entity_type_mock.return_value = gca_entity_type.EntityType(
+ name=_TEST_ENTITY_TYPE_NAME,
+ )
+ yield get_entity_type_mock
+
+
+@pytest.fixture
+def update_entity_type_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "update_entity_type"
+ ) as update_entity_type_mock:
+ update_entity_type_mock.return_value = gca_entity_type.EntityType(
+ name=_TEST_ENTITY_TYPE_NAME,
+ labels=_TEST_LABELS_UPDATE,
+ )
+ yield update_entity_type_mock
+
+
+@pytest.fixture
+def list_entity_types_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "list_entity_types"
+ ) as list_entity_types_mock:
+ list_entity_types_mock.return_value = _TEST_ENTITY_TYPE_LIST
+ yield list_entity_types_mock
+
+
+@pytest.fixture
+def delete_entity_type_mock():
+ with mock.patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "delete_entity_type"
+ ) as delete_entity_type_mock:
+ delete_entity_type_lro_mock = mock.Mock(operation.Operation)
+ delete_entity_type_mock.return_value = delete_entity_type_lro_mock
+ yield delete_entity_type_mock
+
+
+@pytest.fixture
+def create_entity_type_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "create_entity_type"
+ ) as create_entity_type_mock:
+ create_entity_type_lro_mock = mock.Mock(operation.Operation)
+ create_entity_type_lro_mock.result.return_value = gca_entity_type.EntityType(
+ name=_TEST_ENTITY_TYPE_NAME
+ )
+ create_entity_type_mock.return_value = create_entity_type_lro_mock
+ yield create_entity_type_mock
+
+
+@pytest.fixture
+def import_feature_values_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "import_feature_values"
+ ) as import_feature_values_mock:
+ import_feature_values_lro_mock = mock.Mock(operation.Operation)
+ import_feature_values_mock.return_value = import_feature_values_lro_mock
+ yield import_feature_values_mock
+
+
+@pytest.fixture
+def read_feature_values_mock():
+ with patch.object(
+ featurestore_online_serving_service_client.FeaturestoreOnlineServingServiceClient,
+ "read_feature_values",
+ ) as read_feature_values_mock:
+ read_feature_values_mock.return_value = (
+ gca_featurestore_online_service.ReadFeatureValuesResponse(
+ header=_get_header_proto(feature_ids=[_TEST_FEATURE_ID]),
+ entity_view=_get_entity_view_proto(
+ entity_id=_TEST_READ_ENTITY_ID,
+ feature_value_types=[_TEST_FEATURE_VALUE_TYPE],
+ feature_values=[_TEST_FEATURE_VALUE],
+ ),
+ )
+ )
+ yield read_feature_values_mock
+
+
+@pytest.fixture
+def streaming_read_feature_values_mock():
+ with patch.object(
+ featurestore_online_serving_service_client.FeaturestoreOnlineServingServiceClient,
+ "streaming_read_feature_values",
+ ) as streaming_read_feature_values_mock:
+ streaming_read_feature_values_mock.return_value = [
+ gca_featurestore_online_service.ReadFeatureValuesResponse(
+ header=_get_header_proto(feature_ids=[_TEST_FEATURE_ID])
+ ),
+ gca_featurestore_online_service.ReadFeatureValuesResponse(
+ entity_view=_get_entity_view_proto(
+ entity_id=_TEST_READ_ENTITY_ID,
+ feature_value_types=[_TEST_FEATURE_VALUE_TYPE],
+ feature_values=[_TEST_FEATURE_VALUE],
+ ),
+ ),
+ ]
+ yield streaming_read_feature_values_mock
+
+
+@pytest.fixture
+def preview_write_feature_values_mock():
+ with patch.object(
+ featurestore_online_serving_service_client_v1beta1.FeaturestoreOnlineServingServiceClient,
+ "write_feature_values",
+ ) as write_feature_values_mock:
+ write_feature_values_mock.return_value = (
+ gca_featurestore_online_service_v1beta1.WriteFeatureValuesResponse()
+ )
+ yield write_feature_values_mock
+
+
+@pytest.fixture
+def write_feature_values_mock():
+ with patch.object(
+ featurestore_online_serving_service_client.FeaturestoreOnlineServingServiceClient,
+ "write_feature_values",
+ ) as write_feature_values_mock:
+ write_feature_values_mock.return_value = (
+ gca_featurestore_online_service.WriteFeatureValuesResponse()
+ )
+ yield write_feature_values_mock
+
+
+# ALL Feature Mocks
+@pytest.fixture
+def get_feature_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "get_feature"
+ ) as get_feature_mock:
+ get_feature_mock.return_value = gca_feature.Feature(
+ name=_TEST_FEATURE_NAME, value_type=_TEST_FEATURE_VALUE_TYPE
+ )
+ yield get_feature_mock
+
+
+@pytest.fixture
+def update_feature_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "update_feature"
+ ) as update_feature_mock:
+ update_feature_mock.return_value = gca_feature.Feature(
+ name=_TEST_FEATURE_NAME,
+ value_type=_TEST_FEATURE_VALUE_TYPE_ENUM,
+ )
+ yield update_feature_mock
+
+
+@pytest.fixture
+def list_features_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "list_features"
+ ) as list_features_mock:
+ list_features_mock.return_value = _TEST_FEATURE_LIST
+ yield list_features_mock
+
+
+@pytest.fixture
+def delete_feature_mock():
+ with mock.patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "delete_feature"
+ ) as delete_feature_mock:
+ delete_feature_lro_mock = mock.Mock(operation.Operation)
+ delete_feature_mock.return_value = delete_feature_lro_mock
+ yield delete_feature_mock
+
+
+@pytest.fixture
+def create_feature_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "create_feature"
+ ) as create_feature_mock:
+ create_feature_lro_mock = mock.Mock(operation.Operation)
+ create_feature_lro_mock.result.return_value = gca_feature.Feature(
+ name=_TEST_FEATURE_NAME,
+ value_type=_TEST_FEATURE_VALUE_TYPE_ENUM,
+ )
+ create_feature_mock.return_value = create_feature_lro_mock
+ yield create_feature_mock
+
+
+@pytest.fixture
+def batch_create_features_mock():
+ with patch.object(
+ featurestore_service_client.FeaturestoreServiceClient, "batch_create_features"
+ ) as batch_create_features_mock:
+ batch_create_features_lro_mock = mock.Mock(operation.Operation)
+ batch_create_features_mock.return_value = batch_create_features_lro_mock
+ yield batch_create_features_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestFeaturestoreUtils:
+ @pytest.mark.parametrize(
+ "resource_id",
+ ["resource_id", "resource_id12345", "_resource_id", "_123456"],
+ )
+ def test_validate_resource_id(self, resource_id: str):
+ featurestore_utils.validate_id(resource_id)
+
+ @pytest.mark.parametrize(
+ "resource_id",
+ [
+ "12345resource_id",
+ "resource_id/1234",
+ "_resource_id/1234",
+ "resource-id-1234",
+ "123456",
+ "c" * 61,
+ ],
+ )
+ def test_validate_invalid_resource_id(self, resource_id: str):
+ with pytest.raises(ValueError):
+ featurestore_utils.validate_id(resource_id)
+
+ @pytest.mark.parametrize(
+ "feature_id",
+ ["resource_id", "resource_id12345", "_resource_id", "_123456"],
+ )
+ def test_validate_feature_id(self, feature_id: str):
+ assert featurestore_utils.validate_feature_id(feature_id=feature_id) is None
+
+ @pytest.mark.parametrize(
+ "feature_id",
+ [
+ "12345resource_id",
+ "resource_id/1234",
+ "_resource_id/1234",
+ "resource-id-1234",
+ "123456",
+ "c" * 61,
+ "entity_id",
+ "Entity_ID",
+ "feature_timestamp",
+ "Feature_Timestamp",
+ "arrival_timestamp",
+ "Arrival_Timestamp",
+ ],
+ )
+ def test_validate_feature_id_with_raise(self, feature_id: str):
+ with pytest.raises(ValueError):
+ featurestore_utils.validate_feature_id(feature_id=feature_id)
+
+ @pytest.mark.parametrize(
+ "value_type",
+ [
+ "BOOL",
+ "BOOL_ARRAY",
+ "DOUBLE",
+ "DOUBLE_ARRAY",
+ "INT64",
+ "INT64_ARRAY",
+ "STRING",
+ "STRING_ARRAY",
+ "BYTES",
+ ],
+ )
+ def test_validate_value_type(self, value_type: str):
+ assert featurestore_utils.validate_value_type(value_type=value_type) is None
+
+ @pytest.mark.parametrize(
+ "value_type",
+ [
+ "INT",
+ "INT_array",
+ "STR",
+ "double",
+ "bool",
+ "array",
+ "INT32",
+ "VALUE_TYPE_UNSPECIFIED",
+ ],
+ )
+ def test_validate_value_type_with_raise(self, value_type: str):
+ with pytest.raises(ValueError):
+ featurestore_utils.validate_value_type(value_type=value_type)
+
+
+class Test_FeatureConfig:
+ def test_feature_config_return_create_feature_request(self):
+
+ featureConfig = featurestore_utils._FeatureConfig(
+ feature_id=_TEST_FEATURE_ID,
+ value_type=_TEST_FEATURE_VALUE_TYPE_STR,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+
+ gapic_feature = gca_feature.Feature(
+ description=_TEST_DESCRIPTION,
+ value_type=_TEST_FEATURE_VALUE_TYPE_ENUM,
+ labels=_TEST_LABELS,
+ )
+
+ expected_request = gca_featurestore_service.CreateFeatureRequest(
+ feature=gapic_feature,
+ feature_id=_TEST_FEATURE_ID,
+ )
+
+ assert featureConfig.get_create_feature_request() == expected_request
+
+ def test_feature_config_create_feature_request_raises_invalid_feature_id(self):
+ featureConfig = featurestore_utils._FeatureConfig(
+ feature_id=_TEST_FEATURE_ID_INVALID,
+ value_type=_TEST_FEATURE_VALUE_TYPE_STR,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+ with pytest.raises(ValueError):
+ featureConfig.get_create_feature_request()
+
+ @pytest.mark.parametrize("value_type", ["INT", "VALUE_TYPE_UNSPECIFIED"])
+ def test_feature_config_create_feature_request_raises_invalid_value_type(
+ self, value_type
+ ):
+ featureConfig = featurestore_utils._FeatureConfig(
+ feature_id=_TEST_FEATURE_ID,
+ value_type=value_type,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+ with pytest.raises(ValueError):
+ featureConfig.get_create_feature_request()
+
+
+class TestFeaturestore:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize(
+ "featurestore_name", [_TEST_FEATURESTORE_ID, _TEST_FEATURESTORE_NAME]
+ )
+ def test_init_featurestore(self, featurestore_name, get_featurestore_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(featurestore_name=featurestore_name)
+
+ get_featurestore_mock.assert_called_once_with(
+ name=my_featurestore.resource_name, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_get_entity_type(self, get_entity_type_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_ID
+ )
+ my_entity_type = my_featurestore.get_entity_type(
+ entity_type_id=_TEST_ENTITY_TYPE_ID
+ )
+
+ get_entity_type_mock.assert_called_once_with(
+ name=_TEST_ENTITY_TYPE_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert isinstance(my_entity_type, aiplatform.EntityType)
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_update_featurestore(self, update_featurestore_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_ID
+ )
+ my_featurestore.update(
+ labels=_TEST_LABELS_UPDATE,
+ update_request_timeout=None,
+ )
+
+ expected_featurestore = gca_featurestore.Featurestore(
+ name=_TEST_FEATURESTORE_NAME,
+ labels=_TEST_LABELS_UPDATE,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(),
+ )
+ update_featurestore_mock.assert_called_once_with(
+ featurestore=expected_featurestore,
+ update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_update_featurestore_with_timeout(self, update_featurestore_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_ID
+ )
+ my_featurestore.update(
+ labels=_TEST_LABELS_UPDATE,
+ update_request_timeout=180.0,
+ )
+
+ expected_featurestore = gca_featurestore.Featurestore(
+ name=_TEST_FEATURESTORE_NAME,
+ labels=_TEST_LABELS_UPDATE,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(),
+ )
+ update_featurestore_mock.assert_called_once_with(
+ featurestore=expected_featurestore,
+ update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_update_featurestore_with_timeout_not_explicitly_set(
+ self, update_featurestore_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_ID
+ )
+ my_featurestore.update(
+ labels=_TEST_LABELS_UPDATE,
+ )
+
+ expected_featurestore = gca_featurestore.Featurestore(
+ name=_TEST_FEATURESTORE_NAME,
+ labels=_TEST_LABELS_UPDATE,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(),
+ )
+ update_featurestore_mock.assert_called_once_with(
+ featurestore=expected_featurestore,
+ update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_update_featurestore_online(self, update_featurestore_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_ID
+ )
+ my_featurestore.update_online_store(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG_UPDATE,
+ update_request_timeout=None,
+ )
+
+ expected_featurestore = gca_featurestore.Featurestore(
+ name=_TEST_FEATURESTORE_NAME,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG_UPDATE
+ ),
+ )
+ update_featurestore_mock.assert_called_once_with(
+ featurestore=expected_featurestore,
+ update_mask=field_mask_pb2.FieldMask(
+ paths=["online_serving_config.fixed_node_count"]
+ ),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ def test_list_featurestores(self, list_featurestores_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore_list = aiplatform.Featurestore.list()
+
+ list_featurestores_mock.assert_called_once_with(
+ request={"parent": _TEST_PARENT}
+ )
+ assert len(my_featurestore_list) == len(_TEST_FEATURESTORE_LIST)
+ for my_featurestore in my_featurestore_list:
+ assert isinstance(my_featurestore, aiplatform.Featurestore)
+
+ @pytest.mark.parametrize(
+ "force, sync",
+ [
+ (None, True),
+ (True, True),
+ (False, True),
+ (None, False),
+ (True, False),
+ (False, False),
+ ],
+ )
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_delete_featurestore(self, delete_featurestore_mock, force, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_ID
+ )
+ my_featurestore.delete(sync=sync, force=force)
+
+ if not sync:
+ my_featurestore.wait()
+
+ delete_featurestore_mock.assert_called_once_with(
+ name=my_featurestore.resource_name,
+ force=force,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_list_entity_types(self, list_entity_types_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_ID,
+ )
+ my_entity_type_list = my_featurestore.list_entity_types()
+
+ list_entity_types_mock.assert_called_once_with(
+ request={"parent": _TEST_FEATURESTORE_NAME}
+ )
+ assert len(my_entity_type_list) == len(_TEST_ENTITY_TYPE_LIST)
+ for my_entity_type in my_entity_type_list:
+ assert isinstance(my_entity_type, aiplatform.EntityType)
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_list_entity_types_with_no_init(self, list_entity_types_mock):
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_ID,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ my_entity_type_list = my_featurestore.list_entity_types()
+
+ list_entity_types_mock.assert_called_once_with(
+ request={"parent": _TEST_FEATURESTORE_NAME}
+ )
+ assert len(my_entity_type_list) == len(_TEST_ENTITY_TYPE_LIST)
+ for my_entity_type in my_entity_type_list:
+ assert isinstance(my_entity_type, aiplatform.EntityType)
+
+ @pytest.mark.parametrize(
+ "force, sync",
+ [
+ (None, True),
+ (True, True),
+ (False, True),
+ (None, False),
+ (True, False),
+ (False, False),
+ ],
+ )
+ @pytest.mark.usefixtures("get_featurestore_mock", "get_entity_type_mock")
+ def test_delete_entity_types(self, delete_entity_type_mock, force, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_ID
+ )
+ my_featurestore.delete_entity_types(
+ entity_type_ids=[_TEST_ENTITY_TYPE_ID, _TEST_ENTITY_TYPE_ID],
+ sync=sync,
+ force=force,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ delete_entity_type_mock.assert_has_calls(
+ calls=[
+ mock.call(name=_TEST_ENTITY_TYPE_NAME, force=force),
+ mock.call(name=_TEST_ENTITY_TYPE_NAME, force=force),
+ ],
+ any_order=True,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock", "get_entity_type_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_entity_type(self, create_entity_type_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+ my_entity_type = my_featurestore.create_entity_type(
+ entity_type_id=_TEST_ENTITY_TYPE_ID,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ my_entity_type.wait()
+
+ expected_entity_type = gca_entity_type.EntityType(
+ labels=_TEST_LABELS,
+ description=_TEST_DESCRIPTION,
+ )
+ create_entity_type_mock.assert_called_once_with(
+ parent=_TEST_FEATURESTORE_NAME,
+ entity_type=expected_entity_type,
+ entity_type_id=_TEST_ENTITY_TYPE_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock", "get_entity_type_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_entity_type_with_timeout(self, create_entity_type_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+ my_entity_type = my_featurestore.create_entity_type(
+ entity_type_id=_TEST_ENTITY_TYPE_ID,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ my_entity_type.wait()
+
+ expected_entity_type = gca_entity_type.EntityType(
+ labels=_TEST_LABELS,
+ description=_TEST_DESCRIPTION,
+ )
+ create_entity_type_mock.assert_called_once_with(
+ parent=_TEST_FEATURESTORE_NAME,
+ entity_type=expected_entity_type,
+ entity_type_id=_TEST_ENTITY_TYPE_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock", "get_entity_type_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_entity_type_with_timeout_not_explicitly_set(
+ self, create_entity_type_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+ my_entity_type = my_featurestore.create_entity_type(
+ entity_type_id=_TEST_ENTITY_TYPE_ID,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ sync=sync,
+ )
+
+ if not sync:
+ my_entity_type.wait()
+
+ expected_entity_type = gca_entity_type.EntityType(
+ labels=_TEST_LABELS,
+ description=_TEST_DESCRIPTION,
+ )
+ create_entity_type_mock.assert_called_once_with(
+ parent=_TEST_FEATURESTORE_NAME,
+ entity_type=expected_entity_type,
+ entity_type_id=_TEST_ENTITY_TYPE_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_featurestore(self, create_featurestore_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore.create(
+ featurestore_id=_TEST_FEATURESTORE_ID,
+ online_store_fixed_node_count=_TEST_ONLINE_SERVING_CONFIG,
+ labels=_TEST_LABELS,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ expected_featurestore = gca_featurestore.Featurestore(
+ labels=_TEST_LABELS,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG
+ ),
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ create_featurestore_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ featurestore=expected_featurestore,
+ featurestore_id=_TEST_FEATURESTORE_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_featurestore_with_timeout(self, create_featurestore_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore.create(
+ featurestore_id=_TEST_FEATURESTORE_ID,
+ online_store_fixed_node_count=_TEST_ONLINE_SERVING_CONFIG,
+ labels=_TEST_LABELS,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ expected_featurestore = gca_featurestore.Featurestore(
+ labels=_TEST_LABELS,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG
+ ),
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ create_featurestore_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ featurestore=expected_featurestore,
+ featurestore_id=_TEST_FEATURESTORE_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_featurestore_with_timeout_not_explicitly_set(
+ self, create_featurestore_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore.create(
+ featurestore_id=_TEST_FEATURESTORE_ID,
+ online_store_fixed_node_count=_TEST_ONLINE_SERVING_CONFIG,
+ labels=_TEST_LABELS,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ expected_featurestore = gca_featurestore.Featurestore(
+ labels=_TEST_LABELS,
+ online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(
+ fixed_node_count=_TEST_ONLINE_SERVING_CONFIG
+ ),
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ create_featurestore_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ featurestore=expected_featurestore,
+ featurestore_id=_TEST_FEATURESTORE_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ @pytest.mark.parametrize(
+ "serving_feature_ids, feature_destination_fields, expected_entity_type_specs",
+ [
+ (
+ {
+ "my_entity_type_id_1": ["my_feature_id_1_1", "my_feature_id_1_2"],
+ "my_entity_type_id_2": ["my_feature_id_2_1", "my_feature_id_2_2"],
+ },
+ None,
+ [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ],
+ ),
+ (
+ {
+ "my_entity_type_id_1": ["my_feature_id_1_1", "my_feature_id_1_2"],
+ "my_entity_type_id_2": ["my_feature_id_2_1", "my_feature_id_2_2"],
+ },
+ {
+ f"{_TEST_FEATURESTORE_NAME}/entityTypes/my_entity_type_id_1/features/my_feature_id_1_1": "my_feature_id_1_1_dest",
+ f"{_TEST_FEATURESTORE_NAME}/entityTypes/my_entity_type_id_1/features/my_feature_id_1_2": "my_feature_id_1_2_dest",
+ },
+ [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ feature_destination_fields={
+ "my_feature_id_1_1": "my_feature_id_1_1_dest",
+ "my_feature_id_1_2": "my_feature_id_1_2_dest",
+ },
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ],
+ ),
+ (
+ {
+ "my_entity_type_id_1": ["my_feature_id_1_1", "my_feature_id_1_2"],
+ "my_entity_type_id_2": ["my_feature_id_2_1", "my_feature_id_2_2"],
+ },
+ {
+ f"{_TEST_FEATURESTORE_NAME}/entityTypes/my_entity_type_id_1/features/my_feature_id_1_1": "my_feature_id_1_1_dest",
+ f"{_TEST_FEATURESTORE_NAME}/entityTypes/my_entity_type_id_2/features/my_feature_id_2_1": "my_feature_id_2_1_dest",
+ },
+ [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ feature_destination_fields={
+ "my_feature_id_1_1": "my_feature_id_1_1_dest"
+ },
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ feature_destination_fields={
+ "my_feature_id_2_1": "my_feature_id_2_1_dest"
+ },
+ ),
+ ],
+ ),
+ ],
+ )
+ def test_validate_and_get_batch_read_feature_values_request(
+ self,
+ serving_feature_ids,
+ feature_destination_fields,
+ expected_entity_type_specs,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT)
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=_TEST_FEATURESTORE_NAME,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ bigquery_destination=_TEST_BQ_DESTINATION,
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ bigquery_read_instances=_TEST_BQ_SOURCE,
+ )
+ )
+ assert (
+ expected_batch_read_feature_values_request
+ == my_featurestore._validate_and_get_batch_read_feature_values_request(
+ featurestore_name=my_featurestore.resource_name,
+ serving_feature_ids=serving_feature_ids,
+ destination=_TEST_BQ_DESTINATION,
+ read_instances=_TEST_BQ_SOURCE,
+ feature_destination_fields=feature_destination_fields,
+ )
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ @pytest.mark.parametrize(
+ "read_instances_uri, expected_read_instances",
+ [
+ (_TEST_BQ_SOURCE_URI, _TEST_BQ_SOURCE),
+ (_TEST_GCS_CSV_SOURCE_URI, _TEST_CSV_SOURCE),
+ ],
+ )
+ def test_validate_and_get_read_instances(
+ self, read_instances_uri, expected_read_instances
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+ assert (
+ expected_read_instances
+ == my_featurestore._validate_and_get_read_instances(
+ read_instances_uri=read_instances_uri
+ )
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ @pytest.mark.parametrize(
+ "read_instances_uri",
+ [
+ "gcs://my_bucket/my_file_1.csv",
+ "bigquery://project.dataset.table_name",
+ "my_bucket/my_file_1.csv",
+ ],
+ )
+ def test_validate_and_get_read_instances_with_raise(self, read_instances_uri):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+ with pytest.raises(ValueError):
+ my_featurestore._validate_and_get_read_instances(
+ read_instances_uri=read_instances_uri
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_batch_serve_to_bq(self, batch_read_feature_values_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+
+ expected_entity_type_specs = [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ]
+
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=my_featurestore.resource_name,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ bigquery_destination=_TEST_BQ_DESTINATION,
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ bigquery_read_instances=_TEST_BQ_SOURCE,
+ )
+ )
+
+ my_featurestore.batch_serve_to_bq(
+ bq_destination_output_uri=_TEST_BQ_DESTINATION_URI,
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_uri=_TEST_BQ_SOURCE_URI,
+ sync=sync,
+ serve_request_timeout=None,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ batch_read_feature_values_mock.assert_called_once_with(
+ request=expected_batch_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_batch_serve_to_bq_with_timeout(self, batch_read_feature_values_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+
+ expected_entity_type_specs = [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ]
+
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=my_featurestore.resource_name,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ bigquery_destination=_TEST_BQ_DESTINATION,
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ bigquery_read_instances=_TEST_BQ_SOURCE,
+ )
+ )
+
+ my_featurestore.batch_serve_to_bq(
+ bq_destination_output_uri=_TEST_BQ_DESTINATION_URI,
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_uri=_TEST_BQ_SOURCE_URI,
+ sync=sync,
+ serve_request_timeout=180.0,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ batch_read_feature_values_mock.assert_called_once_with(
+ request=expected_batch_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=180.0,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_batch_serve_to_bq_with_timeout_not_explicitly_set(
+ self, batch_read_feature_values_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+
+ expected_entity_type_specs = [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ]
+
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=my_featurestore.resource_name,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ bigquery_destination=_TEST_BQ_DESTINATION,
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ bigquery_read_instances=_TEST_BQ_SOURCE,
+ )
+ )
+
+ my_featurestore.batch_serve_to_bq(
+ bq_destination_output_uri=_TEST_BQ_DESTINATION_URI,
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_uri=_TEST_BQ_SOURCE_URI,
+ sync=sync,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ batch_read_feature_values_mock.assert_called_once_with(
+ request=expected_batch_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_batch_serve_to_bq_with_start_time(
+ self, batch_read_feature_values_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+
+ expected_entity_type_specs = [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ]
+
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=my_featurestore.resource_name,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ bigquery_destination=_TEST_BQ_DESTINATION,
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ bigquery_read_instances=_TEST_BQ_SOURCE,
+ start_time=_TEST_BATCH_SERVE_START_TIME,
+ )
+ )
+
+ my_featurestore.batch_serve_to_bq(
+ bq_destination_output_uri=_TEST_BQ_DESTINATION_URI,
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_uri=_TEST_BQ_SOURCE_URI,
+ sync=sync,
+ serve_request_timeout=None,
+ start_time=_TEST_BATCH_SERVE_START_TIME,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ batch_read_feature_values_mock.assert_called_once_with(
+ request=expected_batch_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_batch_serve_to_gcs(self, batch_read_feature_values_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+
+ expected_entity_type_specs = [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ]
+
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=my_featurestore.resource_name,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ tfrecord_destination=_TEST_TFRECORD_DESTINATION,
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ csv_read_instances=_TEST_CSV_SOURCE,
+ )
+ )
+
+ my_featurestore.batch_serve_to_gcs(
+ gcs_destination_output_uri_prefix=_TEST_GCS_OUTPUT_URI_PREFIX,
+ gcs_destination_type=_TEST_GCS_DESTINATION_TYPE_TFRECORD,
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_uri=_TEST_GCS_CSV_SOURCE_URI,
+ sync=sync,
+ serve_request_timeout=None,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ batch_read_feature_values_mock.assert_called_once_with(
+ request=expected_batch_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_batch_serve_to_gcs_with_invalid_gcs_destination_type(self):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+ with pytest.raises(ValueError):
+ my_featurestore.batch_serve_to_gcs(
+ gcs_destination_output_uri_prefix=_TEST_GCS_OUTPUT_URI_PREFIX,
+ gcs_destination_type=_TEST_GCS_DESTINATION_TYPE_INVALID,
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_uri=_TEST_GCS_CSV_SOURCE_URI,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_featurestore_mock")
+ def test_batch_serve_to_gcs_with_start_time(
+ self, batch_read_feature_values_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+
+ expected_entity_type_specs = [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ]
+
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=my_featurestore.resource_name,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ tfrecord_destination=_TEST_TFRECORD_DESTINATION,
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ csv_read_instances=_TEST_CSV_SOURCE,
+ start_time=_TEST_BATCH_SERVE_START_TIME,
+ )
+ )
+
+ my_featurestore.batch_serve_to_gcs(
+ gcs_destination_output_uri_prefix=_TEST_GCS_OUTPUT_URI_PREFIX,
+ gcs_destination_type=_TEST_GCS_DESTINATION_TYPE_TFRECORD,
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_uri=_TEST_GCS_CSV_SOURCE_URI,
+ sync=sync,
+ serve_request_timeout=None,
+ start_time=_TEST_BATCH_SERVE_START_TIME,
+ )
+
+ if not sync:
+ my_featurestore.wait()
+
+ batch_read_feature_values_mock.assert_called_once_with(
+ request=expected_batch_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_featurestore_mock",
+ "bq_init_client_mock",
+ "bq_init_dataset_mock",
+ "bq_create_dataset_mock",
+ "bq_load_table_from_dataframe_mock",
+ "bq_delete_dataset_mock",
+ "bqs_init_client_mock",
+ "bqs_create_read_session",
+ "get_project_mock",
+ )
+ @patch("uuid.uuid4", uuid_mock)
+ def test_batch_serve_to_df(self, batch_read_feature_values_mock):
+
+ aiplatform.init(project=_TEST_PROJECT_DIFF)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+
+ read_instances_df = pd.DataFrame()
+
+ expected_temp_bq_dataset_name = (
+ f"temp_{_TEST_FEATURESTORE_ID}_{uuid.uuid4()}".replace("-", "_")
+ )
+ expecte_temp_bq_dataset_id = f"{_TEST_PROJECT}.{expected_temp_bq_dataset_name}"[
+ :1024
+ ]
+ expected_temp_bq_read_instances_table_id = (
+ f"{expecte_temp_bq_dataset_id}.read_instances"
+ )
+ expected_temp_bq_batch_serve_table_id = (
+ f"{expecte_temp_bq_dataset_id}.batch_serve"
+ )
+
+ expected_entity_type_specs = [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ]
+
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=my_featurestore.resource_name,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ bigquery_destination=gca_io.BigQueryDestination(
+ output_uri=f"bq://{expected_temp_bq_batch_serve_table_id}"
+ ),
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ bigquery_read_instances=gca_io.BigQuerySource(
+ input_uri=f"bq://{expected_temp_bq_read_instances_table_id}"
+ ),
+ )
+ )
+
+ my_featurestore.batch_serve_to_df(
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_df=read_instances_df,
+ serve_request_timeout=None,
+ )
+
+ batch_read_feature_values_mock.assert_called_once_with(
+ request=expected_batch_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_featurestore_mock",
+ "bq_init_client_mock",
+ "bq_init_dataset_mock",
+ "bq_create_dataset_mock",
+ "bq_load_table_from_dataframe_mock",
+ "bq_delete_dataset_mock",
+ "bq_delete_table_mock",
+ "bqs_init_client_mock",
+ "bqs_create_read_session",
+ "get_project_mock",
+ )
+ @patch("uuid.uuid4", uuid_mock)
+ def test_batch_serve_to_df_user_specified_bq_dataset(
+ self,
+ batch_read_feature_values_mock,
+ bq_create_dataset_mock,
+ bq_delete_dataset_mock,
+ bq_delete_table_mock,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT_DIFF)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+
+ read_instances_df = pd.DataFrame()
+
+ expected_temp_bq_dataset_name = "my_dataset_name"
+ expected_temp_bq_dataset_id = (
+ f"{_TEST_PROJECT}.{expected_temp_bq_dataset_name}"[:1024]
+ )
+ expected_temp_bq_batch_serve_table_name = (
+ f"tmp_batch_serve_{uuid.uuid4()}".replace("-", "_")
+ )
+ expected_temp_bq_batch_serve_table_id = (
+ f"{expected_temp_bq_dataset_id}.{expected_temp_bq_batch_serve_table_name}"
+ )
+ expected_temp_bq_read_instances_table_name = (
+ f"tmp_read_instances_{uuid.uuid4()}".replace("-", "_")
+ )
+ expected_temp_bq_read_instances_table_id = f"{expected_temp_bq_dataset_id}.{expected_temp_bq_read_instances_table_name}"
+
+ expected_entity_type_specs = [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ]
+
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=my_featurestore.resource_name,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ bigquery_destination=gca_io.BigQueryDestination(
+ output_uri=f"bq://{expected_temp_bq_batch_serve_table_id}"
+ ),
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ bigquery_read_instances=gca_io.BigQuerySource(
+ input_uri=f"bq://{expected_temp_bq_read_instances_table_id}"
+ ),
+ )
+ )
+
+ my_featurestore.batch_serve_to_df(
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_df=read_instances_df,
+ serve_request_timeout=None,
+ bq_dataset_id=expected_temp_bq_dataset_id,
+ )
+
+ batch_read_feature_values_mock.assert_called_once_with(
+ request=expected_batch_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ bq_delete_table_mock.assert_has_calls(
+ calls=[
+ mock.call(expected_temp_bq_batch_serve_table_id),
+ mock.call(expected_temp_bq_read_instances_table_id),
+ ],
+ any_order=True,
+ )
+
+ bq_create_dataset_mock.assert_not_called()
+ bq_delete_dataset_mock.assert_not_called()
+
+ @pytest.mark.usefixtures(
+ "get_featurestore_mock",
+ "bq_init_client_mock",
+ "bq_init_dataset_mock",
+ "bq_create_dataset_mock",
+ "bq_load_table_from_dataframe_mock",
+ "bq_delete_dataset_mock",
+ "bqs_init_client_mock",
+ "bqs_create_read_session",
+ "get_project_mock",
+ )
+ @patch("uuid.uuid4", uuid_mock)
+ def test_batch_serve_to_df_with_start_time(self, batch_read_feature_values_mock):
+
+ aiplatform.init(project=_TEST_PROJECT_DIFF)
+
+ my_featurestore = aiplatform.Featurestore(
+ featurestore_name=_TEST_FEATURESTORE_NAME
+ )
+
+ read_instances_df = pd.DataFrame()
+
+ expected_temp_bq_dataset_name = (
+ f"temp_{_TEST_FEATURESTORE_ID}_{uuid.uuid4()}".replace("-", "_")
+ )
+ expecte_temp_bq_dataset_id = f"{_TEST_PROJECT}.{expected_temp_bq_dataset_name}"[
+ :1024
+ ]
+ expected_temp_bq_read_instances_table_id = (
+ f"{expecte_temp_bq_dataset_id}.read_instances"
+ )
+ expected_temp_bq_batch_serve_table_id = (
+ f"{expecte_temp_bq_dataset_id}.batch_serve"
+ )
+
+ expected_entity_type_specs = [
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_1",
+ feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"],
+ ),
+ _get_entity_type_spec_proto_with_feature_ids(
+ entity_type_id="my_entity_type_id_2",
+ feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"],
+ ),
+ ]
+
+ expected_batch_read_feature_values_request = (
+ gca_featurestore_service.BatchReadFeatureValuesRequest(
+ featurestore=my_featurestore.resource_name,
+ destination=gca_featurestore_service.FeatureValueDestination(
+ bigquery_destination=gca_io.BigQueryDestination(
+ output_uri=f"bq://{expected_temp_bq_batch_serve_table_id}"
+ ),
+ ),
+ entity_type_specs=expected_entity_type_specs,
+ bigquery_read_instances=gca_io.BigQuerySource(
+ input_uri=f"bq://{expected_temp_bq_read_instances_table_id}"
+ ),
+ start_time=_TEST_BATCH_SERVE_START_TIME,
+ )
+ )
+
+ my_featurestore.batch_serve_to_df(
+ serving_feature_ids=_TEST_SERVING_FEATURE_IDS,
+ read_instances_df=read_instances_df,
+ serve_request_timeout=None,
+ start_time=_TEST_BATCH_SERVE_START_TIME,
+ )
+
+ batch_read_feature_values_mock.assert_called_once_with(
+ request=expected_batch_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestEntityType:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize(
+ "entity_type_name, featurestore_id",
+ [
+ (_TEST_ENTITY_TYPE_NAME, None),
+ (_TEST_ENTITY_TYPE_ID, _TEST_FEATURESTORE_ID),
+ ],
+ )
+ def test_init_entity_type(
+ self, entity_type_name, featurestore_id, get_entity_type_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ aiplatform.EntityType(
+ entity_type_name=entity_type_name, featurestore_id=featurestore_id
+ )
+
+ get_entity_type_mock.assert_called_once_with(
+ name=_TEST_ENTITY_TYPE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ def test_get_featurestore(self, get_featurestore_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_featurestore = my_entity_type.get_featurestore()
+
+ get_featurestore_mock.assert_called_once_with(
+ name=my_featurestore.resource_name, retry=base._DEFAULT_RETRY
+ )
+ assert isinstance(my_featurestore, aiplatform.Featurestore)
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ def test_get_feature(self, get_feature_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_feature = my_entity_type.get_feature(feature_id=_TEST_FEATURE_ID)
+
+ get_feature_mock.assert_called_once_with(
+ name=my_feature.resource_name, retry=base._DEFAULT_RETRY
+ )
+ assert isinstance(my_feature, aiplatform.Feature)
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ def test_update_entity_type(self, update_entity_type_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_entity_type.update(
+ labels=_TEST_LABELS_UPDATE,
+ update_request_timeout=None,
+ )
+
+ expected_entity_type = gca_entity_type.EntityType(
+ name=_TEST_ENTITY_TYPE_NAME,
+ labels=_TEST_LABELS_UPDATE,
+ )
+ update_entity_type_mock.assert_called_once_with(
+ entity_type=expected_entity_type,
+ update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ assert my_entity_type.labels == _TEST_LABELS_UPDATE
+
+ @pytest.mark.parametrize(
+ "featurestore_name", [_TEST_FEATURESTORE_NAME, _TEST_FEATURESTORE_ID]
+ )
+ def test_list_entity_type(self, featurestore_name, list_entity_types_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type_list = aiplatform.EntityType.list(
+ featurestore_name=featurestore_name
+ )
+
+ list_entity_types_mock.assert_called_once_with(
+ request={"parent": _TEST_FEATURESTORE_NAME}
+ )
+ assert len(my_entity_type_list) == len(_TEST_ENTITY_TYPE_LIST)
+ for my_entity_type in my_entity_type_list:
+ assert isinstance(my_entity_type, aiplatform.EntityType)
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ def test_list_features(self, list_features_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_feature_list = my_entity_type.list_features()
+
+ list_features_mock.assert_called_once_with(
+ request={"parent": _TEST_ENTITY_TYPE_NAME}
+ )
+ assert len(my_feature_list) == len(_TEST_FEATURE_LIST)
+ for my_feature in my_feature_list:
+ assert isinstance(my_feature, aiplatform.Feature)
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ def test_list_features_with_no_init(self, list_features_mock):
+ my_entity_type = aiplatform.EntityType(
+ entity_type_name=_TEST_ENTITY_TYPE_ID,
+ featurestore_id=_TEST_FEATURESTORE_ID,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ my_feature_list = my_entity_type.list_features()
+
+ list_features_mock.assert_called_once_with(
+ request={"parent": _TEST_ENTITY_TYPE_NAME}
+ )
+ assert len(my_feature_list) == len(_TEST_FEATURE_LIST)
+ for my_feature in my_feature_list:
+ assert isinstance(my_feature, aiplatform.Feature)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_entity_type_mock", "get_feature_mock")
+ def test_delete_features(self, delete_feature_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_entity_type.delete_features(
+ feature_ids=[_TEST_FEATURE_ID, _TEST_FEATURE_ID], sync=sync
+ )
+
+ if not sync:
+ my_entity_type.wait()
+
+ delete_feature_mock.assert_has_calls(
+ calls=[
+ mock.call(name=_TEST_FEATURE_NAME),
+ mock.call(name=_TEST_FEATURE_NAME),
+ ],
+ any_order=True,
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock", "get_feature_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_feature(self, create_feature_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_feature = my_entity_type.create_feature(
+ feature_id=_TEST_FEATURE_ID,
+ value_type=_TEST_FEATURE_VALUE_TYPE_STR,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ my_feature.wait()
+
+ expected_feature = gca_feature.Feature(
+ value_type=_TEST_FEATURE_VALUE_TYPE_ENUM,
+ labels=_TEST_LABELS,
+ description=_TEST_DESCRIPTION,
+ )
+ expected_request = gca_featurestore_service.CreateFeatureRequest(
+ parent=_TEST_ENTITY_TYPE_NAME,
+ feature=expected_feature,
+ feature_id=_TEST_FEATURE_ID,
+ )
+
+ create_feature_mock.assert_called_once_with(
+ request=expected_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_entity_type(self, create_entity_type_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType.create(
+ entity_type_id=_TEST_ENTITY_TYPE_ID,
+ featurestore_name=_TEST_FEATURESTORE_NAME,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ my_entity_type.wait()
+
+ expected_entity_type = gca_entity_type.EntityType(
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+ create_entity_type_mock.assert_called_once_with(
+ parent=_TEST_FEATURESTORE_NAME,
+ entity_type=expected_entity_type,
+ entity_type_id=_TEST_ENTITY_TYPE_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ def test_validate_and_get_create_feature_requests(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ create_feature_requests = (
+ my_entity_type._validate_and_get_create_feature_requests(
+ feature_configs=_TEST_FEATURE_CONFIGS
+ )
+ )
+
+ expected_requests = [
+ gca_featurestore_service.CreateFeatureRequest(
+ feature=gca_feature.Feature(value_type=_TEST_FEATURE_VALUE_TYPE_ENUM),
+ feature_id="my_feature_id_1",
+ ),
+ ]
+ assert create_feature_requests == expected_requests
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_batch_create_features(self, batch_create_features_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_entity_type.batch_create_features(feature_configs=_TEST_FEATURE_CONFIGS)
+
+ if not sync:
+ my_entity_type.wait()
+
+ expected_requests = [
+ gca_featurestore_service.CreateFeatureRequest(
+ feature=gca_feature.Feature(value_type=_TEST_FEATURE_VALUE_TYPE_ENUM),
+ feature_id="my_feature_id_1",
+ ),
+ ]
+
+ batch_create_features_mock.assert_called_once_with(
+ parent=my_entity_type.resource_name,
+ requests=expected_requests,
+ metadata=_TEST_REQUEST_METADATA,
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ def test_validate_and_get_import_feature_values_request_with_source_fields(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ true_import_feature_values_request = (
+ gca_featurestore_service.ImportFeatureValuesRequest(
+ bigquery_source=_TEST_BQ_SOURCE,
+ feature_time_field=_TEST_FEATURE_TIME_FIELD,
+ entity_type=_TEST_ENTITY_TYPE_NAME,
+ feature_specs=[
+ gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec(
+ id="my_feature_id_1",
+ source_field="my_feature_id_1_source_field",
+ ),
+ ],
+ )
+ )
+ assert (
+ true_import_feature_values_request
+ == my_entity_type._validate_and_get_import_feature_values_request(
+ entity_type_name=my_entity_type.resource_name,
+ feature_ids=_TEST_IMPORTING_FEATURE_IDS,
+ feature_time=_TEST_FEATURE_TIME_FIELD,
+ data_source=_TEST_BQ_SOURCE,
+ feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS,
+ )
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ def test_validate_and_get_import_feature_values_request_without_source_fields(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+
+ true_import_feature_values_request = (
+ gca_featurestore_service.ImportFeatureValuesRequest(
+ entity_type=_TEST_ENTITY_TYPE_NAME,
+ feature_specs=[
+ gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec(
+ id="my_feature_id_1"
+ ),
+ ],
+ csv_source=_TEST_CSV_SOURCE,
+ feature_time=utils.get_timestamp_proto(_TEST_FEATURE_TIME),
+ )
+ )
+ assert (
+ true_import_feature_values_request
+ == my_entity_type._validate_and_get_import_feature_values_request(
+ entity_type_name=my_entity_type.resource_name,
+ feature_ids=_TEST_IMPORTING_FEATURE_IDS,
+ feature_time=_TEST_FEATURE_TIME,
+ data_source=_TEST_CSV_SOURCE,
+ )
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_ingest_from_bq(self, import_feature_values_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_entity_type.ingest_from_bq(
+ feature_ids=_TEST_IMPORTING_FEATURE_IDS,
+ feature_time=_TEST_FEATURE_TIME_FIELD,
+ bq_source_uri=_TEST_BQ_SOURCE_URI,
+ feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS,
+ sync=sync,
+ ingest_request_timeout=None,
+ )
+
+ if not sync:
+ my_entity_type.wait()
+
+ true_import_feature_values_request = (
+ gca_featurestore_service.ImportFeatureValuesRequest(
+ entity_type=_TEST_ENTITY_TYPE_NAME,
+ feature_specs=[
+ gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec(
+ id="my_feature_id_1",
+ source_field="my_feature_id_1_source_field",
+ ),
+ ],
+ bigquery_source=_TEST_BQ_SOURCE,
+ feature_time_field=_TEST_FEATURE_TIME_FIELD,
+ )
+ )
+ import_feature_values_mock.assert_called_once_with(
+ request=true_import_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_ingest_from_bq_with_timeout(self, import_feature_values_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_entity_type.ingest_from_bq(
+ feature_ids=_TEST_IMPORTING_FEATURE_IDS,
+ feature_time=_TEST_FEATURE_TIME_FIELD,
+ bq_source_uri=_TEST_BQ_SOURCE_URI,
+ feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS,
+ sync=sync,
+ ingest_request_timeout=180.0,
+ )
+
+ if not sync:
+ my_entity_type.wait()
+
+ true_import_feature_values_request = (
+ gca_featurestore_service.ImportFeatureValuesRequest(
+ entity_type=_TEST_ENTITY_TYPE_NAME,
+ feature_specs=[
+ gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec(
+ id="my_feature_id_1",
+ source_field="my_feature_id_1_source_field",
+ ),
+ ],
+ bigquery_source=_TEST_BQ_SOURCE,
+ feature_time_field=_TEST_FEATURE_TIME_FIELD,
+ )
+ )
+ import_feature_values_mock.assert_called_once_with(
+ request=true_import_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_ingest_from_gcs(self, import_feature_values_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ my_entity_type.ingest_from_gcs(
+ feature_ids=_TEST_IMPORTING_FEATURE_IDS,
+ feature_time=_TEST_FEATURE_TIME,
+ gcs_source_uris=_TEST_GCS_AVRO_SOURCE_URIS,
+ gcs_source_type=_TEST_GCS_SOURCE_TYPE_AVRO,
+ sync=sync,
+ ingest_request_timeout=None,
+ )
+
+ if not sync:
+ my_entity_type.wait()
+
+ true_import_feature_values_request = (
+ gca_featurestore_service.ImportFeatureValuesRequest(
+ entity_type=_TEST_ENTITY_TYPE_NAME,
+ feature_specs=[
+ gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec(
+ id="my_feature_id_1"
+ ),
+ ],
+ avro_source=_TEST_AVRO_SOURCE,
+ feature_time=utils.get_timestamp_proto(_TEST_FEATURE_TIME),
+ )
+ )
+ import_feature_values_mock.assert_called_once_with(
+ request=true_import_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ def test_ingest_from_gcs_with_invalid_gcs_source_type(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ with pytest.raises(ValueError):
+ my_entity_type.ingest_from_gcs(
+ feature_ids=_TEST_IMPORTING_FEATURE_IDS,
+ feature_time=_TEST_FEATURE_TIME_FIELD,
+ gcs_source_uris=_TEST_GCS_CSV_SOURCE_URIS,
+ gcs_source_type=_TEST_GCS_SOURCE_TYPE_INVALID,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_entity_type_mock",
+ "get_feature_mock",
+ "bq_init_client_mock",
+ "bq_init_dataset_mock",
+ "bq_create_dataset_mock",
+ "bq_delete_dataset_mock",
+ "get_project_mock",
+ )
+ @patch("uuid.uuid4", uuid_mock)
+ def test_ingest_from_df_using_column(
+ self,
+ import_feature_values_mock,
+ bq_load_table_from_dataframe_mock,
+ bq_init_schema_field_mock,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT_DIFF)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ df_source = pd.DataFrame()
+ my_entity_type.ingest_from_df(
+ feature_ids=_TEST_IMPORTING_FEATURE_IDS,
+ feature_time=_TEST_FEATURE_TIME_FIELD,
+ df_source=df_source,
+ feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS,
+ ingest_request_timeout=None,
+ )
+ expected_temp_bq_dataset_name = (
+ f"temp_{_TEST_FEATURESTORE_ID}_{uuid.uuid4()}".replace("-", "_")
+ )
+ expecte_temp_bq_dataset_id = f"{_TEST_PROJECT}.{expected_temp_bq_dataset_name}"[
+ :1024
+ ]
+ expected_temp_bq_table_id = (
+ f"{expecte_temp_bq_dataset_id}.{_TEST_ENTITY_TYPE_ID}"
+ )
+
+ expected_import_feature_values_request = (
+ gca_featurestore_service.ImportFeatureValuesRequest(
+ entity_type=_TEST_ENTITY_TYPE_NAME,
+ feature_specs=[
+ gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec(
+ id="my_feature_id_1",
+ source_field="my_feature_id_1_source_field",
+ ),
+ ],
+ bigquery_source=gca_io.BigQuerySource(
+ input_uri=f"bq://{expected_temp_bq_table_id}"
+ ),
+ feature_time_field=_TEST_FEATURE_TIME_FIELD,
+ )
+ )
+
+ bq_init_schema_field_mock.assert_called_once_with(
+ name=_TEST_IMPORTING_FEATURE_SOURCE_FIELD,
+ field_type=_TEST_FEATURE_VALUE_TYPE_BQ_FIELD_TYPE,
+ mode=_TEST_FEATURE_VALUE_TYPE_BQ_MODE,
+ )
+
+ import_feature_values_mock.assert_called_once_with(
+ request=expected_import_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_entity_type_mock",
+ "get_feature_mock",
+ "bq_init_client_mock",
+ "bq_init_dataset_mock",
+ "bq_create_dataset_mock",
+ "bq_delete_dataset_mock",
+ "get_project_mock",
+ )
+ @patch("uuid.uuid4", uuid_mock)
+ def test_ingest_from_df_using_datetime(
+ self,
+ import_feature_values_mock,
+ bq_load_table_from_dataframe_mock,
+ bq_init_schema_field_mock,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT_DIFF)
+
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ df_source = pd.DataFrame()
+ my_entity_type.ingest_from_df(
+ feature_ids=_TEST_IMPORTING_FEATURE_IDS,
+ feature_time=_TEST_FEATURE_TIME_DATETIME,
+ df_source=df_source,
+ feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS,
+ ingest_request_timeout=None,
+ )
+
+ expected_temp_bq_dataset_name = (
+ f"temp_{_TEST_FEATURESTORE_ID}_{uuid.uuid4()}".replace("-", "_")
+ )
+ expecte_temp_bq_dataset_id = f"{_TEST_PROJECT}.{expected_temp_bq_dataset_name}"[
+ :1024
+ ]
+ expected_temp_bq_table_id = (
+ f"{expecte_temp_bq_dataset_id}.{_TEST_ENTITY_TYPE_ID}"
+ )
+
+ timestamp_proto = timestamp_pb2.Timestamp()
+ timestamp_proto.FromDatetime(_TEST_FEATURE_TIME_DATETIME)
+
+ expected_import_feature_values_request = (
+ gca_featurestore_service.ImportFeatureValuesRequest(
+ entity_type=_TEST_ENTITY_TYPE_NAME,
+ feature_specs=[
+ gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec(
+ id="my_feature_id_1",
+ source_field="my_feature_id_1_source_field",
+ ),
+ ],
+ bigquery_source=gca_io.BigQuerySource(
+ input_uri=f"bq://{expected_temp_bq_table_id}"
+ ),
+ feature_time=timestamp_proto,
+ )
+ )
+
+ bq_init_schema_field_mock.assert_called_once_with(
+ name=_TEST_IMPORTING_FEATURE_SOURCE_FIELD,
+ field_type=_TEST_FEATURE_VALUE_TYPE_BQ_FIELD_TYPE,
+ mode=_TEST_FEATURE_VALUE_TYPE_BQ_MODE,
+ )
+
+ import_feature_values_mock.assert_called_once_with(
+ request=expected_import_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize(
+ "feature_value_type, expected_field_type, expected_mode",
+ [
+ ("BOOL", "BOOL", "NULLABLE"),
+ ("BOOL_ARRAY", "BOOL", "REPEATED"),
+ ("DOUBLE", "FLOAT64", "NULLABLE"),
+ ("DOUBLE_ARRAY", "FLOAT64", "REPEATED"),
+ ("INT64", "INT64", "NULLABLE"),
+ ("INT64_ARRAY", "INT64", "REPEATED"),
+ ("STRING", "STRING", "NULLABLE"),
+ ("STRING_ARRAY", "STRING", "REPEATED"),
+ ("BYTES", "BYTES", "NULLABLE"),
+ ],
+ )
+ def test_get_bq_schema_field(
+ self, feature_value_type, expected_field_type, expected_mode
+ ):
+ expected_bq_schema_field = bigquery.SchemaField(
+ name=_TEST_FEATURE_ID,
+ field_type=expected_field_type,
+ mode=expected_mode,
+ )
+ assert expected_bq_schema_field == aiplatform.EntityType._get_bq_schema_field(
+ name=_TEST_FEATURE_ID, feature_value_type=feature_value_type
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock", "get_feature_mock")
+ def test_read_single_entity(self, read_feature_values_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ expected_read_feature_values_request = (
+ gca_featurestore_online_service.ReadFeatureValuesRequest(
+ entity_type=my_entity_type.resource_name,
+ entity_id=_TEST_READ_ENTITY_ID,
+ feature_selector=gca_feature_selector.FeatureSelector(
+ id_matcher=gca_feature_selector.IdMatcher(ids=["*"])
+ ),
+ )
+ )
+ result = my_entity_type.read(
+ entity_ids=_TEST_READ_ENTITY_ID,
+ read_request_timeout=None,
+ )
+ read_feature_values_mock.assert_called_once_with(
+ request=expected_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+ assert isinstance(result, pd.DataFrame)
+ assert len(result) == 1
+ assert result.entity_id[0] == _TEST_READ_ENTITY_ID
+ assert result.get(_TEST_FEATURE_ID)[0] == _TEST_FEATURE_VALUE
+
+ @pytest.mark.usefixtures("get_entity_type_mock", "get_feature_mock")
+ def test_read_single_entity_with_timeout(self, read_feature_values_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ expected_read_feature_values_request = (
+ gca_featurestore_online_service.ReadFeatureValuesRequest(
+ entity_type=my_entity_type.resource_name,
+ entity_id=_TEST_READ_ENTITY_ID,
+ feature_selector=gca_feature_selector.FeatureSelector(
+ id_matcher=gca_feature_selector.IdMatcher(ids=["*"])
+ ),
+ )
+ )
+ my_entity_type.read(
+ entity_ids=_TEST_READ_ENTITY_ID,
+ read_request_timeout=180.0,
+ )
+ read_feature_values_mock.assert_called_once_with(
+ request=expected_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock", "get_feature_mock")
+ def test_read_multiple_entities(self, streaming_read_feature_values_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ expected_streaming_read_feature_values_request = (
+ gca_featurestore_online_service.StreamingReadFeatureValuesRequest(
+ entity_type=my_entity_type.resource_name,
+ entity_ids=_TEST_READ_ENTITY_IDS,
+ feature_selector=gca_feature_selector.FeatureSelector(
+ id_matcher=gca_feature_selector.IdMatcher(ids=[_TEST_FEATURE_ID])
+ ),
+ )
+ )
+ result = my_entity_type.read(
+ entity_ids=_TEST_READ_ENTITY_IDS,
+ feature_ids=_TEST_FEATURE_ID,
+ read_request_timeout=None,
+ )
+ streaming_read_feature_values_mock.assert_called_once_with(
+ request=expected_streaming_read_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+ assert isinstance(result, pd.DataFrame)
+ assert len(result) == 1
+ assert result.entity_id[0] == _TEST_READ_ENTITY_ID
+ assert result.get(_TEST_FEATURE_ID)[0] == _TEST_FEATURE_VALUE
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize(
+ "instance, entity_id, expected_feature_values",
+ [
+ (
+ {"string_test_entity": {"string_feature": "test_string"}},
+ "string_test_entity",
+ {
+ "string_feature": gca_featurestore_online_service_v1beta1.FeatureValue(
+ string_value="test_string"
+ )
+ },
+ ),
+ (
+ pd.DataFrame(
+ data=[{"test_feature_1": 4.9, "test_feature_2": 10}],
+ columns=["test_feature_1", "test_feature_2"],
+ index=["pd_test_entity"],
+ ),
+ "pd_test_entity",
+ {
+ "test_feature_1": gca_featurestore_online_service_v1beta1.FeatureValue(
+ double_value=4.9
+ ),
+ "test_feature_2": gca_featurestore_online_service_v1beta1.FeatureValue(
+ int64_value=10
+ ),
+ },
+ ),
+ ],
+ )
+ def test_preview_write_feature_values(
+ self,
+ instance,
+ entity_id,
+ expected_feature_values,
+ preview_write_feature_values_mock,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+
+ my_entity_type.preview.write_feature_values(instances=instance)
+
+ preview_write_feature_values_mock.assert_called_once_with(
+ entity_type=my_entity_type.resource_name,
+ payloads=[
+ gca_featurestore_online_service_v1beta1.WriteFeatureValuesPayload(
+ entity_id=entity_id, feature_values=expected_feature_values
+ )
+ ],
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize(
+ "feature_id, test_value, expected_feature_value",
+ [
+ (
+ "bool_feature_id",
+ False,
+ gca_featurestore_online_service_v1beta1.FeatureValue(bool_value=False),
+ ),
+ (
+ "string_feature_id",
+ "test_string",
+ gca_featurestore_online_service_v1beta1.FeatureValue(
+ string_value="test_string"
+ ),
+ ),
+ (
+ "int_feature_id",
+ 10,
+ gca_featurestore_online_service_v1beta1.FeatureValue(int64_value=10),
+ ),
+ (
+ "double_feature_id",
+ 3.1459,
+ gca_featurestore_online_service_v1beta1.FeatureValue(
+ double_value=3.1459
+ ),
+ ),
+ (
+ "bytes_feature_id",
+ bytes("test_str", "utf-8"),
+ gca_featurestore_online_service_v1beta1.FeatureValue(
+ bytes_value=bytes("test_str", "utf-8")
+ ),
+ ),
+ (
+ "bool_array_feature_id",
+ [False, True, True],
+ gca_featurestore_online_service_v1beta1.FeatureValue(
+ bool_array_value=gca_types_v1beta1.BoolArray(
+ values=[False, True, True]
+ )
+ ),
+ ),
+ (
+ "string_array_feature_id",
+ ["test_string_1", "test_string_2", "test_string_3"],
+ gca_featurestore_online_service_v1beta1.FeatureValue(
+ string_array_value=gca_types_v1beta1.StringArray(
+ values=["test_string_1", "test_string_2", "test_string_3"]
+ )
+ ),
+ ),
+ (
+ "int_array_feature_id",
+ [1, 2, 3],
+ gca_featurestore_online_service_v1beta1.FeatureValue(
+ int64_array_value=gca_types_v1beta1.Int64Array(values=[1, 2, 3])
+ ),
+ ),
+ (
+ "double_array_feature_id",
+ [3.14, 0.5, 1.23],
+ gca_featurestore_online_service_v1beta1.FeatureValue(
+ double_array_value=gca_types_v1beta1.DoubleArray(
+ values=[3.14, 0.5, 1.23]
+ )
+ ),
+ ),
+ ],
+ )
+ def test_preview_convert_value_to_gapic_feature_value(
+ self, feature_id, test_value, expected_feature_value
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+
+ feature_value = my_entity_type.preview._convert_value_to_gapic_feature_value(
+ feature_id=feature_id, value=test_value
+ )
+
+ assert feature_value == expected_feature_value
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize(
+ "feature_id, feature_value",
+ [("test_feature_id", set({1, 2, 3})), ("test_feature_id", [1, 2, "test_str"])],
+ )
+ def test_preview_convert_value_to_gapic_feature_value_raise_error(
+ self, feature_id, feature_value
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ with pytest.raises(ValueError):
+ my_entity_type.preview._convert_value_to_gapic_feature_value(
+ feature_id=feature_id, value=feature_value
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize(
+ "instance, feature_time, entity_id, expected_feature_values",
+ [
+ ( # 0. Instance is Dict, no feature_time provided.
+ {"string_test_entity": {"string_feature": "test_string"}},
+ None,
+ "string_test_entity",
+ {
+ "string_feature": gca_featurestore_online_service.FeatureValue(
+ string_value="test_string"
+ )
+ },
+ ),
+ ( # 1. Instance is dataframe, no feature_time provided.
+ pd.DataFrame(
+ data=[{"test_feature_1": 4.9, "test_feature_2": 10}],
+ columns=["test_feature_1", "test_feature_2"],
+ index=["pd_test_entity"],
+ ),
+ None,
+ "pd_test_entity",
+ {
+ "test_feature_1": gca_featurestore_online_service.FeatureValue(
+ double_value=4.9
+ ),
+ "test_feature_2": gca_featurestore_online_service.FeatureValue(
+ int64_value=10
+ ),
+ },
+ ),
+ ( # 2. Instance is payload, no feature_time provided.
+ [
+ gca_featurestore_online_service.WriteFeatureValuesPayload(
+ entity_id="string_test_entity",
+ feature_values={
+ "string_feature": gca_featurestore_online_service.FeatureValue(
+ string_value="test_string"
+ )
+ },
+ )
+ ],
+ None,
+ "string_test_entity",
+ {
+ "string_feature": gca_featurestore_online_service.FeatureValue(
+ string_value="test_string"
+ )
+ },
+ ),
+ ( # 3 Instance is Dict, feature_time provided, indicating timestamp column.
+ # Timestamp is datetime.
+ {
+ "string_test_entity": {
+ "string_feature": "test_string",
+ "timestamp_col": _TEST_FEATURE_TIME_DATETIME_UTC,
+ }
+ },
+ "timestamp_col",
+ "string_test_entity",
+ {
+ "string_feature": gca_featurestore_online_service.FeatureValue(
+ string_value="test_string",
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIME_DATETIME_UTC
+ ),
+ )
+ },
+ ),
+ ( # 4. Instance is dataframe, feature_time provided, indicating timestamp column.
+ # Timestamp is datetime
+ pd.DataFrame(
+ data=[
+ {
+ "test_feature_1": 4.9,
+ "test_feature_2": 10,
+ "feature_timestamp": _TEST_FEATURE_TIME_DATETIME_UTC,
+ }
+ ],
+ columns=["test_feature_1", "test_feature_2", "feature_timestamp"],
+ index=["pd_test_entity"],
+ ),
+ "feature_timestamp",
+ "pd_test_entity",
+ {
+ "test_feature_1": gca_featurestore_online_service.FeatureValue(
+ double_value=4.9,
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIME_DATETIME_UTC
+ ),
+ ),
+ "test_feature_2": gca_featurestore_online_service.FeatureValue(
+ int64_value=10,
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIME_DATETIME_UTC
+ ),
+ ),
+ },
+ ),
+ ( # 5. Instance is Payload, feature_time provided but ignored.
+ # Timestamp is datetime.
+ [
+ gca_featurestore_online_service.WriteFeatureValuesPayload(
+ entity_id="string_test_entity",
+ feature_values={
+ "string_feature": gca_featurestore_online_service.FeatureValue(
+ string_value="test_string",
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIME_DATETIME_UTC
+ ),
+ )
+ },
+ )
+ ],
+ None,
+ "string_test_entity",
+ {
+ "string_feature": gca_featurestore_online_service.FeatureValue(
+ string_value="test_string",
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIME_DATETIME_UTC
+ ),
+ )
+ },
+ ),
+ ( # 6. Instance is dict, feature_time provided, indicating timestamp column.
+ # Timestamp is Timestamp proto.
+ {
+ "string_test_entity": {
+ "string_feature": "test_string",
+ "timestamp_col": _TEST_FEATURE_TIMESTAMP,
+ }
+ },
+ "timestamp_col",
+ "string_test_entity",
+ {
+ "string_feature": gca_featurestore_online_service.FeatureValue(
+ string_value="test_string",
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIMESTAMP
+ ),
+ )
+ },
+ ),
+ ( # 7. Instance is dataframe, feature_time provided, indicating
+ # timestamp column. Timestamp is Timestamp proto.
+ pd.DataFrame(
+ data=[
+ {
+ "test_feature_1": 4.9,
+ "test_feature_2": 10,
+ "feature_timestamp": _TEST_FEATURE_TIMESTAMP,
+ }
+ ],
+ columns=["test_feature_1", "test_feature_2", "feature_timestamp"],
+ index=["pd_test_entity"],
+ ),
+ "feature_timestamp",
+ "pd_test_entity",
+ {
+ "test_feature_1": gca_featurestore_online_service.FeatureValue(
+ double_value=4.9,
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIMESTAMP
+ ),
+ ),
+ "test_feature_2": gca_featurestore_online_service.FeatureValue(
+ int64_value=10,
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIMESTAMP
+ ),
+ ),
+ },
+ ),
+ ( # 8. Instance is dataframe, feature_time provided, indicating
+ # timestamp column but no timestamp in instance.
+ pd.DataFrame(
+ data=[{"test_feature_1": 4.9, "test_feature_2": 10}],
+ columns=["test_feature_1", "test_feature_2", "feature_timestamp"],
+ index=["pd_test_entity"],
+ ),
+ "feature_timestamp",
+ "pd_test_entity",
+ {
+ "test_feature_1": gca_featurestore_online_service.FeatureValue(
+ double_value=4.9,
+ ),
+ "test_feature_2": gca_featurestore_online_service.FeatureValue(
+ int64_value=10,
+ ),
+ },
+ ),
+ ( # 9 Instance is dict, feature_time provided, indicating timestamp column.
+ # but no timestamp in instance.
+ {"string_test_entity": {"string_feature": "test_string"}},
+ "timestamp_col",
+ "string_test_entity",
+ {
+ "string_feature": gca_featurestore_online_service.FeatureValue(
+ string_value="test_string",
+ )
+ },
+ ),
+ ( # 10 Instance is dict, feature_time provided with datetime value.
+ {"string_test_entity": {"string_feature": "test_string"}},
+ _TEST_FEATURE_TIME_DATETIME_UTC,
+ "string_test_entity",
+ {
+ "string_feature": gca_featurestore_online_service.FeatureValue(
+ string_value="test_string",
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIME_DATETIME_UTC
+ ),
+ )
+ },
+ ),
+ ( # 11 Instance is Dataframe, feature_time provided with datetime value.
+ pd.DataFrame(
+ data=[{"test_feature_1": 4.9, "test_feature_2": 10}],
+ columns=["test_feature_1", "test_feature_2"],
+ index=["pd_test_entity"],
+ ),
+ _TEST_FEATURE_TIME_DATETIME_UTC,
+ "pd_test_entity",
+ {
+ "test_feature_1": gca_featurestore_online_service.FeatureValue(
+ double_value=4.9,
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIME_DATETIME_UTC
+ ),
+ ),
+ "test_feature_2": gca_featurestore_online_service.FeatureValue(
+ int64_value=10,
+ metadata=gca_featurestore_online_service.FeatureValue.Metadata(
+ generate_time=_TEST_FEATURE_TIME_DATETIME_UTC
+ ),
+ ),
+ },
+ ),
+ ],
+ )
+ def test_write_feature_values(
+ self,
+ instance,
+ feature_time,
+ entity_id,
+ expected_feature_values,
+ write_feature_values_mock,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+
+ my_entity_type.write_feature_values(
+ instances=instance, feature_time=feature_time
+ )
+
+ write_feature_values_mock.assert_called_once_with(
+ entity_type=my_entity_type.resource_name,
+ payloads=[
+ gca_featurestore_online_service.WriteFeatureValuesPayload(
+ entity_id=entity_id, feature_values=expected_feature_values
+ )
+ ],
+ )
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize(
+ "feature_id, test_value, expected_feature_value",
+ [
+ (
+ "bool_feature_id",
+ False,
+ gca_featurestore_online_service.FeatureValue(bool_value=False),
+ ),
+ (
+ "string_feature_id",
+ "test_string",
+ gca_featurestore_online_service.FeatureValue(
+ string_value="test_string"
+ ),
+ ),
+ (
+ "int_feature_id",
+ 10,
+ gca_featurestore_online_service.FeatureValue(int64_value=10),
+ ),
+ (
+ "double_feature_id",
+ 3.1459,
+ gca_featurestore_online_service.FeatureValue(double_value=3.1459),
+ ),
+ (
+ "bytes_feature_id",
+ bytes("test_str", "utf-8"),
+ gca_featurestore_online_service.FeatureValue(
+ bytes_value=bytes("test_str", "utf-8")
+ ),
+ ),
+ (
+ "bool_array_feature_id",
+ [False, True, True],
+ gca_featurestore_online_service.FeatureValue(
+ bool_array_value=gca_types.BoolArray(values=[False, True, True])
+ ),
+ ),
+ (
+ "string_array_feature_id",
+ ["test_string_1", "test_string_2", "test_string_3"],
+ gca_featurestore_online_service.FeatureValue(
+ string_array_value=gca_types.StringArray(
+ values=["test_string_1", "test_string_2", "test_string_3"]
+ )
+ ),
+ ),
+ (
+ "int_array_feature_id",
+ [1, 2, 3],
+ gca_featurestore_online_service.FeatureValue(
+ int64_array_value=gca_types.Int64Array(values=[1, 2, 3])
+ ),
+ ),
+ (
+ "double_array_feature_id",
+ [3.14, 0.5, 1.23],
+ gca_featurestore_online_service.FeatureValue(
+ double_array_value=gca_types.DoubleArray(values=[3.14, 0.5, 1.23])
+ ),
+ ),
+ ],
+ )
+ def test_convert_value_to_gapic_feature_value(
+ self, feature_id, test_value, expected_feature_value
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+
+ feature_value = my_entity_type._convert_value_to_gapic_feature_value(
+ feature_id=feature_id, value=test_value
+ )
+
+ assert feature_value == expected_feature_value
+
+ @pytest.mark.usefixtures("get_entity_type_mock")
+ @pytest.mark.parametrize(
+ "feature_id, feature_value",
+ [("test_feature_id", set({1, 2, 3})), ("test_feature_id", [1, 2, "test_str"])],
+ )
+ def test_convert_value_to_gapic_feature_value_raise_error(
+ self, feature_id, feature_value
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
+ with pytest.raises(ValueError):
+ my_entity_type._convert_value_to_gapic_feature_value(
+ feature_id=feature_id, value=feature_value
+ )
+
+ @pytest.mark.parametrize(
+ "feature_ids, feature_value_types, entity_ids, feature_values, expected_df",
+ [
+ (
+ _TEST_FEATURE_IDS_FOR_DF_CONSTRUCTION,
+ _TEST_FEATURE_VALUE_TYPES_FOR_DF_CONSTRUCTION,
+ ["entity_01", "entity_02"],
+ [
+ [
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ True,
+ [True, True],
+ 2.2,
+ [2.2, 4.4],
+ 2,
+ [2, 3],
+ "test1",
+ ["test2", "test3"],
+ b"0",
+ ],
+ ],
+ pd.DataFrame(
+ data=[
+ [
+ "entity_01",
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_02",
+ True,
+ [True, True],
+ 2.2,
+ [2.2, 4.4],
+ 2,
+ [2, 3],
+ "test1",
+ ["test2", "test3"],
+ b"0",
+ ],
+ ],
+ columns=["entity_id"] + _TEST_FEATURE_IDS_FOR_DF_CONSTRUCTION,
+ ),
+ ),
+ (
+ _TEST_FEATURE_IDS_FOR_DF_CONSTRUCTION,
+ _TEST_FEATURE_VALUE_TYPES_FOR_DF_CONSTRUCTION,
+ ["entity_01", "entity_02"],
+ [
+ [
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [None, None, None, None, None, None, None, None, None],
+ ],
+ pd.DataFrame(
+ data=[
+ [
+ "entity_01",
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_02",
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ ],
+ ],
+ columns=["entity_id"] + _TEST_FEATURE_IDS_FOR_DF_CONSTRUCTION,
+ ),
+ ),
+ (
+ _TEST_FEATURE_IDS_FOR_DF_CONSTRUCTION,
+ _TEST_FEATURE_VALUE_TYPES_FOR_DF_CONSTRUCTION,
+ ["entity_01"],
+ [[None, None, None, None, None, None, None, None, None]],
+ pd.DataFrame(
+ data=[
+ [
+ "entity_01",
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ ]
+ ],
+ columns=["entity_id"] + _TEST_FEATURE_IDS_FOR_DF_CONSTRUCTION,
+ ),
+ ),
+ (
+ _TEST_FEATURE_IDS_FOR_DF_CONSTRUCTION,
+ _TEST_FEATURE_VALUE_TYPES_FOR_DF_CONSTRUCTION,
+ [
+ "entity_01",
+ "entity_02",
+ "entity_03",
+ "entity_04",
+ "entity_05",
+ "entity_06",
+ "entity_07",
+ "entity_08",
+ "entity_09",
+ "entity_10",
+ ],
+ [
+ [
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ None,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ False,
+ None,
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ False,
+ [True, False],
+ None,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ False,
+ [True, False],
+ 1.2,
+ None,
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ None,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ None,
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ None,
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ None,
+ b"1",
+ ],
+ [
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ None,
+ ],
+ ],
+ pd.DataFrame(
+ data=[
+ [
+ "entity_01",
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_02",
+ None,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_03",
+ False,
+ None,
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_04",
+ False,
+ [True, False],
+ None,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_05",
+ False,
+ [True, False],
+ 1.2,
+ None,
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_06",
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ None,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_07",
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ None,
+ "test",
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_08",
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ None,
+ ["test1", "test2"],
+ b"1",
+ ],
+ [
+ "entity_09",
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ None,
+ b"1",
+ ],
+ [
+ "entity_10",
+ False,
+ [True, False],
+ 1.2,
+ [1.2, 3.4],
+ 1,
+ [1, 2],
+ "test",
+ ["test1", "test2"],
+ None,
+ ],
+ ],
+ columns=["entity_id"] + _TEST_FEATURE_IDS_FOR_DF_CONSTRUCTION,
+ ),
+ ),
+ ],
+ )
+ def test_construct_dataframe(
+ self,
+ feature_ids,
+ feature_value_types,
+ entity_ids,
+ feature_values,
+ expected_df,
+ ):
+ entity_views = [
+ _get_entity_view_proto(
+ entity_id=entity_id,
+ feature_value_types=feature_value_types,
+ feature_values=entity_feature_values,
+ )
+ for (entity_id, entity_feature_values) in zip(entity_ids, feature_values)
+ ]
+ df = aiplatform.EntityType._construct_dataframe(
+ feature_ids=feature_ids, entity_views=entity_views
+ )
+ assert df.equals(expected_df)
+
+
+class TestFeature:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize(
+ "feature_name, entity_type_id, featurestore_id",
+ [
+ (_TEST_FEATURE_NAME, None, None),
+ (_TEST_FEATURE_ID, _TEST_ENTITY_TYPE_ID, _TEST_FEATURESTORE_ID),
+ ],
+ )
+ def test_init_feature(
+ self, feature_name, entity_type_id, featurestore_id, get_feature_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ aiplatform.Feature(
+ feature_name=feature_name,
+ entity_type_id=entity_type_id,
+ featurestore_id=featurestore_id,
+ )
+ get_feature_mock.assert_called_once_with(
+ name=_TEST_FEATURE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_feature_raises_with_only_featurestore_id(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ with pytest.raises(ValueError):
+ aiplatform.Feature(
+ feature_name=_TEST_FEATURE_NAME,
+ featurestore_id=_TEST_FEATURESTORE_ID,
+ )
+
+ def test_init_feature_raises_with_only_entity_type_id(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ with pytest.raises(ValueError):
+ aiplatform.Feature(
+ feature_name=_TEST_FEATURE_NAME,
+ entity_type_id=_TEST_ENTITY_TYPE_ID,
+ )
+
+ @pytest.mark.usefixtures("get_feature_mock")
+ def test_get_featurestore(self, get_featurestore_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_feature = aiplatform.Feature(feature_name=_TEST_FEATURE_NAME)
+ my_featurestore = my_feature.get_featurestore()
+
+ get_featurestore_mock.assert_called_once_with(
+ name=my_featurestore.resource_name, retry=base._DEFAULT_RETRY
+ )
+ assert isinstance(my_featurestore, aiplatform.Featurestore)
+
+ @pytest.mark.usefixtures("get_feature_mock")
+ def test_get_entity_type(self, get_entity_type_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_feature = aiplatform.Feature(feature_name=_TEST_FEATURE_NAME)
+ my_entity_type = my_feature.get_entity_type()
+
+ get_entity_type_mock.assert_called_once_with(
+ name=my_entity_type.resource_name, retry=base._DEFAULT_RETRY
+ )
+ assert isinstance(my_entity_type, aiplatform.EntityType)
+
+ @pytest.mark.usefixtures("get_feature_mock")
+ def test_update_feature(self, update_feature_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_feature = aiplatform.Feature(feature_name=_TEST_FEATURE_NAME)
+ updated_feature = my_feature.update(
+ labels=_TEST_LABELS_UPDATE,
+ update_request_timeout=None,
+ )
+
+ expected_feature = gca_feature.Feature(
+ name=_TEST_FEATURE_NAME,
+ labels=_TEST_LABELS_UPDATE,
+ )
+ update_feature_mock.assert_called_once_with(
+ feature=expected_feature,
+ update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ assert isinstance(updated_feature, Feature)
+
+ @pytest.mark.parametrize(
+ "entity_type_name, featurestore_id",
+ [
+ (_TEST_ENTITY_TYPE_NAME, None),
+ (_TEST_ENTITY_TYPE_ID, _TEST_FEATURESTORE_ID),
+ ],
+ )
+ def test_list_features(self, entity_type_name, featurestore_id, list_features_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_feature_list = aiplatform.Feature.list(
+ entity_type_name=entity_type_name, featurestore_id=featurestore_id
+ )
+
+ list_features_mock.assert_called_once_with(
+ request={"parent": _TEST_ENTITY_TYPE_NAME}
+ )
+ assert len(my_feature_list) == len(_TEST_FEATURE_LIST)
+ for my_feature in my_feature_list:
+ assert isinstance(my_feature, aiplatform.Feature)
+
+ @pytest.mark.usefixtures("get_feature_mock")
+ def test_search_features(self, search_features_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_feature_list = aiplatform.Feature.search()
+
+ search_features_mock.assert_called_once_with(
+ request={"location": _TEST_PARENT, "query": None}
+ )
+ assert len(my_feature_list) == len(_TEST_FEATURE_LIST)
+ for my_feature in my_feature_list:
+ assert isinstance(my_feature, aiplatform.Feature)
+
+ @pytest.mark.usefixtures("get_feature_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_feature(self, create_feature_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_feature = aiplatform.Feature.create(
+ feature_id=_TEST_FEATURE_ID,
+ value_type=_TEST_FEATURE_VALUE_TYPE_STR,
+ entity_type_name=_TEST_ENTITY_TYPE_ID,
+ featurestore_id=_TEST_FEATURESTORE_ID,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ my_feature.wait()
+
+ expected_feature = gca_feature.Feature(
+ value_type=_TEST_FEATURE_VALUE_TYPE_ENUM,
+ labels=_TEST_LABELS,
+ description=_TEST_DESCRIPTION,
+ )
+ create_feature_mock.assert_called_once_with(
+ request=gca_featurestore_service.CreateFeatureRequest(
+ parent=_TEST_ENTITY_TYPE_NAME,
+ feature=expected_feature,
+ feature_id=_TEST_FEATURE_ID,
+ ),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+
+class TestResourceManagerUtils:
+ @pytest.mark.usefixtures("get_project_mock")
+ def test_get_project_id(self):
+ project_id = resource_manager_utils.get_project_id(project_number="123456")
+ assert project_id == _TEST_PROJECT
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_helpers.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bc593ba64d7c604b9407f5a80c64eb557292b71
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_helpers.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import importlib
+import pytest
+
+from typing import Sequence
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import helpers
+from google.cloud.aiplatform import initializer
+
+
+class TestContainerUriHelpers:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def _build_predict_uri_kwargs(self, args: Sequence[str]) -> dict:
+ """
+ Takes list of values for all method parameters and return dict of kwargs,
+ dropping keywords that were set as None.
+ """
+ func = helpers.get_prebuilt_prediction_container_uri
+ arg_names = func.__code__.co_varnames[: func.__code__.co_argcount]
+ return {k: v for k, v in dict(zip(arg_names, args)).items() if v is not None}
+
+ @pytest.mark.parametrize(
+ "args, expected_uri",
+ [
+ (
+ ("tensorflow", "2.6", None, None),
+ "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-6:latest",
+ ),
+ (
+ ("tensorflow", "1.15", "europe-west4", None),
+ "europe-docker.pkg.dev/vertex-ai/prediction/tf-cpu.1-15:latest",
+ ),
+ (
+ ("tensorflow", "2.2", None, "gpu"),
+ "us-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-2:latest",
+ ),
+ (
+ ("sklearn", "0.24", "asia", "cpu"),
+ "asia-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-24:latest",
+ ),
+ (
+ ("sklearn", "0.20", None, None),
+ "us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest",
+ ),
+ (
+ ("xgboost", "1.3", None, None),
+ "us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-3:latest",
+ ),
+ (
+ ("xgboost", "0.90", "europe", None),
+ "europe-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.0-90:latest",
+ ),
+ ],
+ )
+ def test_correct_prediction_uri_args(self, args, expected_uri):
+ uri = helpers.get_prebuilt_prediction_container_uri(
+ **self._build_predict_uri_kwargs(args)
+ )
+
+ assert uri == expected_uri
+
+ def test_correct_prediction_uri_args_with_init_location(self):
+ """
+ Ensure that aiplatform.init location is used when region
+ is not provided
+ """
+ uri = aiplatform.helpers.get_prebuilt_prediction_container_uri(
+ "tensorflow", "2.6"
+ )
+ # SDK default location is us-central1
+ assert uri.startswith("us-docker.pkg.dev")
+
+ aiplatform.init(location="asia-northeast3")
+ uri = aiplatform.helpers.get_prebuilt_prediction_container_uri(
+ "tensorflow", "2.6"
+ )
+ assert uri.startswith("asia-docker.pkg.dev")
+
+ aiplatform.init(location="europe-west2")
+ uri = aiplatform.helpers.get_prebuilt_prediction_container_uri(
+ "xgboost", "0.90"
+ )
+ assert uri.startswith("europe-docker.pkg.dev")
+
+ @pytest.mark.parametrize(
+ "args, expected_error_msg",
+ [
+ (
+ ("pytorch", "1.0", None, None),
+ (
+ "No serving container for `pytorch` version `1.0` with accelerator "
+ "`cpu` found. Supported versions include"
+ ),
+ ),
+ (
+ ("tensorflow", "9.15", None, None),
+ (
+ "No serving container for `tensorflow` version `9.15` with accelerator "
+ "`cpu` found. Supported versions include"
+ ),
+ ),
+ (
+ # Make sure region error supercedes version error
+ ("tensorflow", "9.15", "pluto", None),
+ "Unsupported container region `pluto`, supported regions are ",
+ ),
+ (
+ ("tensorflow", "2.2", "narnia", None),
+ "Unsupported container region `narnia`, supported regions are ",
+ ),
+ (
+ ("sklearn", "0.24", "asia", "gpu"),
+ "sklearn containers do not support `gpu` accelerator. Supported accelerators are cpu.",
+ ),
+ (
+ # Make sure framework error supercedes accelerator error
+ ("onnx", "1.9", None, "gpu"),
+ "No containers found for framework `onnx`. Supported frameworks are",
+ ),
+ ],
+ )
+ def test_invalid_prediction_uri_args(self, args, expected_error_msg):
+
+ with pytest.raises(ValueError) as err:
+ helpers.get_prebuilt_prediction_container_uri(
+ **self._build_predict_uri_kwargs(args)
+ )
+
+ assert err.match(expected_error_msg)
+
+ @pytest.mark.parametrize(
+ "image_uri, expected",
+ [
+ (
+ "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-6:latest",
+ True,
+ ),
+ (
+ "europe-docker.pkg.dev/vertex-ai/prediction/tf-cpu.1-15:latest",
+ True,
+ ),
+ (
+ "asia-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-2:latest",
+ True,
+ ),
+ (
+ "us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-24:latest",
+ True,
+ ),
+ (
+ "us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-3:latest",
+ True,
+ ),
+ (
+ "us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-7",
+ False,
+ ),
+ (
+ "europe-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.1-0:latest",
+ True,
+ ),
+ (
+ "europe-docker.pkg.dev/vertex-ai/prediction/onnx-cpu.1-0:latest",
+ False,
+ ),
+ (
+ "us-central1-docker.pkg.dev/vertex-ai/custom-container:latest",
+ False,
+ ),
+ ],
+ )
+ def test_is_prebuilt_prediction_container_uri(self, image_uri, expected):
+ result = helpers.is_prebuilt_prediction_container_uri(image_uri)
+
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "args, expected_uri",
+ [
+ (
+ ("tensorflow", "2.6", None, None),
+ "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-6:latest",
+ ),
+ (
+ ("tensorflow", "1.13", "europe-west4", None),
+ "europe-docker.pkg.dev/vertex-ai/prediction/tf-cpu.1-15:latest",
+ ),
+ (
+ ("tensorflow", "2.7.1", None, "gpu"),
+ "us-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-8:latest",
+ ),
+ (
+ ("sklearn", "0.24", "asia", "cpu"),
+ "asia-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-24:latest",
+ ),
+ (
+ ("sklearn", "0.21.2", None, None),
+ "us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-22:latest",
+ ),
+ (
+ ("xgboost", "1.2.1", None, None),
+ "us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-3:latest",
+ ),
+ (
+ ("xgboost", "0.90", "europe", None),
+ "europe-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.0-90:latest",
+ ),
+ ],
+ )
+ def test_get_closest_match_prebuilt_container_uri(self, args, expected_uri):
+ uri = helpers._get_closest_match_prebuilt_container_uri(
+ **self._build_predict_uri_kwargs(args)
+ )
+
+ assert uri == expected_uri
+
+ def test_get_closest_match_prebuilt_container_uri_with_init_location(self):
+ uri = aiplatform.helpers._get_closest_match_prebuilt_container_uri(
+ "tensorflow", "2.6"
+ )
+ # SDK default location is us-central1
+ assert uri.startswith("us-docker.pkg.dev")
+
+ aiplatform.init(location="asia-northeast3")
+ uri = aiplatform.helpers._get_closest_match_prebuilt_container_uri(
+ "tensorflow", "2.6"
+ )
+ assert uri.startswith("asia-docker.pkg.dev")
+
+ aiplatform.init(location="europe-west2")
+ uri = aiplatform.helpers._get_closest_match_prebuilt_container_uri(
+ "xgboost", "0.90"
+ )
+ assert uri.startswith("europe-docker.pkg.dev")
+
+ @pytest.mark.parametrize(
+ "args, expected_error_msg",
+ [
+ (
+ ("lightgbm", "3.0", None, None),
+ "No containers found for framework `lightgbm`. Supported frameworks are",
+ ),
+ (
+ ("tensorflow", "9.15", None, None),
+ (
+ "You are using `tensorflow` version `9.15`. "
+ "Vertex pre-built containers support up to `tensorflow` version "
+ ),
+ ),
+ (
+ # Make sure region error supercedes version error
+ ("tensorflow", "9.15", "pluto", None),
+ "Unsupported container region `pluto`, supported regions are ",
+ ),
+ (
+ ("tensorflow", "2.2", "narnia", None),
+ "Unsupported container region `narnia`, supported regions are ",
+ ),
+ (
+ ("sklearn", "0.24", "asia", "gpu"),
+ "sklearn containers do not support `gpu` accelerator. Supported accelerators are cpu.",
+ ),
+ (
+ # Make sure framework error supercedes accelerator error
+ ("onnx", "1.9", None, "gpu"),
+ "No containers found for framework `onnx`. Supported frameworks are",
+ ),
+ ],
+ )
+ def test_get_closest_match_prebuilt_container_uri_error(
+ self, args, expected_error_msg
+ ):
+ with pytest.raises(ValueError) as err:
+ helpers._get_closest_match_prebuilt_container_uri(
+ **self._build_predict_uri_kwargs(args)
+ )
+
+ assert err.match(expected_error_msg)
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_hyperparameter_tuning_job.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_hyperparameter_tuning_job.py
new file mode 100644
index 0000000000000000000000000000000000000000..5631ad48d20707386621a054c3f7d0924c50794b
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_hyperparameter_tuning_job.py
@@ -0,0 +1,1038 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+import copy
+from importlib import reload
+from unittest import mock
+from unittest.mock import patch
+
+import logging
+from google.rpc import status_pb2
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import hyperparameter_tuning as hpt
+from google.cloud.aiplatform import jobs
+from google.cloud.aiplatform.compat.types import (
+ encryption_spec as gca_encryption_spec_compat,
+ hyperparameter_tuning_job as gca_hyperparameter_tuning_job_compat,
+ job_state as gca_job_state_compat,
+ study as gca_study_compat,
+)
+from google.cloud.aiplatform.compat.services import job_service_client
+
+import constants as test_constants
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_ID = test_constants.TrainingJobConstants._TEST_ID
+_TEST_DISPLAY_NAME = "my_hp_job_1234"
+
+_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT
+
+_TEST_STAGING_BUCKET = test_constants.TrainingJobConstants._TEST_STAGING_BUCKET
+_TEST_BASE_OUTPUT_DIR = test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR
+
+_TEST_HYPERPARAMETERTUNING_JOB_NAME = (
+ f"{_TEST_PARENT}/hyperparameterTuningJobs/{_TEST_ID}"
+)
+
+# CMEK encryption
+_TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_default"
+_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec_compat.EncryptionSpec(
+ kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+
+_TEST_SERVICE_ACCOUNT = test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT
+
+
+_TEST_NETWORK = test_constants.TrainingJobConstants._TEST_NETWORK
+
+_TEST_TIMEOUT = test_constants.TrainingJobConstants._TEST_TIMEOUT
+_TEST_RESTART_JOB_ON_WORKER_RESTART = (
+ test_constants.TrainingJobConstants._TEST_RESTART_JOB_ON_WORKER_RESTART
+)
+_TEST_DISABLE_RETRIES = test_constants.TrainingJobConstants._TEST_DISABLE_RETRIES
+_TEST_MAX_WAIT_DURATION = test_constants.TrainingJobConstants._TEST_MAX_WAIT_DURATION
+
+_TEST_METRIC_SPEC_KEY = "test-metric"
+_TEST_METRIC_SPEC_VALUE = "maximize"
+
+_TEST_PARALLEL_TRIAL_COUNT = 8
+_TEST_MAX_TRIAL_COUNT = 64
+_TEST_MAX_FAILED_TRIAL_COUNT = 4
+_TEST_SEARCH_ALGORITHM = "random"
+_TEST_MEASUREMENT_SELECTION = "best"
+
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+
+_TEST_CONDITIONAL_PARAMETER_DECAY = hpt.DoubleParameterSpec(
+ min=1e-07, max=1, scale="linear", parent_values=[32, 64]
+)
+_TEST_CONDITIONAL_PARAMETER_LR = hpt.DoubleParameterSpec(
+ min=1e-07, max=1, scale="linear", parent_values=[4, 8, 16]
+)
+
+_TEST_BASE_HYPERPARAMETER_TUNING_JOB_PROTO = gca_hyperparameter_tuning_job_compat.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ study_spec=gca_study_compat.StudySpec(
+ metrics=[
+ gca_study_compat.StudySpec.MetricSpec(
+ metric_id=_TEST_METRIC_SPEC_KEY, goal=_TEST_METRIC_SPEC_VALUE.upper()
+ )
+ ],
+ parameters=[
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="lr",
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
+ double_value_spec=gca_study_compat.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=0.001, max_value=0.1
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="units",
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ integer_value_spec=gca_study_compat.StudySpec.ParameterSpec.IntegerValueSpec(
+ min_value=4, max_value=1028
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="activation",
+ categorical_value_spec=gca_study_compat.StudySpec.ParameterSpec.CategoricalValueSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="batch_size",
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ discrete_value_spec=gca_study_compat.StudySpec.ParameterSpec.DiscreteValueSpec(
+ values=[4, 8, 16, 32, 64]
+ ),
+ conditional_parameter_specs=[
+ gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec(
+ parent_discrete_values=gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition(
+ values=[32, 64]
+ ),
+ parameter_spec=gca_study_compat.StudySpec.ParameterSpec(
+ double_value_spec=gca_study_compat.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=1e-07, max_value=1
+ ),
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ parameter_id="decay",
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec(
+ parent_discrete_values=gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition(
+ values=[4, 8, 16]
+ ),
+ parameter_spec=gca_study_compat.StudySpec.ParameterSpec(
+ double_value_spec=gca_study_compat.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=1e-07, max_value=1
+ ),
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ parameter_id="learning_rate",
+ ),
+ ),
+ ],
+ ),
+ ],
+ algorithm=gca_study_compat.StudySpec.Algorithm.RANDOM_SEARCH,
+ measurement_selection_type=gca_study_compat.StudySpec.MeasurementSelectionType.BEST_MEASUREMENT,
+ ),
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ trial_job_spec=test_constants.TrainingJobConstants._TEST_BASE_CUSTOM_JOB_PROTO.job_spec,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+)
+
+_TEST_BASE_TRIAL_PROTO = gca_study_compat.Trial()
+
+
+def _get_hyperparameter_tuning_job_proto(state=None, name=None, error=None):
+ hyperparameter_tuning_job_proto = copy.deepcopy(
+ _TEST_BASE_HYPERPARAMETER_TUNING_JOB_PROTO
+ )
+ hyperparameter_tuning_job_proto.name = name
+ hyperparameter_tuning_job_proto.state = state
+ hyperparameter_tuning_job_proto.error = error
+
+ return hyperparameter_tuning_job_proto
+
+
+def _get_trial_proto(id=None, state=None):
+ trial_proto = copy.deepcopy(_TEST_BASE_TRIAL_PROTO)
+ trial_proto.id = id
+ trial_proto.state = state
+ if state == gca_study_compat.Trial.State.ACTIVE:
+ trial_proto.web_access_uris = (
+ test_constants.TrainingJobConstants._TEST_WEB_ACCESS_URIS
+ )
+ return trial_proto
+
+
+def _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ state=None, name=None, error=None, trials=[]
+):
+ hyperparameter_tuning_job_proto = _get_hyperparameter_tuning_job_proto(
+ state=state,
+ name=name,
+ error=error,
+ )
+ hyperparameter_tuning_job_proto.trial_job_spec.enable_web_access = (
+ test_constants.TrainingJobConstants._TEST_ENABLE_WEB_ACCESS
+ )
+ if state == gca_job_state_compat.JobState.JOB_STATE_RUNNING:
+ hyperparameter_tuning_job_proto.trials = trials
+ return hyperparameter_tuning_job_proto
+
+
+def _get_hyperparameter_tuning_job_proto_with_spot_strategy(
+ state=None, name=None, error=None, trials=[]
+):
+ hyperparameter_tuning_job_proto = _get_hyperparameter_tuning_job_proto(
+ state=state,
+ name=name,
+ error=error,
+ )
+ hyperparameter_tuning_job_proto.trial_job_spec.scheduling.strategy = (
+ test_constants.TrainingJobConstants._TEST_SPOT_STRATEGY
+ )
+ if state == gca_job_state_compat.JobState.JOB_STATE_RUNNING:
+ hyperparameter_tuning_job_proto.trials = trials
+ return hyperparameter_tuning_job_proto
+
+
+@pytest.fixture
+def get_hyperparameter_tuning_job_mock():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_hyperparameter_tuning_job"
+ ) as get_hyperparameter_tuning_job_mock:
+ get_hyperparameter_tuning_job_mock.side_effect = [
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_hyperparameter_tuning_job_mock
+
+
+@pytest.fixture
+def get_hyperparameter_tuning_job_mock_with_enable_web_access():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_hyperparameter_tuning_job"
+ ) as get_hyperparameter_tuning_job_mock:
+ get_hyperparameter_tuning_job_mock.side_effect = [
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ trials=[
+ _get_trial_proto(
+ id="1", state=gca_study_compat.Trial.State.REQUESTED
+ ),
+ ],
+ ),
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ trials=[
+ _get_trial_proto(id="1", state=gca_study_compat.Trial.State.ACTIVE),
+ ],
+ ),
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ trials=[
+ _get_trial_proto(id="1", state=gca_study_compat.Trial.State.ACTIVE),
+ ],
+ ),
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ trials=[
+ _get_trial_proto(id="1", state=gca_study_compat.Trial.State.ACTIVE),
+ ],
+ ),
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ trials=[
+ _get_trial_proto(
+ id="1", state=gca_study_compat.Trial.State.SUCCEEDED
+ ),
+ ],
+ ),
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ trials=[
+ _get_trial_proto(
+ id="1", state=gca_study_compat.Trial.State.SUCCEEDED
+ ),
+ ],
+ ),
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ trials=[
+ _get_trial_proto(
+ id="1", state=gca_study_compat.Trial.State.SUCCEEDED
+ ),
+ ],
+ ),
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ trials=[
+ _get_trial_proto(
+ id="1", state=gca_study_compat.Trial.State.SUCCEEDED
+ ),
+ ],
+ ),
+ ]
+ yield get_hyperparameter_tuning_job_mock
+
+
+@pytest.fixture
+def get_hyperparameter_tuning_job_mock_with_fail():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_hyperparameter_tuning_job"
+ ) as get_hyperparameter_tuning_job_mock:
+ get_hyperparameter_tuning_job_mock.side_effect = [
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_FAILED,
+ error=status_pb2.Status(message="Test Error"),
+ ),
+ ]
+ yield get_hyperparameter_tuning_job_mock
+
+
+@pytest.fixture
+def get_hyperparameter_tuning_job_mock_with_spot_strategy():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_hyperparameter_tuning_job"
+ ) as get_hyperparameter_tuning_job_mock:
+ get_hyperparameter_tuning_job_mock.side_effect = [
+ _get_hyperparameter_tuning_job_proto_with_spot_strategy(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_hyperparameter_tuning_job_proto_with_spot_strategy(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_hyperparameter_tuning_job_proto_with_spot_strategy(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_hyperparameter_tuning_job_mock
+
+
+@pytest.fixture
+def create_hyperparameter_tuning_job_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_hyperparameter_tuning_job"
+ ) as create_hyperparameter_tuning_job_mock:
+ create_hyperparameter_tuning_job_mock.return_value = (
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ )
+ )
+ yield create_hyperparameter_tuning_job_mock
+
+
+@pytest.fixture
+def create_hyperparameter_tuning_job_mock_with_enable_web_access():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_hyperparameter_tuning_job"
+ ) as create_hyperparameter_tuning_job_mock:
+ create_hyperparameter_tuning_job_mock.return_value = (
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ )
+ )
+ yield create_hyperparameter_tuning_job_mock
+
+
+@pytest.fixture
+def create_hyperparameter_tuning_job_mock_fail():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_hyperparameter_tuning_job"
+ ) as create_hyperparameter_tuning_job_mock:
+ create_hyperparameter_tuning_job_mock.side_effect = RuntimeError("Mock fail")
+ yield create_hyperparameter_tuning_job_mock
+
+
+@pytest.fixture
+def create_hyperparameter_tuning_job_mock_with_tensorboard():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_hyperparameter_tuning_job"
+ ) as create_hyperparameter_tuning_job_mock:
+ hyperparameter_tuning_job_proto = _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ )
+ hyperparameter_tuning_job_proto.trial_job_spec.tensorboard = (
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_NAME
+ )
+ create_hyperparameter_tuning_job_mock.return_value = (
+ hyperparameter_tuning_job_proto
+ )
+ yield create_hyperparameter_tuning_job_mock
+
+
+@pytest.fixture
+def create_hyperparameter_tuning_job_mock_with_spot_strategy():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_hyperparameter_tuning_job"
+ ) as create_hyperparameter_tuning_job_mock:
+ create_hyperparameter_tuning_job_mock.return_value = (
+ _get_hyperparameter_tuning_job_proto_with_spot_strategy(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ )
+ )
+ yield create_hyperparameter_tuning_job_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestHyperparameterTuningJob:
+ def setup_method(self):
+ reload(aiplatform.initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ def test_create_hyperparameter_tuning_job(
+ self,
+ create_hyperparameter_tuning_job_mock,
+ get_hyperparameter_tuning_job_mock,
+ sync,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ custom_job = aiplatform.CustomJob(
+ display_name=test_constants.TrainingJobConstants._TEST_DISPLAY_NAME,
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_dir=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR,
+ )
+
+ job = aiplatform.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ custom_job=custom_job,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+
+ expected_hyperparameter_tuning_job = _get_hyperparameter_tuning_job_proto()
+
+ create_hyperparameter_tuning_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ hyperparameter_tuning_job=expected_hyperparameter_tuning_job,
+ timeout=None,
+ )
+
+ assert job.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ assert job.network == _TEST_NETWORK
+ assert job.trials == []
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_hyperparameter_tuning_job_with_timeout(
+ self,
+ create_hyperparameter_tuning_job_mock,
+ get_hyperparameter_tuning_job_mock,
+ sync,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ custom_job = aiplatform.CustomJob(
+ display_name=test_constants.TrainingJobConstants._TEST_DISPLAY_NAME,
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_dir=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR,
+ )
+
+ job = aiplatform.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ custom_job=custom_job,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=180.0,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+
+ expected_hyperparameter_tuning_job = _get_hyperparameter_tuning_job_proto()
+
+ create_hyperparameter_tuning_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ hyperparameter_tuning_job=expected_hyperparameter_tuning_job,
+ timeout=180.0,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_hyperparameter_tuning_job_with_fail_raises(
+ self,
+ create_hyperparameter_tuning_job_mock,
+ get_hyperparameter_tuning_job_mock_with_fail,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ custom_job = aiplatform.CustomJob(
+ display_name=test_constants.TrainingJobConstants._TEST_DISPLAY_NAME,
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_dir=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR,
+ )
+
+ job = aiplatform.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ custom_job=custom_job,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ labels=_TEST_LABELS,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+
+ expected_hyperparameter_tuning_job = _get_hyperparameter_tuning_job_proto()
+
+ create_hyperparameter_tuning_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ hyperparameter_tuning_job=expected_hyperparameter_tuning_job,
+ timeout=None,
+ )
+
+ assert job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_FAILED
+
+ @pytest.mark.usefixtures("create_hyperparameter_tuning_job_mock_fail")
+ def test_run_hyperparameter_tuning_job_with_fail_at_creation(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ custom_job = aiplatform.CustomJob(
+ display_name=test_constants.TrainingJobConstants._TEST_DISPLAY_NAME,
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_dir=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR,
+ )
+
+ job = aiplatform.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ custom_job=custom_job,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=False,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.wait_for_resource_creation()
+ assert e.match("Mock fail")
+
+ with pytest.raises(RuntimeError) as e:
+ job.resource_name
+ assert e.match(
+ "HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.network
+ assert e.match(
+ "HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.trials
+ assert e.match(
+ "HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ def test_hyperparameter_tuning_job_get_state_raises_without_run(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ custom_job = aiplatform.CustomJob(
+ display_name=test_constants.TrainingJobConstants._TEST_DISPLAY_NAME,
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_dir=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR,
+ )
+
+ job = aiplatform.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ custom_job=custom_job,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ )
+
+ with pytest.raises(RuntimeError):
+ print(job.state)
+
+ def test_get_hyperparameter_tuning_job(self, get_hyperparameter_tuning_job_mock):
+
+ job = aiplatform.HyperparameterTuningJob.get(
+ _TEST_HYPERPARAMETERTUNING_JOB_NAME
+ )
+
+ get_hyperparameter_tuning_job_mock.assert_called_once_with(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_PENDING
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_hyperparameter_tuning_job_with_tensorboard(
+ self,
+ create_hyperparameter_tuning_job_mock_with_tensorboard,
+ get_hyperparameter_tuning_job_mock,
+ sync,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ custom_job = aiplatform.CustomJob(
+ display_name=test_constants.TrainingJobConstants._TEST_DISPLAY_NAME,
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_dir=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR,
+ )
+
+ job = aiplatform.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ custom_job=custom_job,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ tensorboard=test_constants.TensorboardConstants._TEST_TENSORBOARD_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+
+ expected_hyperparameter_tuning_job = _get_hyperparameter_tuning_job_proto()
+ expected_hyperparameter_tuning_job.trial_job_spec.tensorboard = (
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_NAME
+ )
+
+ create_hyperparameter_tuning_job_mock_with_tensorboard.assert_called_once_with(
+ parent=_TEST_PARENT,
+ hyperparameter_tuning_job=expected_hyperparameter_tuning_job,
+ timeout=None,
+ )
+
+ assert (
+ job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ def test_create_hyperparameter_tuning_job_with_enable_web_access(
+ self,
+ create_hyperparameter_tuning_job_mock_with_enable_web_access,
+ get_hyperparameter_tuning_job_mock_with_enable_web_access,
+ sync,
+ caplog,
+ ):
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ custom_job = aiplatform.CustomJob(
+ display_name=test_constants.TrainingJobConstants._TEST_DISPLAY_NAME,
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_dir=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR,
+ )
+
+ job = aiplatform.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ custom_job=custom_job,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ enable_web_access=test_constants.TrainingJobConstants._TEST_ENABLE_WEB_ACCESS,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert "workerpool0-0" in caplog.text
+
+ expected_hyperparameter_tuning_job = (
+ _get_hyperparameter_tuning_job_proto_with_enable_web_access()
+ )
+
+ create_hyperparameter_tuning_job_mock_with_enable_web_access.assert_called_once_with(
+ parent=_TEST_PARENT,
+ hyperparameter_tuning_job=expected_hyperparameter_tuning_job,
+ timeout=None,
+ )
+
+ assert job.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ assert job.network == _TEST_NETWORK
+ assert job.trials == []
+
+ caplog.clear()
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ def test_log_enable_web_access_after_get_hyperparameter_tuning_job(
+ self,
+ get_hyperparameter_tuning_job_mock_with_enable_web_access,
+ ):
+
+ hp_job = aiplatform.HyperparameterTuningJob.get(
+ _TEST_HYPERPARAMETERTUNING_JOB_NAME
+ )
+ hp_job._block_until_complete()
+ assert hp_job._logged_web_access_uris == set(
+ test_constants.TrainingJobConstants._TEST_WEB_ACCESS_URIS.values()
+ )
+
+ def test_create_hyperparameter_tuning_job_with_spot_strategy(
+ self,
+ create_hyperparameter_tuning_job_mock_with_spot_strategy,
+ get_hyperparameter_tuning_job_mock_with_spot_strategy,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ custom_job = aiplatform.CustomJob(
+ display_name=test_constants.TrainingJobConstants._TEST_DISPLAY_NAME,
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_dir=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR,
+ )
+
+ job = aiplatform.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ custom_job=custom_job,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ scheduling_strategy=test_constants.TrainingJobConstants._TEST_SPOT_STRATEGY,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ job.wait()
+
+ expected_hyperparameter_tuning_job = (
+ _get_hyperparameter_tuning_job_proto_with_spot_strategy()
+ )
+
+ create_hyperparameter_tuning_job_mock_with_spot_strategy.assert_called_once_with(
+ parent=_TEST_PARENT,
+ hyperparameter_tuning_job=expected_hyperparameter_tuning_job,
+ timeout=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_hyperparameter_tuning_job_persistent_resource.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_hyperparameter_tuning_job_persistent_resource.py
new file mode 100644
index 0000000000000000000000000000000000000000..792d9c51b045d468d059cc81fba63713c41821c6
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_hyperparameter_tuning_job_persistent_resource.py
@@ -0,0 +1,319 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import copy
+from importlib import reload
+from unittest import mock
+from unittest.mock import patch
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform.compat.services import (
+ job_service_client_v1,
+)
+from google.cloud.aiplatform import hyperparameter_tuning as hpt
+from google.cloud.aiplatform.compat.types import (
+ custom_job_v1,
+ encryption_spec_v1,
+ hyperparameter_tuning_job_v1,
+ io_v1,
+ job_state_v1 as gca_job_state_compat,
+ study_v1 as gca_study_compat,
+)
+from google.cloud.aiplatform import jobs
+import constants as test_constants
+import pytest
+
+from google.protobuf import duration_pb2
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_ID = "1028944691210842416"
+_TEST_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_DISPLAY_NAME
+
+_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT
+
+_TEST_HYPERPARAMETERTUNING_JOB_NAME = (
+ f"{_TEST_PARENT}/hyperparameterTuningJobs/{_TEST_ID}"
+)
+
+_TEST_PREBUILT_CONTAINER_IMAGE = "gcr.io/cloud-aiplatform/container:image"
+
+_TEST_RUN_ARGS = test_constants.TrainingJobConstants._TEST_RUN_ARGS
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_EXPERIMENT_RUN = "test-experiment-run"
+
+_TEST_STAGING_BUCKET = test_constants.TrainingJobConstants._TEST_STAGING_BUCKET
+
+# CMEK encryption
+_TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_1234"
+_TEST_DEFAULT_ENCRYPTION_SPEC = encryption_spec_v1.EncryptionSpec(
+ kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+
+_TEST_SERVICE_ACCOUNT = test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT
+
+_TEST_METRIC_SPEC_KEY = "test-metric"
+_TEST_METRIC_SPEC_VALUE = "maximize"
+
+_TEST_PARALLEL_TRIAL_COUNT = 8
+_TEST_MAX_TRIAL_COUNT = 64
+_TEST_MAX_FAILED_TRIAL_COUNT = 4
+_TEST_SEARCH_ALGORITHM = "random"
+_TEST_MEASUREMENT_SELECTION = "best"
+
+_TEST_NETWORK = test_constants.TrainingJobConstants._TEST_NETWORK
+
+_TEST_TIMEOUT = test_constants.TrainingJobConstants._TEST_TIMEOUT
+_TEST_RESTART_JOB_ON_WORKER_RESTART = (
+ test_constants.TrainingJobConstants._TEST_RESTART_JOB_ON_WORKER_RESTART
+)
+_TEST_DISABLE_RETRIES = test_constants.TrainingJobConstants._TEST_DISABLE_RETRIES
+
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+
+_TEST_CONDITIONAL_PARAMETER_DECAY = hpt.DoubleParameterSpec(
+ min=1e-07, max=1, scale="linear", parent_values=[32, 64]
+)
+_TEST_CONDITIONAL_PARAMETER_LR = hpt.DoubleParameterSpec(
+ min=1e-07, max=1, scale="linear", parent_values=[4, 8, 16]
+)
+
+
+# Persistent Resource
+_TEST_PERSISTENT_RESOURCE_ID = "test-persistent-resource-1"
+
+_TEST_TRIAL_JOB_SPEC = custom_job_v1.CustomJobSpec(
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_directory=io_v1.GcsDestination(
+ output_uri_prefix=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR
+ ),
+ scheduling=custom_job_v1.Scheduling(
+ timeout=duration_pb2.Duration(seconds=_TEST_TIMEOUT),
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ ),
+ service_account=test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+)
+
+_TEST_BASE_HYPERPARAMETER_TUNING_JOB_WITH_PERSISTENT_RESOURCE_PROTO = hyperparameter_tuning_job_v1.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ study_spec=gca_study_compat.StudySpec(
+ metrics=[
+ gca_study_compat.StudySpec.MetricSpec(
+ metric_id=_TEST_METRIC_SPEC_KEY, goal=_TEST_METRIC_SPEC_VALUE.upper()
+ )
+ ],
+ parameters=[
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="lr",
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
+ double_value_spec=gca_study_compat.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=0.001, max_value=0.1
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="units",
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ integer_value_spec=gca_study_compat.StudySpec.ParameterSpec.IntegerValueSpec(
+ min_value=4, max_value=1028
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="activation",
+ categorical_value_spec=gca_study_compat.StudySpec.ParameterSpec.CategoricalValueSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec(
+ parameter_id="batch_size",
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ discrete_value_spec=gca_study_compat.StudySpec.ParameterSpec.DiscreteValueSpec(
+ values=[4, 8, 16, 32, 64]
+ ),
+ conditional_parameter_specs=[
+ gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec(
+ parent_discrete_values=gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition(
+ values=[32, 64]
+ ),
+ parameter_spec=gca_study_compat.StudySpec.ParameterSpec(
+ double_value_spec=gca_study_compat.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=1e-07, max_value=1
+ ),
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ parameter_id="decay",
+ ),
+ ),
+ gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec(
+ parent_discrete_values=gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition(
+ values=[4, 8, 16]
+ ),
+ parameter_spec=gca_study_compat.StudySpec.ParameterSpec(
+ double_value_spec=gca_study_compat.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=1e-07, max_value=1
+ ),
+ scale_type=gca_study_compat.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ parameter_id="learning_rate",
+ ),
+ ),
+ ],
+ ),
+ ],
+ algorithm=gca_study_compat.StudySpec.Algorithm.RANDOM_SEARCH,
+ measurement_selection_type=gca_study_compat.StudySpec.MeasurementSelectionType.BEST_MEASUREMENT,
+ ),
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ trial_job_spec=_TEST_TRIAL_JOB_SPEC,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+)
+
+
+def _get_hyperparameter_tuning_job_proto(state=None, name=None, error=None):
+ hyperparameter_tuning_job_proto = copy.deepcopy(
+ _TEST_BASE_HYPERPARAMETER_TUNING_JOB_WITH_PERSISTENT_RESOURCE_PROTO
+ )
+ hyperparameter_tuning_job_proto.name = name
+ hyperparameter_tuning_job_proto.state = state
+ hyperparameter_tuning_job_proto.error = error
+
+ return hyperparameter_tuning_job_proto
+
+
+@pytest.fixture
+def create_hyperparameter_tuning_job_mock():
+ with mock.patch.object(
+ job_service_client_v1.JobServiceClient, "create_hyperparameter_tuning_job"
+ ) as create_hyperparameter_tuning_job_mock:
+ create_hyperparameter_tuning_job_mock.return_value = (
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ )
+ )
+ yield create_hyperparameter_tuning_job_mock
+
+
+@pytest.fixture
+def get_hyperparameter_tuning_job_mock():
+ with patch.object(
+ job_service_client_v1.JobServiceClient, "get_hyperparameter_tuning_job"
+ ) as get_hyperparameter_tuning_job_mock:
+ get_hyperparameter_tuning_job_mock.side_effect = [
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_PENDING,
+ ),
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ _get_hyperparameter_tuning_job_proto(
+ name=_TEST_HYPERPARAMETERTUNING_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_hyperparameter_tuning_job_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestHyperparameterTuningJobPersistentResource:
+ def setup_method(self):
+ reload(aiplatform.initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_hyperparameter_tuning_job_with_persistent_resource(
+ self,
+ create_hyperparameter_tuning_job_mock,
+ get_hyperparameter_tuning_job_mock,
+ sync,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_STAGING_BUCKET,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ custom_job = jobs.CustomJob(
+ display_name=test_constants.TrainingJobConstants._TEST_DISPLAY_NAME,
+ worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC,
+ base_output_dir=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR,
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ )
+
+ job = jobs.HyperparameterTuningJob(
+ display_name=_TEST_DISPLAY_NAME,
+ custom_job=custom_job,
+ metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE},
+ parameter_spec={
+ "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"),
+ "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"),
+ "activation": hpt.CategoricalParameterSpec(
+ values=["relu", "sigmoid", "elu", "selu", "tanh"]
+ ),
+ "batch_size": hpt.DiscreteParameterSpec(
+ values=[4, 8, 16, 32, 64],
+ scale="linear",
+ conditional_parameter_spec={
+ "decay": _TEST_CONDITIONAL_PARAMETER_DECAY,
+ "learning_rate": _TEST_CONDITIONAL_PARAMETER_LR,
+ },
+ ),
+ },
+ parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT,
+ max_trial_count=_TEST_MAX_TRIAL_COUNT,
+ max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT,
+ search_algorithm=_TEST_SEARCH_ALGORITHM,
+ measurement_selection=_TEST_MEASUREMENT_SELECTION,
+ labels=_TEST_LABELS,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ )
+
+ job.wait()
+
+ expected_hyperparameter_tuning_job = _get_hyperparameter_tuning_job_proto()
+
+ create_hyperparameter_tuning_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ hyperparameter_tuning_job=expected_hyperparameter_tuning_job,
+ timeout=None,
+ )
+
+ assert job.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ assert job.network == _TEST_NETWORK
+ assert job.trials == []
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_imports.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_imports.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b72bd41bc8a64c21e9d9d341f6fa7485e206c20
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_imports.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+class TestImport:
+ """Tests that gapic library can be imported."""
+
+ def test_import_aiplatform_v1(self):
+ from google.cloud import aiplatform_v1 # noqa: F401
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_initializer.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_initializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..31c494c9f5cfba22644f22fcbf97608904cd38e0
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_initializer.py
@@ -0,0 +1,542 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import importlib
+import os
+from typing import Optional
+from unittest import mock
+from unittest.mock import patch
+
+import pytest
+
+import google.auth
+from google.auth import credentials
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.metadata.metadata import _experiment_tracker
+from google.cloud.aiplatform.constants import base as constants
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform.utils import resource_manager_utils
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ prediction_service_client_v1beta1,
+)
+import constants as test_constants
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_PROJECT_2 = "test-project-2"
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_LOCATION_2 = "europe-west4"
+_TEST_INVALID_LOCATION = "test-invalid-location"
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_DESCRIPTION = "test-description"
+_TEST_STAGING_BUCKET = "test-bucket"
+_TEST_NETWORK = "projects/12345/global/networks/myVPC"
+_TEST_SERVICE_ACCOUNT = "test-service-account@test-project.iam.gserviceaccount.com"
+
+# tensorboard
+_TEST_TENSORBOARD_ID = "1028944691210842416"
+_TEST_TENSORBOARD_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/tensorboards/{_TEST_TENSORBOARD_ID}"
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestInit:
+ def setup_method(self):
+ importlib.reload(initializer)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_project_sets_project(self):
+ initializer.global_config.init(project=_TEST_PROJECT)
+ assert initializer.global_config.project == _TEST_PROJECT
+
+ def test_not_init_project_gets_default_project(self, monkeypatch):
+ def mock_auth_default():
+ return None, _TEST_PROJECT
+
+ monkeypatch.setattr(google.auth, "default", mock_auth_default)
+ assert initializer.global_config.project == _TEST_PROJECT
+
+ def test_infer_project_id(self):
+ cloud_project_number = "123"
+
+ def mock_get_project_id(project_number: str, **_):
+ assert project_number == cloud_project_number
+ return _TEST_PROJECT
+
+ with mock.patch.object(
+ target=resource_manager_utils,
+ attribute="get_project_id",
+ new=mock_get_project_id,
+ ), mock.patch.dict(
+ os.environ, {"CLOUD_ML_PROJECT_ID": cloud_project_number}, clear=True
+ ):
+ assert initializer.global_config.project == _TEST_PROJECT
+
+ def test_infer_project_id_with_precedence(self):
+ lower_precedence_cloud_project_number = "456"
+ higher_precedence_cloud_project_number = "123"
+
+ def mock_get_project_id(project_number: str, **_):
+ assert project_number == higher_precedence_cloud_project_number
+ return _TEST_PROJECT
+
+ with mock.patch.object(
+ target=resource_manager_utils,
+ attribute="get_project_id",
+ new=mock_get_project_id,
+ ), mock.patch.dict(
+ os.environ,
+ {
+ "GOOGLE_CLOUD_PROJECT": higher_precedence_cloud_project_number,
+ "CLOUD_ML_PROJECT_ID": lower_precedence_cloud_project_number,
+ },
+ clear=True,
+ ):
+ assert initializer.global_config.project == _TEST_PROJECT
+
+ def test_init_location_sets_location(self):
+ initializer.global_config.init(location=_TEST_LOCATION)
+ assert initializer.global_config.location == _TEST_LOCATION
+
+ def test_not_init_location_gets_env_location(self):
+ os.environ["CLOUD_ML_REGION"] = _TEST_LOCATION_2
+ assert initializer.global_config.location == _TEST_LOCATION_2
+ del os.environ["CLOUD_ML_REGION"]
+
+ def test_not_init_location_gets_default_location(self):
+ assert initializer.global_config.location == constants.DEFAULT_REGION
+
+ def test_init_location_with_invalid_location_raises(self):
+ with pytest.raises(ValueError):
+ initializer.global_config.init(location=_TEST_INVALID_LOCATION)
+
+ def test_init_network_sets_network(self):
+ initializer.global_config.init(network=_TEST_NETWORK)
+ assert initializer.global_config.network == _TEST_NETWORK
+
+ def test_init_service_account_sets_service_account(self):
+ initializer.global_config.init(service_account=_TEST_SERVICE_ACCOUNT)
+ assert initializer.global_config.service_account == _TEST_SERVICE_ACCOUNT
+
+ @patch.object(_experiment_tracker, "set_experiment")
+ def test_init_experiment_sets_experiment(self, set_experiment_mock):
+ initializer.global_config.init(experiment=_TEST_EXPERIMENT)
+ set_experiment_mock.assert_called_once_with(
+ experiment=_TEST_EXPERIMENT,
+ description=None,
+ backing_tensorboard=None,
+ )
+
+ @patch.object(_experiment_tracker, "set_experiment")
+ def test_init_experiment_sets_experiment_with_description(
+ self, set_experiment_mock
+ ):
+ initializer.global_config.init(
+ experiment=_TEST_EXPERIMENT, experiment_description=_TEST_DESCRIPTION
+ )
+ set_experiment_mock.assert_called_once_with(
+ experiment=_TEST_EXPERIMENT,
+ description=_TEST_DESCRIPTION,
+ backing_tensorboard=None,
+ )
+
+ @patch.object(_experiment_tracker, "set_tensorboard")
+ def test_init_with_experiment_tensorboard_id_sets_global_tensorboard(
+ self, set_tensorboard_mock
+ ):
+ creds = credentials.AnonymousCredentials()
+ initializer.global_config.init(
+ experiment_tensorboard=_TEST_TENSORBOARD_ID,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=creds,
+ )
+
+ set_tensorboard_mock.assert_called_once_with(
+ tensorboard=_TEST_TENSORBOARD_ID,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=creds,
+ )
+
+ @patch.object(_experiment_tracker, "set_tensorboard")
+ def test_init_with_experiment_tensorboard_resource_sets_global_tensorboard(
+ self, set_tensorboard_mock
+ ):
+ initializer.global_config.init(experiment_tensorboard=_TEST_TENSORBOARD_NAME)
+
+ set_tensorboard_mock.assert_called_once_with(
+ tensorboard=_TEST_TENSORBOARD_NAME,
+ project=None,
+ location=None,
+ credentials=None,
+ )
+
+ @patch.object(_experiment_tracker, "set_experiment")
+ def test_init_experiment_without_tensorboard_uses_global_tensorboard(
+ self, set_experiment_mock
+ ):
+ initializer.global_config.tensorboard = _TEST_TENSORBOARD_NAME
+
+ initializer.global_config.init(
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ set_experiment_mock.assert_called_once_with(
+ experiment=_TEST_EXPERIMENT,
+ description=None,
+ backing_tensorboard=None,
+ )
+
+ assert initializer.global_config.tensorboard == _TEST_TENSORBOARD_NAME
+
+ @patch.object(_experiment_tracker, "set_tensorboard")
+ @patch.object(_experiment_tracker, "set_experiment")
+ def test_init_experiment_tensorboard_false_does_not_set_tensorboard(
+ self, set_experiment_mock, set_tensorboard_mock
+ ):
+ initializer.global_config.tensorboard = _TEST_TENSORBOARD_NAME
+
+ initializer.global_config.init(
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=False,
+ )
+
+ set_tensorboard_mock.assert_not_called()
+
+ set_experiment_mock.assert_called_once_with(
+ experiment=_TEST_EXPERIMENT,
+ description=None,
+ backing_tensorboard=False,
+ )
+
+ def test_init_experiment_description_fail_without_experiment(self):
+ with pytest.raises(ValueError):
+ initializer.global_config.init(experiment_description=_TEST_DESCRIPTION)
+
+ def test_init_staging_bucket_sets_staging_bucket(self):
+ initializer.global_config.init(staging_bucket=_TEST_STAGING_BUCKET)
+ assert initializer.global_config.staging_bucket == _TEST_STAGING_BUCKET
+
+ def test_init_credentials_sets_credentials(self):
+ creds = credentials.AnonymousCredentials()
+ initializer.global_config.init(credentials=creds)
+ assert initializer.global_config.credentials is creds
+
+ def test_common_location_path_returns_parent(self):
+ initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ true_resource_parent = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+ assert true_resource_parent == initializer.global_config.common_location_path()
+
+ def test_common_location_path_overrides(self):
+ initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ true_resource_parent = (
+ f"projects/{_TEST_PROJECT_2}/locations/{_TEST_LOCATION_2}"
+ )
+ assert true_resource_parent == initializer.global_config.common_location_path(
+ project=_TEST_PROJECT_2, location=_TEST_LOCATION_2
+ )
+
+ def test_create_client_returns_client(self):
+ initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ client = initializer.global_config.create_client(
+ client_class=utils.ModelClientWithOverride
+ )
+ assert client._client_class is model_service_client.ModelServiceClient
+ assert isinstance(client, utils.ModelClientWithOverride)
+ assert (
+ client._transport._host == f"{_TEST_LOCATION}-{constants.API_BASE_PATH}:443"
+ )
+
+ def test_create_client_with_default_api_transport(self):
+ initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ client = initializer.global_config.create_client(
+ client_class=utils.ModelClientWithOverride
+ )
+ assert client._client_class is model_service_client.ModelServiceClient
+ assert isinstance(client, utils.ModelClientWithOverride)
+ assert client._api_transport is None
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest", None])
+ def test_create_client_with_api_transport_override(self, api_transport):
+ initializer.global_config.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, api_transport=api_transport
+ )
+ client = initializer.global_config.create_client(
+ client_class=utils.ModelClientWithOverride
+ )
+ assert client._client_class is model_service_client.ModelServiceClient
+ assert isinstance(client, utils.ModelClientWithOverride)
+ assert client._api_transport == (
+ api_transport if api_transport == "rest" else None
+ )
+
+ @pytest.mark.parametrize("api_transport", ["grpc_asyncio", "unsupported"])
+ def test_create_client_with_invalid_api_transport_override(self, api_transport):
+ with pytest.raises(ValueError):
+ initializer.global_config.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+
+ def test_create_client_overrides(self):
+ initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ creds = credentials.AnonymousCredentials()
+ client = initializer.global_config.create_client(
+ client_class=utils.ModelClientWithOverride,
+ credentials=creds,
+ location_override=_TEST_LOCATION_2,
+ prediction_client=True,
+ )
+ assert isinstance(client, utils.ModelClientWithOverride)
+ assert (
+ client._transport._host
+ == f"{_TEST_LOCATION_2}-{constants.API_BASE_PATH}:443"
+ )
+ assert client._transport._credentials == creds
+
+ def test_create_client_user_agent(self):
+ initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ client = initializer.global_config.create_client(
+ client_class=utils.ModelClientWithOverride
+ )
+
+ for wrapped_method in client._transport._wrapped_methods.values():
+ # wrapped_method._metadata looks like:
+ # [('x-goog-api-client', 'model-builder/0.3.1 gl-python/3.7.6 grpc/1.30.0 gax/1.22.2 gapic/0.3.1')]
+ user_agent = wrapped_method._metadata[0][1]
+ assert "model-builder/" in user_agent
+ assert "google.cloud.aiplatform" in user_agent
+
+ def test_create_client_user_agent_top_level_method(self):
+ initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ class SomeClass:
+ # Overriding the module since the top level method code skips test namespaces.
+ __module__ = "vertexai"
+
+ def __init__(self):
+ self._client = initializer.global_config.create_client(
+ client_class=utils.ModelClientWithOverride
+ )
+
+ for wrapped_method in SomeClass()._client._transport._wrapped_methods.values():
+ # wrapped_method._metadata looks like:
+ # [('x-goog-api-client', 'model-builder/0.3.1 gl-python/3.7.6 grpc/1.30.0 gax/1.22.2 gapic/0.3.1')]
+ user_agent = wrapped_method._metadata[0][1]
+ assert (
+ f"+{initializer._TOP_GOOGLE_CONSTRUCTOR_METHOD_TAG}+vertexai.SomeClass.__init__"
+ in user_agent
+ )
+
+ def test_create_client_appended_user_agent(self):
+ appended_user_agent = ["fake_user_agent", "another_fake_user_agent"]
+ initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ client = initializer.global_config.create_client(
+ client_class=utils.ModelClientWithOverride,
+ appended_user_agent=appended_user_agent,
+ )
+
+ for wrapped_method in client._transport._wrapped_methods.values():
+ # wrapped_method._metadata looks like:
+ # [('x-goog-api-client', 'model-builder/0.3.1 gl-python/3.7.6 grpc/1.30.0 gax/1.22.2 gapic/0.3.1')]
+ user_agent = wrapped_method._metadata[0][1]
+ assert " " + appended_user_agent[0] in user_agent
+ assert " " + appended_user_agent[1] in user_agent
+
+ def test_set_api_endpoint(self):
+ initializer.global_config.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_endpoint="test.googleapis.com",
+ )
+
+ assert initializer.global_config.api_endpoint == "test.googleapis.com"
+
+ def test_not_set_api_endpoint(self):
+ initializer.global_config.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ assert initializer.global_config.api_endpoint is None
+
+ @pytest.mark.parametrize(
+ "init_location, location_override, api_endpoint, expected_endpoint",
+ [
+ ("us-central1", None, None, "us-central1-aiplatform.googleapis.com"),
+ (
+ "us-central1",
+ "europe-west4",
+ None,
+ "europe-west4-aiplatform.googleapis.com",
+ ),
+ ("asia-east1", None, None, "asia-east1-aiplatform.googleapis.com"),
+ (
+ "asia-southeast1",
+ "australia-southeast1",
+ None,
+ "australia-southeast1-aiplatform.googleapis.com",
+ ),
+ (
+ "asia-east1",
+ None,
+ "us-central1-aiplatform.googleapis.com",
+ "us-central1-aiplatform.googleapis.com",
+ ),
+ (
+ "us-central1",
+ None,
+ "test.aiplatform.googleapis.com",
+ "test.aiplatform.googleapis.com",
+ ),
+ ],
+ )
+ def test_get_client_options(
+ self,
+ init_location: str,
+ location_override: Optional[str],
+ api_endpoint: Optional[str],
+ expected_endpoint: str,
+ ):
+ initializer.global_config.init(
+ location=init_location, api_endpoint=api_endpoint
+ )
+
+ assert (
+ initializer.global_config.get_client_options(
+ location_override=location_override
+ ).api_endpoint
+ == expected_endpoint
+ )
+
+ def test_get_client_options_with_api_override(self):
+ initializer.global_config.init(location="asia-east1")
+
+ client_options = initializer.global_config.get_client_options(
+ api_base_path_override="override.googleapis.com"
+ )
+
+ assert client_options.api_endpoint == "asia-east1-override.googleapis.com"
+
+ def test_get_resource_type(self):
+ initializer.global_config.init()
+ os.environ["VERTEX_PRODUCT"] = "COLAB_ENTERPRISE"
+ assert initializer.global_config.get_resource_type().value == (
+ "COLAB_ENTERPRISE"
+ )
+
+ initializer.global_config.init()
+ os.environ["VERTEX_PRODUCT"] = "WORKBENCH_INSTANCE"
+ assert initializer.global_config.get_resource_type().value == (
+ "WORKBENCH_INSTANCE"
+ )
+
+ initializer.global_config.init()
+ os.environ["VERTEX_PRODUCT"] = "WORKBENCH_CUSTOM_CONTAINER"
+ assert initializer.global_config.get_resource_type().value == (
+ "WORKBENCH_CUSTOM_CONTAINER"
+ )
+
+ def test_init_with_only_creds_does_not_override_set_project(self):
+ assert initializer.global_config.project is not _TEST_PROJECT_2
+ initializer.global_config.init(project=_TEST_PROJECT_2)
+
+ creds = credentials.AnonymousCredentials()
+ initializer.global_config.init(credentials=creds)
+
+ assert initializer.global_config.project == _TEST_PROJECT_2
+
+ def test_init_with_only_project_does_not_override_set_creds(self):
+ creds = credentials.AnonymousCredentials()
+ assert initializer.global_config.credentials is not creds
+ initializer.global_config.init(credentials=creds)
+
+ initializer.global_config.init(project=_TEST_PROJECT_2)
+ assert initializer.global_config.credentials is creds
+
+ def test_create_client_with_request_metadata_model_service(self):
+ global_metadata = [
+ ("global_param", "value1"),
+ ]
+ request_metadata = [
+ ("request_param", "value2"),
+ ]
+ initializer.global_config.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ request_metadata=global_metadata,
+ api_transport="rest",
+ )
+ client = initializer.global_config.create_client(
+ client_class=utils.ModelClientWithOverride
+ )
+ model_name = client.model_path(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ model="model_id",
+ )
+ with patch("requests.sessions.Session.get") as mock_get:
+ mock_get.return_value.status_code = 200
+ mock_get.return_value.content = "{}"
+ client.get_model(name=model_name, metadata=request_metadata)
+ call_kwargs = mock_get.call_args_list[0][1]
+ headers = call_kwargs["headers"]
+ for metadata_key in ["global_param", "request_param"]:
+ assert metadata_key in headers
+
+ def test_create_client_with_request_metadata_prediction_service(self):
+ global_metadata = [
+ ("global_param", "value1"),
+ ]
+ request_metadata = [
+ ("request_param", "value2"),
+ ]
+ initializer.global_config.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ request_metadata=global_metadata,
+ api_transport="rest",
+ )
+ client = initializer.global_config.create_client(
+ client_class=prediction_service_client_v1beta1.PredictionServiceClient
+ )
+ model_name = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/gemini-1.0-pro"
+ with patch("requests.sessions.Session.post") as mock_post:
+ mock_post.return_value.status_code = 200
+ mock_post.return_value.content = "{}"
+ client.generate_content(model=model_name, metadata=request_metadata)
+ call_kwargs = mock_post.call_args_list[0][1]
+ headers = call_kwargs["headers"]
+ for metadata_key in ["global_param", "request_param"]:
+ assert metadata_key in headers
+
+
+class TestThreadPool:
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize(
+ "cpu_count, expected", [(4, 20), (32, 32), (None, 4), (2, 10)]
+ )
+ def test_max_workers(self, cpu_count, expected):
+ with mock.patch.object(os, "cpu_count") as cpu_count_mock:
+ cpu_count_mock.return_value = cpu_count
+ importlib.reload(initializer)
+ assert initializer.global_pool._max_workers == expected
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_jobs.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..79f99ea7e0c4bc1b6f6b5aecf7fec15264e9e3c9
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_jobs.py
@@ -0,0 +1,1426 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import copy
+
+from unittest import mock
+from importlib import reload
+from unittest.mock import patch
+
+from google.cloud import storage
+from google.cloud import bigquery
+
+from google.api_core import operation
+from google.auth import credentials as auth_credentials
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import jobs
+from google.cloud.aiplatform import model_monitoring
+
+from google.cloud.aiplatform.compat.types import (
+ batch_prediction_job as gca_batch_prediction_job_compat,
+ explanation as gca_explanation_compat,
+ io as gca_io_compat,
+ job_state as gca_job_state_compat,
+ machine_resources as gca_machine_resources_compat,
+ manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters_compat,
+ model_deployment_monitoring_job as gca_model_deployment_monitoring_job_compat,
+ model_monitoring as gca_model_monitoring_compat,
+ batch_prediction_job_v1beta1 as gca_batch_prediction_job_v1beta1,
+ job_state_v1beta1 as gca_job_state_v1beta1,
+ model_monitoring_v1beta1 as gca_model_monitoring_v1beta1,
+ explanation_metadata_v1beta1 as gca_explanation_metadata_v1beta1,
+)
+
+from google.cloud.aiplatform.compat.services import (
+ job_service_client,
+ job_service_client_v1beta1,
+)
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
+
+import constants as test_constants
+
+# TODO(b/242108750): remove temporary logic once model monitoring for batch prediction is GA
+_TEST_API_CLIENT = job_service_client.JobServiceClient
+_TEST_API_CLIENT_BETA = job_service_client_v1beta1.JobServiceClient
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_ID = test_constants.TrainingJobConstants._TEST_ID
+_TEST_ALT_ID = "8834795523125638878"
+_TEST_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_DISPLAY_NAME
+_TEST_BQ_PROJECT_ID = "projectId"
+_TEST_BQ_DATASET_ID = "bqDatasetId"
+_TEST_BQ_TABLE_NAME = "someBqTable"
+_TEST_BQ_JOB_ID = "123459876"
+_TEST_BQ_MAX_RESULTS = 100
+_TEST_GCS_BUCKET_NAME = "my-bucket"
+_TEST_SERVICE_ACCOUNT = test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT
+
+
+_TEST_BQ_PATH = f"bq://{_TEST_BQ_PROJECT_ID}.{_TEST_BQ_DATASET_ID}"
+_TEST_GCS_BUCKET_PATH = f"gs://{_TEST_GCS_BUCKET_NAME}"
+_TEST_GCS_JSONL_SOURCE_URI = f"{_TEST_GCS_BUCKET_PATH}/bp_input_config.jsonl"
+_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT
+
+_TEST_MODEL_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_ALT_ID}"
+)
+
+_TEST_MODEL_VERSION_ID = "2"
+_TEST_VERSIONED_MODEL_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_ALT_ID}@{_TEST_MODEL_VERSION_ID}"
+
+_TEST_PUBLISHER_MODEL_NAME = (
+ f"publishers/google/models/text-model-name@{_TEST_MODEL_VERSION_ID}"
+)
+
+_TEST_BATCH_PREDICTION_JOB_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/batchPredictionJobs/{_TEST_ID}"
+_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME = "test-batch-prediction-job"
+
+_TEST_BATCH_PREDICTION_GCS_SOURCE = "gs://example-bucket/folder/instance.jsonl"
+_TEST_BATCH_PREDICTION_GCS_SOURCE_LIST = [
+ "gs://example-bucket/folder/instance1.jsonl",
+ "gs://example-bucket/folder/instance2.jsonl",
+]
+_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX = "gs://example-bucket/folder/output"
+_TEST_BATCH_PREDICTION_BQ_PREFIX = "ucaip-sample-tests"
+_TEST_BATCH_PREDICTION_BQ_DEST_PREFIX_WITH_PROTOCOL = (
+ f"bq://{_TEST_BATCH_PREDICTION_BQ_PREFIX}"
+)
+
+_TEST_MDM_JOB_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/modelDeploymentMonitoringJobs/{_TEST_ID}"
+_TEST_ENDPOINT = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}"
+)
+
+_TEST_JOB_STATE_SUCCESS = gca_job_state_compat.JobState(4)
+_TEST_JOB_STATE_RUNNING = gca_job_state_compat.JobState(3)
+_TEST_JOB_STATE_PENDING = gca_job_state_compat.JobState(2)
+
+_TEST_JOB_STATE_SUCCESS_V1BETA1 = gca_job_state_v1beta1.JobState(4)
+_TEST_JOB_STATE_RUNNING_V1BETA1 = gca_job_state_v1beta1.JobState(3)
+_TEST_JOB_STATE_PENDING_V1BETA1 = gca_job_state_v1beta1.JobState(2)
+
+_TEST_GCS_INPUT_CONFIG = gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io_compat.GcsSource(uris=[_TEST_GCS_JSONL_SOURCE_URI]),
+)
+_TEST_GCS_OUTPUT_CONFIG = (
+ gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ predictions_format="jsonl",
+ gcs_destination=gca_io_compat.GcsDestination(
+ output_uri_prefix=_TEST_GCS_BUCKET_PATH
+ ),
+ )
+)
+
+_TEST_BQ_INPUT_CONFIG = gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="bigquery",
+ bigquery_source=gca_io_compat.BigQuerySource(input_uri=_TEST_BQ_PATH),
+)
+_TEST_BQ_OUTPUT_CONFIG = (
+ gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ predictions_format="bigquery",
+ bigquery_destination=gca_io_compat.BigQueryDestination(
+ output_uri=_TEST_BQ_PATH
+ ),
+ )
+)
+
+_TEST_GCS_OUTPUT_INFO = gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo(
+ gcs_output_directory=_TEST_GCS_BUCKET_NAME
+)
+_TEST_BQ_OUTPUT_INFO = gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo(
+ bigquery_output_dataset=_TEST_BQ_PATH, bigquery_output_table=_TEST_BQ_TABLE_NAME
+)
+_TEST_BQ_OUTPUT_INFO_INCOMPLETE = (
+ gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo(
+ bigquery_output_dataset=_TEST_BQ_PATH
+ )
+)
+
+_TEST_EMPTY_OUTPUT_INFO = (
+ gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo()
+)
+
+_TEST_GCS_BLOBS = [
+ storage.Blob(name="some/path/prediction.jsonl", bucket=_TEST_GCS_BUCKET_NAME)
+]
+
+_TEST_MACHINE_TYPE = "n1-standard-4"
+_TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_P100"
+_TEST_ACCELERATOR_COUNT = 2
+_TEST_STARTING_REPLICA_COUNT = 2
+_TEST_MAX_REPLICA_COUNT = 12
+_TEST_BATCH_SIZE = 16
+
+_TEST_LABEL = {"team": "experimentation", "trial_id": "x435"}
+
+_TEST_EXPLANATION_METADATA = aiplatform.explain.ExplanationMetadata(
+ inputs={
+ "features": {
+ "input_tensor_name": "dense_input",
+ "encoding": "BAG_OF_FEATURES",
+ "modality": "numeric",
+ "index_feature_mapping": ["abc", "def", "ghj"],
+ }
+ },
+ outputs={"medv": {"output_tensor_name": "dense_2"}},
+)
+_TEST_EXPLANATION_PARAMETERS = aiplatform.explain.ExplanationParameters(
+ {"sampled_shapley_attribution": {"path_count": 10}}
+)
+
+_TEST_EXPLANATION_METADATA_V1BETA1 = gca_explanation_metadata_v1beta1.ExplanationMetadata(
+ inputs={
+ "features": gca_explanation_metadata_v1beta1.ExplanationMetadata.InputMetadata(
+ input_tensor_name="dense_input",
+ encoding=gca_explanation_metadata_v1beta1.ExplanationMetadata.InputMetadata.Encoding.BAG_OF_FEATURES,
+ modality="numeric",
+ index_feature_mapping=["abc", "def", "ghj"],
+ )
+ },
+ outputs={
+ "medv": gca_explanation_metadata_v1beta1.ExplanationMetadata.OutputMetadata(
+ output_tensor_name="dense_2"
+ )
+ },
+)
+
+_TEST_JOB_GET_METHOD_NAME = "get_custom_job"
+_TEST_JOB_LIST_METHOD_NAME = "list_custom_job"
+_TEST_JOB_CANCEL_METHOD_NAME = "cancel_custom_job"
+_TEST_JOB_DELETE_METHOD_NAME = "delete_custom_job"
+_TEST_JOB_RESOURCE_NAME = f"{_TEST_PARENT}/customJobs/{_TEST_ID}"
+
+_TEST_MDM_JOB_DRIFT_DETECTION_CONFIG = {"TEST_KEY": 0.01}
+_TEST_MDM_USER_EMAIL = "TEST_EMAIL"
+_TEST_MDM_SAMPLE_RATE = 0.5
+_TEST_MDM_LABEL = {"TEST KEY": "TEST VAL"}
+_TEST_LOG_TTL_IN_DAYS = 1
+_TEST_MDM_NEW_NAME = "NEW_NAME"
+
+_TEST_MDM_OLD_JOB = (
+ gca_model_deployment_monitoring_job_compat.ModelDeploymentMonitoringJob(
+ name=_TEST_MDM_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ endpoint=_TEST_ENDPOINT,
+ state=_TEST_JOB_STATE_RUNNING,
+ )
+)
+
+_TEST_MDM_EXPECTED_NEW_JOB = gca_model_deployment_monitoring_job_compat.ModelDeploymentMonitoringJob(
+ name=_TEST_MDM_JOB_NAME,
+ display_name=_TEST_MDM_NEW_NAME,
+ endpoint=_TEST_ENDPOINT,
+ state=_TEST_JOB_STATE_RUNNING,
+ model_deployment_monitoring_objective_configs=[
+ gca_model_deployment_monitoring_job_compat.ModelDeploymentMonitoringObjectiveConfig(
+ deployed_model_id=model_id,
+ objective_config=gca_model_monitoring_compat.ModelMonitoringObjectiveConfig(
+ prediction_drift_detection_config=gca_model_monitoring_compat.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig(
+ drift_thresholds={
+ "TEST_KEY": gca_model_monitoring_compat.ThresholdConfig(
+ value=0.01
+ )
+ }
+ )
+ ),
+ )
+ for model_id in [
+ model.id for model in test_constants.EndpointConstants._TEST_DEPLOYED_MODELS
+ ]
+ ],
+ logging_sampling_strategy=gca_model_monitoring_compat.SamplingStrategy(
+ random_sample_config=gca_model_monitoring_compat.SamplingStrategy.RandomSampleConfig(
+ sample_rate=_TEST_MDM_SAMPLE_RATE
+ )
+ ),
+ labels=_TEST_MDM_LABEL,
+ model_monitoring_alert_config=gca_model_monitoring_compat.ModelMonitoringAlertConfig(
+ email_alert_config=gca_model_monitoring_compat.ModelMonitoringAlertConfig.EmailAlertConfig(
+ user_emails=[_TEST_MDM_USER_EMAIL]
+ )
+ ),
+ model_deployment_monitoring_schedule_config=gca_model_deployment_monitoring_job_compat.ModelDeploymentMonitoringScheduleConfig(
+ monitor_interval=duration_pb2.Duration(seconds=3600)
+ ),
+ log_ttl=duration_pb2.Duration(seconds=_TEST_LOG_TTL_IN_DAYS * 86400),
+ enable_monitoring_pipeline_logs=True,
+)
+
+_TEST_THRESHOLD_KEY = "TEST_KEY"
+_TEST_THRESHOLD_VAL = 0.1
+_TEST_MODEL_MONITORING_SKEW_CFG = gca_model_monitoring_v1beta1.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig(
+ skew_thresholds={
+ _TEST_THRESHOLD_KEY: gca_model_monitoring_v1beta1.ThresholdConfig(
+ value=_TEST_THRESHOLD_VAL
+ )
+ },
+ attribution_score_skew_thresholds={
+ _TEST_THRESHOLD_KEY: gca_model_monitoring_v1beta1.ThresholdConfig(
+ value=_TEST_THRESHOLD_VAL
+ )
+ },
+)
+
+_TEST_MODEL_MONITORING_DRIFT_CFG = gca_model_monitoring_v1beta1.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig(
+ drift_thresholds={
+ _TEST_THRESHOLD_KEY: gca_model_monitoring_v1beta1.ThresholdConfig(
+ value=_TEST_THRESHOLD_VAL
+ )
+ },
+ attribution_score_drift_thresholds={
+ _TEST_THRESHOLD_KEY: gca_model_monitoring_v1beta1.ThresholdConfig(
+ value=_TEST_THRESHOLD_VAL
+ )
+ },
+)
+
+_TEST_MODEL_MONITORING_TRAINING_DATASET = (
+ gca_model_monitoring_v1beta1.ModelMonitoringObjectiveConfig.TrainingDataset(
+ dataset="", target_field=""
+ )
+)
+_TEST_MODEL_MONITORING_ALERT_CFG = gca_model_monitoring_v1beta1.ModelMonitoringAlertConfig(
+ email_alert_config=gca_model_monitoring_v1beta1.ModelMonitoringAlertConfig.EmailAlertConfig(
+ user_emails=[""]
+ ),
+ enable_logging=False,
+)
+
+_TEST_MODEL_MONITORING_CFG = gca_model_monitoring_v1beta1.ModelMonitoringConfig(
+ objective_configs=[
+ gca_model_monitoring_v1beta1.ModelMonitoringObjectiveConfig(
+ training_dataset=_TEST_MODEL_MONITORING_TRAINING_DATASET,
+ training_prediction_skew_detection_config=_TEST_MODEL_MONITORING_SKEW_CFG,
+ prediction_drift_detection_config=_TEST_MODEL_MONITORING_DRIFT_CFG,
+ explanation_config=gca_model_monitoring_v1beta1.ModelMonitoringObjectiveConfig.ExplanationConfig(
+ enable_feature_attributes=True
+ ),
+ )
+ ],
+ alert_config=_TEST_MODEL_MONITORING_ALERT_CFG,
+ analysis_instance_schema_uri="",
+)
+
+# TODO(b/171333554): Move reusable test fixtures to conftest.py file
+
+
+@pytest.fixture
+def fake_job_getter_mock():
+ with patch.object(
+ _TEST_API_CLIENT, _TEST_JOB_GET_METHOD_NAME, create=True
+ ) as fake_job_getter_mock:
+ fake_job_getter_mock.return_value = {}
+ yield fake_job_getter_mock
+
+
+@pytest.fixture
+def fake_job_cancel_mock():
+ with patch.object(
+ _TEST_API_CLIENT, _TEST_JOB_CANCEL_METHOD_NAME, create=True
+ ) as fake_job_cancel_mock:
+ yield fake_job_cancel_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestJob:
+ class FakeJob(jobs._Job):
+ _job_type = "custom-job"
+ _resource_noun = "customJobs"
+ _getter_method = _TEST_JOB_GET_METHOD_NAME
+ _list_method = _TEST_JOB_LIST_METHOD_NAME
+ _cancel_method = _TEST_JOB_CANCEL_METHOD_NAME
+ _delete_method = _TEST_JOB_DELETE_METHOD_NAME
+ _parse_resource_name_method = "parse_custom_job_path"
+ _format_resource_name_method = "custom_job_path"
+ resource_name = _TEST_JOB_RESOURCE_NAME
+
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ # Unit Tests
+ def test_init_job_class(self):
+ """
+ Raises TypeError since abstract property '_getter_method' is not set,
+ the _Job class should only be instantiated through a child class.
+ """
+ with pytest.raises(TypeError):
+ jobs._Job(job_name=_TEST_BATCH_PREDICTION_JOB_NAME)
+
+ @pytest.mark.usefixtures("fake_job_getter_mock")
+ def test_cancel_mock_job(self, fake_job_cancel_mock):
+ """Create a fake `_Job` child class, and ensure the high-level cancel method works"""
+ fake_job = self.FakeJob(job_name=_TEST_JOB_RESOURCE_NAME)
+ fake_job.cancel()
+
+ fake_job_cancel_mock.assert_called_once_with(name=_TEST_JOB_RESOURCE_NAME)
+
+
+@pytest.fixture
+def get_batch_prediction_job_mock():
+ with patch.object(
+ _TEST_API_CLIENT, "get_batch_prediction_job"
+ ) as get_batch_prediction_job_mock:
+ get_batch_prediction_job_mock.side_effect = [
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_PENDING,
+ ),
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_RUNNING,
+ ),
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ ),
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ ),
+ ]
+ yield get_batch_prediction_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_v1beta1_mock():
+ with patch.object(
+ _TEST_API_CLIENT_BETA, "get_batch_prediction_job"
+ ) as get_batch_prediction_job_v1beta1_mock:
+ get_batch_prediction_job_v1beta1_mock.side_effect = [
+ gca_batch_prediction_job_v1beta1.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_PENDING_V1BETA1,
+ ),
+ gca_batch_prediction_job_v1beta1.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_RUNNING_V1BETA1,
+ ),
+ gca_batch_prediction_job_v1beta1.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_SUCCESS_V1BETA1,
+ ),
+ gca_batch_prediction_job_v1beta1.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_SUCCESS_V1BETA1,
+ ),
+ ]
+ yield get_batch_prediction_job_v1beta1_mock
+
+
+@pytest.fixture
+def create_batch_prediction_job_mock():
+ with mock.patch.object(
+ _TEST_API_CLIENT, "create_batch_prediction_job"
+ ) as create_batch_prediction_job_mock:
+ create_batch_prediction_job_mock.return_value = (
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ )
+ )
+ yield create_batch_prediction_job_mock
+
+
+@pytest.fixture
+def create_batch_prediction_job_v1beta1_mock():
+ with mock.patch.object(
+ _TEST_API_CLIENT_BETA, "create_batch_prediction_job"
+ ) as create_batch_prediction_job_v1beta1_mock:
+ create_batch_prediction_job_v1beta1_mock.return_value = (
+ gca_batch_prediction_job_v1beta1.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_SUCCESS_V1BETA1,
+ )
+ )
+ yield create_batch_prediction_job_v1beta1_mock
+
+
+@pytest.fixture
+def create_batch_prediction_job_mock_fail():
+ with mock.patch.object(
+ _TEST_API_CLIENT, "create_batch_prediction_job"
+ ) as create_batch_prediction_job_mock:
+ create_batch_prediction_job_mock.side_effect = RuntimeError("Mock fail")
+ yield create_batch_prediction_job_mock
+
+
+@pytest.fixture
+def create_batch_prediction_job_with_explanations_mock():
+ with mock.patch.object(
+ _TEST_API_CLIENT, "create_batch_prediction_job"
+ ) as create_batch_prediction_job_mock:
+ create_batch_prediction_job_mock.return_value = (
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ )
+ )
+ yield create_batch_prediction_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_gcs_output_mock():
+ with patch.object(
+ _TEST_API_CLIENT, "get_batch_prediction_job"
+ ) as get_batch_prediction_job_mock:
+ get_batch_prediction_job_mock.return_value = (
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=_TEST_GCS_INPUT_CONFIG,
+ output_config=_TEST_GCS_OUTPUT_CONFIG,
+ output_info=_TEST_GCS_OUTPUT_INFO,
+ state=_TEST_JOB_STATE_SUCCESS,
+ )
+ )
+ yield get_batch_prediction_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_bq_output_mock():
+ with patch.object(
+ _TEST_API_CLIENT, "get_batch_prediction_job"
+ ) as get_batch_prediction_job_mock:
+ get_batch_prediction_job_mock.return_value = (
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=_TEST_GCS_INPUT_CONFIG,
+ output_config=_TEST_BQ_OUTPUT_CONFIG,
+ output_info=_TEST_BQ_OUTPUT_INFO,
+ state=_TEST_JOB_STATE_SUCCESS,
+ )
+ )
+ yield get_batch_prediction_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_incomplete_bq_output_mock():
+ with patch.object(
+ _TEST_API_CLIENT, "get_batch_prediction_job"
+ ) as get_batch_prediction_job_mock:
+ get_batch_prediction_job_mock.return_value = (
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=_TEST_GCS_INPUT_CONFIG,
+ output_config=_TEST_BQ_OUTPUT_CONFIG,
+ output_info=_TEST_BQ_OUTPUT_INFO_INCOMPLETE,
+ state=_TEST_JOB_STATE_SUCCESS,
+ )
+ )
+ yield get_batch_prediction_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_empty_output_mock():
+ with patch.object(
+ _TEST_API_CLIENT, "get_batch_prediction_job"
+ ) as get_batch_prediction_job_mock:
+ get_batch_prediction_job_mock.return_value = (
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=_TEST_GCS_INPUT_CONFIG,
+ output_config=_TEST_BQ_OUTPUT_CONFIG,
+ output_info=_TEST_EMPTY_OUTPUT_INFO,
+ state=_TEST_JOB_STATE_SUCCESS,
+ )
+ )
+ yield get_batch_prediction_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_running_bq_output_mock():
+ with patch.object(
+ _TEST_API_CLIENT, "get_batch_prediction_job"
+ ) as get_batch_prediction_job_mock:
+ get_batch_prediction_job_mock.return_value = (
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=_TEST_GCS_INPUT_CONFIG,
+ output_config=_TEST_BQ_OUTPUT_CONFIG,
+ output_info=_TEST_BQ_OUTPUT_INFO,
+ state=_TEST_JOB_STATE_RUNNING,
+ )
+ )
+ yield get_batch_prediction_job_mock
+
+
+@pytest.fixture
+def storage_list_blobs_mock():
+ with patch.object(storage.Client, "list_blobs") as list_blobs_mock:
+ list_blobs_mock.return_value = _TEST_GCS_BLOBS
+ yield list_blobs_mock
+
+
+@pytest.fixture
+def bq_list_rows_mock():
+ with patch.object(bigquery.Client, "list_rows") as list_rows_mock:
+ list_rows_mock.return_value = mock.Mock(bigquery.table.RowIterator)
+ yield list_rows_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestBatchPredictionJob:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_batch_prediction_job(self, get_batch_prediction_job_mock):
+ jobs.BatchPredictionJob(
+ batch_prediction_job_name=_TEST_BATCH_PREDICTION_JOB_NAME
+ )
+ get_batch_prediction_job_mock.assert_called_once_with(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_batch_prediction_job_status(self, get_batch_prediction_job_mock):
+ bp = jobs.BatchPredictionJob(
+ batch_prediction_job_name=_TEST_BATCH_PREDICTION_JOB_NAME
+ )
+
+ # get_batch_prediction() is called again here
+ bp_job_state = bp.state
+
+ assert get_batch_prediction_job_mock.call_count == 2
+ assert bp_job_state == _TEST_JOB_STATE_RUNNING
+
+ get_batch_prediction_job_mock.assert_called_with(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_batch_prediction_job_done_get(self, get_batch_prediction_job_mock):
+ bp = jobs.BatchPredictionJob(
+ batch_prediction_job_name=_TEST_BATCH_PREDICTION_JOB_NAME
+ )
+
+ assert bp.done() is False
+ assert get_batch_prediction_job_mock.call_count == 2
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_gcs_output_mock")
+ def test_batch_prediction_iter_dirs_gcs(self, storage_list_blobs_mock):
+ bp = jobs.BatchPredictionJob(
+ batch_prediction_job_name=_TEST_BATCH_PREDICTION_JOB_NAME
+ )
+ blobs = bp.iter_outputs()
+
+ storage_list_blobs_mock.assert_called_once_with(
+ _TEST_GCS_OUTPUT_INFO.gcs_output_directory, prefix=None
+ )
+
+ assert blobs == _TEST_GCS_BLOBS
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_bq_output_mock")
+ def test_batch_prediction_iter_dirs_bq(self, bq_list_rows_mock):
+ bp = jobs.BatchPredictionJob(
+ batch_prediction_job_name=_TEST_BATCH_PREDICTION_JOB_NAME
+ )
+
+ bp.iter_outputs()
+
+ bq_list_rows_mock.assert_called_once_with(
+ table=f"{_TEST_BQ_PROJECT_ID}.{_TEST_BQ_DATASET_ID}.{_TEST_BQ_TABLE_NAME}",
+ max_results=_TEST_BQ_MAX_RESULTS,
+ )
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_incomplete_bq_output_mock")
+ def test_batch_prediction_iter_dirs_bq_raises_on_empty(self, bq_list_rows_mock):
+ bp = jobs.BatchPredictionJob(
+ batch_prediction_job_name=_TEST_BATCH_PREDICTION_JOB_NAME
+ )
+ with pytest.raises(RuntimeError) as e:
+ bp.iter_outputs()
+ assert e.match(
+ regexp=(
+ "A BigQuery table with predictions was not found,"
+ " this might be due to errors. Visit http"
+ )
+ )
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_running_bq_output_mock")
+ def test_batch_prediction_iter_dirs_while_running(self):
+ """
+ Raises RuntimeError since outputs cannot be read while BatchPredictionJob is still running
+ """
+ with pytest.raises(RuntimeError):
+ bp = jobs.BatchPredictionJob(
+ batch_prediction_job_name=_TEST_BATCH_PREDICTION_JOB_NAME
+ )
+ bp.iter_outputs()
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_empty_output_mock")
+ def test_batch_prediction_iter_dirs_invalid_output_info(self):
+ """
+ Raises NotImplementedError since the BatchPredictionJob's output_info
+ contains no output GCS directory or BQ dataset.
+ """
+ with pytest.raises(NotImplementedError):
+ bp = jobs.BatchPredictionJob(
+ batch_prediction_job_name=_TEST_BATCH_PREDICTION_JOB_NAME
+ )
+ bp.iter_outputs()
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_gcs_source_and_dest(
+ self, create_batch_prediction_job_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=sync,
+ create_request_timeout=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ batch_prediction_job.wait_for_resource_creation()
+
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io_compat.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io_compat.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
+ ),
+ predictions_format="jsonl",
+ ),
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_gcs_source_and_dest_with_timeout(
+ self, create_batch_prediction_job_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=sync,
+ create_request_timeout=180.0,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ batch_prediction_job.wait_for_resource_creation()
+
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io_compat.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io_compat.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
+ ),
+ predictions_format="jsonl",
+ ),
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=180.0,
+ )
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_gcs_source_and_dest_with_timeout_not_explicitly_set(
+ self, create_batch_prediction_job_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=sync,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ batch_prediction_job.wait_for_resource_creation()
+
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io_compat.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io_compat.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
+ ),
+ predictions_format="jsonl",
+ ),
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_job_done_create(self, create_batch_prediction_job_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=False,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ batch_prediction_job.wait_for_resource_creation()
+
+ assert batch_prediction_job.done() is False
+
+ batch_prediction_job.wait()
+
+ assert batch_prediction_job.done() is True
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_job_submit(self, create_batch_prediction_job_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = jobs.BatchPredictionJob._submit_impl(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ batch_prediction_job.wait_for_resource_creation()
+ assert batch_prediction_job.done() is False
+ assert (
+ batch_prediction_job.state
+ != jobs.gca_job_state.JobState.JOB_STATE_SUCCEEDED
+ )
+
+ batch_prediction_job.wait_for_completion()
+ assert (
+ batch_prediction_job.state
+ == jobs.gca_job_state.JobState.JOB_STATE_SUCCEEDED
+ )
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_gcs_source_bq_dest(
+ self, create_batch_prediction_job_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ batch_prediction_job = jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ sync=sync,
+ create_request_timeout=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ batch_prediction_job.wait_for_resource_creation()
+
+ batch_prediction_job.wait()
+
+ assert (
+ batch_prediction_job.output_info
+ == gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo()
+ )
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io_compat.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ bigquery_destination=gca_io_compat.BigQueryDestination(
+ output_uri=_TEST_BATCH_PREDICTION_BQ_DEST_PREFIX_WITH_PROTOCOL
+ ),
+ predictions_format="bigquery",
+ ),
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_with_all_args(
+ self, create_batch_prediction_job_with_explanations_mock, sync
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ creds = auth_credentials.AnonymousCredentials()
+
+ batch_prediction_job = jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ predictions_format="csv",
+ model_parameters={},
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ starting_replica_count=_TEST_STARTING_REPLICA_COUNT,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ generate_explanation=True,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ labels=_TEST_LABEL,
+ credentials=creds,
+ sync=sync,
+ create_request_timeout=None,
+ batch_size=_TEST_BATCH_SIZE,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ batch_prediction_job.wait_for_resource_creation()
+
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io_compat.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io_compat.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
+ ),
+ predictions_format="csv",
+ ),
+ dedicated_resources=gca_machine_resources_compat.BatchDedicatedResources(
+ machine_spec=gca_machine_resources_compat.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ ),
+ starting_replica_count=_TEST_STARTING_REPLICA_COUNT,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ ),
+ manual_batch_tuning_parameters=gca_manual_batch_tuning_parameters_compat.ManualBatchTuningParameters(
+ batch_size=_TEST_BATCH_SIZE
+ ),
+ generate_explanation=True,
+ explanation_spec=gca_explanation_compat.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ labels=_TEST_LABEL,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ create_batch_prediction_job_with_explanations_mock.assert_called_once_with(
+ parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}",
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @mock.patch.object(jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_batch_prediction_job_v1beta1_mock")
+ def test_batch_predict_with_all_args_and_model_monitoring(
+ self, create_batch_prediction_job_v1beta1_mock, sync
+ ):
+ from google.cloud.aiplatform.compat.types import (
+ io_v1beta1 as gca_io_compat,
+ batch_prediction_job_v1beta1 as gca_batch_prediction_job_compat,
+ machine_resources_v1beta1 as gca_machine_resources_compat,
+ manual_batch_tuning_parameters_v1beta1 as gca_manual_batch_tuning_parameters_compat,
+ explanation_v1beta1 as gca_explanation_compat,
+ )
+
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ creds = auth_credentials.AnonymousCredentials()
+ mm_obj_cfg = model_monitoring.ObjectiveConfig(
+ skew_detection_config=model_monitoring.SkewDetectionConfig(
+ data_source="",
+ target_field="",
+ skew_thresholds={_TEST_THRESHOLD_KEY: _TEST_THRESHOLD_VAL},
+ attribute_skew_thresholds={_TEST_THRESHOLD_KEY: _TEST_THRESHOLD_VAL},
+ ),
+ drift_detection_config=model_monitoring.DriftDetectionConfig(
+ drift_thresholds={_TEST_THRESHOLD_KEY: _TEST_THRESHOLD_VAL},
+ attribute_drift_thresholds={_TEST_THRESHOLD_KEY: _TEST_THRESHOLD_VAL},
+ ),
+ explanation_config=model_monitoring.ExplanationConfig(),
+ )
+ mm_alert_cfg = model_monitoring.EmailAlertConfig(user_emails=[""])
+ batch_prediction_job = jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ predictions_format="csv",
+ model_parameters={},
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ starting_replica_count=_TEST_STARTING_REPLICA_COUNT,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ generate_explanation=True,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ labels=_TEST_LABEL,
+ credentials=creds,
+ sync=sync,
+ create_request_timeout=None,
+ batch_size=_TEST_BATCH_SIZE,
+ model_monitoring_objective_config=mm_obj_cfg,
+ model_monitoring_alert_config=mm_alert_cfg,
+ analysis_instance_schema_uri="",
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ batch_prediction_job.wait_for_resource_creation()
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ model=_TEST_MODEL_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io_compat.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io_compat.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
+ ),
+ predictions_format="csv",
+ ),
+ dedicated_resources=gca_machine_resources_compat.BatchDedicatedResources(
+ machine_spec=gca_machine_resources_compat.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ ),
+ starting_replica_count=_TEST_STARTING_REPLICA_COUNT,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ ),
+ manual_batch_tuning_parameters=gca_manual_batch_tuning_parameters_compat.ManualBatchTuningParameters(
+ batch_size=_TEST_BATCH_SIZE
+ ),
+ explanation_spec=gca_explanation_compat.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA_V1BETA1,
+ ),
+ generate_explanation=True,
+ model_monitoring_config=_TEST_MODEL_MONITORING_CFG,
+ labels=_TEST_LABEL,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ create_batch_prediction_job_v1beta1_mock.assert_called_once_with(
+ parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}",
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("create_batch_prediction_job_mock_fail")
+ def test_batch_predict_create_fails(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ batch_prediction_job = jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ sync=False,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ batch_prediction_job.wait()
+ assert e.match(regexp=r"Mock fail")
+
+ with pytest.raises(RuntimeError) as e:
+ batch_prediction_job.output_info
+ assert e.match(
+ regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ batch_prediction_job.partial_failures
+ assert e.match(
+ regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ batch_prediction_job.completion_stats
+ assert e.match(
+ regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ batch_prediction_job.iter_outputs()
+ assert e.match(
+ regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail"
+ )
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_no_source(self, create_batch_prediction_job_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call without source
+ with pytest.raises(ValueError) as e:
+ jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ assert e.match(regexp=r"source")
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_two_sources(self, create_batch_prediction_job_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call with two sources
+ with pytest.raises(ValueError) as e:
+ jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ bigquery_source=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ assert e.match(regexp=r"source")
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_no_destination(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call without destination
+ with pytest.raises(ValueError) as e:
+ jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ assert e.match(regexp=r"destination")
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_wrong_instance_format(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call
+ with pytest.raises(ValueError) as e:
+ jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ instances_format="wrong",
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ assert e.match(regexp=r"accepted instances format")
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_wrong_prediction_format(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call
+ with pytest.raises(ValueError) as e:
+ jobs.BatchPredictionJob.create(
+ model_name=_TEST_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ predictions_format="wrong",
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ assert e.match(regexp=r"accepted prediction format")
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_job_with_versioned_model(
+ self, create_batch_prediction_job_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call
+ _ = jobs.BatchPredictionJob.create(
+ model_name=_TEST_VERSIONED_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=True,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ assert (
+ create_batch_prediction_job_mock.call_args_list[0][1][
+ "batch_prediction_job"
+ ].model
+ == _TEST_VERSIONED_MODEL_NAME
+ )
+
+ # Make SDK batch_predict method call
+ _ = jobs.BatchPredictionJob.create(
+ model_name=f"{_TEST_ALT_ID}@{_TEST_MODEL_VERSION_ID}",
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=True,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ assert (
+ create_batch_prediction_job_mock.call_args_list[0][1][
+ "batch_prediction_job"
+ ].model
+ == _TEST_VERSIONED_MODEL_NAME
+ )
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_mock")
+ def test_batch_predict_job_with_publisher_model(
+ self, create_batch_prediction_job_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ # Make SDK batch_predict method call
+ _ = jobs.BatchPredictionJob.create(
+ model_name=_TEST_PUBLISHER_MODEL_NAME,
+ job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=True,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ assert (
+ create_batch_prediction_job_mock.call_args_list[0][1][
+ "batch_prediction_job"
+ ].model
+ == _TEST_PUBLISHER_MODEL_NAME
+ )
+
+
+@pytest.fixture
+def get_mdm_job_mock():
+ with mock.patch.object(
+ _TEST_API_CLIENT, "get_model_deployment_monitoring_job"
+ ) as get_mdm_job_mock:
+ get_mdm_job_mock.side_effect = [
+ _TEST_MDM_OLD_JOB,
+ _TEST_MDM_OLD_JOB,
+ _TEST_MDM_OLD_JOB,
+ _TEST_MDM_EXPECTED_NEW_JOB,
+ ]
+ yield get_mdm_job_mock
+
+
+@pytest.fixture
+def update_mdm_job_mock(get_endpoint_with_models_mock): # noqa: F811
+ with mock.patch.object(
+ _TEST_API_CLIENT, "update_model_deployment_monitoring_job"
+ ) as update_mdm_job_mock:
+ update_mdm_job_lro_mock = mock.Mock(operation.Operation)
+ update_mdm_job_lro_mock.result.return_value = _TEST_MDM_EXPECTED_NEW_JOB
+ update_mdm_job_mock.return_value = update_mdm_job_lro_mock
+ yield update_mdm_job_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestModelDeploymentMonitoringJob:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_update_mdm_job(self, get_mdm_job_mock, update_mdm_job_mock):
+ job = jobs.ModelDeploymentMonitoringJob(
+ model_deployment_monitoring_job_name=_TEST_MDM_JOB_NAME
+ )
+ old_job = copy.deepcopy(job._gca_resource)
+ drift_detection_config = aiplatform.model_monitoring.DriftDetectionConfig(
+ drift_thresholds=_TEST_MDM_JOB_DRIFT_DETECTION_CONFIG
+ )
+ schedule_config = aiplatform.model_monitoring.ScheduleConfig(monitor_interval=1)
+ alert_config = aiplatform.model_monitoring.EmailAlertConfig(
+ user_emails=[_TEST_MDM_USER_EMAIL]
+ )
+ sampling_strategy = aiplatform.model_monitoring.RandomSampleConfig(
+ sample_rate=_TEST_MDM_SAMPLE_RATE
+ )
+ labels = _TEST_MDM_LABEL
+ log_ttl = _TEST_LOG_TTL_IN_DAYS
+ display_name = _TEST_MDM_NEW_NAME
+ new_config = aiplatform.model_monitoring.ObjectiveConfig(
+ drift_detection_config=drift_detection_config
+ )
+ job.update(
+ display_name=display_name,
+ schedule_config=schedule_config,
+ alert_config=alert_config,
+ logging_sampling_strategy=sampling_strategy,
+ labels=labels,
+ bigquery_tables_log_ttl=log_ttl,
+ enable_monitoring_pipeline_logs=True,
+ objective_configs=new_config,
+ )
+ new_job = job._gca_resource
+ assert old_job != new_job
+ assert new_job.display_name == display_name
+ assert new_job.logging_sampling_strategy == sampling_strategy.as_proto()
+ assert (
+ new_job.model_deployment_monitoring_schedule_config
+ == schedule_config.as_proto()
+ )
+ assert new_job.labels == labels
+ assert new_job.model_monitoring_alert_config == alert_config.as_proto()
+ assert new_job.log_ttl.days == _TEST_LOG_TTL_IN_DAYS
+ assert new_job.enable_monitoring_pipeline_logs
+ assert (
+ new_job.model_deployment_monitoring_objective_configs[
+ 0
+ ].objective_config.prediction_drift_detection_config
+ == drift_detection_config.as_proto()
+ )
+ get_mdm_job_mock.assert_called_with(
+ name=_TEST_MDM_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+ update_mdm_job_mock.assert_called_once_with(
+ model_deployment_monitoring_job=new_job,
+ update_mask=field_mask_pb2.FieldMask(
+ paths=[
+ "display_name",
+ "model_deployment_monitoring_schedule_config",
+ "model_monitoring_alert_config",
+ "logging_sampling_strategy",
+ "labels",
+ "log_ttl",
+ "enable_monitoring_pipeline_logs",
+ "model_deployment_monitoring_objective_configs",
+ ]
+ ),
+ timeout=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_language_models.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_language_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..abc767c84d45186291dc3992e78c96ea72f6ec8a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_language_models.py
@@ -0,0 +1,5269 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=protected-access,bad-continuation
+import dataclasses
+import json
+import pytest
+from importlib import reload
+from unittest import mock
+from urllib import request as urllib_request
+from typing import Tuple
+
+import pandas as pd
+
+import google.auth
+from google.cloud import storage
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import _streaming_prediction
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.utils import gcs_utils
+import constants as test_constants
+
+from google.cloud.aiplatform.compat.services import (
+ model_garden_service_client,
+ endpoint_service_client,
+ model_service_client,
+ pipeline_service_client,
+)
+from google.cloud.aiplatform.compat.services import (
+ prediction_service_client,
+ prediction_service_async_client,
+)
+from google.cloud.aiplatform.compat.types import (
+ artifact as gca_artifact,
+ prediction_service as gca_prediction_service,
+ context as gca_context,
+ endpoint_v1 as gca_endpoint,
+ pipeline_job as gca_pipeline_job,
+ pipeline_state as gca_pipeline_state,
+ deployed_model_ref_v1,
+)
+from google.cloud.aiplatform.compat.types import (
+ publisher_model as gca_publisher_model,
+ model as gca_model,
+)
+
+from google.cloud.aiplatform_v1beta1.services.prediction_service import (
+ client as prediction_service_client_v1beta1,
+)
+from google.cloud.aiplatform_v1beta1.types import (
+ prediction_service as gca_prediction_service_v1beta1,
+)
+
+from vertexai.preview import (
+ language_models as preview_language_models,
+)
+from vertexai import language_models
+from vertexai.language_models import _language_models
+from vertexai.language_models import (
+ _evaluatable_language_models,
+)
+from vertexai.language_models import GroundingSource
+from google.cloud.aiplatform_v1 import Execution as GapicExecution
+from google.cloud.aiplatform.compat.types import (
+ encryption_spec as gca_encryption_spec,
+)
+
+
+try:
+ from google.auth.aio.credentials import (
+ AnonymousCredentials as AsyncAnonymousCredentials,
+ )
+
+ _HAS_ASYNC_CRED_DEPS = True
+except ImportError:
+ _HAS_ASYNC_CRED_DEPS = False
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+
+# CMEK encryption
+_TEST_ENCRYPTION_KEY_NAME = "key_1234"
+_TEST_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_KEY_NAME
+)
+
+_TEXT_BISON_PUBLISHER_MODEL_DICT = {
+ "name": "publishers/google/models/text-bison",
+ "version_id": "001",
+ "open_source_category": "PROPRIETARY",
+ "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA,
+ "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/text-bison@001",
+ "predict_schemata": {
+ "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/text_generation_1.0.0.yaml",
+ "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/text_generation_1.0.0.yaml",
+ "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/text_generation_1.0.0.yaml",
+ },
+}
+_TEXT_GECKO_PUBLISHER_MODEL_DICT = {
+ "name": "publishers/google/models/textembedding-gecko",
+ "version_id": "003",
+ "open_source_category": "PROPRIETARY",
+ "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA,
+ "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/textembedding-gecko@003",
+ "predict_schemata": {
+ "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/text_embedding_1.0.0.yaml",
+ "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/text_embedding_1.0.0.yaml",
+ "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/text_embedding_1.0.0.yaml",
+ },
+}
+_CHAT_BISON_PUBLISHER_MODEL_DICT = {
+ "name": "publishers/google/models/chat-bison",
+ "version_id": "001",
+ "open_source_category": "PROPRIETARY",
+ "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA,
+ "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/chat-bison@001",
+ "predict_schemata": {
+ "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/chat_generation_1.0.0.yaml",
+ "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/chat_generation_1.0.0.yaml",
+ "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/chat_generation_1.0.0.yaml",
+ },
+}
+
+_CODECHAT_BISON_PUBLISHER_MODEL_DICT = {
+ "name": "publishers/google/models/codechat-bison",
+ "version_id": "001",
+ "open_source_category": "PROPRIETARY",
+ "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA,
+ "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/codechat-bison@001",
+ "predict_schemata": {
+ "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/codechat_generation_1.0.0.yaml",
+ "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/codechat_generation_1.0.0.yaml",
+ "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/codechat_generation_1.0.0.yaml",
+ },
+}
+
+_CODE_GENERATION_BISON_PUBLISHER_MODEL_DICT = {
+ "name": "publishers/google/models/code-bison",
+ "version_id": "001",
+ "open_source_category": "PROPRIETARY",
+ "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA,
+ "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/code-bison@001",
+ "predict_schemata": {
+ "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/code_generation_1.0.0.yaml",
+ "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/code_generation_1.0.0.yaml",
+ "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/code_generation_1.0.0.yaml",
+ },
+}
+
+_CODE_COMPLETION_BISON_PUBLISHER_MODEL_DICT = {
+ "name": "publishers/google/models/code-gecko",
+ "version_id": "001",
+ "open_source_category": "PROPRIETARY",
+ "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA,
+ "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/code-gecko@001",
+ "predict_schemata": {
+ "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/code_generation_1.0.0.yaml",
+ "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/code_generation_1.0.0.yaml",
+ "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/code_generation_1.0.0.yaml",
+ },
+}
+
+_TEXT_EMBEDDING_GECKO_PUBLISHER_MODEL_DICT = {
+ "name": "publishers/google/models/textembedding-gecko",
+ "version_id": "001",
+ "open_source_category": "PROPRIETARY",
+ "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA,
+ "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/textembedding-gecko@001",
+ "predict_schemata": {
+ "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/text_embedding_1.0.0.yaml",
+ "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/text_generation_1.0.0.yaml",
+ "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/text_embedding_1.0.0.yaml",
+ },
+}
+
+_TEST_GROUNDING_WEB_SEARCH = GroundingSource.WebSearch()
+
+_TEST_GROUNDING_VERTEX_AI_SEARCH_DATASTORE = GroundingSource.VertexAISearch(
+ data_store_id="test_datastore", location="global"
+)
+
+_TEST_TEXT_GENERATION_PREDICTION_GROUNDING = {
+ "safetyAttributes": {
+ "categories": ["Violent"],
+ "blocked": False,
+ "scores": [0.10000000149011612],
+ },
+ "groundingMetadata": {
+ "citations": [
+ {"url": "url1", "startIndex": 1, "endIndex": 2},
+ {"url": "url2", "startIndex": 3, "endIndex": 4},
+ ],
+ "searchQueries": [
+ "searchQuery",
+ ],
+ },
+ "content": """
+Ingredients:
+* 3 cups all-purpose flour
+
+Instructions:
+1. Preheat oven to 350 degrees F (175 degrees C).""",
+}
+
+_EXPECTED_PARSED_GROUNDING_METADATA = {
+ "citations": [
+ {
+ "url": "url1",
+ "start_index": 1,
+ "end_index": 2,
+ "title": None,
+ "license": None,
+ "publication_date": None,
+ },
+ {
+ "url": "url2",
+ "start_index": 3,
+ "end_index": 4,
+ "title": None,
+ "license": None,
+ "publication_date": None,
+ },
+ ],
+ "search_queries": ["searchQuery"],
+}
+
+_TEST_TEXT_GENERATION_PREDICTION = {
+ "safetyAttributes": {
+ "categories": ["Violent"],
+ "blocked": True,
+ "errors": [100],
+ "scores": [0.10000000149011612],
+ },
+ "content": """
+Ingredients:
+* 3 cups all-purpose flour
+
+Instructions:
+1. Preheat oven to 350 degrees F (175 degrees C).""",
+}
+
+_TEST_TEXT_GENERATION_PREDICTION_STREAMING = [
+ {
+ "content": "1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.",
+ },
+ {
+ "content": " 18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31.",
+ "safetyAttributes": {"blocked": False, "categories": None, "scores": None},
+ },
+ {
+ "content": " 32. 33. 34. 35. 36. 37. 38. 39. 40. 41. 42. 43. 44. 45.",
+ "citationMetadata": {
+ "citations": [
+ {
+ "title": "THEATRUM ARITHMETICO-GEOMETRICUM",
+ "publicationDate": "1727",
+ "endIndex": 181,
+ "startIndex": 12,
+ }
+ ]
+ },
+ "safetyAttributes": {
+ "blocked": True,
+ "errors": [100],
+ "categories": ["Finance"],
+ "scores": [0.1],
+ },
+ },
+]
+
+_TEST_CHAT_GENERATION_PREDICTION1 = {
+ "safetyAttributes": [
+ {
+ "scores": [],
+ "blocked": False,
+ "categories": [],
+ }
+ ],
+ "candidates": [
+ {
+ "author": "1",
+ "content": "Chat response 1",
+ }
+ ],
+}
+_TEST_CHAT_GENERATION_PREDICTION2 = {
+ "safetyAttributes": [
+ {
+ "scores": [],
+ "blocked": False,
+ "categories": [],
+ }
+ ],
+ "candidates": [
+ {
+ "author": "1",
+ "content": "Chat response 2",
+ }
+ ],
+}
+_TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION = {
+ "safetyAttributes": [
+ {
+ "scores": [],
+ "categories": [],
+ "blocked": False,
+ },
+ {
+ "scores": [0.1],
+ "categories": ["Finance"],
+ "blocked": True,
+ "errors": [100],
+ },
+ ],
+ "candidates": [
+ {
+ "author": "1",
+ "content": "Chat response 2",
+ },
+ {
+ "author": "1",
+ "content": "",
+ },
+ ],
+}
+
+_TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION_GROUNDING = {
+ "safetyAttributes": [
+ {
+ "scores": [],
+ "categories": [],
+ "blocked": False,
+ },
+ {
+ "scores": [0.1],
+ "categories": ["Finance"],
+ "blocked": True,
+ "errors": [100],
+ },
+ ],
+ "groundingMetadata": [
+ {
+ "citations": [
+ {
+ "startIndex": 1,
+ "endIndex": 2,
+ "url": "url1",
+ }
+ ],
+ "searchQueries": ["searchQuery1"],
+ },
+ {
+ "citations": [
+ {
+ "startIndex": 3,
+ "endIndex": 4,
+ "url": "url2",
+ }
+ ],
+ "searchQueries": ["searchQuery2"],
+ },
+ ],
+ "candidates": [
+ {
+ "author": "1",
+ "content": "Chat response 2",
+ },
+ {
+ "author": "1",
+ "content": "",
+ },
+ ],
+}
+
+_TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION_GROUNDING_NONE = {
+ "safetyAttributes": [
+ {
+ "scores": [],
+ "categories": [],
+ "blocked": False,
+ },
+ {
+ "scores": [0.1],
+ "categories": ["Finance"],
+ "blocked": True,
+ "errors": [100],
+ },
+ ],
+ "groundingMetadata": [
+ None,
+ None,
+ ],
+ "candidates": [
+ {
+ "author": "1",
+ "content": "Chat response 2",
+ },
+ {
+ "author": "1",
+ "content": "",
+ },
+ ],
+}
+
+_EXPECTED_PARSED_GROUNDING_METADATA_CHAT = {
+ "citations": [
+ {
+ "url": "url1",
+ "start_index": 1,
+ "end_index": 2,
+ "title": None,
+ "license": None,
+ "publication_date": None,
+ },
+ ],
+ "search_queries": ["searchQuery1"],
+}
+
+_EXPECTED_PARSED_GROUNDING_METADATA_CHAT_NONE = {
+ "citations": [],
+ "search_queries": [],
+}
+
+_TEST_CHAT_PREDICTION_STREAMING = [
+ {
+ "candidates": [
+ {
+ "author": "1",
+ "content": "1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15.",
+ }
+ ],
+ "safetyAttributes": [{"blocked": False, "categories": None, "scores": None}],
+ },
+ {
+ "candidates": [
+ {
+ "author": "1",
+ "content": " 16. 17. 18. 19. 20. 21. 22. 23. 24. 25. 26. 27.",
+ }
+ ],
+ "safetyAttributes": [
+ {
+ "blocked": True,
+ "errors": [100],
+ "categories": ["Finance"],
+ "scores": [0.1],
+ }
+ ],
+ },
+]
+
+_TEST_CODE_GENERATION_PREDICTION = {
+ "safetyAttributes": {
+ "blocked": True,
+ "errors": [100],
+ "categories": ["Finance"],
+ "scores": [0.1],
+ },
+ "content": """
+```python
+def is_leap_year(year):
+ \"\"\"
+ Returns True if the given year is a leap year, False otherwise.
+
+ Args:
+ year: The year to check.
+
+ Returns:
+ True if the year is a leap year, False otherwise.
+ \"\"\"
+
+ # A year is a leap year if it is divisible by 4, but not divisible by 100,
+ # unless it is also divisible by 400.
+
+ return (year % 4 == 0 and year % 100 != 0) or year % 400 == 0
+```""",
+}
+
+_TEST_CODE_COMPLETION_PREDICTION = {
+ "safetyAttributes": {
+ "categories": [],
+ "blocked": False,
+ "scores": [],
+ },
+ "content": """
+ return s[::-1]
+
+
+def reverse_string_2(s):""",
+}
+
+_TEXT_EMBEDDING_VECTOR_LENGTH = 768
+_TEST_TEXT_EMBEDDING_PREDICTION = {
+ "embeddings": {
+ "values": list([1.0] * _TEXT_EMBEDDING_VECTOR_LENGTH),
+ "statistics": {"truncated": False, "token_count": 4.0},
+ }
+}
+
+_TEST_COUNT_TOKENS_RESPONSE = {
+ "total_tokens": 5,
+ "total_billable_characters": 25,
+}
+
+_TEST_TEXT_BISON_TRAINING_DF = pd.DataFrame(
+ {
+ "input_text": [
+ "Basketball teams in the Midwest.",
+ "How to bake gluten-free bread?",
+ "Want to buy a new phone.",
+ ],
+ "output_text": [
+ "There are several basketball teams located in the Midwest region of the United States. Here are some of them:",
+ "Baking gluten-free bread can be a bit challenging because gluten is the protein that gives bread its structure and elasticity.",
+ "Great! There are many factors to consider when buying a new phone, including your budget, preferred operating system, desired features, and more. Here are some general steps to follow to help you make an informed decision:",
+ ],
+ },
+)
+
+_TEST_TEXT_BISON_PREFERENCE_TRAINING_DF = pd.DataFrame(
+ {
+ "input_text": [
+ "Create a description for Plantation Palms.",
+ ],
+ "candidate_0": [
+ "Enjoy some fun in the sun at Gulf Shores.",
+ ],
+ "candidate_1": [
+ "A Tranquil Oasis of Natural Beauty.",
+ ],
+ "choice": [
+ 0,
+ ],
+ },
+)
+
+_EMBEDDING_MODEL_TUNING_PIPELINE_SPEC = {
+ "components": {},
+ "deploymentSpec": {},
+ "pipelineInfo": {
+ "description": "Pipeline definition for v1.1.x embedding tuning pipelines.",
+ "name": "tune-text-embedding-model",
+ },
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "accelerator_count": {
+ "defaultValue": 4,
+ "description": "how many accelerators to use when running the\ncontainer.",
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "accelerator_type": {
+ "defaultValue": "NVIDIA_TESLA_V100",
+ "description": "the accelerator type for running the trainer component.",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "base_model_version_id": {
+ "defaultValue": "text-embedding-004",
+ "description": "which base model to tune. This may be any stable\nnumbered version, for example `textembedding-gecko@001`.",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "batch_size": {
+ "defaultValue": 128,
+ "description": "training batch size.",
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "corpus_path": {
+ "description": "the GCS path to the corpus data location.",
+ "parameterType": "STRING",
+ },
+ "encryption_spec_key_name": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "learning_rate_multiplier": {
+ "defaultValue": 1.0,
+ "isOptional": True,
+ "parameterType": "NUMBER_DOUBLE",
+ },
+ "machine_type": {
+ "defaultValue": "n1-standard-16",
+ "description": "the type of the machine to run the trainer component. For\nmore details about this input config, see:\nhttps://cloud.google.com/vertex-ai/docs/training/configure-compute.",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "model_display_name": {
+ "defaultValue": "tuned-text-embedding-model",
+ "description": "output model display name.",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "output_dimensionality": {
+ "defaultValue": -1,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "queries_path": {
+ "description": "the GCS path to the queries location.",
+ "parameterType": "STRING",
+ },
+ "task_type": {
+ "defaultValue": "DEFAULT",
+ "description": "the task type expected to be used during inference. Valid\nvalues are `DEFAULT`, `RETRIEVAL_QUERY`, `RETRIEVAL_DOCUMENT`,\n`SEMANTIC_SIMILARITY`, `CLASSIFICATION`, and `CLUSTERING`.",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "test_label_path": {
+ "defaultValue": "",
+ "description": "the GCS path to the test label data location.",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "train_label_path": {
+ "description": "the GCS path to the train label data location.",
+ "parameterType": "STRING",
+ },
+ "train_steps": {
+ "defaultValue": 1000,
+ "description": "the number of steps to perform fine-tuning.",
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "validation_label_path": {
+ "defaultValue": "",
+ "description": "The GCS path to the validation label data location.",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ }
+ },
+ },
+ "schemaVersion": "2.1.0",
+ "sdkVersion": "kfp-2.6.0",
+}
+_TEST_PIPELINE_SPEC = {
+ "components": {},
+ "pipelineInfo": {"name": "evaluation-llm-text-generation-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "accelerator_type": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "api_endpoint": {
+ "defaultValue": "aiplatform.googleapis.com/ui",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "dataset_name": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "dataset_uri": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "default_context": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "enable_checkpoint_selection": {
+ "defaultValue": "default",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "enable_early_stopping": {
+ "defaultValue": True,
+ "isOptional": True,
+ "parameterType": "BOOLEAN",
+ },
+ "encryption_spec_key_name": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "evaluation_data_uri": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "evaluation_interval": {
+ "defaultValue": 20,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "evaluation_output_root_dir": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "large_model_reference": {
+ "defaultValue": "text-bison@001",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "learning_rate": {
+ "defaultValue": -1,
+ "isOptional": True,
+ "parameterType": "NUMBER_DOUBLE",
+ },
+ "learning_rate_multiplier": {
+ "defaultValue": 1,
+ "isOptional": True,
+ "parameterType": "NUMBER_DOUBLE",
+ },
+ "location": {"parameterType": "STRING"},
+ "max_context_length": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "model_display_name": {"parameterType": "STRING"},
+ "project": {"parameterType": "STRING"},
+ "tensorboard_resource_id": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "tpu_training_skip_cmek": {
+ "defaultValue": False,
+ "isOptional": True,
+ "parameterType": "BOOLEAN",
+ },
+ "train_steps": {
+ "defaultValue": 300,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "tuning_method": {
+ "defaultValue": "tune_v2",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ }
+ },
+ },
+ "schemaVersion": "2.1.0",
+ "sdkVersion": "kfp-2.0.0-beta.14",
+}
+
+
+_EMBEDDING_MODEL_TUNING_PIPELINE_SPEC_JSON = json.dumps(
+ _EMBEDDING_MODEL_TUNING_PIPELINE_SPEC,
+)
+_TEST_PIPELINE_SPEC_JSON = json.dumps(
+ _TEST_PIPELINE_SPEC,
+)
+
+_TEST_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {"parameterValues": {}},
+ "pipelineSpec": json.loads(_TEST_PIPELINE_SPEC_JSON),
+ }
+)
+
+_TEST_RLHF_PIPELINE_SPEC = {
+ "components": {},
+ "pipelineInfo": {"name": "rlhf"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "deploy_model": {
+ "defaultValue": True,
+ "isOptional": True,
+ "parameterType": "BOOLEAN",
+ },
+ "eval_dataset": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "instruction": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "kl_coeff": {
+ "defaultValue": 0.1,
+ "isOptional": True,
+ "parameterType": "NUMBER_DOUBLE",
+ },
+ "large_model_reference": {"parameterType": "STRING"},
+ "location": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "model_display_name": {"isOptional": True, "parameterType": "STRING"},
+ "preference_dataset": {"parameterType": "STRING"},
+ "project": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "prompt_dataset": {"parameterType": "STRING"},
+ "prompt_sequence_length": {
+ "defaultValue": 512.0,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "reinforcement_learning_rate_multiplier": {
+ "defaultValue": 1.0,
+ "isOptional": True,
+ "parameterType": "NUMBER_DOUBLE",
+ },
+ "reinforcement_learning_train_steps": {
+ "defaultValue": 100.0,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "reward_model_learning_rate_multiplier": {
+ "defaultValue": 1.0,
+ "isOptional": True,
+ "parameterType": "NUMBER_DOUBLE",
+ },
+ "reward_model_train_steps": {
+ "defaultValue": 100.0,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "target_sequence_length": {
+ "defaultValue": 64.0,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "accelerator_type": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "tensorboard_resource_id": {
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ }
+ },
+ },
+ "schemaVersion": "2.1.0",
+ "sdkVersion": "kfp-2.4.0",
+}
+
+_TEST_RLHF_PIPELINE_SPEC_JSON = json.dumps(
+ _TEST_RLHF_PIPELINE_SPEC,
+)
+
+_TEST_RLHF_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {"parameterValues": {}},
+ "pipelineSpec": json.loads(_TEST_RLHF_PIPELINE_SPEC_JSON),
+ }
+)
+
+_TEST_TEXT_GENERATION_METRICS = {
+ "bleu": 3.9311041439597427,
+ "rougeLSum": 19.014677479620463,
+}
+
+
+_TEST_TEXT_CLASSIFICATION_METRICS = {"auPrc": 0.9, "auRoc": 0.8, "logLoss": 0.5}
+
+_TEST_EVAL_DATA = [
+ {
+ "prompt": "Basketball teams in the Midwest.",
+ "ground_truth": "There are several basketball teams located in the Midwest region of the United States. Here are some of them:",
+ },
+ {
+ "prompt": "How to bake gluten-free bread?",
+ "ground_truth": "Baking gluten-free bread can be a bit challenging because gluten is the protein that gives bread its structure and elasticity.",
+ },
+]
+
+_TEST_EVAL_DATA_DF = pd.DataFrame(_TEST_EVAL_DATA)
+
+_TEST_ARTIFACT_ID = "123456"
+_TEST_ARTIFACT_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default/artifacts/{_TEST_ARTIFACT_ID}"
+
+_TEST_EVAL_PIPELINE_SPEC = {
+ "components": {},
+ "pipelineInfo": {"name": "evaluation-llm-text-generation-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_accelerator_count": {
+ "defaultValue": 0.0,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "batch_predict_accelerator_type": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "batch_predict_gcs_source_uris": {
+ "defaultValue": [],
+ "isOptional": True,
+ "parameterType": "LIST",
+ },
+ "batch_predict_gcs_destination_output_uri": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "batch_predict_predictions_format": {
+ "defaultValue": "jsonl",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "enable_web_access": {
+ "defaultValue": True,
+ "isOptional": True,
+ "parameterType": "BOOLEAN",
+ },
+ "encryption_spec_key_name": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "evaluation_display_name": {
+ "defaultValue": "evaluation-text-generation",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "location": {
+ "defaultValue": "us-central1",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "machine_type": {
+ "defaultValue": "e2-highmem-16",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "model_name": {"parameterType": "STRING"},
+ "evaluation_task": {"parameterType": "STRING"},
+ "network": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "nlp_task": {
+ "defaultValue": "text-generation",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "predictions_format": {
+ "defaultValue": "jsonl",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "predictions_gcs_source": {
+ "defaultValue": [],
+ "isOptional": True,
+ "parameterType": "LIST",
+ },
+ "project": {"parameterType": "STRING"},
+ "root_dir": {"parameterType": "STRING"},
+ "service_account": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ }
+ },
+ },
+ "schemaVersion": "2.1.0",
+ "sdkVersion": "kfp-2.0.0-beta.14",
+}
+
+
+_TEST_EVAL_PIPELINE_SPEC_JSON = json.dumps(
+ _TEST_EVAL_PIPELINE_SPEC,
+)
+
+_TEST_EVAL_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {"parameterValues": {}},
+ "pipelineSpec": json.loads(_TEST_EVAL_PIPELINE_SPEC_JSON),
+ }
+)
+_TEST_DISTILLATION_PIPELINE_SPEC = {
+ "components": {},
+ "pipelineInfo": {
+ "description": "Vertex kfp pipeline for distillation.",
+ "name": "distillation",
+ },
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "accelerator_type": {
+ "defaultValue": "GPU",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "api_endpoint": {
+ "defaultValue": "aiplatform.googleapis.com/ui",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "dataset_uri": {"parameterType": "STRING"},
+ "enable_checkpoint_selection": {
+ "defaultValue": "default",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "enable_early_stopping": {
+ "defaultValue": True,
+ "isOptional": True,
+ "parameterType": "BOOLEAN",
+ },
+ "encryption_spec_key_name": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "evaluation_data_uri": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "evaluation_interval": {
+ "defaultValue": 100,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "evaluation_output_root_dir": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "learning_rate_multiplier": {
+ "defaultValue": 1,
+ "isOptional": True,
+ "parameterType": "NUMBER_DOUBLE",
+ },
+ "location": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "max_context_length": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "model_display_name": {
+ "defaultValue": "distilled-student-model",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "project": {"parameterType": "STRING"},
+ "student_model_reference": {
+ "defaultValue": "text-bison@002",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "teacher_model_reference": {
+ "defaultValue": "text-unicorn@001",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "temperature": {
+ "defaultValue": 0,
+ "isOptional": True,
+ "parameterType": "NUMBER_DOUBLE",
+ },
+ "tensorboard_resource_id": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "tpu_training_skip_cmek": {
+ "defaultValue": False,
+ "isOptional": True,
+ "parameterType": "BOOLEAN",
+ },
+ "train_steps": {
+ "defaultValue": 300,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "version": {
+ "defaultValue": "latest",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ }
+ },
+ },
+ "schemaVersion": "2.1.0",
+ "sdkVersion": "kfp-2.4.0",
+}
+
+_TEST_DISTILLATION_PIPELINE_SPEC_JSON = json.dumps(
+ _TEST_DISTILLATION_PIPELINE_SPEC,
+)
+
+
+# Eval classification spec
+
+_TEST_EVAL_CLASSIFICATION_PIPELINE_SPEC = {
+ "components": {},
+ "pipelineInfo": {"name": "evaluation-llm-text-generation-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_accelerator_count": {
+ "defaultValue": 0.0,
+ "isOptional": True,
+ "parameterType": "NUMBER_INTEGER",
+ },
+ "batch_predict_accelerator_type": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "batch_predict_gcs_source_uris": {
+ "defaultValue": [],
+ "isOptional": True,
+ "parameterType": "LIST",
+ },
+ "batch_predict_gcs_destination_output_uri": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "batch_predict_predictions_format": {
+ "defaultValue": "jsonl",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "enable_web_access": {
+ "defaultValue": True,
+ "isOptional": True,
+ "parameterType": "BOOLEAN",
+ },
+ "encryption_spec_key_name": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "evaluation_display_name": {
+ "defaultValue": "evaluation-text-generation",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "location": {
+ "defaultValue": "us-central1",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "machine_type": {
+ "defaultValue": "e2-highmem-16",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "model_name": {"parameterType": "STRING"},
+ "evaluation_task": {"parameterType": "STRING"},
+ "network": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "nlp_task": {
+ "defaultValue": "text-generation",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "predictions_format": {
+ "defaultValue": "jsonl",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "predictions_gcs_source": {
+ "defaultValue": [],
+ "isOptional": True,
+ "parameterType": "LIST",
+ },
+ "evaluation_class_labels": {
+ "defaultValue": [],
+ "isOptional": True,
+ "parameterType": "LIST",
+ },
+ "target_field_name": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ "project": {"parameterType": "STRING"},
+ "root_dir": {"parameterType": "STRING"},
+ "service_account": {
+ "defaultValue": "",
+ "isOptional": True,
+ "parameterType": "STRING",
+ },
+ }
+ },
+ },
+ "schemaVersion": "2.1.0",
+ "sdkVersion": "kfp-2.0.0-beta.14",
+}
+
+_TEST_EVAL_CLASSIFICATION_PIPELINE_SPEC_JSON = json.dumps(
+ _TEST_EVAL_CLASSIFICATION_PIPELINE_SPEC,
+)
+
+_TEST_EVAL_CLASSIFICATION_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {"parameterValues": {}},
+ "pipelineSpec": json.loads(_TEST_EVAL_PIPELINE_SPEC_JSON),
+ }
+)
+
+_URL_DATA = {
+ "https://us-kfp.pkg.dev/ml-pipeline/distillation/distillation/v1.0.0": _TEST_DISTILLATION_PIPELINE_SPEC_JSON,
+}
+
+
+def _get_test_tensorboard_resource_id(
+ project: str = _TEST_PROJECT,
+ location: str = _TEST_LOCATION,
+) -> str:
+ """ "Returns a tensorboard resource id in the specified project and region."""
+ return f"projects/{project}/locations/{location}/tensorboards/123"
+
+
+@pytest.fixture
+def mock_pipeline_bucket_exists():
+ def mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist(
+ output_artifacts_gcs_dir=None,
+ service_account=None,
+ project=None,
+ location=None,
+ credentials=None,
+ ):
+ output_artifacts_gcs_dir = (
+ output_artifacts_gcs_dir
+ or gcs_utils.generate_gcs_directory_for_pipeline_artifacts(
+ project=project,
+ location=location,
+ )
+ )
+ return output_artifacts_gcs_dir
+
+ with mock.patch(
+ "google.cloud.aiplatform.utils.gcs_utils.create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist",
+ wraps=mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist,
+ ) as mock_context:
+ yield mock_context
+
+
+def make_pipeline_job(state):
+ return gca_pipeline_job.PipelineJob(
+ name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_NAME,
+ state=state,
+ create_time=test_constants.PipelineJobConstants._TEST_PIPELINE_CREATE_TIME,
+ service_account=test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT,
+ network=test_constants.TrainingJobConstants._TEST_NETWORK,
+ job_detail=gca_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=gca_context.Context(
+ name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_NAME,
+ ),
+ task_details=[
+ gca_pipeline_job.PipelineTaskDetail(
+ task_id=456,
+ task_name="tune-large-model-20230724214903",
+ execution=GapicExecution(
+ name="projects/123/locations/europe-west4/metadataStores/default/executions/...",
+ display_name="tune-large-model-20230724214903",
+ schema_title="system.Run",
+ metadata={
+ "output:model_resource_name": "projects/123/locations/us-central1/models/456",
+ "output:endpoint_resource_name": "projects/123/locations/us-central1/endpoints/456",
+ },
+ ),
+ ),
+ ],
+ ),
+ )
+
+
+def make_eval_pipeline_job(state):
+ return gca_pipeline_job.PipelineJob(
+ name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_NAME,
+ state=state,
+ create_time=test_constants.PipelineJobConstants._TEST_PIPELINE_CREATE_TIME,
+ service_account=test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT,
+ network=test_constants.TrainingJobConstants._TEST_NETWORK,
+ job_detail=gca_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=gca_context.Context(
+ name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_NAME,
+ ),
+ task_details=[
+ gca_pipeline_job.PipelineTaskDetail(
+ task_id=456,
+ task_name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_ID,
+ outputs={
+ "evaluation_metrics": gca_pipeline_job.PipelineTaskDetail.ArtifactList(
+ artifacts=[
+ gca_artifact.Artifact(
+ name="test-metric-artifact",
+ metadata=_TEST_TEXT_GENERATION_METRICS,
+ ),
+ ],
+ )
+ },
+ ),
+ gca_pipeline_job.PipelineTaskDetail(
+ task_id=789,
+ task_name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_ID,
+ outputs={
+ "evaluation_metrics": gca_pipeline_job.PipelineTaskDetail.ArtifactList(
+ artifacts=[
+ gca_artifact.Artifact(
+ display_name="evaluation_metrics",
+ uri="gs://test-bucket/evaluation_metrics",
+ ),
+ ]
+ )
+ },
+ ),
+ ],
+ ),
+ )
+
+
+def make_eval_classification_pipeline_job(state):
+ return gca_pipeline_job.PipelineJob(
+ name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_NAME,
+ state=state,
+ create_time=test_constants.PipelineJobConstants._TEST_PIPELINE_CREATE_TIME,
+ service_account=test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT,
+ network=test_constants.TrainingJobConstants._TEST_NETWORK,
+ job_detail=gca_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=gca_context.Context(
+ name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_NAME,
+ ),
+ task_details=[
+ gca_pipeline_job.PipelineTaskDetail(
+ task_id=456,
+ task_name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_ID,
+ outputs={
+ "evaluation_metrics": gca_pipeline_job.PipelineTaskDetail.ArtifactList(
+ artifacts=[
+ gca_artifact.Artifact(
+ name="test-metric-artifact",
+ metadata=_TEST_TEXT_CLASSIFICATION_METRICS,
+ ),
+ ],
+ )
+ },
+ ),
+ gca_pipeline_job.PipelineTaskDetail(
+ task_id=789,
+ task_name=test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_ID,
+ outputs={
+ "evaluation_metrics": gca_pipeline_job.PipelineTaskDetail.ArtifactList(
+ artifacts=[
+ gca_artifact.Artifact(
+ display_name="evaluation_metrics",
+ uri="gs://test-bucket/evaluation_metrics",
+ ),
+ ]
+ )
+ },
+ ),
+ ],
+ ),
+ )
+
+
+def get_client_api_transport(client):
+ return client._transport.__class__.__name__.lower()
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_create_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_create_rlhf():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_create_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_create_eval():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_create_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_create_eval_classification():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_create_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_job_get():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_job_get_eval():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING
+ ),
+ make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_job_get_eval_classification():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING
+ ),
+ make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_load_yaml_and_json(job_spec):
+ with mock.patch.object(
+ storage.Blob, "download_as_bytes"
+ ) as mock_load_yaml_and_json:
+ mock_load_yaml_and_json.return_value = job_spec.encode()
+ yield mock_load_yaml_and_json
+
+
+@pytest.fixture
+def mock_gcs_from_string():
+ with mock.patch.object(storage.Blob, "from_string") as mock_from_string:
+ yield mock_from_string
+
+
+@pytest.fixture
+def mock_gcs_upload():
+ with mock.patch.object(storage.Blob, "upload_from_filename") as mock_from_filename:
+ yield mock_from_filename
+
+
+@pytest.fixture
+def mock_request_urlopen(request: str) -> Tuple[str, mock.MagicMock]:
+ data = _TEST_PIPELINE_SPEC
+ with mock.patch.object(urllib_request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = json.dumps(data)
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield request.param, mock_urlopen
+
+
+@pytest.fixture
+def mock_request_urlopen_gecko(request: str) -> Tuple[str, mock.MagicMock]:
+ data = _EMBEDDING_MODEL_TUNING_PIPELINE_SPEC
+ with mock.patch.object(urllib_request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = json.dumps(data)
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield request.param, mock_urlopen
+
+
+@pytest.fixture
+def mock_request_urlopen_rlhf(request: str) -> Tuple[str, mock.MagicMock]:
+ data = _TEST_RLHF_PIPELINE_SPEC
+ with mock.patch.object(urllib_request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = json.dumps(data)
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield request.param, mock_urlopen
+
+
+@pytest.fixture
+def mock_request_urlopen_eval(request: str) -> Tuple[str, mock.MagicMock]:
+ data = _TEST_EVAL_PIPELINE_SPEC
+ with mock.patch.object(urllib_request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = json.dumps(data)
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield request.param, mock_urlopen
+
+
+@pytest.fixture
+def mock_request_urlopen_eval_classification(
+ request: str,
+) -> Tuple[str, mock.MagicMock]:
+ data = _TEST_EVAL_CLASSIFICATION_PIPELINE_SPEC
+ with mock.patch.object(urllib_request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = json.dumps(data)
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield request.param, mock_urlopen
+
+
+@pytest.fixture
+def mock_urllib_request_urlopen(request: str) -> Tuple[str, mock.MagicMock]:
+ url = request.param
+ data = _URL_DATA[url]
+ with mock.patch.object(urllib_request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = data
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield url, mock_urlopen
+
+
+@pytest.fixture
+def get_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name="test-display-name",
+ name=test_constants.EndpointConstants._TEST_ENDPOINT_NAME,
+ deployed_models=[
+ gca_endpoint.DeployedModel(
+ model=test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ ),
+ ],
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def mock_deploy_tuned_embedding_model(get_endpoint_mock):
+ with mock.patch.object(
+ _language_models._PreviewTunableTextEmbeddingModelMixin, "deploy_tuned_model"
+ ) as mock_text_generation_model:
+ mock_text_generation_model.return_value._model_id = (
+ test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+ mock_text_generation_model.return_value._endpoint_name = (
+ test_constants.EndpointConstants._TEST_ENDPOINT_NAME
+ )
+ mock_text_generation_model.return_value._endpoint = get_endpoint_mock
+ yield mock_text_generation_model
+
+
+@pytest.fixture
+def mock_get_tuned_model(get_endpoint_mock):
+ with mock.patch.object(
+ _language_models._TunableModelMixin, "get_tuned_model"
+ ) as mock_text_generation_model:
+ mock_text_generation_model.return_value._model_id = (
+ test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+ mock_text_generation_model.return_value._endpoint_name = (
+ test_constants.EndpointConstants._TEST_ENDPOINT_NAME
+ )
+ mock_text_generation_model.return_value._endpoint = get_endpoint_mock
+ yield mock_text_generation_model
+
+
+@pytest.fixture
+def get_model_with_tuned_version_label_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=test_constants.ModelConstants._TEST_MODEL_NAME,
+ name=test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME,
+ labels={"google-vertex-llm-tuning-base-model-id": "text-bison-001"},
+ deployed_models=[
+ deployed_model_ref_v1.DeployedModelRef(
+ endpoint=test_constants.EndpointConstants._TEST_ENDPOINT_NAME,
+ deployed_model_id=test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME,
+ )
+ ],
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_endpoint_with_models_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_models_mock:
+ get_endpoint_models_mock.return_value = gca_endpoint.Endpoint(
+ display_name=test_constants.EndpointConstants._TEST_DISPLAY_NAME,
+ name=test_constants.EndpointConstants._TEST_ENDPOINT_NAME,
+ deployed_models=[
+ gca_endpoint.DeployedModel(
+ id=test_constants.ModelConstants._TEST_ID,
+ display_name=test_constants.ModelConstants._TEST_MODEL_NAME,
+ model=test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME,
+ ),
+ ],
+ traffic_split=test_constants.EndpointConstants._TEST_TRAFFIC_SPLIT,
+ )
+ yield get_endpoint_models_mock
+
+
+# Model Evaluation fixtures
+@pytest.fixture
+def mock_model_evaluate():
+ with mock.patch.object(
+ _evaluatable_language_models._EvaluatableLanguageModel, "evaluate"
+ ) as mock_model_evaluate:
+ mock_model_evaluate.return_value = _TEST_TEXT_GENERATION_METRICS
+ yield mock_model_evaluate
+
+
+@pytest.fixture
+def mock_successfully_completed_eval_job():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_model_eval_job:
+ mock_get_model_eval_job.return_value = make_eval_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_get_model_eval_job
+
+
+@pytest.fixture
+def mock_successfully_completed_eval_classification_job():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_model_eval_job:
+ mock_get_model_eval_job.return_value = make_eval_classification_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_get_model_eval_job
+
+
+@pytest.fixture
+def mock_storage_blob_upload_from_filename():
+ with mock.patch(
+ "google.cloud.storage.Blob.upload_from_filename"
+ ) as mock_blob_upload_from_filename, mock.patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_upload_from_filename
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestLanguageModels:
+ """Unit tests for the language models."""
+
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_text_generation(self, api_transport):
+ """Tests the text generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = preview_language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/text-bison@001", retry=base._DEFAULT_RETRY
+ )
+
+ assert (
+ model._model_resource_name
+ == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/text-bison@001"
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ gca_predict_response.predictions.append(_TEST_TEXT_GENERATION_PREDICTION)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ):
+ response = model.predict(
+ "What is the best recipe for banana bread? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ )
+
+ assert response.text == _TEST_TEXT_GENERATION_PREDICTION["content"]
+ assert (
+ response.raw_prediction_response.predictions[0]
+ == _TEST_TEXT_GENERATION_PREDICTION
+ )
+ assert (
+ response.safety_attributes["Violent"]
+ == _TEST_TEXT_GENERATION_PREDICTION["safetyAttributes"]["scores"][0]
+ )
+
+ def test_text_generation_preview_count_tokens(self):
+ """Tests the text generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ gca_count_tokens_response = gca_prediction_service_v1beta1.CountTokensResponse(
+ total_tokens=_TEST_COUNT_TOKENS_RESPONSE["total_tokens"],
+ total_billable_characters=_TEST_COUNT_TOKENS_RESPONSE[
+ "total_billable_characters"
+ ],
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client_v1beta1.PredictionServiceClient,
+ attribute="count_tokens",
+ return_value=gca_count_tokens_response,
+ ):
+ response = model.count_tokens(["What is the best recipe for banana bread?"])
+
+ assert response.total_tokens == _TEST_COUNT_TOKENS_RESPONSE["total_tokens"]
+ assert (
+ response.total_billable_characters
+ == _TEST_COUNT_TOKENS_RESPONSE["total_billable_characters"]
+ )
+
+ def test_text_generation_ga(self):
+ """Tests the text generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/text-bison@001", retry=base._DEFAULT_RETRY
+ )
+
+ assert (
+ model._model_resource_name
+ == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/text-bison@001"
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ gca_predict_response.predictions.append(_TEST_TEXT_GENERATION_PREDICTION)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ) as mock_predict:
+ response = model.predict(
+ "What is the best recipe for banana bread? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ stop_sequences=["\n"],
+ logprobs=3,
+ presence_penalty=1.0,
+ frequency_penalty=1.0,
+ logit_bias={1: 100.0, 2: -100.0},
+ seed=42,
+ )
+
+ expected_errors = (100,)
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["maxDecodeSteps"] == 128
+ assert prediction_parameters["temperature"] == 0.0
+ assert prediction_parameters["topP"] == 1.0
+ assert prediction_parameters["topK"] == 5
+ assert prediction_parameters["stopSequences"] == ["\n"]
+ assert prediction_parameters["logprobs"] == 3
+ assert prediction_parameters["presencePenalty"] == 1.0
+ assert prediction_parameters["frequencyPenalty"] == 1.0
+ assert prediction_parameters["logitBias"] == {1: 100.0, 2: -100.0}
+ assert prediction_parameters["seed"] == 42
+ assert response.text == _TEST_TEXT_GENERATION_PREDICTION["content"]
+ assert response.errors == expected_errors
+
+ # Validating that unspecified parameters are not passed to the model
+ # (except `max_output_tokens`).
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ) as mock_predict:
+ model.predict(
+ "What is the best recipe for banana bread? Recipe:",
+ )
+
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert (
+ prediction_parameters["maxDecodeSteps"]
+ == language_models.TextGenerationModel._DEFAULT_MAX_OUTPUT_TOKENS
+ )
+ assert "temperature" not in prediction_parameters
+ assert "topP" not in prediction_parameters
+ assert "topK" not in prediction_parameters
+
+ def test_text_generation_multiple_candidates(self):
+ """Tests the text generation model with multiple candidates."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ # Discrepancy between the number of `instances` and the number of `predictions`
+ # is a violation of the prediction service invariant, but the service does this.
+ gca_predict_response.predictions.append(_TEST_TEXT_GENERATION_PREDICTION)
+ gca_predict_response.predictions.append(_TEST_TEXT_GENERATION_PREDICTION)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ) as mock_predict:
+ response = model.predict(
+ "What is the best recipe for banana bread? Recipe:",
+ candidate_count=2,
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["candidateCount"] == 2
+
+ assert response.text == _TEST_TEXT_GENERATION_PREDICTION["content"]
+ assert len(response.candidates) == 2
+ assert (
+ response.candidates[0].text == _TEST_TEXT_GENERATION_PREDICTION["content"]
+ )
+
+ def test_text_generation_multiple_candidates_grounding(self):
+ """Tests the text generation model with multiple candidates with web grounding."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ # Discrepancy between the number of `instances` and the number of `predictions`
+ # is a violation of the prediction service invariant, but the service does this.
+ gca_predict_response.predictions.append(
+ _TEST_TEXT_GENERATION_PREDICTION_GROUNDING
+ )
+ gca_predict_response.predictions.append(
+ _TEST_TEXT_GENERATION_PREDICTION_GROUNDING
+ )
+
+ test_grounding_sources = [
+ _TEST_GROUNDING_WEB_SEARCH,
+ _TEST_GROUNDING_VERTEX_AI_SEARCH_DATASTORE,
+ ]
+ datastore_path = (
+ "projects/test-project/locations/global/"
+ "collections/default_collection/dataStores/test_datastore"
+ )
+ expected_grounding_sources = [
+ {
+ "sources": [{"type": "WEB"}],
+ "disableAttribution": False,
+ },
+ {
+ "sources": [
+ {
+ "type": "VERTEX_AI_SEARCH",
+ "vertexAiSearchDatastore": datastore_path,
+ }
+ ],
+ "disableAttribution": False,
+ },
+ ]
+
+ for test_grounding_source, expected_grounding_source in zip(
+ test_grounding_sources, expected_grounding_sources
+ ):
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ) as mock_predict:
+ response = model.predict(
+ "What is the best recipe for banana bread? Recipe:",
+ candidate_count=2,
+ grounding_source=test_grounding_source,
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["candidateCount"] == 2
+ assert prediction_parameters["groundingConfig"] == expected_grounding_source
+ assert (
+ response.text == _TEST_TEXT_GENERATION_PREDICTION_GROUNDING["content"]
+ )
+ assert len(response.candidates) == 2
+ assert (
+ response.candidates[0].text
+ == _TEST_TEXT_GENERATION_PREDICTION_GROUNDING["content"]
+ )
+ assert (
+ dataclasses.asdict(response.candidates[0].grounding_metadata)
+ == _EXPECTED_PARSED_GROUNDING_METADATA
+ )
+
+ @pytest.mark.asyncio
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ async def test_text_generation_async(self, api_transport):
+ """Tests the text generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ if api_transport == "rest":
+ # Should raise ValueError for setting async REST credentials
+ # with sync credentials.
+ with pytest.raises(ValueError):
+ aiplatform.initializer._set_async_rest_credentials(
+ google.auth.credentials.AnonymousCredentials()
+ )
+ # If there are async credentials deps, create async REST credentials.
+ # Otherwise, it will fallback to gRPC
+ if _HAS_ASYNC_CRED_DEPS:
+ # Construct google.auth.aio.credentials.AnonymousCredentials
+ # for async REST transport.
+ aiplatform.initializer._set_async_rest_credentials(
+ AsyncAnonymousCredentials()
+ )
+ else:
+ # Should raise ValueError for setting async REST credentials
+ # for non-REST transport.
+ with pytest.raises(ValueError):
+ aiplatform.initializer._set_async_rest_credentials(
+ google.auth.credentials.AnonymousCredentials()
+ )
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ gca_predict_response.predictions.append(_TEST_TEXT_GENERATION_PREDICTION)
+
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ) as mock_predict:
+ response = await model.predict_async(
+ "What is the best recipe for banana bread? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ stop_sequences=["\n"],
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["maxDecodeSteps"] == 128
+ assert prediction_parameters["temperature"] == 0.0
+ assert prediction_parameters["topP"] == 1.0
+ assert prediction_parameters["topK"] == 5
+ assert prediction_parameters["stopSequences"] == ["\n"]
+ assert response.text == _TEST_TEXT_GENERATION_PREDICTION["content"]
+
+ if _HAS_ASYNC_CRED_DEPS:
+ assert api_transport in get_client_api_transport(
+ model._endpoint._prediction_async_client._client
+ )
+ else:
+ # Assert that grpc was used as a fallback.
+ assert "grpc" in get_client_api_transport(
+ model._endpoint._prediction_async_client._client
+ )
+
+ await model._endpoint._prediction_async_client.transport.close()
+
+ @pytest.mark.asyncio
+ async def test_text_generation_multiple_candidates_grounding_async(self):
+ """Tests the text generation model with multiple candidates async with web grounding."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ # Discrepancy between the number of `instances` and the number of `predictions`
+ # is a violation of the prediction service invariant, but the service does this.
+ gca_predict_response.predictions.append(
+ _TEST_TEXT_GENERATION_PREDICTION_GROUNDING
+ )
+
+ test_grounding_sources = [
+ _TEST_GROUNDING_WEB_SEARCH,
+ _TEST_GROUNDING_VERTEX_AI_SEARCH_DATASTORE,
+ ]
+ datastore_path = (
+ "projects/test-project/locations/global/"
+ "collections/default_collection/dataStores/test_datastore"
+ )
+ expected_grounding_sources = [
+ {
+ "sources": [
+ {
+ "type": "WEB",
+ }
+ ],
+ "disableAttribution": False,
+ },
+ {
+ "sources": [
+ {
+ "type": "VERTEX_AI_SEARCH",
+ "vertexAiSearchDatastore": datastore_path,
+ }
+ ],
+ "disableAttribution": False,
+ },
+ ]
+
+ for test_grounding_source, expected_grounding_source in zip(
+ test_grounding_sources, expected_grounding_sources
+ ):
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ) as mock_predict:
+ response = await model.predict_async(
+ "What is the best recipe for banana bread? Recipe:",
+ max_output_tokens=128,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ stop_sequences=["\n"],
+ grounding_source=test_grounding_source,
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["maxDecodeSteps"] == 128
+ assert prediction_parameters["temperature"] == 0.0
+ assert prediction_parameters["topP"] == 1.0
+ assert prediction_parameters["topK"] == 5
+ assert prediction_parameters["stopSequences"] == ["\n"]
+ assert prediction_parameters["groundingConfig"] == expected_grounding_source
+ assert (
+ response.text == _TEST_TEXT_GENERATION_PREDICTION_GROUNDING["content"]
+ )
+ assert (
+ dataclasses.asdict(response.grounding_metadata)
+ == _EXPECTED_PARSED_GROUNDING_METADATA
+ )
+
+ def test_text_generation_model_predict_streaming(self):
+ """Tests the TextGenerationModel.predict_streaming method."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ response_generator = (
+ gca_prediction_service.StreamingPredictResponse(
+ outputs=[_streaming_prediction.value_to_tensor(response_dict)]
+ )
+ for response_dict in _TEST_TEXT_GENERATION_PREDICTION_STREAMING
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="server_streaming_predict",
+ return_value=response_generator,
+ ):
+ for response in model.predict_streaming(
+ "Count to 50",
+ max_output_tokens=1000,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ stop_sequences=["# %%"],
+ ):
+ assert len(response.text) > 10
+
+ @pytest.mark.asyncio
+ async def test_text_generation_model_predict_streaming_async(self):
+ """Tests the TextGenerationModel.predict_streaming_async method."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ async def mock_server_streaming_predict_async_iter(*args, **kwargs):
+ for response_dict in _TEST_TEXT_GENERATION_PREDICTION_STREAMING:
+ yield gca_prediction_service.StreamingPredictResponse(
+ outputs=[_streaming_prediction.value_to_tensor(response_dict)]
+ )
+
+ async def mock_server_streaming_predict_async(*args, **kwargs):
+ return mock_server_streaming_predict_async_iter(*args, **kwargs)
+
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="server_streaming_predict",
+ new=mock_server_streaming_predict_async,
+ ):
+ async for response in model.predict_streaming_async(
+ "Count to 50",
+ max_output_tokens=1000,
+ temperature=0.0,
+ top_p=1.0,
+ top_k=5,
+ stop_sequences=["# %%"],
+ ):
+ assert len(response.text) > 10
+
+ def test_text_generation_response_repr(self):
+ response = language_models.TextGenerationResponse(
+ text="",
+ is_blocked=True,
+ safety_attributes={"Violent": 0.1},
+ _prediction_response=None,
+ )
+ response_repr = repr(response)
+ assert "blocked" in response_repr
+ assert "Violent" in response_repr
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_EMBEDDING_MODEL_TUNING_PIPELINE_SPEC_JSON],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen_gecko",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ @pytest.mark.parametrize(
+ "base_model_version_id,tune_args,expected_pipeline_args",
+ [ # Do not pass any optional parameters.
+ (
+ "textembedding-gecko@003",
+ dict(
+ training_data="gs://bucket/training.tsv",
+ corpus_data="gs://bucket/corpus.jsonl",
+ queries_data="gs://bucket/queries.jsonl",
+ ),
+ dict(
+ base_model_version_id="textembedding-gecko@003",
+ train_label_path="gs://bucket/training.tsv",
+ corpus_path="gs://bucket/corpus.jsonl",
+ queries_path="gs://bucket/queries.jsonl",
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ ),
+ ),
+ # Pass all optional parameters.
+ (
+ "text-multilingual-embedding-002",
+ dict(
+ training_data="gs://bucket/training.tsv",
+ corpus_data="gs://bucket/corpus.jsonl",
+ queries_data="gs://bucket/queries.jsonl",
+ test_data="gs://bucket/test.tsv",
+ validation_data="gs://bucket/validation.tsv",
+ tuned_model_location="us-central1",
+ model_display_name="my-tuned-model",
+ train_steps=30,
+ batch_size=256,
+ accelerator="NVIDIA_TESLA_V100",
+ accelerator_count=1,
+ machine_type="n1-highmem-16",
+ task_type="DEFAULT",
+ output_dimensionality=128,
+ learning_rate_multiplier=0.1,
+ ),
+ dict(
+ train_steps=30,
+ accelerator_type="NVIDIA_TESLA_V100",
+ accelerator_count=1,
+ machine_type="n1-highmem-16",
+ base_model_version_id="text-multilingual-embedding-002",
+ train_label_path="gs://bucket/training.tsv",
+ corpus_path="gs://bucket/corpus.jsonl",
+ queries_path="gs://bucket/queries.jsonl",
+ test_label_path="gs://bucket/test.tsv",
+ batch_size=256,
+ model_display_name="my-tuned-model",
+ validation_label_path="gs://bucket/validation.tsv",
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ task_type="DEFAULT",
+ output_dimensionality=128,
+ learning_rate_multiplier=0.1,
+ ),
+ ),
+ ],
+ )
+ def test_tune_text_embedding_model(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen_gecko,
+ mock_deploy_tuned_embedding_model,
+ tune_args,
+ expected_pipeline_args,
+ base_model_version_id,
+ ):
+ """Tests tuning the text embedding model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_GECKO_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextEmbeddingModel.from_pretrained(
+ base_model_version_id
+ )
+ tuning_result = model.tune_model(**tune_args)
+
+ call_kwargs = mock_pipeline_service_create.call_args[1]
+ pipeline_arguments = dict(
+ call_kwargs["pipeline_job"].runtime_config.parameter_values
+ )
+
+ if (
+ "model_display_name" not in tune_args
+ and "model_display_name" in pipeline_arguments
+ ):
+ # This is automatically generated from some params, so don't
+ # check it.
+ del pipeline_arguments["model_display_name"]
+
+ assert pipeline_arguments == expected_pipeline_args
+
+ # Testing the tuned model
+ assert tuning_result.pipeline_job_name.startswith("sample-test-pipeline")
+ tuned_model = tuning_result.deploy_tuned_model()
+ assert (
+ tuned_model._endpoint_name
+ == test_constants.EndpointConstants._TEST_ENDPOINT_NAME
+ )
+
+ @pytest.mark.parametrize(
+ "optional_tune_args,error_regex",
+ [
+ (
+ dict(test_data="/tmp/bucket/test.tsv"),
+ "Each tuning dataset file must be a Google Cloud Storage URI starting with 'gs://'.",
+ ),
+ (
+ dict(output_dimensionality=-1),
+ "output_dimensionality must be an integer between 1 and 768",
+ ),
+ (
+ dict(learning_rate_multiplier=0),
+ "learning_rate_multiplier must be greater than 0",
+ ),
+ (
+ dict(train_steps=29),
+ "train_steps must be greater than or equal to 30",
+ ),
+ (
+ dict(batch_size=2048),
+ "batch_size must be between 1 and 1024",
+ ),
+ ],
+ )
+ def test_tune_text_embedding_model_invalid_values(
+ self, optional_tune_args, error_regex
+ ):
+ """Tests that certain embedding tuning values fail validation."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_GECKO_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.TextEmbeddingModel.from_pretrained(
+ "text-multilingual-embedding-002"
+ )
+ with pytest.raises(ValueError, match=error_regex):
+ model.tune_model(
+ training_data="gs://bucket/training.tsv",
+ corpus_data="gs://bucket/corpus.jsonl",
+ queries_data="gs://bucket/queries.jsonl",
+ **optional_tune_args,
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_tune_text_generation_model(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen,
+ mock_get_tuned_model,
+ ):
+ """Tests tuning the text generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ tuning_job_location = "europe-west4"
+ evaluation_data_uri = "gs://bucket/eval.jsonl"
+ evaluation_interval = 37
+ enable_early_stopping = True
+ tensorboard_name = _get_test_tensorboard_resource_id(
+ location=tuning_job_location
+ )
+
+ tuning_job = model.tune_model(
+ training_data=_TEST_TEXT_BISON_TRAINING_DF,
+ tuning_job_location=tuning_job_location,
+ tuned_model_location="us-central1",
+ learning_rate=0.1,
+ learning_rate_multiplier=2.0,
+ train_steps=10,
+ tuning_evaluation_spec=preview_language_models.TuningEvaluationSpec(
+ evaluation_data=evaluation_data_uri,
+ evaluation_interval=evaluation_interval,
+ enable_early_stopping=enable_early_stopping,
+ tensorboard=tensorboard_name,
+ ),
+ accelerator_type="TPU",
+ )
+ call_kwargs = mock_pipeline_service_create.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["learning_rate"] == 0.1
+ assert pipeline_arguments["learning_rate_multiplier"] == 2.0
+ assert pipeline_arguments["train_steps"] == 10
+ assert pipeline_arguments["evaluation_data_uri"] == evaluation_data_uri
+ assert pipeline_arguments["evaluation_interval"] == evaluation_interval
+ assert pipeline_arguments["enable_early_stopping"] == enable_early_stopping
+ assert pipeline_arguments["tensorboard_resource_id"] == tensorboard_name
+ assert pipeline_arguments["large_model_reference"] == "text-bison@001"
+ assert pipeline_arguments["accelerator_type"] == "TPU"
+ assert (
+ call_kwargs["pipeline_job"].encryption_spec.kms_key_name
+ == _TEST_ENCRYPTION_KEY_NAME
+ )
+
+ # Testing the tuned model
+ tuned_model = tuning_job.get_tuned_model()
+ assert (
+ tuned_model._endpoint_name
+ == test_constants.EndpointConstants._TEST_ENDPOINT_NAME
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_tune_text_generation_model_ga(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen,
+ mock_get_tuned_model,
+ ):
+ """Tests tuning the text generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ tuning_job_location = "europe-west4"
+ evaluation_data_uri = "gs://bucket/eval.jsonl"
+ evaluation_interval = 37
+ enable_early_stopping = True
+ enable_checkpoint_selection = True
+ tensorboard_name = _get_test_tensorboard_resource_id(
+ location=tuning_job_location
+ )
+
+ tuning_job = model.tune_model(
+ training_data=_TEST_TEXT_BISON_TRAINING_DF,
+ tuning_job_location=tuning_job_location,
+ tuned_model_location="us-central1",
+ learning_rate_multiplier=2.0,
+ train_steps=10,
+ tuning_evaluation_spec=preview_language_models.TuningEvaluationSpec(
+ evaluation_data=evaluation_data_uri,
+ evaluation_interval=evaluation_interval,
+ enable_early_stopping=enable_early_stopping,
+ enable_checkpoint_selection=enable_checkpoint_selection,
+ tensorboard=tensorboard_name,
+ ),
+ accelerator_type="TPU",
+ max_context_length="32k",
+ )
+ call_kwargs = mock_pipeline_service_create.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["learning_rate_multiplier"] == 2.0
+ assert pipeline_arguments["train_steps"] == 10
+ assert pipeline_arguments["evaluation_data_uri"] == evaluation_data_uri
+ assert pipeline_arguments["evaluation_interval"] == evaluation_interval
+ assert pipeline_arguments["enable_early_stopping"] == enable_early_stopping
+ assert (
+ pipeline_arguments["enable_checkpoint_selection"]
+ == enable_checkpoint_selection
+ )
+ assert pipeline_arguments["tensorboard_resource_id"] == tensorboard_name
+ assert pipeline_arguments["large_model_reference"] == "text-bison@001"
+ assert pipeline_arguments["accelerator_type"] == "TPU"
+ assert pipeline_arguments["max_context_length"] == "32k"
+ assert (
+ call_kwargs["pipeline_job"].encryption_spec.kms_key_name
+ == _TEST_ENCRYPTION_KEY_NAME
+ )
+
+ # Testing the tuned model
+ tuned_model = tuning_job.get_tuned_model()
+ assert (
+ tuned_model._endpoint_name
+ == test_constants.EndpointConstants._TEST_ENDPOINT_NAME
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_tune_text_generation_model_evaluation_with_only_tensorboard(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen,
+ mock_get_tuned_model,
+ ):
+ """Tests tuning the text generation model."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ tuning_job_location = "europe-west4"
+ tensorboard_name = _get_test_tensorboard_resource_id(
+ location=tuning_job_location
+ )
+
+ model.tune_model(
+ training_data=_TEST_TEXT_BISON_TRAINING_DF,
+ tuning_job_location=tuning_job_location,
+ tuned_model_location="us-central1",
+ tuning_evaluation_spec=preview_language_models.TuningEvaluationSpec(
+ tensorboard=tensorboard_name,
+ ),
+ )
+ call_kwargs = mock_pipeline_service_create.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["tensorboard_resource_id"] == tensorboard_name
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_tune_text_generation_model_staging_bucket(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen,
+ mock_get_tuned_model,
+ ):
+ """Tests that tune_model respects staging_bucket."""
+ TEST_STAGING_BUCKET = "gs://test_staging_bucket/path/"
+ aiplatform.init(staging_bucket=TEST_STAGING_BUCKET)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ model.tune_model(
+ training_data=_TEST_TEXT_BISON_TRAINING_DF,
+ tuning_job_location="europe-west4",
+ tuned_model_location="us-central1",
+ )
+ call_kwargs = mock_pipeline_service_create.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["dataset_uri"].startswith(TEST_STAGING_BUCKET)
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_tune_chat_model(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen,
+ mock_get_tuned_model,
+ ):
+ """Tests tuning a chat model."""
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.ChatModel.from_pretrained("chat-bison@001")
+
+ tuning_job_location = "europe-west4"
+ tensorboard_name = _get_test_tensorboard_resource_id(
+ location=tuning_job_location
+ )
+
+ default_context = "Default context"
+ tuning_job = model.tune_model(
+ training_data=_TEST_TEXT_BISON_TRAINING_DF,
+ tuning_job_location="europe-west4",
+ tuned_model_location="us-central1",
+ default_context=default_context,
+ tuning_evaluation_spec=preview_language_models.TuningEvaluationSpec(
+ tensorboard=tensorboard_name,
+ ),
+ accelerator_type="TPU",
+ )
+ call_kwargs = mock_pipeline_service_create.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["large_model_reference"] == "chat-bison@001"
+ assert pipeline_arguments["default_context"] == default_context
+ assert pipeline_arguments["accelerator_type"] == "TPU"
+ assert pipeline_arguments["tensorboard_resource_id"] == tensorboard_name
+
+ # Testing the tuned model
+ tuned_model = tuning_job.get_tuned_model()
+ assert (
+ tuned_model._endpoint_name
+ == test_constants.EndpointConstants._TEST_ENDPOINT_NAME
+ )
+
+ unsupported_tuning_evaluation_spec_att = (
+ {"evaluation_data": "gs://bucket/eval.jsonl"},
+ {"evaluation_interval": 37},
+ {"enable_early_stopping": True},
+ {"enable_checkpoint_selection": True},
+ )
+ for unsupported_att in unsupported_tuning_evaluation_spec_att:
+ unsupported_tuning_evaluation_spec = (
+ preview_language_models.TuningEvaluationSpec(**unsupported_att)
+ )
+ with pytest.raises(AttributeError):
+ model.tune_model(
+ training_data=_TEST_TEXT_BISON_TRAINING_DF,
+ tuning_job_location="europe-west4",
+ tuned_model_location="us-central1",
+ default_context=default_context,
+ tuning_evaluation_spec=unsupported_tuning_evaluation_spec,
+ accelerator_type="TPU",
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_tune_code_generation_model(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen,
+ mock_get_tuned_model,
+ ):
+ """Tests tuning a code generation model."""
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODE_GENERATION_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.CodeGenerationModel.from_pretrained(
+ "code-bison@001"
+ )
+ # The tune_model call needs to be inside the PublisherModel mock
+ # since it gets a new PublisherModel when tuning completes.
+ model.tune_model(
+ training_data=_TEST_TEXT_BISON_TRAINING_DF,
+ tuning_job_location="europe-west4",
+ tuned_model_location="us-central1",
+ accelerator_type="TPU",
+ )
+ call_kwargs = mock_pipeline_service_create.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["large_model_reference"] == "code-bison@001"
+ assert pipeline_arguments["accelerator_type"] == "TPU"
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_tune_code_chat_model(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen,
+ mock_get_tuned_model,
+ ):
+ """Tests tuning a code chat model."""
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODECHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.CodeChatModel.from_pretrained("codechat-bison@001")
+
+ tuning_job_location = "europe-west4"
+ tensorboard_name = _get_test_tensorboard_resource_id(
+ location=tuning_job_location
+ )
+
+ # The tune_model call needs to be inside the PublisherModel mock
+ # since it gets a new PublisherModel when tuning completes.
+ model.tune_model(
+ training_data=_TEST_TEXT_BISON_TRAINING_DF,
+ tuning_job_location="europe-west4",
+ tuned_model_location="us-central1",
+ tuning_evaluation_spec=preview_language_models.TuningEvaluationSpec(
+ tensorboard=tensorboard_name,
+ ),
+ accelerator_type="TPU",
+ )
+ call_kwargs = mock_pipeline_service_create.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["large_model_reference"] == "codechat-bison@001"
+ assert pipeline_arguments["accelerator_type"] == "TPU"
+ assert pipeline_arguments["tensorboard_resource_id"] == tensorboard_name
+
+ unsupported_tuning_evaluation_spec_att = (
+ {"evaluation_data": "gs://bucket/eval.jsonl"},
+ {"evaluation_interval": 37},
+ {"enable_early_stopping": True},
+ {"enable_checkpoint_selection": True},
+ )
+ for unsupported_att in unsupported_tuning_evaluation_spec_att:
+ unsupported_tuning_evaluation_spec = (
+ preview_language_models.TuningEvaluationSpec(**unsupported_att)
+ )
+ with pytest.raises(AttributeError):
+ model.tune_model(
+ training_data=_TEST_TEXT_BISON_TRAINING_DF,
+ tuning_job_location="europe-west4",
+ tuned_model_location="us-central1",
+ tuning_evaluation_spec=unsupported_tuning_evaluation_spec,
+ accelerator_type="TPU",
+ )
+
+ @pytest.mark.usefixtures(
+ "get_model_with_tuned_version_label_mock",
+ "get_endpoint_with_models_mock",
+ )
+ def test_get_tuned_model(
+ self,
+ ):
+ """Tests getting a tuned model"""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ tuned_model = preview_language_models.TextGenerationModel.get_tuned_model(
+ test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+
+ assert (
+ tuned_model._model_resource_name
+ == test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+
+ @pytest.mark.usefixtures(
+ "get_model_mock",
+ )
+ def get_tuned_model_raises_if_not_called_with_mg_model(self):
+ """Tests getting a tuned model raises if not called with a Model trained from Model Garden."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ with pytest.raises(ValueError):
+ preview_language_models.TextGenerationModel.get_tuned_model(
+ test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_RLHF_PIPELINE_SPEC_JSON],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen_rlhf",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_tune_text_generation_model_rlhf(
+ self,
+ mock_pipeline_service_create_rlhf,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen_rlhf,
+ mock_get_tuned_model,
+ ):
+ """Tests tuning a text generation model using RLHF."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ # Define tuning parameters that should get passed to the pipeline job:
+ large_model_reference = "text-bison@001"
+ model_display_name = "rlhf-tuned-model"
+ prompt_sequence_length = 1024
+ target_sequence_length = 128
+ reward_model_learning_rate_multiplier = 2.0
+ reinforcement_learning_rate_multiplier = 0.5
+ reward_model_train_steps = 1
+ reinforcement_learning_train_steps = 2
+ kl_coeff = 0.3
+ tensorboard_resource_id = _get_test_tensorboard_resource_id()
+ eval_dataset = "gs://bucket/eval.jsonl"
+ accelerator_type = "TPU"
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ model_name=large_model_reference,
+ )
+ _ = model.tune_model_rlhf(
+ prompt_data=_TEST_TEXT_BISON_TRAINING_DF,
+ preference_data=_TEST_TEXT_BISON_PREFERENCE_TRAINING_DF,
+ model_display_name=model_display_name,
+ prompt_sequence_length=prompt_sequence_length,
+ target_sequence_length=target_sequence_length,
+ reward_model_learning_rate_multiplier=reward_model_learning_rate_multiplier,
+ reinforcement_learning_rate_multiplier=reinforcement_learning_rate_multiplier,
+ reward_model_train_steps=reward_model_train_steps,
+ reinforcement_learning_train_steps=reinforcement_learning_train_steps,
+ kl_coeff=kl_coeff,
+ accelerator_type=accelerator_type,
+ tuning_evaluation_spec=preview_language_models.TuningEvaluationSpec(
+ tensorboard=tensorboard_resource_id,
+ evaluation_data=eval_dataset,
+ ),
+ )
+ call_kwargs = mock_pipeline_service_create_rlhf.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["large_model_reference"] == large_model_reference
+ assert pipeline_arguments["model_display_name"] == model_display_name
+ assert (
+ pipeline_arguments["prompt_sequence_length"] == prompt_sequence_length
+ )
+ assert (
+ pipeline_arguments["target_sequence_length"] == target_sequence_length
+ )
+ assert (
+ pipeline_arguments["reward_model_learning_rate_multiplier"]
+ == reward_model_learning_rate_multiplier
+ )
+ assert (
+ pipeline_arguments["reinforcement_learning_rate_multiplier"]
+ == reinforcement_learning_rate_multiplier
+ )
+ assert (
+ pipeline_arguments["reward_model_train_steps"]
+ == reward_model_train_steps
+ )
+ assert (
+ pipeline_arguments["reinforcement_learning_train_steps"]
+ == reinforcement_learning_train_steps
+ )
+ assert pipeline_arguments["kl_coeff"] == kl_coeff
+ assert (
+ pipeline_arguments["tensorboard_resource_id"] == tensorboard_resource_id
+ )
+ assert pipeline_arguments["eval_dataset"] == eval_dataset
+ assert pipeline_arguments["accelerator_type"] == "TPU"
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_RLHF_PIPELINE_SPEC_JSON, _TEST_RLHF_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen_rlhf",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_tune_chat_generation_model_rlhf(
+ self,
+ mock_pipeline_service_create_rlhf,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_request_urlopen_rlhf,
+ mock_get_tuned_model,
+ ):
+ """Tests tuning a chat model using RLHF."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ large_model_reference = "chat-bison@001"
+ model = language_models.TextGenerationModel.from_pretrained(
+ model_name=large_model_reference,
+ )
+ _ = model.tune_model_rlhf(
+ prompt_data="gs://bucket/prompt.jsonl",
+ preference_data="gs://bucket/preference.jsonl",
+ )
+ call_kwargs = mock_pipeline_service_create_rlhf.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["large_model_reference"] == large_model_reference
+
+ def test_tune_model_rlhf_raises_if_called_with_unsupported_region(self):
+ """Tests RLHF tuning raises if called from an unsupported region."""
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+ unsupported_location = "unsupported-location1"
+ with pytest.raises(ValueError) as excinfo:
+ model.tune_model_rlhf(
+ prompt_data="gs://bucket/prompt.jsonl",
+ preference_data="gs://bucket/preference.jsonl",
+ tuning_job_location=unsupported_location,
+ )
+ expected_msg = _language_models._get_invalid_tuning_location_msg(
+ requested_location=unsupported_location,
+ valid_locations=_language_models._SUPPORTED_RLHF_LOCATIONS,
+ )
+ assert excinfo.exception.message == expected_msg
+
+ def test_tune_model_rlhf_raises_if_called_with_unsupported_model(self):
+ """Tests RLHF tuning raises if called from an unsupported model."""
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ # Pick a valid model id that is not supported by RLHF:
+ unsupported_model_id = "codechat-bison@001"
+ assert unsupported_model_id not in _language_models._SUPPORTED_RLHF_MODELS
+
+ model = language_models.TextGenerationModel.from_pretrained(
+ unsupported_model_id
+ )
+ with pytest.raises(ValueError) as excinfo:
+ model.tune_model_rlhf(
+ prompt_data="gs://bucket/prompt.jsonl",
+ preference_data="gs://bucket/preference.jsonl",
+ )
+ expected_msg = _language_models._get_invalid_rlhf_model_msg(
+ requested_model=self._model_id,
+ )
+ assert excinfo.exception.message == expected_msg
+
+ @pytest.mark.parametrize("unused_key", _language_models._UNUSED_RLHF_EVAL_SPECS)
+ def test_tune_model_rlhf_raises_if_called_with_unused_evaluation_spec(
+ self,
+ unused_key,
+ ):
+ """Tests RLHF tuning raises if called from an unused evaluation spec."""
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+ eval_spec = _language_models.TuningEvaluationSpec()
+ setattr(eval_spec, unused_key, "invalid")
+ with pytest.raises(AttributeError) as excinfo:
+ model.tune_model_rlhf(
+ prompt_data="gs://bucket/prompt.jsonl",
+ preference_data="gs://bucket/preference.jsonl",
+ tuning_evaluation_spec=eval_spec,
+ )
+ expected_msg = _language_models._get_unused_rlhf_eval_spec_error_msg(
+ unused_key=unused_key,
+ )
+ assert excinfo.exception.message == expected_msg
+
+ def test_chat(self):
+ """Tests the chat generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = preview_language_models.ChatModel.from_pretrained("chat-bison@001")
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/chat-bison@001", retry=base._DEFAULT_RETRY
+ )
+
+ chat = model.start_chat(
+ context="""
+ My name is Ned.
+ You are my personal assistant.
+ My favorite movies are Lord of the Rings and Hobbit.
+ """,
+ examples=[
+ preview_language_models.InputOutputTextPair(
+ input_text="Who do you work for?",
+ output_text="I work for Ned.",
+ ),
+ preview_language_models.InputOutputTextPair(
+ input_text="What do I like?",
+ output_text="Ned likes watching movies.",
+ ),
+ ],
+ message_history=[
+ preview_language_models.ChatMessage(
+ author=preview_language_models.ChatSession.USER_AUTHOR,
+ content="Question 1?",
+ ),
+ preview_language_models.ChatMessage(
+ author=preview_language_models.ChatSession.MODEL_AUTHOR,
+ content="Answer 1.",
+ ),
+ ],
+ temperature=0.0,
+ )
+
+ gca_predict_response1 = gca_prediction_service.PredictResponse()
+ gca_predict_response1.predictions.append(_TEST_CHAT_GENERATION_PREDICTION1)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response1,
+ ):
+ message_text1 = "Are my favorite movies based on a book series?"
+ expected_response1 = _TEST_CHAT_GENERATION_PREDICTION1["candidates"][0][
+ "content"
+ ]
+ response = chat.send_message(message_text1)
+ assert response.text == expected_response1
+ assert len(chat.message_history) == 4
+ assert chat.message_history[2].author == chat.USER_AUTHOR
+ assert chat.message_history[2].content == message_text1
+ assert chat.message_history[3].author == chat.MODEL_AUTHOR
+ assert chat.message_history[3].content == expected_response1
+
+ gca_predict_response2 = gca_prediction_service.PredictResponse()
+ gca_predict_response2.predictions.append(_TEST_CHAT_GENERATION_PREDICTION2)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response2,
+ ):
+ message_text2 = "When were these books published?"
+ expected_response2 = _TEST_CHAT_GENERATION_PREDICTION2["candidates"][0][
+ "content"
+ ]
+ response = chat.send_message(message_text2, temperature=0.1)
+ assert response.text == expected_response2
+ assert len(chat.message_history) == 6
+ assert chat.message_history[4].author == chat.USER_AUTHOR
+ assert chat.message_history[4].content == message_text2
+ assert chat.message_history[5].author == chat.MODEL_AUTHOR
+ assert chat.message_history[5].content == expected_response2
+
+ # Validating the parameters
+ chat_temperature = 0.1
+ chat_max_output_tokens = 100
+ chat_top_k = 1
+ chat_top_p = 0.1
+ message_temperature = 0.2
+ message_max_output_tokens = 200
+ message_top_k = 2
+ message_top_p = 0.2
+
+ chat2 = model.start_chat(
+ temperature=chat_temperature,
+ max_output_tokens=chat_max_output_tokens,
+ top_k=chat_top_k,
+ top_p=chat_top_p,
+ )
+
+ gca_predict_response3 = gca_prediction_service.PredictResponse()
+ gca_predict_response3.predictions.append(_TEST_CHAT_GENERATION_PREDICTION1)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response3,
+ ) as mock_predict3:
+ chat2.send_message("Are my favorite movies based on a book series?")
+ prediction_parameters = mock_predict3.call_args[1]["parameters"]
+ assert prediction_parameters["temperature"] == chat_temperature
+ assert prediction_parameters["maxDecodeSteps"] == chat_max_output_tokens
+ assert prediction_parameters["topK"] == chat_top_k
+ assert prediction_parameters["topP"] == chat_top_p
+
+ chat2.send_message(
+ "Are my favorite movies based on a book series?",
+ temperature=message_temperature,
+ max_output_tokens=message_max_output_tokens,
+ top_k=message_top_k,
+ top_p=message_top_p,
+ )
+ prediction_parameters = mock_predict3.call_args[1]["parameters"]
+ assert prediction_parameters["temperature"] == message_temperature
+ assert prediction_parameters["maxDecodeSteps"] == message_max_output_tokens
+ assert prediction_parameters["topK"] == message_top_k
+ assert prediction_parameters["topP"] == message_top_p
+
+ gca_predict_response4 = gca_prediction_service.PredictResponse()
+ gca_predict_response4.predictions.append(
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION_GROUNDING
+ )
+ test_grounding_sources = [
+ _TEST_GROUNDING_WEB_SEARCH,
+ _TEST_GROUNDING_VERTEX_AI_SEARCH_DATASTORE,
+ ]
+ datastore_path = (
+ "projects/test-project/locations/global/"
+ "collections/default_collection/dataStores/test_datastore"
+ )
+ expected_grounding_sources = [
+ {
+ "sources": [
+ {
+ "type": "WEB",
+ }
+ ],
+ "disableAttribution": False,
+ },
+ {
+ "sources": [
+ {
+ "type": "VERTEX_AI_SEARCH",
+ "vertexAiSearchDatastore": datastore_path,
+ }
+ ],
+ "disableAttribution": False,
+ },
+ ]
+ for test_grounding_source, expected_grounding_source in zip(
+ test_grounding_sources, expected_grounding_sources
+ ):
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response4,
+ ) as mock_predict4:
+ response = chat2.send_message(
+ "Are my favorite movies based on a book series?",
+ grounding_source=test_grounding_source,
+ )
+ prediction_parameters = mock_predict4.call_args[1]["parameters"]
+ assert (
+ prediction_parameters["groundingConfig"]
+ == expected_grounding_source
+ )
+ assert (
+ dataclasses.asdict(response.grounding_metadata)
+ == _EXPECTED_PARSED_GROUNDING_METADATA_CHAT
+ )
+
+ gca_predict_response5 = gca_prediction_service.PredictResponse()
+ gca_predict_response5.predictions.append(
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION_GROUNDING_NONE
+ )
+ test_grounding_sources = [
+ _TEST_GROUNDING_WEB_SEARCH,
+ _TEST_GROUNDING_VERTEX_AI_SEARCH_DATASTORE,
+ ]
+ datastore_path = (
+ "projects/test-project/locations/global/"
+ "collections/default_collection/dataStores/test_datastore"
+ )
+ expected_grounding_sources = [
+ {
+ "sources": [
+ {
+ "type": "WEB",
+ }
+ ],
+ "disableAttribution": False,
+ },
+ {
+ "sources": [
+ {
+ "type": "VERTEX_AI_SEARCH",
+ "vertexAiSearchDatastore": datastore_path,
+ }
+ ],
+ "disableAttribution": False,
+ },
+ ]
+ for test_grounding_source, expected_grounding_source in zip(
+ test_grounding_sources, expected_grounding_sources
+ ):
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response5,
+ ) as mock_predict5:
+ response = chat2.send_message(
+ "Are my favorite movies based on a book series?",
+ grounding_source=test_grounding_source,
+ )
+ prediction_parameters = mock_predict5.call_args[1]["parameters"]
+ assert (
+ prediction_parameters["groundingConfig"]
+ == expected_grounding_source
+ )
+ assert (
+ dataclasses.asdict(response.grounding_metadata)
+ == _EXPECTED_PARSED_GROUNDING_METADATA_CHAT_NONE
+ )
+
+ @pytest.mark.asyncio
+ async def test_chat_async(self):
+ """Test the chat generation model async api."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = preview_language_models.ChatModel.from_pretrained("chat-bison@001")
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/chat-bison@001", retry=base._DEFAULT_RETRY
+ )
+ chat_temperature = 0.1
+ chat_max_output_tokens = 100
+ chat_top_k = 1
+ chat_top_p = 0.1
+
+ chat = model.start_chat(
+ temperature=chat_temperature,
+ max_output_tokens=chat_max_output_tokens,
+ top_k=chat_top_k,
+ top_p=chat_top_p,
+ )
+
+ gca_predict_response6 = gca_prediction_service.PredictResponse()
+ gca_predict_response6.predictions.append(
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION_GROUNDING
+ )
+ test_grounding_sources = [
+ _TEST_GROUNDING_WEB_SEARCH,
+ _TEST_GROUNDING_VERTEX_AI_SEARCH_DATASTORE,
+ ]
+ datastore_path = (
+ "projects/test-project/locations/global/"
+ "collections/default_collection/dataStores/test_datastore"
+ )
+ expected_grounding_sources = [
+ {
+ "sources": [
+ {
+ "type": "WEB",
+ }
+ ],
+ "disableAttribution": False,
+ },
+ {
+ "sources": [
+ {
+ "type": "VERTEX_AI_SEARCH",
+ "vertexAiSearchDatastore": datastore_path,
+ }
+ ],
+ "disableAttribution": False,
+ },
+ ]
+ for test_grounding_source, expected_grounding_source in zip(
+ test_grounding_sources, expected_grounding_sources
+ ):
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="predict",
+ return_value=gca_predict_response6,
+ ) as mock_predict6:
+ response = await chat.send_message_async(
+ "Are my favorite movies based on a book series?",
+ grounding_source=test_grounding_source,
+ )
+ prediction_parameters = mock_predict6.call_args[1]["parameters"]
+ assert prediction_parameters["temperature"] == chat_temperature
+ assert prediction_parameters["maxDecodeSteps"] == chat_max_output_tokens
+ assert prediction_parameters["topK"] == chat_top_k
+ assert prediction_parameters["topP"] == chat_top_p
+ assert (
+ prediction_parameters["groundingConfig"]
+ == expected_grounding_source
+ )
+ assert (
+ dataclasses.asdict(response.grounding_metadata)
+ == _EXPECTED_PARSED_GROUNDING_METADATA_CHAT
+ )
+
+ gca_predict_response7 = gca_prediction_service.PredictResponse()
+ gca_predict_response7.predictions.append(
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION_GROUNDING_NONE
+ )
+ test_grounding_sources = [
+ _TEST_GROUNDING_WEB_SEARCH,
+ _TEST_GROUNDING_VERTEX_AI_SEARCH_DATASTORE,
+ ]
+ datastore_path = (
+ "projects/test-project/locations/global/"
+ "collections/default_collection/dataStores/test_datastore"
+ )
+ expected_grounding_sources = [
+ {
+ "sources": [
+ {
+ "type": "WEB",
+ }
+ ],
+ "disableAttribution": False,
+ },
+ {
+ "sources": [
+ {
+ "type": "VERTEX_AI_SEARCH",
+ "vertexAiSearchDatastore": datastore_path,
+ }
+ ],
+ "disableAttribution": False,
+ },
+ ]
+ for test_grounding_source, expected_grounding_source in zip(
+ test_grounding_sources, expected_grounding_sources
+ ):
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="predict",
+ return_value=gca_predict_response7,
+ ) as mock_predict7:
+ response = await chat.send_message_async(
+ "Are my favorite movies based on a book series?",
+ grounding_source=test_grounding_source,
+ )
+ prediction_parameters = mock_predict7.call_args[1]["parameters"]
+ assert (
+ prediction_parameters["groundingConfig"]
+ == expected_grounding_source
+ )
+ assert (
+ dataclasses.asdict(response.grounding_metadata)
+ == _EXPECTED_PARSED_GROUNDING_METADATA_CHAT_NONE
+ )
+
+ def test_chat_ga(self):
+ """Tests the chat generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = language_models.ChatModel.from_pretrained("chat-bison@001")
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/chat-bison@001", retry=base._DEFAULT_RETRY
+ )
+
+ chat = model.start_chat(
+ context="""
+ My name is Ned.
+ You are my personal assistant.
+ My favorite movies are Lord of the Rings and Hobbit.
+ """,
+ examples=[
+ language_models.InputOutputTextPair(
+ input_text="Who do you work for?",
+ output_text="I work for Ned.",
+ ),
+ language_models.InputOutputTextPair(
+ input_text="What do I like?",
+ output_text="Ned likes watching movies.",
+ ),
+ ],
+ message_history=[
+ language_models.ChatMessage(
+ author=preview_language_models.ChatSession.USER_AUTHOR,
+ content="Question 1?",
+ ),
+ language_models.ChatMessage(
+ author=preview_language_models.ChatSession.MODEL_AUTHOR,
+ content="Answer 1.",
+ ),
+ ],
+ temperature=0.0,
+ )
+
+ gca_predict_response1 = gca_prediction_service.PredictResponse()
+ gca_predict_response1.predictions.append(_TEST_CHAT_GENERATION_PREDICTION1)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response1,
+ ):
+ message_text1 = "Are my favorite movies based on a book series?"
+ expected_response1 = _TEST_CHAT_GENERATION_PREDICTION1["candidates"][0][
+ "content"
+ ]
+ response = chat.send_message(message_text1)
+ assert response.text == expected_response1
+ assert len(chat.message_history) == 4
+ assert chat.message_history[2].author == chat.USER_AUTHOR
+ assert chat.message_history[2].content == message_text1
+ assert chat.message_history[3].author == chat.MODEL_AUTHOR
+ assert chat.message_history[3].content == expected_response1
+
+ gca_predict_response2 = gca_prediction_service.PredictResponse()
+ gca_predict_response2.predictions.append(_TEST_CHAT_GENERATION_PREDICTION2)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response2,
+ ):
+ message_text2 = "When were these books published?"
+ expected_response2 = _TEST_CHAT_GENERATION_PREDICTION2["candidates"][0][
+ "content"
+ ]
+ response = chat.send_message(message_text2, temperature=0.1)
+ assert response.text == expected_response2
+ assert len(chat.message_history) == 6
+ assert chat.message_history[4].author == chat.USER_AUTHOR
+ assert chat.message_history[4].content == message_text2
+ assert chat.message_history[5].author == chat.MODEL_AUTHOR
+ assert chat.message_history[5].content == expected_response2
+
+ # Validating the parameters
+ chat_temperature = 0.1
+ chat_max_output_tokens = 100
+ chat_top_k = 1
+ chat_top_p = 0.1
+ stop_sequences = ["\n"]
+ message_temperature = 0.2
+ message_max_output_tokens = 200
+ message_top_k = 2
+ message_top_p = 0.2
+ message_stop_sequences = ["# %%"]
+
+ chat2 = model.start_chat(
+ temperature=chat_temperature,
+ max_output_tokens=chat_max_output_tokens,
+ top_k=chat_top_k,
+ top_p=chat_top_p,
+ stop_sequences=stop_sequences,
+ )
+
+ gca_predict_response3 = gca_prediction_service.PredictResponse()
+ gca_predict_response3.predictions.append(_TEST_CHAT_GENERATION_PREDICTION1)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response3,
+ ) as mock_predict3:
+ chat2.send_message("Are my favorite movies based on a book series?")
+ prediction_parameters = mock_predict3.call_args[1]["parameters"]
+ assert prediction_parameters["temperature"] == chat_temperature
+ assert prediction_parameters["maxDecodeSteps"] == chat_max_output_tokens
+ assert prediction_parameters["topK"] == chat_top_k
+ assert prediction_parameters["topP"] == chat_top_p
+ assert prediction_parameters["stopSequences"] == stop_sequences
+
+ chat2.send_message(
+ "Are my favorite movies based on a book series?",
+ temperature=message_temperature,
+ max_output_tokens=message_max_output_tokens,
+ top_k=message_top_k,
+ top_p=message_top_p,
+ stop_sequences=message_stop_sequences,
+ )
+ prediction_parameters = mock_predict3.call_args[1]["parameters"]
+ assert prediction_parameters["temperature"] == message_temperature
+ assert prediction_parameters["maxDecodeSteps"] == message_max_output_tokens
+ assert prediction_parameters["topK"] == message_top_k
+ assert prediction_parameters["topP"] == message_top_p
+ assert prediction_parameters["stopSequences"] == message_stop_sequences
+
+ def test_chat_model_send_message_with_multiple_candidates(self):
+ """Tests the chat generation model with multiple candidates."""
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = language_models.ChatModel.from_pretrained("chat-bison@001")
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/chat-bison@001", retry=base._DEFAULT_RETRY
+ )
+
+ chat = model.start_chat()
+
+ gca_predict_response1 = gca_prediction_service.PredictResponse()
+ gca_predict_response1.predictions.append(
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response1,
+ ):
+ message_text1 = "Are my favorite movies based on a book series?"
+ expected_response_candidates = (
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION["candidates"]
+ )
+ expected_candidate_0 = expected_response_candidates[0]["content"]
+ expected_candidate_1 = expected_response_candidates[1]["content"]
+ expected_errors_0 = ()
+ expected_errors_1 = (100,)
+
+ response = chat.send_message(message_text1, candidate_count=2)
+ assert response.text == expected_candidate_0
+ assert len(response.candidates) == 2
+ assert response.candidates[0].text == expected_candidate_0
+ assert response.candidates[1].text == expected_candidate_1
+ assert response.candidates[0].errors == expected_errors_0
+ assert response.candidates[1].errors == expected_errors_1
+
+ assert len(chat.message_history) == 2
+ assert chat.message_history[0].author == chat.USER_AUTHOR
+ assert chat.message_history[0].content == message_text1
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
+ assert chat.message_history[1].content == expected_candidate_0
+
+ def test_chat_model_send_message_streaming(self):
+ """Tests the chat generation model."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.ChatModel.from_pretrained("chat-bison@001")
+
+ chat = model.start_chat(
+ context="""
+ My name is Ned.
+ You are my personal assistant.
+ My favorite movies are Lord of the Rings and Hobbit.
+ """,
+ examples=[
+ language_models.InputOutputTextPair(
+ input_text="Who do you work for?",
+ output_text="I work for Ned.",
+ ),
+ language_models.InputOutputTextPair(
+ input_text="What do I like?",
+ output_text="Ned likes watching movies.",
+ ),
+ ],
+ message_history=[
+ language_models.ChatMessage(
+ author=preview_language_models.ChatSession.USER_AUTHOR,
+ content="Question 1?",
+ ),
+ language_models.ChatMessage(
+ author=preview_language_models.ChatSession.MODEL_AUTHOR,
+ content="Answer 1.",
+ ),
+ ],
+ temperature=0.0,
+ stop_sequences=["\n"],
+ )
+
+ # Using list instead of a generator so that it can be reused.
+ response_generator = [
+ gca_prediction_service.StreamingPredictResponse(
+ outputs=[_streaming_prediction.value_to_tensor(response_dict)]
+ )
+ for response_dict in _TEST_CHAT_PREDICTION_STREAMING
+ ]
+
+ message_temperature = 0.2
+ message_max_output_tokens = 200
+ message_top_k = 2
+ message_top_p = 0.2
+ message_stop_sequences = ["# %%"]
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="server_streaming_predict",
+ return_value=response_generator,
+ ):
+ message_text1 = "Are my favorite movies based on a book series?"
+
+ for idx, response in enumerate(
+ chat.send_message_streaming(
+ message=message_text1,
+ max_output_tokens=message_max_output_tokens,
+ temperature=message_temperature,
+ top_k=message_top_k,
+ top_p=message_top_p,
+ stop_sequences=message_stop_sequences,
+ )
+ ):
+ assert len(response.text) > 10
+ # New messages are not added until the response is fully read
+ if idx + 1 < len(response_generator):
+ assert len(chat.message_history) == 2
+
+ # New messages are only added after the response is fully read
+ assert len(chat.message_history) == 4
+ assert chat.message_history[2].author == chat.USER_AUTHOR
+ assert chat.message_history[2].content == message_text1
+ assert chat.message_history[3].author == chat.MODEL_AUTHOR
+
+ def test_chat_model_preview_count_tokens(self):
+ """Tests the text generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.ChatModel.from_pretrained("chat-bison@001")
+
+ chat = model.start_chat()
+ assert isinstance(chat, preview_language_models.ChatSession)
+
+ gca_count_tokens_response = gca_prediction_service_v1beta1.CountTokensResponse(
+ total_tokens=_TEST_COUNT_TOKENS_RESPONSE["total_tokens"],
+ total_billable_characters=_TEST_COUNT_TOKENS_RESPONSE[
+ "total_billable_characters"
+ ],
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client_v1beta1.PredictionServiceClient,
+ attribute="count_tokens",
+ return_value=gca_count_tokens_response,
+ ):
+ response = chat.count_tokens("What is the best recipe for banana bread?")
+
+ assert response.total_tokens == _TEST_COUNT_TOKENS_RESPONSE["total_tokens"]
+ assert (
+ response.total_billable_characters
+ == _TEST_COUNT_TOKENS_RESPONSE["total_billable_characters"]
+ )
+
+ def test_code_chat(self):
+ """Tests the code chat model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODECHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = language_models.CodeChatModel.from_pretrained(
+ "google/codechat-bison@001"
+ )
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/codechat-bison@001",
+ retry=base._DEFAULT_RETRY,
+ )
+
+ code_chat = model.start_chat(
+ context="We're working on large-scale production system.",
+ max_output_tokens=128,
+ temperature=0.2,
+ stop_sequences=["\n"],
+ )
+
+ gca_predict_response1 = gca_prediction_service.PredictResponse()
+ gca_predict_response1.predictions.append(_TEST_CHAT_GENERATION_PREDICTION1)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response1,
+ ):
+ response = code_chat.send_message("Hi, how are you?")
+ assert (
+ response.text
+ == _TEST_CHAT_GENERATION_PREDICTION1["candidates"][0]["content"]
+ )
+ assert len(code_chat.message_history) == 2
+
+ gca_predict_response2 = gca_prediction_service.PredictResponse()
+ gca_predict_response2.predictions.append(_TEST_CHAT_GENERATION_PREDICTION2)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response2,
+ ):
+ response = code_chat.send_message(
+ "Please help write a function to calculate the min of two numbers",
+ temperature=0.2,
+ max_output_tokens=256,
+ )
+ assert (
+ response.text
+ == _TEST_CHAT_GENERATION_PREDICTION2["candidates"][0]["content"]
+ )
+ assert len(code_chat.message_history) == 4
+
+ # Validating the parameters
+ chat_temperature = 0.1
+ chat_max_output_tokens = 100
+ chat_stop_sequences = ["\n"]
+ message_temperature = 0.2
+ message_max_output_tokens = 200
+ message_stop_sequences = ["# %%"]
+
+ code_chat2 = model.start_chat(
+ temperature=chat_temperature,
+ max_output_tokens=chat_max_output_tokens,
+ stop_sequences=chat_stop_sequences,
+ )
+
+ gca_predict_response3 = gca_prediction_service.PredictResponse()
+ gca_predict_response3.predictions.append(_TEST_CHAT_GENERATION_PREDICTION1)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response3,
+ ) as mock_predict:
+ code_chat2.send_message(
+ "Please help write a function to calculate the min of two numbers"
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["temperature"] == chat_temperature
+ assert prediction_parameters["maxDecodeSteps"] == chat_max_output_tokens
+ assert prediction_parameters["stopSequences"] == chat_stop_sequences
+
+ code_chat2.send_message(
+ "Please help write a function to calculate the min of two numbers",
+ temperature=message_temperature,
+ max_output_tokens=message_max_output_tokens,
+ stop_sequences=message_stop_sequences,
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["temperature"] == message_temperature
+ assert prediction_parameters["maxDecodeSteps"] == message_max_output_tokens
+ assert prediction_parameters["stopSequences"] == message_stop_sequences
+
+ def test_code_chat_model_send_message_with_multiple_candidates(self):
+ """Tests the code chat model with multiple candidates."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODECHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ autospec=True,
+ ):
+ model = language_models.CodeChatModel.from_pretrained(
+ "google/codechat-bison@001"
+ )
+
+ chat = model.start_chat()
+
+ gca_predict_response1 = gca_prediction_service.PredictResponse()
+ gca_predict_response1.predictions.append(
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response1,
+ autospec=True,
+ ):
+ message_text1 = "Are my favorite movies based on a book series?"
+ expected_response_candidates = (
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION["candidates"]
+ )
+ expected_candidate_0 = expected_response_candidates[0]["content"]
+ expected_candidate_1 = expected_response_candidates[1]["content"]
+
+ response = chat.send_message(
+ message=message_text1,
+ # candidate_count acts as a maximum number, not exact number.
+ candidate_count=7,
+ )
+ # The service can return a different number of candidates.
+ assert response.text == expected_candidate_0
+ assert len(response.candidates) == 2
+ assert response.candidates[0].text == expected_candidate_0
+ assert response.candidates[1].text == expected_candidate_1
+
+ assert len(chat.message_history) == 2
+ assert chat.message_history[0].author == chat.USER_AUTHOR
+ assert chat.message_history[0].content == message_text1
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
+ assert chat.message_history[1].content == expected_candidate_0
+
+ async def test_code_chat_model_send_message_async(self):
+ """Tests the send_message_async method for code chat model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODECHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.CodeChatModel.from_pretrained("codechat-bison@001")
+ chat = model.start_chat()
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ gca_predict_response.predictions.append(
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION
+ )
+ with mock.patch.object(
+ target=prediction_service_async_client.PredictionServiceAsyncClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ autospec=True,
+ ):
+ message_text = "Are my favorite movies based on a book series?"
+ expected_response_candidates = (
+ _TEST_CHAT_GENERATION_MULTI_CANDIDATE_PREDICTION["candidates"]
+ )
+ expected_candidate_0 = expected_response_candidates[0]["content"]
+ expected_candidate_1 = expected_response_candidates[1]["content"]
+
+ response = await chat.send_message_async(
+ message=message_text,
+ )
+ # The service can return a different number of candidates.
+ assert response.text == expected_candidate_0
+ assert len(response.candidates) == 2
+ assert response.candidates[0].text == expected_candidate_0
+ assert response.candidates[1].text == expected_candidate_1
+
+ assert len(chat.message_history) == 2
+ assert chat.message_history[0].author == chat.USER_AUTHOR
+ assert chat.message_history[0].content == message_text
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
+ assert chat.message_history[1].content == expected_candidate_0
+
+ def test_code_chat_model_send_message_streaming(self):
+ """Tests the chat generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODECHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.CodeChatModel.from_pretrained("codechat-bison@001")
+
+ chat = model.start_chat(temperature=0.0, stop_sequences=["\n"])
+
+ # Using list instead of a generator so that it can be reused.
+ response_generator = [
+ gca_prediction_service.StreamingPredictResponse(
+ outputs=[_streaming_prediction.value_to_tensor(response_dict)]
+ )
+ for response_dict in _TEST_CHAT_PREDICTION_STREAMING
+ ]
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="server_streaming_predict",
+ return_value=response_generator,
+ ):
+ message_text1 = (
+ "Please help write a function to calculate the max of two numbers"
+ )
+ # New messages are not added until the response is fully read
+ assert not chat.message_history
+ for response in chat.send_message_streaming(message_text1):
+ assert len(response.text) > 10
+ # New messages are only added after the response is fully read
+ assert chat.message_history
+
+ assert len(chat.message_history) == 2
+ assert chat.message_history[0].author == chat.USER_AUTHOR
+ assert chat.message_history[0].content == message_text1
+ assert chat.message_history[1].author == chat.MODEL_AUTHOR
+
+ def test_code_chat_model_preview_count_tokens(self):
+ """Tests the text generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODECHAT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.CodeChatModel.from_pretrained(
+ "codechat-bison@001"
+ )
+
+ chat = model.start_chat()
+ assert isinstance(chat, preview_language_models.CodeChatSession)
+
+ gca_count_tokens_response = gca_prediction_service_v1beta1.CountTokensResponse(
+ total_tokens=_TEST_COUNT_TOKENS_RESPONSE["total_tokens"],
+ total_billable_characters=_TEST_COUNT_TOKENS_RESPONSE[
+ "total_billable_characters"
+ ],
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client_v1beta1.PredictionServiceClient,
+ attribute="count_tokens",
+ return_value=gca_count_tokens_response,
+ ):
+ response = chat.count_tokens("What is the best recipe for banana bread?")
+
+ assert response.total_tokens == _TEST_COUNT_TOKENS_RESPONSE["total_tokens"]
+ assert (
+ response.total_billable_characters
+ == _TEST_COUNT_TOKENS_RESPONSE["total_billable_characters"]
+ )
+
+ def test_code_generation(self):
+ """Tests code generation with the code generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODE_GENERATION_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = language_models.CodeGenerationModel.from_pretrained(
+ "google/code-bison@001"
+ )
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/code-bison@001",
+ retry=base._DEFAULT_RETRY,
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ gca_predict_response.predictions.append(_TEST_CODE_GENERATION_PREDICTION)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ):
+ response = model.predict(
+ prefix="Write a function that checks if a year is a leap year.",
+ max_output_tokens=256,
+ temperature=0.2,
+ )
+ assert response.text == _TEST_CODE_GENERATION_PREDICTION["content"]
+ expected_safety_attributes_raw = _TEST_CODE_GENERATION_PREDICTION[
+ "safetyAttributes"
+ ]
+ expected_safety_attributes = dict(
+ zip(
+ expected_safety_attributes_raw["categories"],
+ expected_safety_attributes_raw["scores"],
+ )
+ )
+ assert response.safety_attributes == expected_safety_attributes
+ assert response.is_blocked == expected_safety_attributes_raw["blocked"]
+
+ # Validating the parameters
+ predict_temperature = 0.1
+ predict_max_output_tokens = 100
+ stop_sequences = ["\n"]
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ) as mock_predict:
+ model.predict(
+ prefix="Write a function that checks if a year is a leap year.",
+ max_output_tokens=predict_max_output_tokens,
+ temperature=predict_temperature,
+ stop_sequences=stop_sequences,
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["temperature"] == predict_temperature
+ assert prediction_parameters["maxOutputTokens"] == predict_max_output_tokens
+ assert prediction_parameters["stopSequences"] == stop_sequences
+
+ model.predict(
+ prefix="Write a function that checks if a year is a leap year.",
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert "temperature" not in prediction_parameters
+ assert "maxOutputTokens" not in prediction_parameters
+
+ def test_code_generation_multiple_candidates(self):
+ """Tests the code generation model with multiple candidates."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODE_GENERATION_BISON_PUBLISHER_MODEL_DICT
+ ),
+ autospec=True,
+ ):
+ model = language_models.CodeGenerationModel.from_pretrained(
+ "code-bison@001"
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ # Discrepancy between the number of `instances` and the number of `predictions`
+ # is a violation of the prediction service invariant, but the service does this.
+ gca_predict_response.predictions.append(_TEST_CODE_GENERATION_PREDICTION)
+ gca_predict_response.predictions.append(_TEST_CODE_GENERATION_PREDICTION)
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ autospec=True,
+ ) as mock_predict:
+ response = model.predict(
+ prefix="Write a function that checks if a year is a leap year.",
+ # candidate_count acts as a maximum number, not exact number.
+ candidate_count=7,
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["candidateCount"] == 7
+
+ assert response.text == _TEST_CODE_GENERATION_PREDICTION["content"]
+ # The service can return a different number of candidates.
+ assert len(response.candidates) == 2
+ assert (
+ response.candidates[0].text == _TEST_CODE_GENERATION_PREDICTION["content"]
+ )
+
+ def test_code_generation_preview_count_tokens(self):
+ """Tests the count_tokens method in CodeGenerationModel."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODE_COMPLETION_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.CodeGenerationModel.from_pretrained(
+ "code-gecko@001"
+ )
+
+ gca_count_tokens_response = gca_prediction_service_v1beta1.CountTokensResponse(
+ total_tokens=_TEST_COUNT_TOKENS_RESPONSE["total_tokens"],
+ total_billable_characters=_TEST_COUNT_TOKENS_RESPONSE[
+ "total_billable_characters"
+ ],
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client_v1beta1.PredictionServiceClient,
+ attribute="count_tokens",
+ return_value=gca_count_tokens_response,
+ ):
+ response = model.count_tokens("def reverse_string(s):")
+
+ assert response.total_tokens == _TEST_COUNT_TOKENS_RESPONSE["total_tokens"]
+ assert (
+ response.total_billable_characters
+ == _TEST_COUNT_TOKENS_RESPONSE["total_billable_characters"]
+ )
+
+ def test_code_completion(self):
+ """Tests code completion with the code generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODE_COMPLETION_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = language_models.CodeGenerationModel.from_pretrained(
+ "google/code-gecko@001"
+ )
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/code-gecko@001",
+ retry=base._DEFAULT_RETRY,
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ gca_predict_response.predictions.append(_TEST_CODE_COMPLETION_PREDICTION)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ):
+ response = model.predict(
+ prefix="def reverse_string(s):",
+ max_output_tokens=128,
+ temperature=0.2,
+ )
+ assert response.text == _TEST_CODE_COMPLETION_PREDICTION["content"]
+
+ # Validating the parameters
+ predict_temperature = 0.1
+ predict_max_output_tokens = 100
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ) as mock_predict:
+ model.predict(
+ prefix="def reverse_string(s):",
+ max_output_tokens=predict_max_output_tokens,
+ temperature=predict_temperature,
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert prediction_parameters["temperature"] == predict_temperature
+ assert prediction_parameters["maxOutputTokens"] == predict_max_output_tokens
+
+ model.predict(
+ prefix="def reverse_string(s):",
+ )
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert "temperature" not in prediction_parameters
+ assert "maxOutputTokens" not in prediction_parameters
+
+ def test_code_generation_model_predict_streaming(self):
+ """Tests the TextGenerationModel.predict_streaming method."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODE_GENERATION_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.CodeGenerationModel.from_pretrained(
+ "code-bison@001"
+ )
+
+ response_generator = (
+ gca_prediction_service.StreamingPredictResponse(
+ outputs=[_streaming_prediction.value_to_tensor(response_dict)]
+ )
+ for response_dict in _TEST_TEXT_GENERATION_PREDICTION_STREAMING
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="server_streaming_predict",
+ return_value=response_generator,
+ ):
+ for response in model.predict_streaming(
+ prefix="def reverse_string(s):",
+ suffix=" return s",
+ max_output_tokens=1000,
+ temperature=0.0,
+ ):
+ assert len(response.text) > 10
+
+ def test_text_embedding(self):
+ """Tests the text embedding model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_EMBEDDING_GECKO_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = preview_language_models.TextEmbeddingModel.from_pretrained(
+ "textembedding-gecko@001"
+ )
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/textembedding-gecko@001",
+ retry=base._DEFAULT_RETRY,
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ gca_predict_response.predictions.append(_TEST_TEXT_EMBEDDING_PREDICTION)
+ gca_predict_response.predictions.append(_TEST_TEXT_EMBEDDING_PREDICTION)
+
+ expected_embedding = _TEST_TEXT_EMBEDDING_PREDICTION["embeddings"]
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ) as mock_predict:
+ embeddings = model.get_embeddings(
+ [
+ "What is life?",
+ language_models.TextEmbeddingInput(
+ text="Foo",
+ task_type="RETRIEVAL_DOCUMENT",
+ title="Bar",
+ ),
+ language_models.TextEmbeddingInput(
+ text="Baz",
+ task_type="CLASSIFICATION",
+ ),
+ ],
+ auto_truncate=False,
+ output_dimensionality=3,
+ )
+ prediction_instances = mock_predict.call_args[1]["instances"]
+ assert prediction_instances == [
+ {"content": "What is life?"},
+ {
+ "content": "Foo",
+ "task_type": "RETRIEVAL_DOCUMENT",
+ "title": "Bar",
+ },
+ {
+ "content": "Baz",
+ "task_type": "CLASSIFICATION",
+ },
+ ]
+ prediction_parameters = mock_predict.call_args[1]["parameters"]
+ assert not prediction_parameters["autoTruncate"]
+ assert prediction_parameters["outputDimensionality"] == 3
+ assert embeddings
+ for embedding in embeddings:
+ vector = embedding.values
+ assert len(vector) == _TEXT_EMBEDDING_VECTOR_LENGTH
+ assert vector == expected_embedding["values"]
+ assert (
+ embedding.statistics.token_count
+ == expected_embedding["statistics"]["token_count"]
+ )
+ assert (
+ embedding.statistics.truncated
+ == expected_embedding["statistics"]["truncated"]
+ )
+
+ def test_text_embedding_count_tokens_ga(self):
+ """Tests the text embedding model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_EMBEDDING_GECKO_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextEmbeddingModel.from_pretrained(
+ "textembedding-gecko@001"
+ )
+
+ gca_count_tokens_response = (
+ gca_prediction_service_v1beta1.CountTokensResponse(
+ total_tokens=_TEST_COUNT_TOKENS_RESPONSE["total_tokens"],
+ total_billable_characters=_TEST_COUNT_TOKENS_RESPONSE[
+ "total_billable_characters"
+ ],
+ )
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client_v1beta1.PredictionServiceClient,
+ attribute="count_tokens",
+ return_value=gca_count_tokens_response,
+ ):
+ response = model.count_tokens(["What is life?"])
+
+ assert (
+ response.total_tokens == _TEST_COUNT_TOKENS_RESPONSE["total_tokens"]
+ )
+ assert (
+ response.total_billable_characters
+ == _TEST_COUNT_TOKENS_RESPONSE["total_billable_characters"]
+ )
+
+ def test_text_embedding_count_tokens_preview(self):
+ """Tests the text embedding model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_EMBEDDING_GECKO_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.TextEmbeddingModel.from_pretrained(
+ "textembedding-gecko@001"
+ )
+
+ gca_count_tokens_response = (
+ gca_prediction_service_v1beta1.CountTokensResponse(
+ total_tokens=_TEST_COUNT_TOKENS_RESPONSE["total_tokens"],
+ total_billable_characters=_TEST_COUNT_TOKENS_RESPONSE[
+ "total_billable_characters"
+ ],
+ )
+ )
+
+ with mock.patch.object(
+ target=prediction_service_client_v1beta1.PredictionServiceClient,
+ attribute="count_tokens",
+ return_value=gca_count_tokens_response,
+ ):
+ response = model.count_tokens(["What is life?"])
+
+ assert (
+ response.total_tokens == _TEST_COUNT_TOKENS_RESPONSE["total_tokens"]
+ )
+ assert (
+ response.total_billable_characters
+ == _TEST_COUNT_TOKENS_RESPONSE["total_billable_characters"]
+ )
+
+ def test_text_embedding_ga(self):
+ """Tests the text embedding model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_EMBEDDING_GECKO_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ model = language_models.TextEmbeddingModel.from_pretrained(
+ "textembedding-gecko@001"
+ )
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/textembedding-gecko@001",
+ retry=base._DEFAULT_RETRY,
+ )
+
+ gca_predict_response = gca_prediction_service.PredictResponse()
+ gca_predict_response.predictions.append(_TEST_TEXT_EMBEDDING_PREDICTION)
+
+ with mock.patch.object(
+ target=prediction_service_client.PredictionServiceClient,
+ attribute="predict",
+ return_value=gca_predict_response,
+ ):
+ embeddings = model.get_embeddings(["What is life?"])
+ assert embeddings
+ for embedding in embeddings:
+ vector = embedding.values
+ assert len(vector) == _TEXT_EMBEDDING_VECTOR_LENGTH
+ assert vector == _TEST_TEXT_EMBEDDING_PREDICTION["embeddings"]["values"]
+
+ # Validating that a single string is not accepted.
+ with pytest.raises(TypeError):
+ model.get_embeddings("What is life?")
+
+ def test_batch_prediction(
+ self,
+ get_endpoint_mock,
+ ):
+ """Tests batch prediction."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ with mock.patch.object(
+ target=aiplatform.BatchPredictionJob,
+ attribute="create",
+ ) as mock_create:
+ model.batch_predict(
+ dataset="gs://test-bucket/test_table.jsonl",
+ destination_uri_prefix="gs://test-bucket/results/",
+ model_parameters={"temperature": 0.1},
+ )
+ mock_create.assert_called_once_with(
+ model_name=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/text-bison@001",
+ job_display_name=None,
+ gcs_source="gs://test-bucket/test_table.jsonl",
+ gcs_destination_prefix="gs://test-bucket/results/",
+ model_parameters={"temperature": 0.1},
+ )
+
+ # Testing tuned model batch prediction
+ tuned_model = language_models.TextGenerationModel(
+ model_id=model._model_id,
+ endpoint_name=test_constants.EndpointConstants._TEST_ENDPOINT_NAME,
+ )
+ with mock.patch.object(
+ target=aiplatform.BatchPredictionJob,
+ attribute="create",
+ ) as mock_create:
+ tuned_model.batch_predict(
+ dataset="gs://test-bucket/test_table.jsonl",
+ destination_uri_prefix="gs://test-bucket/results/",
+ model_parameters={"temperature": 0.1},
+ )
+ mock_create.assert_called_once_with(
+ model_name=test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME,
+ job_display_name=None,
+ gcs_source="gs://test-bucket/test_table.jsonl",
+ gcs_destination_prefix="gs://test-bucket/results/",
+ model_parameters={"temperature": 0.1},
+ )
+
+ def test_batch_prediction_for_code_generation(self):
+ """Tests batch prediction."""
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _CODE_GENERATION_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.CodeGenerationModel.from_pretrained(
+ "code-bison@001"
+ )
+
+ with mock.patch.object(
+ target=aiplatform.BatchPredictionJob,
+ attribute="create",
+ ) as mock_create:
+ model.batch_predict(
+ dataset="gs://test-bucket/test_table.jsonl",
+ destination_uri_prefix="gs://test-bucket/results/",
+ model_parameters={},
+ )
+ mock_create.assert_called_once_with(
+ model_name=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/code-bison@001",
+ job_display_name=None,
+ gcs_source="gs://test-bucket/test_table.jsonl",
+ gcs_destination_prefix="gs://test-bucket/results/",
+ model_parameters={},
+ )
+
+ def test_batch_prediction_for_text_embedding(self):
+ """Tests batch prediction."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_EMBEDDING_GECKO_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextEmbeddingModel.from_pretrained(
+ "textembedding-gecko@001"
+ )
+
+ with mock.patch.object(
+ target=aiplatform.BatchPredictionJob,
+ attribute="create",
+ ) as mock_create:
+ model.batch_predict(
+ dataset="gs://test-bucket/test_table.jsonl",
+ destination_uri_prefix="gs://test-bucket/results/",
+ model_parameters={},
+ )
+ mock_create.assert_called_once_with(
+ model_name=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/textembedding-gecko@001",
+ job_display_name=None,
+ gcs_source="gs://test-bucket/test_table.jsonl",
+ gcs_destination_prefix="gs://test-bucket/results/",
+ model_parameters={},
+ )
+
+ def test_batch_prediction_for_text_embedding_preview(self):
+ """Tests batch prediction."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_EMBEDDING_GECKO_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.TextEmbeddingModel.from_pretrained(
+ "textembedding-gecko@001"
+ )
+
+ with mock.patch.object(
+ target=aiplatform.BatchPredictionJob,
+ attribute="create",
+ ) as mock_create:
+ model.batch_predict(
+ dataset="gs://test-bucket/test_table.jsonl",
+ destination_uri_prefix="gs://test-bucket/results/",
+ model_parameters={},
+ )
+ mock_create.assert_called_once_with(
+ model_name=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/textembedding-gecko@001",
+ job_display_name=None,
+ gcs_source="gs://test-bucket/test_table.jsonl",
+ gcs_destination_prefix="gs://test-bucket/results/",
+ model_parameters={},
+ )
+
+
+# TODO (b/285946649): add more test coverage before public preview release
+@pytest.mark.usefixtures("google_auth_mock")
+class TestLanguageModelEvaluation:
+ @pytest.mark.usefixtures(
+ "get_model_with_tuned_version_label_mock",
+ "get_endpoint_with_models_mock",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_EVAL_PIPELINE_SPEC_JSON, _TEST_EVAL_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen_eval",
+ ["https://us-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_model_evaluation_text_generation_task_with_gcs_input(
+ self,
+ job_spec,
+ mock_pipeline_service_create_eval,
+ mock_pipeline_job_get_eval,
+ mock_successfully_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ mock_load_yaml_and_json,
+ mock_request_urlopen_eval,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ my_model = preview_language_models.TextGenerationModel.get_tuned_model(
+ test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+
+ eval_metrics = my_model.evaluate(
+ task_spec=preview_language_models.EvaluationTextGenerationSpec(
+ ground_truth_data="gs://my-bucket/ground-truth.jsonl",
+ ),
+ )
+
+ assert isinstance(eval_metrics, preview_language_models.EvaluationMetric)
+ assert eval_metrics.bleu == _TEST_TEXT_GENERATION_METRICS["bleu"]
+
+ @pytest.mark.usefixtures(
+ "get_model_with_tuned_version_label_mock",
+ "get_endpoint_with_models_mock",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_EVAL_PIPELINE_SPEC_JSON, _TEST_EVAL_PIPELINE_JOB],
+ )
+ def test_populate_eval_template_params(
+ self,
+ job_spec,
+ mock_pipeline_service_create,
+ mock_model_evaluate,
+ mock_pipeline_job_get,
+ mock_successfully_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ my_model = preview_language_models.TextGenerationModel.get_tuned_model(
+ test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+
+ task_spec = preview_language_models.EvaluationTextGenerationSpec(
+ ground_truth_data="gs://my-bucket/ground-truth.jsonl",
+ )
+
+ formatted_template_params = (
+ _evaluatable_language_models._populate_eval_template_params(
+ task_spec=task_spec, model_name=my_model._model_resource_name
+ )
+ )
+
+ assert (
+ "batch_predict_gcs_destination_output_uri" in formatted_template_params
+ )
+ assert "model_name" in formatted_template_params
+ assert "evaluation_task" in formatted_template_params
+
+ # This should only be in the classification task pipeline template
+ assert "evaluation_class_labels" not in formatted_template_params
+ assert "target_column_name" not in formatted_template_params
+
+ @pytest.mark.usefixtures(
+ "get_model_with_tuned_version_label_mock",
+ "get_endpoint_with_models_mock",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_EVAL_PIPELINE_SPEC_JSON, _TEST_EVAL_PIPELINE_JOB],
+ )
+ def test_populate_template_params_for_classification_task(
+ self,
+ job_spec,
+ mock_pipeline_service_create,
+ mock_model_evaluate,
+ mock_pipeline_job_get,
+ mock_successfully_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ my_model = preview_language_models.TextGenerationModel.get_tuned_model(
+ test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+
+ task_spec = preview_language_models.EvaluationTextClassificationSpec(
+ ground_truth_data="gs://my-bucket/ground-truth.jsonl",
+ target_column_name="test_targ_name",
+ class_names=["test_class_name_1", "test_class_name_2"],
+ )
+
+ formatted_template_params = (
+ _evaluatable_language_models._populate_eval_template_params(
+ task_spec=task_spec, model_name=my_model._model_resource_name
+ )
+ )
+
+ assert "evaluation_class_labels" in formatted_template_params
+ assert "target_field_name" in formatted_template_params
+
+ @pytest.mark.usefixtures(
+ "get_model_with_tuned_version_label_mock",
+ "get_endpoint_with_models_mock",
+ "mock_storage_blob_upload_from_filename",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_EVAL_PIPELINE_SPEC_JSON, _TEST_EVAL_PIPELINE_JOB],
+ )
+ def test_populate_template_params_with_dataframe_input(
+ self,
+ job_spec,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_successfully_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ my_model = preview_language_models.TextGenerationModel.get_tuned_model(
+ test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+ )
+
+ task_spec = preview_language_models.EvaluationTextGenerationSpec(
+ ground_truth_data=_TEST_EVAL_DATA_DF,
+ )
+
+ formatted_template_params = (
+ _evaluatable_language_models._populate_eval_template_params(
+ task_spec=task_spec, model_name=my_model._model_resource_name
+ )
+ )
+
+ # The utility method should not modify task_spec
+ assert isinstance(task_spec.ground_truth_data, pd.DataFrame)
+
+ assert (
+ "batch_predict_gcs_destination_output_uri" in formatted_template_params
+ )
+ assert "model_name" in formatted_template_params
+ assert "evaluation_task" in formatted_template_params
+
+ # This should only be in the classification task pipeline template
+ assert "evaluation_class_labels" not in formatted_template_params
+ assert "target_column_name" not in formatted_template_params
+
+ def test_evaluate_raises_on_ga_language_model(
+ self,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ with pytest.raises(AttributeError):
+ model.evaluate()
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_with_models_mock",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_EVAL_PIPELINE_SPEC_JSON, _TEST_EVAL_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen_eval",
+ ["https://us-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_model_evaluation_text_generation_task_on_base_model(
+ self,
+ job_spec,
+ mock_pipeline_service_create_eval,
+ mock_pipeline_job_get_eval,
+ mock_successfully_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ mock_load_yaml_and_json,
+ mock_request_urlopen_eval,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ my_model = preview_language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ eval_metrics = my_model.evaluate(
+ task_spec=preview_language_models.EvaluationTextGenerationSpec(
+ ground_truth_data="gs://my-bucket/ground-truth.jsonl",
+ ),
+ )
+
+ assert isinstance(eval_metrics, preview_language_models.EvaluationMetric)
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_with_models_mock",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_EVAL_CLASSIFICATION_PIPELINE_SPEC_JSON,
+ _TEST_EVAL_CLASSIFICATION_PIPELINE_JOB,
+ ],
+ )
+ @pytest.mark.parametrize(
+ "mock_request_urlopen_eval_classification",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_model_evaluation_text_classification_base_model_only_summary_metrics(
+ self,
+ job_spec,
+ mock_pipeline_service_create_eval_classification,
+ mock_pipeline_job_get_eval_classification,
+ mock_successfully_completed_eval_classification_job,
+ mock_pipeline_bucket_exists,
+ mock_load_yaml_and_json,
+ mock_request_urlopen_eval_classification,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ my_model = preview_language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ eval_metrics = my_model.evaluate(
+ task_spec=preview_language_models.EvaluationTextClassificationSpec(
+ ground_truth_data="gs://my-bucket/ground-truth.jsonl",
+ target_column_name="test_targ_name",
+ class_names=["test_class_name_1", "test_class_name_2"],
+ )
+ )
+
+ assert isinstance(
+ eval_metrics,
+ preview_language_models.EvaluationClassificationMetric,
+ )
+ assert eval_metrics.confidenceMetrics is None
+ assert eval_metrics.auPrc == _TEST_TEXT_CLASSIFICATION_METRICS["auPrc"]
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_DISTILLATION_PIPELINE_SPEC_JSON,
+ ],
+ )
+ @pytest.mark.parametrize(
+ "mock_urllib_request_urlopen",
+ ["https://us-kfp.pkg.dev/ml-pipeline/distillation/distillation/v1.0.0"],
+ indirect=True,
+ )
+ def test_text_generation_model_distill_from(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_job_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_gcs_from_string,
+ mock_gcs_upload,
+ mock_urllib_request_urlopen,
+ mock_get_tuned_model,
+ ):
+ """Tests distilling the text generation model."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ):
+ model = preview_language_models.TextGenerationModel.from_pretrained(
+ "text-bison@001"
+ )
+
+ dataset_uri = "gs://bucket/distillation.training_data.jsonl"
+ evaluation_data_uri = "gs://bucket/eval.jsonl"
+ evaluation_interval = 37
+ enable_early_stopping = True
+ enable_checkpoint_selection = True
+ tensorboard_name = _get_test_tensorboard_resource_id()
+
+ tuning_job = model.distill_from(
+ dataset=dataset_uri,
+ teacher_model="text-unicorn@001",
+ learning_rate_multiplier=2.0,
+ train_steps=10,
+ evaluation_spec=preview_language_models.TuningEvaluationSpec(
+ evaluation_data=evaluation_data_uri,
+ evaluation_interval=evaluation_interval,
+ enable_early_stopping=enable_early_stopping,
+ enable_checkpoint_selection=enable_checkpoint_selection,
+ tensorboard=tensorboard_name,
+ ),
+ accelerator_type="TPU",
+ )
+ call_kwargs = mock_pipeline_service_create.call_args[1]
+ pipeline_arguments = call_kwargs[
+ "pipeline_job"
+ ].runtime_config.parameter_values
+ assert pipeline_arguments["teacher_model_reference"] == "text-unicorn@001"
+ assert pipeline_arguments["student_model_reference"] == "text-bison@001"
+ assert pipeline_arguments["dataset_uri"] == dataset_uri
+ assert pipeline_arguments["project"] == _TEST_PROJECT
+ assert pipeline_arguments["location"] == _TEST_LOCATION
+ assert pipeline_arguments["train_steps"] == 10
+ assert pipeline_arguments["learning_rate_multiplier"] == 2.0
+ assert pipeline_arguments["evaluation_data_uri"] == evaluation_data_uri
+ assert pipeline_arguments["evaluation_interval"] == evaluation_interval
+ assert pipeline_arguments["enable_early_stopping"] == enable_early_stopping
+ assert (
+ pipeline_arguments["enable_checkpoint_selection"]
+ == enable_checkpoint_selection
+ )
+ assert pipeline_arguments["tensorboard_resource_id"] == tensorboard_name
+ assert pipeline_arguments["accelerator_type"] == "TPU"
+ assert (
+ pipeline_arguments["encryption_spec_key_name"]
+ == _TEST_ENCRYPTION_KEY_NAME
+ )
+ assert (
+ call_kwargs["pipeline_job"].encryption_spec.kms_key_name
+ == _TEST_ENCRYPTION_KEY_NAME
+ )
+
+ # Testing the tuned model
+ tuned_model = tuning_job.get_tuned_model()
+ assert (
+ tuned_model._endpoint_name
+ == test_constants.EndpointConstants._TEST_ENDPOINT_NAME
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_logdir_loader.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_logdir_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..e22f41fdd52fd4622cf25adfb3b2671ebe2afca9
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_logdir_loader.py
@@ -0,0 +1,300 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019-2024 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Tests for tensorboard.uploader.logdir_loader."""
+
+import logging
+import os.path
+import shutil
+import tempfile
+
+from google.cloud.aiplatform.tensorboard import logdir_loader
+import tensorflow as tf
+
+from tensorboard.backend.event_processing import directory_loader
+from tensorboard.backend.event_processing import event_file_loader
+from tensorboard.backend.event_processing import io_wrapper
+from tensorboard.compat.proto import event_pb2
+from tensorboard.compat.proto import graph_pb2
+from tensorboard.compat.proto import meta_graph_pb2
+from tensorboard.compat.proto import summary_pb2
+
+
+class FileWriter(tf.compat.v1.summary.FileWriter):
+ """FileWriter for test.
+
+ TensorFlow FileWriter uses TensorFlow's Protobuf Python binding
+ which is largely discouraged in TensorBoard. We do not want a
+ TB.Writer but require one for testing in integrational style
+ (writing out event files and use the real event readers).
+ """
+
+ def __init__(self, *args, **kwargs):
+ # Briefly enter graph mode context so this testing FileWriter can be
+ # created from an eager mode context without triggering a usage error.
+ with tf.compat.v1.Graph().as_default():
+ super(FileWriter, self).__init__(*args, **kwargs)
+
+ def add_test_summary(self, tag, simple_value=1.0, step=None):
+ """Convenience for writing a simple summary for a given tag."""
+ value = summary_pb2.Summary.Value(tag=tag, simple_value=simple_value)
+ summary = summary_pb2.Summary(value=[value])
+ self.add_summary(summary, global_step=step)
+
+ def add_test_tensor_summary(self, tag, tensor, step=None, value_metadata=None):
+ """Convenience for writing a simple summary for a given tag."""
+ value = summary_pb2.Summary.Value(
+ tag=tag, tensor=tensor, metadata=value_metadata
+ )
+ summary = summary_pb2.Summary(value=[value])
+ self.add_summary(summary, global_step=step)
+
+ def add_event(self, event):
+ if isinstance(event, event_pb2.Event):
+ tf_event = tf.compat.v1.Event.FromString(event.SerializeToString())
+ else:
+ tf_event = event
+ if not isinstance(event, bytes):
+ logging.error(
+ "Added TensorFlow event proto. "
+ "Please prefer TensorBoard copy of the proto"
+ )
+ super(FileWriter, self).add_event(tf_event)
+
+ def add_summary(self, summary, global_step=None):
+ if isinstance(summary, summary_pb2.Summary):
+ tf_summary = tf.compat.v1.Summary.FromString(summary.SerializeToString())
+ else:
+ tf_summary = summary
+ if not isinstance(summary, bytes):
+ logging.error(
+ "Added TensorFlow summary proto. "
+ "Please prefer TensorBoard copy of the proto"
+ )
+ super(FileWriter, self).add_summary(tf_summary, global_step)
+
+ def add_session_log(self, session_log, global_step=None):
+ if isinstance(session_log, event_pb2.SessionLog):
+ tf_session_log = tf.compat.v1.SessionLog.FromString(
+ session_log.SerializeToString()
+ )
+ else:
+ tf_session_log = session_log
+ if not isinstance(session_log, bytes):
+ logging.error(
+ "Added TensorFlow session_log proto. "
+ "Please prefer TensorBoard copy of the proto"
+ )
+ super(FileWriter, self).add_session_log(tf_session_log, global_step)
+
+ def add_graph(self, graph, global_step=None, graph_def=None):
+ if isinstance(graph_def, graph_pb2.GraphDef):
+ tf_graph_def = tf.compat.v1.GraphDef.FromString(
+ graph_def.SerializeToString()
+ )
+ else:
+ tf_graph_def = graph_def
+
+ super(FileWriter, self).add_graph(
+ graph, global_step=global_step, graph_def=tf_graph_def
+ )
+
+ def add_meta_graph(self, meta_graph_def, global_step=None):
+ if isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
+ tf_meta_graph_def = tf.compat.v1.MetaGraphDef.FromString(
+ meta_graph_def.SerializeToString()
+ )
+ else:
+ tf_meta_graph_def = meta_graph_def
+
+ super(FileWriter, self).add_meta_graph(
+ meta_graph_def=tf_meta_graph_def, global_step=global_step
+ )
+
+
+class LogdirLoaderTest(tf.test.TestCase):
+ """Tests for LogdirLoader."""
+
+ def get_temp_dir(self):
+ if not hasattr(self, "_tempdir") or self._tempdir is None:
+ self._tempdir = tempfile.mkdtemp()
+ return self._tempdir
+
+ def _create_logdir_loader(self, logdir):
+ def directory_loader_factory(path):
+ return directory_loader.DirectoryLoader(
+ path,
+ event_file_loader.TimestampedEventFileLoader,
+ path_filter=io_wrapper.IsTensorFlowEventsFile,
+ )
+
+ return logdir_loader.LogdirLoader(logdir, directory_loader_factory)
+
+ def _extract_tags(self, event_generator):
+ """Converts a generator of tf.Events into a list of event tags."""
+ return [
+ event.summary.value[0].tag
+ for event in event_generator
+ if not event.file_version
+ ]
+
+ def _extract_run_to_tags(self, run_to_events):
+ """Returns run-to-tags dict from run-to-event-generator dict."""
+ run_to_tags = {}
+ for run_name, event_generator in run_to_events.items():
+ # There should be no duplicate runs.
+ self.assertNotIn(run_name, run_to_tags)
+ run_to_tags[run_name] = self._extract_tags(event_generator)
+ return run_to_tags
+
+ def test_empty_logdir(self):
+ logdir = self.get_temp_dir()
+ loader = self._create_logdir_loader(logdir)
+ # Default state is empty.
+ self.assertEmpty(list(loader.get_run_events()))
+ loader.synchronize_runs()
+ # Still empty, since there's no data.
+ self.assertEmpty(list(loader.get_run_events()))
+
+ def test_single_event_logdir(self):
+ logdir = self.get_temp_dir()
+ with FileWriter(logdir) as writer:
+ writer.add_test_summary("foo")
+ loader = self._create_logdir_loader(logdir)
+ loader.synchronize_runs()
+ self.assertEqual(
+ self._extract_run_to_tags(loader.get_run_events()), {".": ["foo"]}
+ )
+ # A second load should indicate no new data for the run.
+ self.assertEqual(self._extract_run_to_tags(loader.get_run_events()), {".": []})
+
+ def test_profile_logdir(self):
+ logdir = self.get_temp_dir()
+ profile_dir = os.path.join(logdir, "foo/plugins/profile")
+ os.makedirs(profile_dir, exist_ok=True)
+ tempfile.NamedTemporaryFile(
+ prefix="bar", suffix=".xplane.pb", dir=profile_dir, delete=False
+ )
+ self.assertNotEmpty(os.listdir(profile_dir))
+ loader = self._create_logdir_loader(logdir)
+ loader.synchronize_runs()
+ self.assertEqual(
+ self._extract_run_to_tags(loader.get_run_events()), {"foo": []}
+ )
+
+ def test_profile_subdirectories(self):
+ logdir = self.get_temp_dir()
+ profile_dir = os.path.join(logdir, "foo/bar/subdir/plugins/profile")
+ os.makedirs(profile_dir, exist_ok=True)
+ tempfile.NamedTemporaryFile(
+ prefix="bar", suffix=".xplane.pb", dir=profile_dir, delete=False
+ )
+ self.assertNotEmpty(os.listdir(profile_dir))
+ loader = self._create_logdir_loader(logdir)
+ loader.synchronize_runs()
+ self.assertEqual(
+ self._extract_run_to_tags(loader.get_run_events()), {"foo/bar/subdir": []}
+ )
+
+ def test_multiple_writes_to_logdir(self):
+ logdir = self.get_temp_dir()
+ with FileWriter(os.path.join(logdir, "a")) as writer:
+ writer.add_test_summary("tag_a")
+ with FileWriter(os.path.join(logdir, "b")) as writer:
+ writer.add_test_summary("tag_b")
+ with FileWriter(os.path.join(logdir, "b", "x")) as writer:
+ writer.add_test_summary("tag_b_x")
+ with FileWriter(os.path.join(logdir, "b_z")) as writer:
+ writer.add_test_summary("tag_b_z")
+ writer_c = FileWriter(os.path.join(logdir, "c"))
+ writer_c.add_test_summary("tag_c")
+ writer_c.flush()
+ loader = self._create_logdir_loader(logdir)
+ loader.synchronize_runs()
+ self.assertEqual(
+ self._extract_run_to_tags(loader.get_run_events()),
+ {
+ "a": ["tag_a"],
+ "b": ["tag_b"],
+ "b/x": ["tag_b_x"],
+ "b_z": ["tag_b_z"],
+ "c": ["tag_c"],
+ },
+ )
+ # A second load should indicate no new data.
+ self.assertEqual(
+ self._extract_run_to_tags(loader.get_run_events()),
+ {"a": [], "b": [], "b/x": [], "b_z": [], "c": []},
+ )
+ # Write some new data to both new and pre-existing event files.
+ with FileWriter(os.path.join(logdir, "a"), filename_suffix=".other") as writer:
+ writer.add_test_summary("tag_a_2")
+ writer.add_test_summary("tag_a_3")
+ writer.add_test_summary("tag_a_4")
+ with FileWriter(
+ os.path.join(logdir, "b", "x"), filename_suffix=".other"
+ ) as writer:
+ writer.add_test_summary("tag_b_x_2")
+ with writer_c as writer:
+ writer.add_test_summary("tag_c_2")
+ # New data should appear on the next load.
+ self.assertEqual(
+ self._extract_run_to_tags(loader.get_run_events()),
+ {
+ "a": ["tag_a_2", "tag_a_3", "tag_a_4"],
+ "b": [],
+ "b/x": ["tag_b_x_2"],
+ "b_z": [],
+ "c": ["tag_c_2"],
+ },
+ )
+
+ def test_directory_deletion(self):
+ logdir = self.get_temp_dir()
+ with FileWriter(os.path.join(logdir, "a")) as writer:
+ writer.add_test_summary("tag_a")
+ with FileWriter(os.path.join(logdir, "b")) as writer:
+ writer.add_test_summary("tag_b")
+ with FileWriter(os.path.join(logdir, "c")) as writer:
+ writer.add_test_summary("tag_c")
+ loader = self._create_logdir_loader(logdir)
+ loader.synchronize_runs()
+ self.assertEqual(list(loader.get_run_events().keys()), ["a", "b", "c"])
+ shutil.rmtree(os.path.join(logdir, "b"))
+ loader.synchronize_runs()
+ self.assertEqual(list(loader.get_run_events().keys()), ["a", "c"])
+ shutil.rmtree(logdir)
+ loader.synchronize_runs()
+ self.assertEmpty(loader.get_run_events())
+
+ def test_directory_deletion_during_event_loading(self):
+ logdir = self.get_temp_dir()
+ with FileWriter(logdir) as writer:
+ writer.add_test_summary("foo")
+ loader = self._create_logdir_loader(logdir)
+ loader.synchronize_runs()
+ self.assertEqual(
+ self._extract_run_to_tags(loader.get_run_events()), {".": ["foo"]}
+ )
+ shutil.rmtree(logdir)
+ runs_to_events = loader.get_run_events()
+ self.assertEqual(list(runs_to_events.keys()), ["."])
+ events = runs_to_events["."]
+ self.assertEqual(self._extract_tags(events), [])
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_logging.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..010b6b670d77f338b29d9636a3683c582f7f753a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_logging.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud.aiplatform import base
+import logging
+import pytest
+import sys
+
+
+class TestLogging:
+ @pytest.mark.skipif(
+ sys.version_info < (3, 8),
+ reason="requires python3.8 or higher to work with MLFlow",
+ )
+ def test_no_root_logging_handler_override(self, caplog):
+ # Users should be able to control the root logger in their apps
+ # The aiplatform module import should not override their root logger config
+ caplog.set_level(logging.DEBUG)
+
+ logging.debug("Debug level")
+ logging.info("Info level")
+ logging.critical("Critical level")
+
+ assert "Debug level\n" in caplog.text
+ assert "Info level\n" in caplog.text
+ assert "Critical level\n" in caplog.text
+
+ @pytest.mark.skipif(
+ sys.version_info < (3, 8),
+ reason="requires python3.8 or higher to work with MLFlow",
+ )
+ def test_log_level_coexistance(self, caplog):
+ # The aiplatform module and the root logger can have different log levels.
+ aip_logger = base.Logger(__name__)
+
+ caplog.set_level(logging.DEBUG)
+
+ logging.debug("This should exist")
+ logging.info("This should too")
+
+ aip_logger.info("This should also exist")
+ aip_logger.debug("This should NOT exist")
+
+ assert "This should exist\n" in caplog.text
+ assert "This should too\n" in caplog.text
+ assert "This should also exist\n" in caplog.text
+ assert "This should NOT exist\n" not in caplog.text
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_matching_engine_index.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_matching_engine_index.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dc0fb6d1060ac6deb815134bed43296055945ce
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_matching_engine_index.py
@@ -0,0 +1,778 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import uuid
+
+from unittest import mock
+from importlib import reload
+from unittest.mock import patch
+
+from google.api_core import operation
+from google.protobuf import field_mask_pb2
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.compat.services import (
+ index_service_client,
+)
+
+from google.cloud.aiplatform.matching_engine import (
+ matching_engine_index_config,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ index as gca_index,
+ encryption_spec as gca_encryption_spec,
+ index_service as gca_index_service,
+)
+import constants as test_constants
+
+# project
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT
+
+
+# index
+_TEST_INDEX_ID = test_constants.MatchingEngineConstants._TEST_INDEX_ID
+_TEST_INDEX_NAME = test_constants.MatchingEngineConstants._TEST_INDEX_NAME
+_TEST_INDEX_DISPLAY_NAME = (
+ test_constants.MatchingEngineConstants._TEST_INDEX_DISPLAY_NAME
+)
+_TEST_CONTENTS_DELTA_URI = "gs://contents"
+_TEST_INDEX_DISTANCE_MEASURE_TYPE = (
+ matching_engine_index_config.DistanceMeasureType.SQUARED_L2_DISTANCE
+)
+_TEST_INDEX_FEATURE_NORM_TYPE = (
+ matching_engine_index_config.FeatureNormType.UNIT_L2_NORM
+)
+
+_TEST_CONTENTS_DELTA_URI_UPDATE = "gs://contents_update"
+_TEST_IS_COMPLETE_OVERWRITE_UPDATE = True
+
+_TEST_INDEX_CONFIG_DIMENSIONS = 100
+_TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT = 150
+_TEST_LEAF_NODE_EMBEDDING_COUNT = 123
+_TEST_LEAF_NODES_TO_SEARCH_PERCENT = 50
+_TEST_SHARD_SIZES = ["SHARD_SIZE_SMALL", "SHARD_SIZE_LARGE", "SHARD_SIZE_MEDIUM"]
+
+_TEST_INDEX_DESCRIPTION = test_constants.MatchingEngineConstants._TEST_INDEX_DESCRIPTION
+
+
+_TEST_LABELS = test_constants.MatchingEngineConstants._TEST_LABELS
+_TEST_DISPLAY_NAME_UPDATE = (
+ test_constants.MatchingEngineConstants._TEST_DISPLAY_NAME_UPDATE
+)
+_TEST_DESCRIPTION_UPDATE = (
+ test_constants.MatchingEngineConstants._TEST_DESCRIPTION_UPDATE
+)
+_TEST_LABELS_UPDATE = test_constants.MatchingEngineConstants._TEST_LABELS_UPDATE
+
+# request_metadata
+_TEST_REQUEST_METADATA = test_constants.MatchingEngineConstants._TEST_REQUEST_METADATA
+
+# Lists
+_TEST_INDEX_LIST = [
+ gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ description=_TEST_INDEX_DESCRIPTION,
+ ),
+ gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ description=_TEST_INDEX_DESCRIPTION,
+ ),
+ gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ description=_TEST_INDEX_DESCRIPTION,
+ ),
+]
+
+# Index update method
+_TEST_INDEX_BATCH_UPDATE_METHOD = "BATCH_UPDATE"
+_TEST_INDEX_STREAM_UPDATE_METHOD = "STREAM_UPDATE"
+_TEST_INDEX_EMPTY_UPDATE_METHOD = None
+_TEST_INDEX_INVALID_UPDATE_METHOD = "INVALID_UPDATE_METHOD"
+_TEST_INDEX_UPDATE_METHOD_EXPECTED_RESULT_MAP = {
+ _TEST_INDEX_BATCH_UPDATE_METHOD: gca_index.Index.IndexUpdateMethod.BATCH_UPDATE,
+ _TEST_INDEX_STREAM_UPDATE_METHOD: gca_index.Index.IndexUpdateMethod.STREAM_UPDATE,
+ _TEST_INDEX_EMPTY_UPDATE_METHOD: None,
+ _TEST_INDEX_INVALID_UPDATE_METHOD: None,
+}
+
+# Encryption spec
+_TEST_ENCRYPTION_SPEC_KEY_NAME = "TEST_ENCRYPTION_SPEC"
+
+_TEST_DATAPOINT_IDS = ("1", "2")
+_TEST_DATAPOINT_1 = gca_index.IndexDatapoint(
+ datapoint_id="0",
+ feature_vector=[0.00526886899, -0.0198396724],
+ restricts=[
+ gca_index.IndexDatapoint.Restriction(namespace="Color", allow_list=["red"])
+ ],
+ numeric_restricts=[
+ gca_index.IndexDatapoint.NumericRestriction(
+ namespace="cost",
+ value_int=1,
+ )
+ ],
+)
+_TEST_DATAPOINT_2 = gca_index.IndexDatapoint(
+ datapoint_id="1",
+ feature_vector=[0.00526886899, -0.0198396724],
+ numeric_restricts=[
+ gca_index.IndexDatapoint.NumericRestriction(
+ namespace="cost",
+ value_double=0.1,
+ )
+ ],
+ crowding_tag=gca_index.IndexDatapoint.CrowdingTag(crowding_attribute="crowding"),
+)
+_TEST_DATAPOINT_3 = gca_index.IndexDatapoint(
+ datapoint_id="2",
+ feature_vector=[0.00526886899, -0.0198396724],
+ numeric_restricts=[
+ gca_index.IndexDatapoint.NumericRestriction(
+ namespace="cost",
+ value_float=1.1,
+ )
+ ],
+)
+_TEST_DATAPOINTS = (_TEST_DATAPOINT_1, _TEST_DATAPOINT_2, _TEST_DATAPOINT_3)
+_TEST_TIMEOUT = 1800.0
+_TEST_UPDATE_MASK = ["all_restricts"]
+
+
+def uuid_mock():
+ return uuid.UUID(int=1)
+
+
+# All Index Mocks
+@pytest.fixture
+def get_index_mock():
+ with patch.object(
+ index_service_client.IndexServiceClient, "get_index"
+ ) as get_index_mock:
+ get_index_mock.return_value = gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ description=_TEST_INDEX_DESCRIPTION,
+ )
+ yield get_index_mock
+
+
+@pytest.fixture
+def update_index_metadata_mock():
+ with patch.object(
+ index_service_client.IndexServiceClient, "update_index"
+ ) as update_index_mock:
+ index_lro_mock = mock.Mock(operation.Operation)
+ index_lro_mock.result.return_value = gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ )
+ update_index_mock.return_value = index_lro_mock
+ yield update_index_mock
+
+
+@pytest.fixture
+def update_index_embeddings_mock():
+ with patch.object(
+ index_service_client.IndexServiceClient, "update_index"
+ ) as update_index_mock:
+ index_lro_mock = mock.Mock(operation.Operation)
+ index_lro_mock.result.return_value = gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ )
+ update_index_mock.return_value = index_lro_mock
+ yield update_index_mock
+
+
+@pytest.fixture
+def list_indexes_mock():
+ with patch.object(
+ index_service_client.IndexServiceClient, "list_indexes"
+ ) as list_indexes_mock:
+ list_indexes_mock.return_value = _TEST_INDEX_LIST
+ yield list_indexes_mock
+
+
+@pytest.fixture
+def delete_index_mock():
+ with mock.patch.object(
+ index_service_client.IndexServiceClient, "delete_index"
+ ) as delete_index_mock:
+ delete_index_lro_mock = mock.Mock(operation.Operation)
+ delete_index_mock.return_value = delete_index_lro_mock
+ yield delete_index_mock
+
+
+@pytest.fixture
+def create_index_mock():
+ with patch.object(
+ index_service_client.IndexServiceClient, "create_index"
+ ) as create_index_mock:
+ create_index_lro_mock = mock.Mock(operation.Operation)
+ create_index_lro_mock.result.return_value = gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ description=_TEST_INDEX_DESCRIPTION,
+ )
+ create_index_mock.return_value = create_index_lro_mock
+ yield create_index_mock
+
+
+@pytest.fixture
+def upsert_datapoints_mock():
+ with patch.object(
+ index_service_client.IndexServiceClient, "upsert_datapoints"
+ ) as upsert_datapoints_mock:
+ yield upsert_datapoints_mock
+
+
+@pytest.fixture
+def remove_datapoints_mock():
+ with patch.object(
+ index_service_client.IndexServiceClient, "remove_datapoints"
+ ) as remove_datapoints_mock:
+ yield remove_datapoints_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMatchingEngineIndex:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize("index_name", [_TEST_INDEX_ID, _TEST_INDEX_NAME])
+ def test_init_index(self, index_name, get_index_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex(index_name=index_name)
+
+ get_index_mock.assert_called_once_with(
+ name=my_index.resource_name,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ @pytest.mark.usefixtures("get_index_mock")
+ def test_update_index_metadata(self, update_index_metadata_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_ID)
+ updated_index = my_index.update_metadata(
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ update_request_timeout=_TEST_TIMEOUT,
+ )
+
+ expected = gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ )
+
+ update_index_metadata_mock.assert_called_once_with(
+ index=expected,
+ update_mask=field_mask_pb2.FieldMask(
+ paths=["labels", "display_name", "description"]
+ ),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ assert updated_index.gca_resource == expected
+
+ @pytest.mark.usefixtures("get_index_mock")
+ def test_update_index_embeddings(self, update_index_embeddings_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_ID)
+ updated_index = my_index.update_embeddings(
+ contents_delta_uri=_TEST_CONTENTS_DELTA_URI_UPDATE,
+ is_complete_overwrite=_TEST_IS_COMPLETE_OVERWRITE_UPDATE,
+ update_request_timeout=_TEST_TIMEOUT,
+ )
+
+ expected = gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ metadata={
+ "contentsDeltaUri": _TEST_CONTENTS_DELTA_URI_UPDATE,
+ "isCompleteOverwrite": _TEST_IS_COMPLETE_OVERWRITE_UPDATE,
+ },
+ )
+
+ update_index_embeddings_mock.assert_called_once_with(
+ index=expected,
+ update_mask=field_mask_pb2.FieldMask(paths=["metadata"]),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ # The service only returns the name of the Index
+ assert updated_index.gca_resource == gca_index.Index(name=_TEST_INDEX_NAME)
+
+ def test_list_indexes(self, list_indexes_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_indexes_list = aiplatform.MatchingEngineIndex.list()
+
+ list_indexes_mock.assert_called_once_with(request={"parent": _TEST_PARENT})
+ assert len(my_indexes_list) == len(_TEST_INDEX_LIST)
+ for my_index in my_indexes_list:
+ assert isinstance(my_index, aiplatform.MatchingEngineIndex)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_index_mock")
+ def test_delete_index(self, delete_index_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_ID)
+ my_index.delete(sync=sync)
+
+ if not sync:
+ my_index.wait()
+
+ delete_index_mock.assert_called_once_with(name=my_index.resource_name)
+
+ @pytest.mark.usefixtures("get_index_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize(
+ "index_update_method",
+ [
+ _TEST_INDEX_STREAM_UPDATE_METHOD,
+ _TEST_INDEX_BATCH_UPDATE_METHOD,
+ _TEST_INDEX_EMPTY_UPDATE_METHOD,
+ _TEST_INDEX_INVALID_UPDATE_METHOD,
+ ],
+ )
+ @pytest.mark.parametrize("shard_size", _TEST_SHARD_SIZES)
+ def test_create_tree_ah_index(
+ self, create_index_mock, sync, index_update_method, shard_size
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex.create_tree_ah_index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ contents_delta_uri=_TEST_CONTENTS_DELTA_URI,
+ dimensions=_TEST_INDEX_CONFIG_DIMENSIONS,
+ approximate_neighbors_count=_TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT,
+ distance_measure_type=_TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ feature_norm_type=_TEST_INDEX_FEATURE_NORM_TYPE,
+ leaf_node_embedding_count=_TEST_LEAF_NODE_EMBEDDING_COUNT,
+ leaf_nodes_to_search_percent=_TEST_LEAF_NODES_TO_SEARCH_PERCENT,
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ sync=sync,
+ index_update_method=index_update_method,
+ encryption_spec_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME,
+ create_request_timeout=_TEST_TIMEOUT,
+ shard_size=shard_size,
+ )
+
+ if not sync:
+ my_index.wait()
+
+ config = {
+ "treeAhConfig": {
+ "leafNodeEmbeddingCount": _TEST_LEAF_NODE_EMBEDDING_COUNT,
+ "leafNodesToSearchPercent": _TEST_LEAF_NODES_TO_SEARCH_PERCENT,
+ }
+ }
+
+ expected = gca_index.Index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ metadata={
+ "config": {
+ "algorithmConfig": config,
+ "dimensions": _TEST_INDEX_CONFIG_DIMENSIONS,
+ "approximateNeighborsCount": _TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT,
+ "distanceMeasureType": _TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ "featureNormType": _TEST_INDEX_FEATURE_NORM_TYPE,
+ "shardSize": shard_size,
+ },
+ "contentsDeltaUri": _TEST_CONTENTS_DELTA_URI,
+ },
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ index_update_method=_TEST_INDEX_UPDATE_METHOD_EXPECTED_RESULT_MAP[
+ index_update_method
+ ],
+ encryption_spec=gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME
+ ),
+ )
+
+ create_index_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_index_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize(
+ "index_update_method",
+ [
+ _TEST_INDEX_STREAM_UPDATE_METHOD,
+ _TEST_INDEX_BATCH_UPDATE_METHOD,
+ _TEST_INDEX_EMPTY_UPDATE_METHOD,
+ _TEST_INDEX_INVALID_UPDATE_METHOD,
+ ],
+ )
+ @pytest.mark.parametrize("shard_size", _TEST_SHARD_SIZES)
+ def test_create_tree_ah_index_with_empty_index(
+ self, create_index_mock, sync, index_update_method, shard_size
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex.create_tree_ah_index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ contents_delta_uri=None,
+ dimensions=_TEST_INDEX_CONFIG_DIMENSIONS,
+ approximate_neighbors_count=_TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT,
+ distance_measure_type=_TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ feature_norm_type=_TEST_INDEX_FEATURE_NORM_TYPE,
+ leaf_node_embedding_count=_TEST_LEAF_NODE_EMBEDDING_COUNT,
+ leaf_nodes_to_search_percent=_TEST_LEAF_NODES_TO_SEARCH_PERCENT,
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ sync=sync,
+ index_update_method=index_update_method,
+ encryption_spec_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME,
+ create_request_timeout=_TEST_TIMEOUT,
+ shard_size=shard_size,
+ )
+
+ if not sync:
+ my_index.wait()
+
+ config = {
+ "treeAhConfig": {
+ "leafNodeEmbeddingCount": _TEST_LEAF_NODE_EMBEDDING_COUNT,
+ "leafNodesToSearchPercent": _TEST_LEAF_NODES_TO_SEARCH_PERCENT,
+ }
+ }
+
+ expected = gca_index.Index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ metadata={
+ "config": {
+ "algorithmConfig": config,
+ "dimensions": _TEST_INDEX_CONFIG_DIMENSIONS,
+ "approximateNeighborsCount": _TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT,
+ "distanceMeasureType": _TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ "featureNormType": _TEST_INDEX_FEATURE_NORM_TYPE,
+ "shardSize": shard_size,
+ },
+ },
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ index_update_method=_TEST_INDEX_UPDATE_METHOD_EXPECTED_RESULT_MAP[
+ index_update_method
+ ],
+ encryption_spec=gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME
+ ),
+ )
+
+ create_index_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_index_mock")
+ def test_create_tree_ah_index_backward_compatibility(self, create_index_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ aiplatform.MatchingEngineIndex.create_tree_ah_index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ contents_delta_uri=_TEST_CONTENTS_DELTA_URI,
+ dimensions=_TEST_INDEX_CONFIG_DIMENSIONS,
+ approximate_neighbors_count=_TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT,
+ distance_measure_type=_TEST_INDEX_DISTANCE_MEASURE_TYPE.value,
+ feature_norm_type=_TEST_INDEX_FEATURE_NORM_TYPE.value,
+ leaf_node_embedding_count=_TEST_LEAF_NODE_EMBEDDING_COUNT,
+ leaf_nodes_to_search_percent=_TEST_LEAF_NODES_TO_SEARCH_PERCENT,
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+
+ config = {
+ "treeAhConfig": {
+ "leafNodeEmbeddingCount": _TEST_LEAF_NODE_EMBEDDING_COUNT,
+ "leafNodesToSearchPercent": _TEST_LEAF_NODES_TO_SEARCH_PERCENT,
+ }
+ }
+
+ expected = gca_index.Index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ metadata={
+ "config": {
+ "algorithmConfig": config,
+ "dimensions": _TEST_INDEX_CONFIG_DIMENSIONS,
+ "approximateNeighborsCount": _TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT,
+ "distanceMeasureType": _TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ "featureNormType": _TEST_INDEX_FEATURE_NORM_TYPE,
+ "shardSize": None,
+ },
+ "contentsDeltaUri": _TEST_CONTENTS_DELTA_URI,
+ },
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+
+ create_index_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_index_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize(
+ "index_update_method",
+ [
+ _TEST_INDEX_STREAM_UPDATE_METHOD,
+ _TEST_INDEX_BATCH_UPDATE_METHOD,
+ _TEST_INDEX_EMPTY_UPDATE_METHOD,
+ _TEST_INDEX_INVALID_UPDATE_METHOD,
+ ],
+ )
+ @pytest.mark.parametrize("shard_size", _TEST_SHARD_SIZES)
+ def test_create_brute_force_index(
+ self, create_index_mock, sync, index_update_method, shard_size
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex.create_brute_force_index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ contents_delta_uri=_TEST_CONTENTS_DELTA_URI,
+ dimensions=_TEST_INDEX_CONFIG_DIMENSIONS,
+ distance_measure_type=_TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ feature_norm_type=_TEST_INDEX_FEATURE_NORM_TYPE,
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ sync=sync,
+ index_update_method=index_update_method,
+ encryption_spec_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME,
+ create_request_timeout=_TEST_TIMEOUT,
+ shard_size=shard_size,
+ )
+
+ if not sync:
+ my_index.wait()
+
+ config = {"bruteForceConfig": {}}
+
+ expected = gca_index.Index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ metadata={
+ "config": {
+ "algorithmConfig": config,
+ "dimensions": _TEST_INDEX_CONFIG_DIMENSIONS,
+ "approximateNeighborsCount": None,
+ "distanceMeasureType": _TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ "featureNormType": _TEST_INDEX_FEATURE_NORM_TYPE,
+ "shardSize": shard_size,
+ },
+ "contentsDeltaUri": _TEST_CONTENTS_DELTA_URI,
+ },
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ index_update_method=_TEST_INDEX_UPDATE_METHOD_EXPECTED_RESULT_MAP[
+ index_update_method
+ ],
+ encryption_spec=gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME,
+ ),
+ )
+
+ create_index_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_index_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize(
+ "index_update_method",
+ [
+ _TEST_INDEX_STREAM_UPDATE_METHOD,
+ _TEST_INDEX_BATCH_UPDATE_METHOD,
+ _TEST_INDEX_EMPTY_UPDATE_METHOD,
+ _TEST_INDEX_INVALID_UPDATE_METHOD,
+ ],
+ )
+ def test_create_brute_force_index_with_empty_index(
+ self, create_index_mock, sync, index_update_method
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex.create_brute_force_index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ dimensions=_TEST_INDEX_CONFIG_DIMENSIONS,
+ distance_measure_type=_TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ feature_norm_type=_TEST_INDEX_FEATURE_NORM_TYPE,
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ sync=sync,
+ index_update_method=index_update_method,
+ encryption_spec_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME,
+ create_request_timeout=_TEST_TIMEOUT,
+ )
+
+ if not sync:
+ my_index.wait()
+
+ config = {"bruteForceConfig": {}}
+
+ expected = gca_index.Index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ metadata={
+ "config": {
+ "algorithmConfig": config,
+ "dimensions": _TEST_INDEX_CONFIG_DIMENSIONS,
+ "approximateNeighborsCount": None,
+ "distanceMeasureType": _TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ "featureNormType": _TEST_INDEX_FEATURE_NORM_TYPE,
+ "shardSize": None,
+ },
+ },
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ index_update_method=_TEST_INDEX_UPDATE_METHOD_EXPECTED_RESULT_MAP[
+ index_update_method
+ ],
+ encryption_spec=gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME,
+ ),
+ )
+
+ create_index_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_index_mock")
+ def test_create_brute_force_index_backward_compatibility(self, create_index_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ aiplatform.MatchingEngineIndex.create_brute_force_index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ contents_delta_uri=_TEST_CONTENTS_DELTA_URI,
+ dimensions=_TEST_INDEX_CONFIG_DIMENSIONS,
+ distance_measure_type=_TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ feature_norm_type=_TEST_INDEX_FEATURE_NORM_TYPE,
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+
+ config = {"bruteForceConfig": {}}
+
+ expected = gca_index.Index(
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ metadata={
+ "config": {
+ "algorithmConfig": config,
+ "dimensions": _TEST_INDEX_CONFIG_DIMENSIONS,
+ "approximateNeighborsCount": None,
+ "distanceMeasureType": _TEST_INDEX_DISTANCE_MEASURE_TYPE,
+ "featureNormType": _TEST_INDEX_FEATURE_NORM_TYPE,
+ "shardSize": None,
+ },
+ "contentsDeltaUri": _TEST_CONTENTS_DELTA_URI,
+ },
+ description=_TEST_INDEX_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+
+ create_index_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_index_mock")
+ def test_upsert_datapoints(self, upsert_datapoints_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_ID)
+ my_index.upsert_datapoints(
+ datapoints=_TEST_DATAPOINTS,
+ )
+
+ upsert_datapoints_request = gca_index_service.UpsertDatapointsRequest(
+ index=_TEST_INDEX_NAME,
+ datapoints=_TEST_DATAPOINTS,
+ )
+
+ upsert_datapoints_mock.assert_called_once_with(upsert_datapoints_request)
+
+ @pytest.mark.usefixtures("get_index_mock")
+ def test_upsert_datapoints_dynamic_metadata_update(self, upsert_datapoints_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_ID)
+ my_index.upsert_datapoints(
+ datapoints=_TEST_DATAPOINTS,
+ update_mask=_TEST_UPDATE_MASK,
+ )
+
+ upsert_datapoints_request = gca_index_service.UpsertDatapointsRequest(
+ index=_TEST_INDEX_NAME,
+ datapoints=_TEST_DATAPOINTS,
+ update_mask=field_mask_pb2.FieldMask(paths=_TEST_UPDATE_MASK),
+ )
+
+ upsert_datapoints_mock.assert_called_once_with(upsert_datapoints_request)
+
+ @pytest.mark.usefixtures("get_index_mock")
+ def test_remove_datapoints(self, remove_datapoints_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_ID)
+ my_index.remove_datapoints(
+ datapoint_ids=_TEST_DATAPOINT_IDS,
+ )
+
+ remove_datapoints_request = gca_index_service.RemoveDatapointsRequest(
+ index=_TEST_INDEX_NAME,
+ datapoint_ids=_TEST_DATAPOINT_IDS,
+ )
+
+ remove_datapoints_mock.assert_called_once_with(remove_datapoints_request)
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_matching_engine_index_endpoint.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_matching_engine_index_endpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfd85c41b67d761978a5f8d7f7d01ba37403cec5
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_matching_engine_index_endpoint.py
@@ -0,0 +1,2424 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import uuid
+from importlib import reload
+from unittest import mock
+from unittest.mock import patch
+
+from google.api_core import operation
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.matching_engine._protos import (
+ match_service_pb2,
+ match_service_pb2_grpc,
+)
+from google.cloud.aiplatform.matching_engine.matching_engine_index_endpoint import (
+ Namespace,
+ NumericNamespace,
+ MatchNeighbor,
+ HybridQuery,
+)
+from google.cloud.aiplatform.compat.types import (
+ matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref,
+ index_endpoint as gca_index_endpoint,
+ index as gca_index,
+ match_service_v1beta1 as gca_match_service_v1beta1,
+ index_v1beta1 as gca_index_v1beta1,
+ service_networking as gca_service_networking,
+ encryption_spec as gca_encryption_spec,
+)
+from google.cloud.aiplatform.compat.services import (
+ index_endpoint_service_client,
+ index_service_client,
+ match_service_client_v1beta1,
+)
+import constants as test_constants
+
+from google.protobuf import field_mask_pb2
+
+import grpc
+
+import pytest
+
+# project
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT
+
+# index
+_TEST_INDEX_ID = test_constants.MatchingEngineConstants._TEST_INDEX_ID
+_TEST_INDEX_NAME = test_constants.MatchingEngineConstants._TEST_INDEX_NAME
+_TEST_INDEX_DISPLAY_NAME = (
+ test_constants.MatchingEngineConstants._TEST_INDEX_DISPLAY_NAME
+)
+
+# index_endpoint
+_TEST_INDEX_ENDPOINT_ID = "index_endpoint_id"
+_TEST_INDEX_ENDPOINT_PUBLIC_DNS = (
+ "1114627793.us-central1-249381615684.vdb.vertexai.goog"
+)
+_TEST_INDEX_ENDPOINT_NAME = f"{_TEST_PARENT}/indexEndpoints/{_TEST_INDEX_ENDPOINT_ID}"
+_TEST_INDEX_ENDPOINT_DISPLAY_NAME = "index_endpoint_display_name"
+_TEST_INDEX_ENDPOINT_DESCRIPTION = "index_endpoint_description"
+_TEST_INDEX_DESCRIPTION = test_constants.MatchingEngineConstants._TEST_INDEX_DESCRIPTION
+_TEST_INDEX_ENDPOINT_VPC_NETWORK = "projects/{}/global/networks/{}".format(
+ "12345", "network"
+)
+
+_TEST_LABELS = test_constants.MatchingEngineConstants._TEST_LABELS
+_TEST_DISPLAY_NAME_UPDATE = (
+ test_constants.MatchingEngineConstants._TEST_DISPLAY_NAME_UPDATE
+)
+_TEST_DESCRIPTION_UPDATE = (
+ test_constants.MatchingEngineConstants._TEST_DESCRIPTION_UPDATE
+)
+_TEST_LABELS_UPDATE = test_constants.MatchingEngineConstants._TEST_LABELS_UPDATE
+
+# deployment
+_TEST_DEPLOYED_INDEX_ID = "deployed_index_id"
+_TEST_DEPLOYED_INDEX_DISPLAY_NAME = "deployed_index_display_name"
+_TEST_MIN_REPLICA_COUNT = 2
+_TEST_MAX_REPLICA_COUNT = 2
+_TEST_ENABLE_ACCESS_LOGGING = False
+_TEST_RESERVED_IP_RANGES = ["vertex-ai-ip-range-1", "vertex-ai-ip-range-2"]
+_TEST_DEPLOYMENT_GROUP = "prod"
+_TEST_AUTH_CONFIG_AUDIENCES = ["a", "b"]
+_TEST_AUTH_CONFIG_ALLOWED_ISSUERS = [
+ "service-account-name-1@project-id.iam.gserviceaccount.com",
+ "service-account-name-2@project-id.iam.gserviceaccount.com",
+]
+_TEST_SIGNED_JWT = "signed_jwt"
+_TEST_AUTHORIZATION_METADATA = (("authorization", f"Bearer: {_TEST_SIGNED_JWT}"),)
+
+_TEST_PSC_NETWORK1 = "projects/project1/global/networks/network1"
+_TEST_PSC_NETWORK2 = "projects/project2/global/networks/network2"
+_TEST_PSC_NETWORK3 = "projects/project3/global/networks/network3"
+_TEST_PSC_AUTOMATION_CONFIGS = [
+ ("project1", _TEST_PSC_NETWORK1),
+ ("project2", _TEST_PSC_NETWORK2),
+ ("project3", _TEST_PSC_NETWORK3),
+]
+
+# deployment_updated
+_TEST_MIN_REPLICA_COUNT_UPDATED = 4
+_TEST_MAX_REPLICA_COUNT_UPDATED = 4
+
+# request_metadata
+_TEST_REQUEST_METADATA = test_constants.MatchingEngineConstants._TEST_REQUEST_METADATA
+
+# Lists
+_TEST_INDEX_ENDPOINT_LIST = [
+ gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ ),
+ gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ ),
+ gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ ),
+]
+
+# Match
+_TEST_QUERIES = [
+ [
+ -0.11333,
+ 0.48402,
+ 0.090771,
+ -0.22439,
+ 0.034206,
+ -0.55831,
+ 0.041849,
+ -0.53573,
+ 0.18809,
+ -0.58722,
+ 0.015313,
+ -0.014555,
+ 0.80842,
+ -0.038519,
+ 0.75348,
+ 0.70502,
+ -0.17863,
+ 0.3222,
+ 0.67575,
+ 0.67198,
+ 0.26044,
+ 0.4187,
+ -0.34122,
+ 0.2286,
+ -0.53529,
+ 1.2582,
+ -0.091543,
+ 0.19716,
+ -0.037454,
+ -0.3336,
+ 0.31399,
+ 0.36488,
+ 0.71263,
+ 0.1307,
+ -0.24654,
+ -0.52445,
+ -0.036091,
+ 0.55068,
+ 0.10017,
+ 0.48095,
+ 0.71104,
+ -0.053462,
+ 0.22325,
+ 0.30917,
+ -0.39926,
+ 0.036634,
+ -0.35431,
+ -0.42795,
+ 0.46444,
+ 0.25586,
+ 0.68257,
+ -0.20821,
+ 0.38433,
+ 0.055773,
+ -0.2539,
+ -0.20804,
+ 0.52522,
+ -0.11399,
+ -0.3253,
+ -0.44104,
+ 0.17528,
+ 0.62255,
+ 0.50237,
+ -0.7607,
+ -0.071786,
+ 0.0080131,
+ -0.13286,
+ 0.50097,
+ 0.18824,
+ -0.54722,
+ -0.42664,
+ 0.4292,
+ 0.14877,
+ -0.0072514,
+ -0.16484,
+ -0.059798,
+ 0.9895,
+ -0.61738,
+ 0.054169,
+ 0.48424,
+ -0.35084,
+ -0.27053,
+ 0.37829,
+ 0.11503,
+ -0.39613,
+ 0.24266,
+ 0.39147,
+ -0.075256,
+ 0.65093,
+ -0.20822,
+ -0.17456,
+ 0.53571,
+ -0.16537,
+ 0.13582,
+ -0.56016,
+ 0.016964,
+ 0.1277,
+ 0.94071,
+ -0.22608,
+ -0.021106,
+ ]
+]
+_TEST_QUERY_IDS = ["1", "2"]
+_TEST_HYBRID_QUERIES = [
+ HybridQuery(
+ sparse_embedding_dimensions=[1, 2, 3],
+ sparse_embedding_values=[0.1, 0.2, 0.3],
+ rrf_ranking_alpha=0.2,
+ ),
+ HybridQuery(
+ dense_embedding=_TEST_QUERIES[0],
+ sparse_embedding_dimensions=[1, 2, 3],
+ sparse_embedding_values=[0.1, 0.2, 0.3],
+ ),
+]
+_TEST_NUM_NEIGHBOURS = 1
+_TEST_FILTER = [
+ Namespace(name="class", allow_tokens=["token_1"], deny_tokens=["token_2"])
+]
+_TEST_NUMERIC_FILTER = [
+ NumericNamespace(name="cost", value_double=0.3, op="EQUAL"),
+ NumericNamespace(name="size", value_int=0, op="GREATER"),
+ NumericNamespace(name="seconds", value_float=-20.5, op="LESS_EQUAL"),
+ NumericNamespace(name="duration", value_int=10, op="NOT_EQUAL"),
+]
+_TEST_NUMERIC_NAMESPACE = [
+ match_service_pb2.NumericNamespace(name="cost", value_double=0.3, op=3),
+ match_service_pb2.NumericNamespace(name="size", value_int=0, op=5),
+ match_service_pb2.NumericNamespace(name="seconds", value_float=-20.5, op=2),
+ match_service_pb2.NumericNamespace(name="duration", value_int=10, op="NOT_EQUAL"),
+]
+_TEST_IDS = ["123", "456", "789"]
+_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS = 3
+_TEST_APPROX_NUM_NEIGHBORS = 2
+_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE = 0.8
+_TEST_RETURN_FULL_DATAPOINT = True
+_TEST_LOW_LEVEL_BATCH_SIZE = 3
+_TEST_ENCRYPTION_SPEC_KEY_NAME = "kms_key_name"
+_TEST_PROJECT_ALLOWLIST = ["project-1", "project-2"]
+_TEST_PRIVATE_SERVICE_CONNECT_IP_ADDRESS = "10.128.0.5"
+_TEST_PRIVATE_SERVICE_CONNECT_IP_AUTOMATION_ADDRESS_1 = "10.128.0.6"
+_TEST_PRIVATE_SERVICE_CONNECT_IP_AUTOMATION_ADDRESS_2 = "10.128.0.7"
+_TEST_PRIVATE_SERVICE_CONNECT_IP_AUTOMATION_ADDRESS_3 = "10.128.0.8"
+_TEST_SERVICE_ATTACHMENT_URI = "projects/test-project/regions/test-region/serviceAttachments/test-service-attachment"
+_TEST_PRIVATE_SERVICE_CONNECT_URI = "{}:10000".format(
+ _TEST_PRIVATE_SERVICE_CONNECT_IP_ADDRESS
+)
+_TEST_PRIVATE_SERVICE_CONNECT_AUTOMATION_URI_1 = "{}:10000".format(
+ _TEST_PRIVATE_SERVICE_CONNECT_IP_AUTOMATION_ADDRESS_1
+)
+_TEST_PRIVATE_SERVICE_CONNECT_AUTOMATION_URI_3 = "{}:10000".format(
+ _TEST_PRIVATE_SERVICE_CONNECT_IP_AUTOMATION_ADDRESS_3
+)
+_TEST_READ_INDEX_DATAPOINTS_RESPONSE = [
+ gca_index_v1beta1.IndexDatapoint(
+ datapoint_id="1",
+ feature_vector=[1.0, 2.0, 3.0],
+ restricts=[
+ gca_index_v1beta1.IndexDatapoint.Restriction(
+ namespace="class",
+ allow_list=["token_1"],
+ deny_list=["token_2"],
+ )
+ ],
+ ),
+ gca_index_v1beta1.IndexDatapoint(
+ datapoint_id="2",
+ feature_vector=[0, -1.0, 2.0],
+ crowding_tag=gca_index_v1beta1.IndexDatapoint.CrowdingTag(
+ crowding_attribute="1"
+ ),
+ ),
+]
+_TEST_PRIVATE_MATCH_NEIGHBOR_RESPONSE = [
+ [
+ MatchNeighbor(
+ id="1",
+ distance=0.1,
+ feature_vector=[0.0, -1.0, 2.0],
+ crowding_tag="0",
+ restricts=[
+ Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ ),
+ MatchNeighbor(
+ id="2",
+ distance=0.1,
+ feature_vector=[1.0, 2.0, 3.0],
+ crowding_tag="0",
+ restricts=[
+ Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ ),
+ ]
+]
+_TEST_TIMEOUT = 1800.0
+
+
+def uuid_mock():
+ return uuid.UUID(int=1)
+
+
+# All index mocks
+@pytest.fixture
+def get_index_mock():
+ with patch.object(
+ index_service_client.IndexServiceClient, "get_index"
+ ) as get_index_mock:
+ index = gca_index.Index(
+ name=_TEST_INDEX_NAME,
+ display_name=_TEST_INDEX_DISPLAY_NAME,
+ description=_TEST_INDEX_DESCRIPTION,
+ )
+
+ index.deployed_indexes = [
+ gca_matching_engine_deployed_index_ref.DeployedIndexRef(
+ index_endpoint=index.name,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ )
+ ]
+
+ get_index_mock.return_value = index
+ yield get_index_mock
+
+
+# All index_endpoint mocks
+@pytest.fixture
+def get_index_endpoint_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient, "get_index_endpoint"
+ ) as get_index_endpoint_mock:
+ index_endpoint = gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ )
+ index_endpoint.deployed_indexes = [
+ gca_index_endpoint.DeployedIndex(
+ id=_TEST_DEPLOYED_INDEX_ID,
+ index=_TEST_INDEX_NAME,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ ),
+ gca_index_endpoint.DeployedIndex(
+ id=f"{_TEST_DEPLOYED_INDEX_ID}_2",
+ index=f"{_TEST_INDEX_NAME}_2",
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ ),
+ ]
+ get_index_endpoint_mock.return_value = index_endpoint
+ yield get_index_endpoint_mock
+
+
+@pytest.fixture
+def get_psa_index_endpoint_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient, "get_index_endpoint"
+ ) as get_psa_index_endpoint_mock:
+ index_endpoint = gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ )
+ index_endpoint.deployed_indexes = [
+ gca_index_endpoint.DeployedIndex(
+ id=_TEST_DEPLOYED_INDEX_ID,
+ index=_TEST_INDEX_NAME,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ private_endpoints=gca_index_endpoint.IndexPrivateEndpoints(
+ match_grpc_address="10.128.0.0",
+ service_attachment=_TEST_SERVICE_ATTACHMENT_URI,
+ ),
+ ),
+ gca_index_endpoint.DeployedIndex(
+ id=f"{_TEST_DEPLOYED_INDEX_ID}_2",
+ index=f"{_TEST_INDEX_NAME}_2",
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ private_endpoints=gca_index_endpoint.IndexPrivateEndpoints(
+ match_grpc_address="10.128.0.1",
+ service_attachment=_TEST_SERVICE_ATTACHMENT_URI,
+ ),
+ ),
+ ]
+ get_psa_index_endpoint_mock.return_value = index_endpoint
+ yield get_psa_index_endpoint_mock
+
+
+@pytest.fixture
+def get_manual_psc_index_endpoint_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient, "get_index_endpoint"
+ ) as get_manual_psc_index_endpoint_mock:
+ index_endpoint = gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ )
+ index_endpoint.deployed_indexes = [
+ gca_index_endpoint.DeployedIndex(
+ id=_TEST_DEPLOYED_INDEX_ID,
+ index=_TEST_INDEX_NAME,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ private_endpoints=gca_index_endpoint.IndexPrivateEndpoints(
+ service_attachment=_TEST_SERVICE_ATTACHMENT_URI,
+ ),
+ ),
+ gca_index_endpoint.DeployedIndex(
+ id=f"{_TEST_DEPLOYED_INDEX_ID}_2",
+ index=f"{_TEST_INDEX_NAME}_2",
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ private_endpoints=gca_index_endpoint.IndexPrivateEndpoints(
+ service_attachment=_TEST_SERVICE_ATTACHMENT_URI,
+ ),
+ ),
+ ]
+ get_manual_psc_index_endpoint_mock.return_value = index_endpoint
+ yield get_manual_psc_index_endpoint_mock
+
+
+@pytest.fixture
+def get_psc_automated_index_endpoint_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient,
+ "get_index_endpoint",
+ ) as get_psc_automated_index_endpoint_mock:
+ index_endpoint = gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ )
+ index_endpoint.deployed_indexes = [
+ gca_index_endpoint.DeployedIndex(
+ id=_TEST_DEPLOYED_INDEX_ID,
+ index=_TEST_INDEX_NAME,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ private_endpoints=gca_index_endpoint.IndexPrivateEndpoints(
+ service_attachment=_TEST_SERVICE_ATTACHMENT_URI,
+ psc_automated_endpoints=[
+ gca_service_networking.PscAutomatedEndpoints(
+ network=_TEST_PSC_NETWORK1,
+ project_id="test-project1",
+ match_address=_TEST_PRIVATE_SERVICE_CONNECT_IP_AUTOMATION_ADDRESS_1,
+ ),
+ gca_service_networking.PscAutomatedEndpoints(
+ network=_TEST_PSC_NETWORK2,
+ project_id="test-project2",
+ match_address=_TEST_PRIVATE_SERVICE_CONNECT_IP_AUTOMATION_ADDRESS_2,
+ ),
+ gca_service_networking.PscAutomatedEndpoints(
+ network=_TEST_PSC_NETWORK3,
+ project_id="test-project3",
+ match_address=_TEST_PRIVATE_SERVICE_CONNECT_IP_AUTOMATION_ADDRESS_3,
+ ),
+ ],
+ ),
+ ),
+ gca_index_endpoint.DeployedIndex(
+ id=f"{_TEST_DEPLOYED_INDEX_ID}_2",
+ index=f"{_TEST_INDEX_NAME}_2",
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ private_endpoints=gca_index_endpoint.IndexPrivateEndpoints(
+ service_attachment=_TEST_SERVICE_ATTACHMENT_URI,
+ psc_automated_endpoints=[
+ gca_service_networking.PscAutomatedEndpoints(
+ network="test-network2",
+ project_id="test-project2",
+ match_address="10.128.0.8",
+ )
+ ],
+ ),
+ ),
+ ]
+ get_psc_automated_index_endpoint_mock.return_value = index_endpoint
+ yield get_psc_automated_index_endpoint_mock
+
+
+@pytest.fixture
+def get_index_public_endpoint_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient, "get_index_endpoint"
+ ) as get_index_public_endpoint_mock:
+ index_endpoint = gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ public_endpoint_domain_name=_TEST_INDEX_ENDPOINT_PUBLIC_DNS,
+ )
+ index_endpoint.deployed_indexes = [
+ gca_index_endpoint.DeployedIndex(
+ id=_TEST_DEPLOYED_INDEX_ID,
+ index=_TEST_INDEX_NAME,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ ),
+ gca_index_endpoint.DeployedIndex(
+ id=f"{_TEST_DEPLOYED_INDEX_ID}_2",
+ index=f"{_TEST_INDEX_NAME}_2",
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ ),
+ ]
+ get_index_public_endpoint_mock.return_value = index_endpoint
+ yield get_index_public_endpoint_mock
+
+
+@pytest.fixture
+def deploy_index_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient,
+ "deploy_index",
+ ) as deploy_index_mock:
+ deploy_index_lro_mock = mock.Mock(operation.Operation)
+ deploy_index_mock.return_value = deploy_index_lro_mock
+ yield deploy_index_mock
+
+
+@pytest.fixture
+def undeploy_index_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient,
+ "undeploy_index",
+ ) as undeploy_index_mock:
+ undeploy_index_lro_mock = mock.Mock(operation.Operation)
+ undeploy_index_mock.return_value = undeploy_index_lro_mock
+ yield undeploy_index_mock
+
+
+@pytest.fixture
+def update_index_endpoint_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient,
+ "update_index_endpoint",
+ ) as index_endpoint_mock:
+ index_endpoint_mock.return_value = gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ )
+
+ yield index_endpoint_mock
+
+
+@pytest.fixture
+def mutate_deployed_index_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient,
+ "mutate_deployed_index",
+ ) as mutate_deployed_index_mock:
+ mutate_deployed_index_lro_mock = mock.Mock(operation.Operation)
+ update_index_endpoint_mock.return_value = mutate_deployed_index_lro_mock
+ yield mutate_deployed_index_mock
+
+
+@pytest.fixture
+def list_index_endpoints_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient, "list_index_endpoints"
+ ) as list_index_endpoints_mock:
+ list_index_endpoints_mock.return_value = _TEST_INDEX_ENDPOINT_LIST
+ yield list_index_endpoints_mock
+
+
+@pytest.fixture
+def delete_index_endpoint_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient,
+ "delete_index_endpoint",
+ ) as delete_index_endpoint_mock:
+ delete_index_endpoint_lro_mock = mock.Mock(operation.Operation)
+ delete_index_endpoint_mock.return_value = delete_index_endpoint_lro_mock
+ yield delete_index_endpoint_mock
+
+
+@pytest.fixture
+def create_index_endpoint_mock():
+ with patch.object(
+ index_endpoint_service_client.IndexEndpointServiceClient,
+ "create_index_endpoint",
+ ) as create_index_endpoint_mock:
+ create_index_endpoint_lro_mock = mock.Mock(operation.Operation)
+ create_index_endpoint_lro_mock.result.return_value = (
+ gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ )
+ )
+ create_index_endpoint_mock.return_value = create_index_endpoint_lro_mock
+ yield create_index_endpoint_mock
+
+
+@pytest.fixture
+def grpc_insecure_channel_mock():
+ with patch.object(grpc, "insecure_channel") as grpc_insecure_channel_mock:
+ grpc_insecure_channel_mock.return_value = mock.Mock()
+ yield grpc_insecure_channel_mock
+
+
+@pytest.fixture
+def index_endpoint_match_queries_mock():
+ with patch.object(
+ match_service_pb2_grpc, "MatchServiceStub"
+ ) as match_service_stub_mock:
+ match_service_stub_mock = match_service_stub_mock.return_value
+ match_service_stub_mock.BatchMatch.return_value = (
+ match_service_pb2.BatchMatchResponse(
+ responses=[
+ match_service_pb2.BatchMatchResponse.BatchMatchResponsePerIndex(
+ deployed_index_id="1",
+ responses=[
+ match_service_pb2.MatchResponse(
+ neighbor=[
+ match_service_pb2.MatchResponse.Neighbor(
+ id="1", distance=0.1
+ ),
+ match_service_pb2.MatchResponse.Neighbor(
+ id="2", distance=0.1
+ ),
+ ],
+ embeddings=[
+ match_service_pb2.Embedding(
+ id="2",
+ float_val=[1.0, 2.0, 3.0],
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ crowding_attribute=0,
+ ),
+ match_service_pb2.Embedding(
+ id="1",
+ float_val=[0, -1.0, 2.0],
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ ),
+ ],
+ )
+ ],
+ )
+ ]
+ )
+ )
+ yield match_service_stub_mock
+
+
+@pytest.fixture
+def index_endpoint_batch_get_embeddings_mock():
+ with patch.object(
+ match_service_pb2_grpc, "MatchServiceStub"
+ ) as match_service_stub_mock:
+ match_service_stub_mock = match_service_stub_mock.return_value
+ match_service_stub_mock.BatchGetEmbeddings.return_value = (
+ match_service_pb2.BatchGetEmbeddingsResponse(
+ embeddings=[
+ match_service_pb2.Embedding(
+ id="1",
+ float_val=[1.0, 2.0, 3.0],
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ ),
+ match_service_pb2.Embedding(
+ id="2",
+ float_val=[0, -1.0, 2.0],
+ crowding_attribute=1,
+ ),
+ ]
+ )
+ )
+ yield match_service_stub_mock
+
+
+@pytest.fixture
+def index_public_endpoint_match_queries_mock():
+ with patch.object(
+ match_service_client_v1beta1.MatchServiceClient, "find_neighbors"
+ ) as index_public_endpoint_match_queries_mock:
+ index_public_endpoint_match_queries_mock.return_value = (
+ gca_match_service_v1beta1.FindNeighborsResponse(
+ nearest_neighbors=[
+ gca_match_service_v1beta1.FindNeighborsResponse.NearestNeighbors(
+ id="1",
+ neighbors=[
+ gca_match_service_v1beta1.FindNeighborsResponse.Neighbor(
+ datapoint=gca_index_v1beta1.IndexDatapoint(
+ datapoint_id="1"
+ ),
+ distance=0.1,
+ )
+ ],
+ )
+ ]
+ )
+ )
+ yield index_public_endpoint_match_queries_mock
+
+
+@pytest.fixture
+def index_public_endpoint_read_index_datapoints_mock():
+ with patch.object(
+ match_service_client_v1beta1.MatchServiceClient, "read_index_datapoints"
+ ) as index_public_endpoint_read_index_datapoints_mock:
+ index_public_endpoint_read_index_datapoints_mock.return_value = (
+ gca_match_service_v1beta1.ReadIndexDatapointsResponse(
+ datapoints=[
+ gca_index_v1beta1.IndexDatapoint(
+ datapoint_id="1",
+ feature_vector=[0, 1, 2, 3],
+ )
+ ]
+ )
+ )
+ yield index_public_endpoint_read_index_datapoints_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMatchingEngineIndexEndpoint:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize(
+ "index_endpoint_name", [_TEST_INDEX_ENDPOINT_ID, _TEST_INDEX_ENDPOINT_NAME]
+ )
+ def test_init_index_endpoint(self, index_endpoint_name, get_index_endpoint_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=index_endpoint_name
+ )
+
+ get_index_endpoint_mock.assert_called_once_with(
+ name=my_index_endpoint.resource_name, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_update_index_endpoint(self, update_index_endpoint_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+ updated_endpoint = my_index_endpoint.update(
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ request_metadata=_TEST_REQUEST_METADATA,
+ update_request_timeout=_TEST_TIMEOUT,
+ )
+
+ expected = gca_index_endpoint.IndexEndpoint(
+ name=_TEST_INDEX_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ description=_TEST_DESCRIPTION_UPDATE,
+ labels=_TEST_LABELS_UPDATE,
+ )
+
+ update_index_endpoint_mock.assert_called_once_with(
+ index_endpoint=expected,
+ update_mask=field_mask_pb2.FieldMask(
+ paths=["labels", "display_name", "description"]
+ ),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ assert updated_endpoint.gca_resource == expected
+
+ def test_list_index_endpoints(self, list_index_endpoints_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoints_list = aiplatform.MatchingEngineIndexEndpoint.list()
+
+ list_index_endpoints_mock.assert_called_once_with(
+ request={"parent": _TEST_PARENT}
+ )
+ assert len(my_index_endpoints_list) == len(_TEST_INDEX_ENDPOINT_LIST)
+ for my_index_endpoint in my_index_endpoints_list:
+ assert isinstance(my_index_endpoint, aiplatform.MatchingEngineIndexEndpoint)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_delete_index_endpoint(self, delete_index_endpoint_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+ my_index_endpoint.delete(sync=sync)
+
+ if not sync:
+ my_index_endpoint.wait()
+
+ delete_index_endpoint_mock.assert_called_once_with(
+ name=my_index_endpoint.resource_name
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_index_endpoint(self, create_index_endpoint_mock, sync):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ network=_TEST_INDEX_ENDPOINT_VPC_NETWORK,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ labels=_TEST_LABELS,
+ create_request_timeout=_TEST_TIMEOUT,
+ )
+
+ if not sync:
+ my_index_endpoint.wait()
+
+ expected = gca_index_endpoint.IndexEndpoint(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ network=_TEST_INDEX_ENDPOINT_VPC_NETWORK,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+ create_index_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index_endpoint=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_create_index_endpoint_with_private_service_connect(
+ self, create_index_endpoint_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ labels=_TEST_LABELS,
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ encryption_spec_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME,
+ )
+
+ expected = gca_index_endpoint.IndexEndpoint(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ labels=_TEST_LABELS,
+ private_service_connect_config=gca_service_networking.PrivateServiceConnectConfig(
+ project_allowlist=_TEST_PROJECT_ALLOWLIST,
+ enable_private_service_connect=True,
+ ),
+ encryption_spec=gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME
+ ),
+ )
+ create_index_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index_endpoint=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_create_index_endpoint_with_network_init(self, create_index_endpoint_mock):
+ aiplatform.init(project=_TEST_PROJECT, network=_TEST_INDEX_ENDPOINT_VPC_NETWORK)
+
+ aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ labels=_TEST_LABELS,
+ encryption_spec_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME,
+ )
+
+ expected = gca_index_endpoint.IndexEndpoint(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ network=_TEST_INDEX_ENDPOINT_VPC_NETWORK,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ labels=_TEST_LABELS,
+ public_endpoint_enabled=False,
+ encryption_spec=gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME
+ ),
+ )
+
+ create_index_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index_endpoint=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_index_public_endpoint_mock")
+ def test_create_index_endpoint_with_public_endpoint_enabled(
+ self, create_index_endpoint_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ public_endpoint_enabled=True,
+ labels=_TEST_LABELS,
+ encryption_spec_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME,
+ )
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ expected = gca_index_endpoint.IndexEndpoint(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ public_endpoint_enabled=True,
+ labels=_TEST_LABELS,
+ encryption_spec=gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_SPEC_KEY_NAME
+ ),
+ )
+
+ create_index_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ index_endpoint=expected,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ assert (
+ my_index_endpoint.public_endpoint_domain_name
+ == _TEST_INDEX_ENDPOINT_PUBLIC_DNS
+ )
+
+ def test_create_index_endpoint_missing_argument_throw_error(
+ self, create_index_endpoint_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ expected_message = (
+ "Please provide `network` argument for Private Service Access endpoint,"
+ "or provide `enable_private_service_connect` for Private Service"
+ "Connect endpoint, or provide `public_endpoint_enabled` to"
+ "deploy to a public endpoint"
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+
+ assert str(exception.value) == expected_message
+
+ def test_create_index_endpoint_set_both_psa_and_public_throw_error(
+ self, create_index_endpoint_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ expected_message = "One and only one among network, public_endpoint_enabled and enable_private_service_connect should be set."
+
+ with pytest.raises(ValueError) as exception:
+ _ = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ public_endpoint_enabled=True,
+ network=_TEST_INDEX_ENDPOINT_VPC_NETWORK,
+ labels=_TEST_LABELS,
+ )
+
+ assert str(exception.value) == expected_message
+
+ def test_create_index_endpoint_set_both_psa_and_psc_throw_error(
+ self, create_index_endpoint_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ expected_message = "One and only one among network, public_endpoint_enabled and enable_private_service_connect should be set."
+
+ with pytest.raises(ValueError) as exception:
+ _ = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ network=_TEST_INDEX_ENDPOINT_VPC_NETWORK,
+ labels=_TEST_LABELS,
+ enable_private_service_connect=True,
+ )
+
+ assert str(exception.value) == expected_message
+
+ def test_create_index_endpoint_set_both_psc_and_public_throw_error(
+ self, create_index_endpoint_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ expected_message = "One and only one among network, public_endpoint_enabled and enable_private_service_connect should be set."
+
+ with pytest.raises(ValueError) as exception:
+ _ = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
+ description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
+ public_endpoint_enabled=True,
+ labels=_TEST_LABELS,
+ enable_private_service_connect=True,
+ )
+
+ assert str(exception.value) == expected_message
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock", "get_index_mock")
+ def test_deploy_index(self, deploy_index_mock, undeploy_index_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ # Get index
+ my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_NAME)
+
+ my_index_endpoint = my_index_endpoint.deploy_index(
+ index=my_index,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ min_replica_count=_TEST_MIN_REPLICA_COUNT,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ auth_config_audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ auth_config_allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ request_metadata=_TEST_REQUEST_METADATA,
+ deploy_request_timeout=_TEST_TIMEOUT,
+ )
+
+ deploy_index_mock.assert_called_once_with(
+ index_endpoint=my_index_endpoint.resource_name,
+ deployed_index=gca_index_endpoint.DeployedIndex(
+ id=_TEST_DEPLOYED_INDEX_ID,
+ index=my_index.resource_name,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ ),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ my_index_endpoint = my_index_endpoint.undeploy_index(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID
+ )
+
+ undeploy_index_mock.assert_called_once_with(
+ index_endpoint=my_index_endpoint.resource_name,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_psc_automated_index_endpoint_mock", "get_index_mock")
+ def test_deploy_index_psc_automation_configs(self, deploy_index_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ # Get index
+ my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_NAME)
+
+ my_index_endpoint = my_index_endpoint.deploy_index(
+ index=my_index,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ min_replica_count=_TEST_MIN_REPLICA_COUNT,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ auth_config_audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ auth_config_allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ psc_automation_configs=_TEST_PSC_AUTOMATION_CONFIGS,
+ request_metadata=_TEST_REQUEST_METADATA,
+ deploy_request_timeout=_TEST_TIMEOUT,
+ )
+
+ deploy_index_mock.assert_called_once_with(
+ index_endpoint=my_index_endpoint.resource_name,
+ deployed_index=gca_index_endpoint.DeployedIndex(
+ id=_TEST_DEPLOYED_INDEX_ID,
+ index=my_index.resource_name,
+ display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
+ enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ deployment_group=_TEST_DEPLOYMENT_GROUP,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT,
+ },
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ audiences=_TEST_AUTH_CONFIG_AUDIENCES,
+ allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
+ )
+ ),
+ psc_automation_configs=[
+ gca_service_networking.PSCAutomationConfig(
+ project_id=test_psc_automation_config[0],
+ network=test_psc_automation_config[1],
+ )
+ for test_psc_automation_config in _TEST_PSC_AUTOMATION_CONFIGS
+ ],
+ ),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock", "get_index_mock")
+ def test_mutate_deployed_index(self, mutate_deployed_index_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint.mutate_deployed_index(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ min_replica_count=_TEST_MIN_REPLICA_COUNT_UPDATED,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT_UPDATED,
+ request_metadata=_TEST_REQUEST_METADATA,
+ mutate_request_timeout=_TEST_TIMEOUT,
+ )
+
+ mutate_deployed_index_mock.assert_called_once_with(
+ index_endpoint=_TEST_INDEX_ENDPOINT_NAME,
+ deployed_index=gca_index_endpoint.DeployedIndex(
+ id=_TEST_DEPLOYED_INDEX_ID,
+ automatic_resources={
+ "min_replica_count": _TEST_MIN_REPLICA_COUNT_UPDATED,
+ "max_replica_count": _TEST_MAX_REPLICA_COUNT_UPDATED,
+ },
+ ),
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_delete_index_endpoint_without_force(
+ self, undeploy_index_mock, delete_index_endpoint_mock, sync
+ ):
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_NAME
+ )
+
+ my_index_endpoint.delete(sync=sync)
+
+ if not sync:
+ my_index_endpoint.wait()
+
+ # undeploy_index_mock should not be called unless force is set to True
+ undeploy_index_mock.assert_not_called()
+
+ delete_index_endpoint_mock.assert_called_once_with(
+ name=_TEST_INDEX_ENDPOINT_NAME
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_delete_index_endpoint_with_force(
+ self, undeploy_index_mock, delete_index_endpoint_mock, sync
+ ):
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_NAME
+ )
+ my_index_endpoint.delete(force=True, sync=sync)
+
+ if not sync:
+ my_index_endpoint.wait()
+
+ # undeploy_index_mock should be called if force is set to True
+ assert undeploy_index_mock.call_count == 2
+
+ delete_index_endpoint_mock.assert_called_once_with(
+ name=_TEST_INDEX_ENDPOINT_NAME
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_index_endpoint_match_queries_backward_compatibility(
+ self, index_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint.match(
+ _TEST_DEPLOYED_INDEX_ID,
+ _TEST_QUERIES,
+ _TEST_NUM_NEIGHBOURS,
+ _TEST_FILTER,
+ )
+
+ batch_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ requests=[
+ match_service_pb2.MatchRequest(
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ float_val=_TEST_QUERIES[0],
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ )
+ ],
+ )
+ ]
+ )
+
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_private_service_access_hybrid_search_match_queries(
+ self, index_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint.match(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ queries=_TEST_HYBRID_QUERIES,
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ low_level_batch_size=_TEST_LOW_LEVEL_BATCH_SIZE,
+ numeric_filter=_TEST_NUMERIC_FILTER,
+ )
+
+ batch_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ low_level_batch_size=_TEST_LOW_LEVEL_BATCH_SIZE,
+ requests=[
+ match_service_pb2.MatchRequest(
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ float_val=_TEST_HYBRID_QUERIES[i].dense_embedding,
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ numeric_restricts=_TEST_NUMERIC_NAMESPACE,
+ sparse_embedding=match_service_pb2.SparseEmbedding(
+ float_val=_TEST_HYBRID_QUERIES[
+ i
+ ].sparse_embedding_values,
+ dimension=_TEST_HYBRID_QUERIES[
+ i
+ ].sparse_embedding_dimensions,
+ ),
+ rrf=match_service_pb2.MatchRequest.RRF(
+ alpha=_TEST_HYBRID_QUERIES[i].rrf_ranking_alpha,
+ )
+ if _TEST_HYBRID_QUERIES[i].rrf_ranking_alpha
+ else None,
+ )
+ for i in range(len(_TEST_HYBRID_QUERIES))
+ ],
+ )
+ ]
+ )
+
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_private_service_access_index_endpoint_match_queries(
+ self, index_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint.match(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ queries=_TEST_QUERIES,
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ low_level_batch_size=_TEST_LOW_LEVEL_BATCH_SIZE,
+ numeric_filter=_TEST_NUMERIC_FILTER,
+ )
+
+ batch_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ low_level_batch_size=_TEST_LOW_LEVEL_BATCH_SIZE,
+ requests=[
+ match_service_pb2.MatchRequest(
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ float_val=_TEST_QUERIES[i],
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ numeric_restricts=_TEST_NUMERIC_NAMESPACE,
+ )
+ for i in range(len(_TEST_QUERIES))
+ ],
+ )
+ ]
+ )
+
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_private_service_access_index_endpoint_match_queries_with_jwt(
+ self, index_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint.match(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ queries=_TEST_QUERIES,
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ low_level_batch_size=_TEST_LOW_LEVEL_BATCH_SIZE,
+ numeric_filter=_TEST_NUMERIC_FILTER,
+ signed_jwt=_TEST_SIGNED_JWT,
+ )
+
+ batch_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ low_level_batch_size=_TEST_LOW_LEVEL_BATCH_SIZE,
+ requests=[
+ match_service_pb2.MatchRequest(
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ float_val=_TEST_QUERIES[i],
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ numeric_restricts=_TEST_NUMERIC_NAMESPACE,
+ )
+ for i in range(len(_TEST_QUERIES))
+ ],
+ )
+ ]
+ )
+
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_request, metadata=_TEST_AUTHORIZATION_METADATA
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_index_private_service_access_endpoint_find_neighbor_queries(
+ self, index_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_private_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_private_index_endpoint.find_neighbors(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=_TEST_QUERIES,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ numeric_filter=_TEST_NUMERIC_FILTER,
+ )
+
+ batch_match_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ requests=[
+ match_service_pb2.MatchRequest(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ float_val=test_query,
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ numeric_restricts=_TEST_NUMERIC_NAMESPACE,
+ )
+ for test_query in _TEST_QUERIES
+ ],
+ )
+ ]
+ )
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_match_request, metadata=mock.ANY
+ )
+
+ @pytest.mark.usefixtures("get_psc_automated_index_endpoint_mock")
+ def test_index_private_service_connect_automation_endpoint_find_neighbor_queries(
+ self, index_endpoint_match_queries_mock, grpc_insecure_channel_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_private_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_private_index_endpoint.find_neighbors(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=_TEST_QUERIES,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ numeric_filter=_TEST_NUMERIC_FILTER,
+ psc_network=_TEST_PSC_NETWORK1,
+ )
+
+ batch_match_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ requests=[
+ match_service_pb2.MatchRequest(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ float_val=test_query,
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ numeric_restricts=_TEST_NUMERIC_NAMESPACE,
+ )
+ for test_query in _TEST_QUERIES
+ ],
+ )
+ ]
+ )
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_match_request, metadata=mock.ANY
+ )
+ grpc_insecure_channel_mock.assert_called_with(
+ _TEST_PRIVATE_SERVICE_CONNECT_AUTOMATION_URI_1
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_index_private_service_access_endpoint_find_neighbor_queries_with_jwt(
+ self, index_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_private_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_private_index_endpoint.find_neighbors(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=_TEST_QUERIES,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ numeric_filter=_TEST_NUMERIC_FILTER,
+ signed_jwt=_TEST_SIGNED_JWT,
+ )
+
+ batch_match_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ requests=[
+ match_service_pb2.MatchRequest(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ float_val=test_query,
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ numeric_restricts=_TEST_NUMERIC_NAMESPACE,
+ )
+ for test_query in _TEST_QUERIES
+ ],
+ )
+ ]
+ )
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_match_request, metadata=_TEST_AUTHORIZATION_METADATA
+ )
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_index_private_service_connect_endpoint_match_queries(
+ self, grpc_insecure_channel_mock, index_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint.private_service_connect_ip_address = (
+ _TEST_PRIVATE_SERVICE_CONNECT_IP_ADDRESS
+ )
+ my_index_endpoint.match(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=_TEST_QUERIES,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ )
+
+ batch_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ requests=[
+ match_service_pb2.MatchRequest(
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ float_val=_TEST_QUERIES[0],
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ )
+ ],
+ )
+ ]
+ )
+
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ grpc_insecure_channel_mock.assert_called_with(_TEST_PRIVATE_SERVICE_CONNECT_URI)
+
+ @pytest.mark.usefixtures("get_psc_automated_index_endpoint_mock")
+ def test_index_private_service_connect_automation_match_queries(
+ self, index_endpoint_match_queries_mock, grpc_insecure_channel_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint.match(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=_TEST_QUERIES,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ psc_network=_TEST_PSC_NETWORK1,
+ )
+
+ batch_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ requests=[
+ match_service_pb2.MatchRequest(
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ float_val=_TEST_QUERIES[0],
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ )
+ ],
+ )
+ ]
+ )
+
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ grpc_insecure_channel_mock.assert_called_with(
+ _TEST_PRIVATE_SERVICE_CONNECT_AUTOMATION_URI_1
+ )
+
+ @pytest.mark.usefixtures("get_psc_automated_index_endpoint_mock")
+ def test_index_private_service_connect_automation_match_queries_find_ip_address(
+ self, index_endpoint_match_queries_mock, grpc_insecure_channel_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint.match(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=_TEST_QUERIES,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ psc_network=_TEST_PSC_NETWORK3,
+ )
+
+ batch_request = match_service_pb2.BatchMatchRequest(
+ requests=[
+ match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ requests=[
+ match_service_pb2.MatchRequest(
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ float_val=_TEST_QUERIES[0],
+ restricts=[
+ match_service_pb2.Namespace(
+ name="class",
+ allow_tokens=["token_1"],
+ deny_tokens=["token_2"],
+ )
+ ],
+ per_crowding_attribute_num_neighbors=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ )
+ ],
+ )
+ ]
+ )
+
+ index_endpoint_match_queries_mock.BatchMatch.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ grpc_insecure_channel_mock.assert_called_with(
+ _TEST_PRIVATE_SERVICE_CONNECT_AUTOMATION_URI_3
+ )
+
+ @pytest.mark.usefixtures("get_index_public_endpoint_mock")
+ def test_index_public_endpoint_find_neighbors_queries_backward_compatibility(
+ self, index_public_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_public_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_public_index_endpoint.find_neighbors(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=_TEST_QUERIES,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ )
+
+ find_neighbors_request = gca_match_service_v1beta1.FindNeighborsRequest(
+ index_endpoint=my_public_index_endpoint.resource_name,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=[
+ gca_match_service_v1beta1.FindNeighborsRequest.Query(
+ neighbor_count=_TEST_NUM_NEIGHBOURS,
+ datapoint=gca_index_v1beta1.IndexDatapoint(
+ feature_vector=_TEST_QUERIES[0],
+ restricts=[
+ gca_index_v1beta1.IndexDatapoint.Restriction(
+ namespace="class",
+ allow_list=["token_1"],
+ deny_list=["token_2"],
+ )
+ ],
+ ),
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approximate_neighbor_count=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ )
+ ],
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ )
+
+ index_public_endpoint_match_queries_mock.assert_called_with(
+ find_neighbors_request
+ )
+
+ @pytest.mark.usefixtures("get_index_public_endpoint_mock")
+ def test_index_public_endpoint_find_neighbors_queries(
+ self, index_public_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_public_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_public_index_endpoint.find_neighbors(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ queries=_TEST_HYBRID_QUERIES,
+ )
+
+ find_neighbors_request = gca_match_service_v1beta1.FindNeighborsRequest(
+ index_endpoint=my_public_index_endpoint.resource_name,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=[
+ gca_match_service_v1beta1.FindNeighborsRequest.Query(
+ neighbor_count=_TEST_NUM_NEIGHBOURS,
+ datapoint=gca_index_v1beta1.IndexDatapoint(
+ restricts=[
+ gca_index_v1beta1.IndexDatapoint.Restriction(
+ namespace="class",
+ allow_list=["token_1"],
+ deny_list=["token_2"],
+ )
+ ],
+ sparse_embedding=gca_index_v1beta1.IndexDatapoint.SparseEmbedding(
+ values=[0.1, 0.2, 0.3], dimensions=[1, 2, 3]
+ ),
+ ),
+ rrf=gca_match_service_v1beta1.FindNeighborsRequest.Query.RRF(
+ alpha=0.2,
+ ),
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approximate_neighbor_count=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ ),
+ gca_match_service_v1beta1.FindNeighborsRequest.Query(
+ neighbor_count=_TEST_NUM_NEIGHBOURS,
+ datapoint=gca_index_v1beta1.IndexDatapoint(
+ feature_vector=_TEST_QUERIES[0],
+ restricts=[
+ gca_index_v1beta1.IndexDatapoint.Restriction(
+ namespace="class",
+ allow_list=["token_1"],
+ deny_list=["token_2"],
+ )
+ ],
+ sparse_embedding=gca_index_v1beta1.IndexDatapoint.SparseEmbedding(
+ values=[0.1, 0.2, 0.3], dimensions=[1, 2, 3]
+ ),
+ ),
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approximate_neighbor_count=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ ),
+ ],
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ )
+
+ index_public_endpoint_match_queries_mock.assert_called_with(
+ find_neighbors_request
+ )
+
+ @pytest.mark.usefixtures("get_index_public_endpoint_mock")
+ def test_index_public_endpoint_find_neighbor_query_by_id(
+ self, index_public_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_pubic_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_pubic_index_endpoint.find_neighbors(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ embedding_ids=_TEST_QUERY_IDS,
+ )
+
+ find_neighbors_request = gca_match_service_v1beta1.FindNeighborsRequest(
+ index_endpoint=my_pubic_index_endpoint.resource_name,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=[
+ gca_match_service_v1beta1.FindNeighborsRequest.Query(
+ neighbor_count=_TEST_NUM_NEIGHBOURS,
+ datapoint=gca_index_v1beta1.IndexDatapoint(
+ datapoint_id=_TEST_QUERY_IDS[i],
+ restricts=[
+ gca_index_v1beta1.IndexDatapoint.Restriction(
+ namespace="class",
+ allow_list=["token_1"],
+ deny_list=["token_2"],
+ )
+ ],
+ ),
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approximate_neighbor_count=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ )
+ for i in range(len(_TEST_QUERY_IDS))
+ ],
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ )
+
+ index_public_endpoint_match_queries_mock.assert_called_with(
+ find_neighbors_request
+ )
+
+ @pytest.mark.usefixtures("get_index_public_endpoint_mock")
+ def test_index_public_endpoint_match_queries_with_numeric_filtering(
+ self, index_public_endpoint_match_queries_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_public_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_public_index_endpoint.find_neighbors(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=_TEST_QUERIES,
+ num_neighbors=_TEST_NUM_NEIGHBOURS,
+ filter=_TEST_FILTER,
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ numeric_filter=_TEST_NUMERIC_FILTER,
+ )
+
+ find_neighbors_request = gca_match_service_v1beta1.FindNeighborsRequest(
+ index_endpoint=my_public_index_endpoint.resource_name,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ queries=[
+ gca_match_service_v1beta1.FindNeighborsRequest.Query(
+ neighbor_count=_TEST_NUM_NEIGHBOURS,
+ datapoint=gca_index_v1beta1.IndexDatapoint(
+ feature_vector=_TEST_QUERIES[0],
+ restricts=[
+ gca_index_v1beta1.IndexDatapoint.Restriction(
+ namespace="class",
+ allow_list=["token_1"],
+ deny_list=["token_2"],
+ )
+ ],
+ numeric_restricts=[
+ gca_index_v1beta1.IndexDatapoint.NumericRestriction(
+ namespace="cost", value_double=0.3, op="EQUAL"
+ ),
+ gca_index_v1beta1.IndexDatapoint.NumericRestriction(
+ namespace="size", value_int=0, op="GREATER"
+ ),
+ gca_index_v1beta1.IndexDatapoint.NumericRestriction(
+ namespace="seconds", value_float=-20.5, op="LESS_EQUAL"
+ ),
+ gca_index_v1beta1.IndexDatapoint.NumericRestriction(
+ namespace="duration", value_int=10, op="NOT_EQUAL"
+ ),
+ ],
+ ),
+ per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS,
+ approximate_neighbor_count=_TEST_APPROX_NUM_NEIGHBORS,
+ fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE,
+ )
+ ],
+ return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT,
+ )
+
+ index_public_endpoint_match_queries_mock.assert_called_with(
+ find_neighbors_request
+ )
+
+ def test_post_init_numeric_filter_invalid_operator_throws_exception(
+ self,
+ ):
+ expected_message = (
+ "Invalid operator 'NOT_EQ', must be one of the valid operators."
+ )
+ with pytest.raises(ValueError) as exception:
+ NumericNamespace(name="cost", value_int=3, op="NOT_EQ")
+
+ assert str(exception.value) == expected_message
+
+ def test_post_init_numeric_namespace_missing_value_throws_exception(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ expected_message = (
+ "Must choose one among `value_int`,"
+ "`value_float` and `value_double` for "
+ "intended precision."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ NumericNamespace(name="cost", op="EQUAL")
+
+ assert str(exception.value) == expected_message
+
+ def test_index_public_endpoint_match_queries_with_numeric_filtering_value_type_mismatch_throws_exception(
+ self,
+ ):
+ expected_message = "value_int must be of type int, got ."
+
+ with pytest.raises(ValueError) as exception:
+ NumericNamespace(name="cost", value_int=0.3, op="EQUAL")
+
+ assert str(exception.value) == expected_message
+
+ @pytest.mark.usefixtures("get_index_public_endpoint_mock")
+ def test_index_public_endpoint_read_index_datapoints(
+ self, index_public_endpoint_read_index_datapoints_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_public_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_public_index_endpoint.read_index_datapoints(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ ids=_TEST_IDS,
+ )
+
+ read_index_datapoints_request = (
+ gca_match_service_v1beta1.ReadIndexDatapointsRequest(
+ index_endpoint=my_public_index_endpoint.resource_name,
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ ids=_TEST_IDS,
+ )
+ )
+
+ index_public_endpoint_read_index_datapoints_mock.assert_called_with(
+ read_index_datapoints_request
+ )
+
+ @pytest.mark.usefixtures("get_psa_index_endpoint_mock")
+ def test_index_endpoint_batch_get_embeddings(
+ self, index_endpoint_batch_get_embeddings_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint._batch_get_embeddings(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID, ids=["1", "2"]
+ )
+
+ batch_request = match_service_pb2.BatchGetEmbeddingsRequest(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID, id=["1", "2"]
+ )
+
+ index_endpoint_batch_get_embeddings_mock.BatchGetEmbeddings.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ @pytest.mark.usefixtures("get_psa_index_endpoint_mock")
+ def test_index_endpoint_read_index_datapoints_for_private_service_access(
+ self, index_endpoint_batch_get_embeddings_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ response = my_index_endpoint.read_index_datapoints(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID, ids=["1", "2"]
+ )
+
+ batch_request = match_service_pb2.BatchGetEmbeddingsRequest(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID, id=["1", "2"]
+ )
+
+ index_endpoint_batch_get_embeddings_mock.BatchGetEmbeddings.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ assert response == _TEST_READ_INDEX_DATAPOINTS_RESPONSE
+
+ @pytest.mark.usefixtures("get_index_endpoint_mock")
+ def test_index_endpoint_read_index_datapoints_for_private_service_access_with_jwt(
+ self, index_endpoint_batch_get_embeddings_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ response = my_index_endpoint.read_index_datapoints(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ ids=["1", "2"],
+ signed_jwt=_TEST_SIGNED_JWT,
+ )
+
+ batch_request = match_service_pb2.BatchGetEmbeddingsRequest(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID, id=["1", "2"]
+ )
+
+ index_endpoint_batch_get_embeddings_mock.BatchGetEmbeddings.assert_called_with(
+ batch_request, metadata=_TEST_AUTHORIZATION_METADATA
+ )
+
+ assert response == _TEST_READ_INDEX_DATAPOINTS_RESPONSE
+
+ @pytest.mark.usefixtures("get_manual_psc_index_endpoint_mock")
+ def test_index_endpoint_read_index_datapoints_for_private_service_connect(
+ self, grpc_insecure_channel_mock, index_endpoint_batch_get_embeddings_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ my_index_endpoint.private_service_connect_ip_address = (
+ _TEST_PRIVATE_SERVICE_CONNECT_IP_ADDRESS
+ )
+ response = my_index_endpoint.read_index_datapoints(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ ids=["1", "2"],
+ )
+
+ batch_request = match_service_pb2.BatchGetEmbeddingsRequest(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID, id=["1", "2"]
+ )
+
+ index_endpoint_batch_get_embeddings_mock.BatchGetEmbeddings.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ grpc_insecure_channel_mock.assert_called_with(_TEST_PRIVATE_SERVICE_CONNECT_URI)
+
+ assert response == _TEST_READ_INDEX_DATAPOINTS_RESPONSE
+
+ @pytest.mark.usefixtures("get_psc_automated_index_endpoint_mock")
+ def test_index_endpoint_read_index_datapoints_for_private_service_connect_automation(
+ self, index_endpoint_batch_get_embeddings_mock, grpc_insecure_channel_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
+ )
+
+ response = my_index_endpoint.read_index_datapoints(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ ids=["1", "2"],
+ psc_network=_TEST_PSC_NETWORK1,
+ )
+
+ batch_request = match_service_pb2.BatchGetEmbeddingsRequest(
+ deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
+ id=["1", "2"],
+ )
+
+ index_endpoint_batch_get_embeddings_mock.BatchGetEmbeddings.assert_called_with(
+ batch_request, metadata=mock.ANY
+ )
+
+ grpc_insecure_channel_mock.assert_called_with(
+ _TEST_PRIVATE_SERVICE_CONNECT_AUTOMATION_URI_1
+ )
+
+ assert response == _TEST_READ_INDEX_DATAPOINTS_RESPONSE
+
+
+class TestMatchNeighbor:
+ def test_from_index_datapoint(self):
+ index_datapoint = gca_index_v1beta1.IndexDatapoint()
+ index_datapoint.datapoint_id = "test_datapoint_id"
+ index_datapoint.feature_vector = [1.0, 2.0, 3.0]
+ index_datapoint.crowding_tag = gca_index_v1beta1.IndexDatapoint.CrowdingTag(
+ crowding_attribute="test_crowding"
+ )
+ index_datapoint.restricts = [
+ gca_index_v1beta1.IndexDatapoint.Restriction(
+ namespace="namespace1", allow_list=["token1"], deny_list=["token2"]
+ ),
+ ]
+ index_datapoint.numeric_restricts = [
+ gca_index_v1beta1.IndexDatapoint.NumericRestriction(
+ namespace="namespace2",
+ value_int=0,
+ )
+ ]
+
+ result = MatchNeighbor(
+ id="index_datapoint_id", distance=0.3
+ ).from_index_datapoint(index_datapoint)
+
+ assert result.feature_vector == [1.0, 2.0, 3.0]
+ assert result.crowding_tag == "test_crowding"
+ assert len(result.restricts) == 1
+ assert result.restricts[0].name == "namespace1"
+ assert result.restricts[0].allow_tokens == ["token1"]
+ assert result.restricts[0].deny_tokens == ["token2"]
+ assert len(result.numeric_restricts) == 1
+ assert result.numeric_restricts[0].name == "namespace2"
+ assert result.numeric_restricts[0].value_int == 0
+ assert result.numeric_restricts[0].value_float is None
+ assert result.numeric_restricts[0].value_double is None
+
+ def test_from_embedding(self):
+ embedding = match_service_pb2.Embedding(
+ id="test_embedding_id",
+ float_val=[1.0, 2.0, 3.0],
+ crowding_attribute=1,
+ restricts=[
+ match_service_pb2.Namespace(
+ name="namespace1", allow_tokens=["token1"], deny_tokens=["token2"]
+ ),
+ ],
+ numeric_restricts=[
+ match_service_pb2.NumericNamespace(
+ name="namespace2", value_int=10, value_float=None, value_double=None
+ )
+ ],
+ )
+
+ result = MatchNeighbor(id="embedding_id", distance=0.3).from_embedding(
+ embedding
+ )
+
+ assert result.feature_vector == [1.0, 2.0, 3.0]
+ assert result.crowding_tag == "1"
+ assert len(result.restricts) == 1
+ assert result.restricts[0].name == "namespace1"
+ assert result.restricts[0].allow_tokens == ["token1"]
+ assert result.restricts[0].deny_tokens == ["token2"]
+ assert len(result.numeric_restricts) == 1
+ assert result.numeric_restricts[0].name == "namespace2"
+ assert result.numeric_restricts[0].value_int == 10
+ assert not result.numeric_restricts[0].value_float
+ assert not result.numeric_restricts[0].value_double
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac5ada2e7239af2bc9a1e6208e5a78898a716e98
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata.py
@@ -0,0 +1,2288 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import copy
+from importlib import reload
+from unittest import TestCase, mock
+from unittest.mock import patch, call
+
+import numpy as np
+from sklearn.linear_model import LinearRegression
+
+import pytest
+from google.api_core import exceptions
+from google.api_core import operation
+from google.auth import credentials
+
+import google.cloud.aiplatform.metadata.constants
+from google.cloud.aiplatform.metadata.schema.google import (
+ artifact_schema as google_artifact_schema,
+)
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform_v1 import (
+ AddContextArtifactsAndExecutionsResponse,
+ LineageSubgraph,
+ Artifact as GapicArtifact,
+ Context as GapicContext,
+ Execution as GapicExecution,
+ JobServiceClient,
+ MetadataServiceClient,
+ AddExecutionEventsResponse,
+ MetadataStore as GapicMetadataStore,
+ TensorboardServiceClient,
+)
+from google.cloud.aiplatform.compat.types import event as gca_event
+from google.cloud.aiplatform.compat.types import execution as gca_execution
+from google.cloud.aiplatform.compat.types import (
+ tensorboard_data as gca_tensorboard_data,
+)
+from google.cloud.aiplatform.compat.types import (
+ tensorboard as gca_tensorboard,
+)
+from google.cloud.aiplatform.compat.types import (
+ tensorboard_experiment as gca_tensorboard_experiment,
+)
+from google.cloud.aiplatform.compat.types import (
+ tensorboard_run as gca_tensorboard_run,
+)
+from google.cloud.aiplatform.compat.types import (
+ tensorboard_time_series as gca_tensorboard_time_series,
+)
+from google.cloud.aiplatform.metadata import constants
+from google.cloud.aiplatform.metadata import experiment_resources
+from google.cloud.aiplatform.metadata import experiment_run_resource
+from google.cloud.aiplatform.metadata import metadata
+from google.cloud.aiplatform.metadata import metadata_store
+from google.cloud.aiplatform.metadata import utils as metadata_utils
+from google.cloud.aiplatform.tensorboard import tensorboard_resource
+
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform.utils import _ipython_utils
+
+import constants as test_constants
+
+_TEST_PROJECT = "test-project"
+_TEST_OTHER_PROJECT = "test-project-1"
+_TEST_LOCATION = "us-central1"
+_TEST_PARENT = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_EXPERIMENT_DESCRIPTION = "test-experiment-description"
+_TEST_OTHER_EXPERIMENT_DESCRIPTION = "test-other-experiment-description"
+_TEST_PIPELINE = _TEST_EXPERIMENT
+_TEST_RUN = "run-1"
+_TEST_OTHER_RUN = "run-2"
+_TEST_DISPLAY_NAME = "test-display-name"
+_TEST_CREDENTIALS = mock.Mock(
+ spec=credentials.AnonymousCredentials(),
+ universe_domain="googleapis.com",
+)
+_TEST_BUCKET_NAME = "gs://test-bucket"
+
+# resource attributes
+_TEST_METADATA = {"test-param1": 1, "test-param2": "test-value", "test-param3": True}
+
+# metadataStore
+_TEST_METADATASTORE = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+
+# context
+_TEST_CONTEXT_ID = _TEST_EXPERIMENT
+_TEST_CONTEXT_NAME = f"{_TEST_PARENT}/contexts/{_TEST_CONTEXT_ID}"
+
+# execution
+_TEST_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_RUN}"
+_TEST_EXECUTION_NAME = f"{_TEST_PARENT}/executions/{_TEST_EXECUTION_ID}"
+_TEST_OTHER_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_OTHER_RUN}"
+_TEST_OTHER_EXECUTION_NAME = f"{_TEST_PARENT}/executions/{_TEST_OTHER_EXECUTION_ID}"
+_TEST_SCHEMA_TITLE = "test.Schema"
+
+_TEST_EXECUTION = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ metadata=_TEST_METADATA,
+ state=GapicExecution.State.RUNNING,
+)
+
+# artifact
+_TEST_ARTIFACT_ID = f"{_TEST_EXPERIMENT}-{_TEST_RUN}-metrics"
+_TEST_ARTIFACT_NAME = f"{_TEST_PARENT}/artifacts/{_TEST_ARTIFACT_ID}"
+_TEST_OTHER_ARTIFACT_ID = f"{_TEST_EXPERIMENT}-{_TEST_OTHER_RUN}-metrics"
+_TEST_OTHER_ARTIFACT_NAME = f"{_TEST_PARENT}/artifacts/{_TEST_OTHER_ARTIFACT_ID}"
+_TEST_MODEL_ID = "test-model"
+_TEST_MODEL_NAME = f"{_TEST_PARENT}/artifacts/{_TEST_MODEL_ID}"
+
+# parameters
+_TEST_PARAM_KEY_1 = "learning_rate"
+_TEST_PARAM_KEY_2 = "dropout"
+_TEST_PARAMS = {_TEST_PARAM_KEY_1: 0.01, _TEST_PARAM_KEY_2: 0.2}
+_TEST_OTHER_PARAMS = {_TEST_PARAM_KEY_1: 0.02, _TEST_PARAM_KEY_2: 0.3}
+
+# metrics
+_TEST_METRIC_KEY_1 = "rmse"
+_TEST_METRIC_KEY_2 = "accuracy"
+_TEST_METRICS = {_TEST_METRIC_KEY_1: 222, _TEST_METRIC_KEY_2: 1}
+_TEST_OTHER_METRICS = {_TEST_METRIC_KEY_2: 0.9}
+
+# classification_metrics
+_TEST_CLASSIFICATION_METRICS = {
+ "display_name": "my-classification-metrics",
+ "labels": ["cat", "dog"],
+ "matrix": [[9, 1], [1, 9]],
+ "fpr": [0.1, 0.5, 0.9],
+ "tpr": [0.1, 0.7, 0.9],
+ "threshold": [0.9, 0.5, 0.1],
+}
+
+# schema
+_TEST_WRONG_SCHEMA_TITLE = "system.WrongSchema"
+
+# tensorboard
+_TEST_DEFAULT_TENSORBOARD_NAME = "test-tensorboard-default-name"
+_TEST_DEFAULT_TENSORBOARD_GCA = gca_tensorboard.Tensorboard(
+ name=_TEST_DEFAULT_TENSORBOARD_NAME,
+ is_default=True,
+)
+
+
+@pytest.fixture
+def get_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.return_value = GapicMetadataStore(
+ name=_TEST_METADATASTORE,
+ )
+ yield get_metadata_store_mock
+
+
+@pytest.fixture
+def get_metadata_store_mock_raise_not_found_exception():
+ with patch.object(
+ MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.side_effect = [
+ exceptions.NotFound("Test store not found."),
+ GapicMetadataStore(
+ name=_TEST_METADATASTORE,
+ ),
+ ]
+
+ yield get_metadata_store_mock
+
+
+@pytest.fixture
+def ipython_is_available_mock():
+ with patch.object(_ipython_utils, "is_ipython_available") as ipython_available_mock:
+ ipython_available_mock.return_value = True
+ yield ipython_available_mock
+
+
+@pytest.fixture
+def ipython_is_not_available_mock():
+ with patch.object(
+ _ipython_utils, "is_ipython_available"
+ ) as ipython_not_available_mock:
+ ipython_not_available_mock.return_value = False
+ yield ipython_not_available_mock
+
+
+@pytest.fixture
+def create_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "create_metadata_store"
+ ) as create_metadata_store_mock:
+ create_metadata_store_lro_mock = mock.Mock(operation.Operation)
+ create_metadata_store_lro_mock.result.return_value = GapicMetadataStore(
+ name=_TEST_METADATASTORE,
+ )
+ create_metadata_store_mock.return_value = create_metadata_store_lro_mock
+ yield create_metadata_store_mock
+
+
+@pytest.fixture
+def get_context_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.return_value = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ description=_TEST_EXPERIMENT_DESCRIPTION,
+ schema_title=constants.SYSTEM_EXPERIMENT,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata=constants.EXPERIMENT_METADATA,
+ )
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_context_wrong_schema_mock():
+ with patch.object(
+ MetadataServiceClient, "get_context"
+ ) as get_context_wrong_schema_mock:
+ get_context_wrong_schema_mock.return_value = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ schema_title=_TEST_WRONG_SCHEMA_TITLE,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata=constants.EXPERIMENT_METADATA,
+ )
+ yield get_context_wrong_schema_mock
+
+
+@pytest.fixture
+def get_pipeline_context_mock():
+ with patch.object(
+ MetadataServiceClient, "get_context"
+ ) as get_pipeline_context_mock:
+ get_pipeline_context_mock.return_value = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ schema_title=constants.SYSTEM_PIPELINE,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_PIPELINE],
+ metadata=constants.EXPERIMENT_METADATA,
+ )
+ yield get_pipeline_context_mock
+
+
+@pytest.fixture
+def get_context_not_found_mock():
+ with patch.object(
+ MetadataServiceClient, "get_context"
+ ) as get_context_not_found_mock:
+ get_context_not_found_mock.side_effect = exceptions.NotFound("test: not found")
+ yield get_context_not_found_mock
+
+
+_TEST_EXPERIMENT_CONTEXT = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ description=_TEST_EXPERIMENT_DESCRIPTION,
+ schema_title=constants.SYSTEM_EXPERIMENT,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata={
+ **constants.EXPERIMENT_METADATA,
+ constants._BACKING_TENSORBOARD_RESOURCE_KEY: test_constants.TensorboardConstants._TEST_TENSORBOARD_NAME,
+ },
+)
+
+
+@pytest.fixture
+def update_context_mock():
+ with patch.object(MetadataServiceClient, "update_context") as update_context_mock:
+ update_context_mock.return_value = _TEST_EXPERIMENT_CONTEXT
+ yield update_context_mock
+
+
+@pytest.fixture
+def add_context_artifacts_and_executions_mock():
+ with patch.object(
+ MetadataServiceClient, "add_context_artifacts_and_executions"
+ ) as add_context_artifacts_and_executions_mock:
+ add_context_artifacts_and_executions_mock.return_value = (
+ AddContextArtifactsAndExecutionsResponse()
+ )
+ yield add_context_artifacts_and_executions_mock
+
+
+@pytest.fixture
+def get_execution_mock():
+ with patch.object(MetadataServiceClient, "get_execution") as get_execution_mock:
+ get_execution_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ )
+ yield get_execution_mock
+
+
+@pytest.fixture
+def get_execution_not_found_mock():
+ with patch.object(
+ MetadataServiceClient, "get_execution"
+ ) as get_execution_not_found_mock:
+ get_execution_not_found_mock.side_effect = exceptions.NotFound(
+ "test: not found"
+ )
+ yield get_execution_not_found_mock
+
+
+@pytest.fixture
+def get_execution_wrong_schema_mock():
+ with patch.object(
+ MetadataServiceClient, "get_execution"
+ ) as get_execution_wrong_schema_mock:
+ get_execution_wrong_schema_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=_TEST_WRONG_SCHEMA_TITLE,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ )
+ yield get_execution_wrong_schema_mock
+
+
+@pytest.fixture
+def update_execution_mock():
+ with patch.object(
+ MetadataServiceClient, "update_execution"
+ ) as update_execution_mock:
+ update_execution_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata=_TEST_PARAMS,
+ )
+ yield update_execution_mock
+
+
+@pytest.fixture
+def add_execution_events_mock():
+ with patch.object(
+ MetadataServiceClient, "add_execution_events"
+ ) as add_execution_events_mock:
+ add_execution_events_mock.return_value = AddExecutionEventsResponse()
+ yield add_execution_events_mock
+
+
+@pytest.fixture
+def list_executions_mock():
+ with patch.object(MetadataServiceClient, "list_executions") as list_executions_mock:
+ list_executions_mock.return_value = [
+ GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata=_TEST_PARAMS,
+ ),
+ GapicExecution(
+ name=_TEST_OTHER_EXECUTION_NAME,
+ display_name=_TEST_OTHER_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata=_TEST_OTHER_PARAMS,
+ ),
+ ]
+ yield list_executions_mock
+
+
+@pytest.fixture
+def get_tensorboard_run_not_found_mock():
+ with patch.object(
+ TensorboardServiceClient, "get_tensorboard_run"
+ ) as get_tensorboard_run_mock:
+ get_tensorboard_run_mock.side_effect = [
+ exceptions.NotFound(""),
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_RUN,
+ ]
+ yield get_tensorboard_run_mock
+
+
+@pytest.fixture
+def list_default_tensorboard_mock():
+ with patch.object(
+ TensorboardServiceClient, "list_tensorboards"
+ ) as list_default_tensorboard_mock:
+ list_default_tensorboard_mock.side_effect = [
+ [_TEST_DEFAULT_TENSORBOARD_GCA],
+ [_TEST_DEFAULT_TENSORBOARD_GCA],
+ ]
+ yield list_default_tensorboard_mock
+
+
+@pytest.fixture
+def list_default_tensorboard_empty_mock():
+ with patch.object(
+ TensorboardServiceClient, "list_tensorboards"
+ ) as list_default_tensorboard_empty_mock:
+ list_default_tensorboard_empty_mock.return_value = []
+ yield list_default_tensorboard_empty_mock
+
+
+@pytest.fixture
+def create_default_tensorboard_mock():
+ with patch.object(
+ tensorboard_resource.Tensorboard, "create"
+ ) as create_default_tensorboard_mock:
+ create_default_tensorboard_mock.return_value = _TEST_DEFAULT_TENSORBOARD_GCA
+ yield create_default_tensorboard_mock
+
+
+@pytest.fixture
+def assign_backing_tensorboard_mock():
+ with patch.object(
+ experiment_resources.Experiment, "assign_backing_tensorboard"
+ ) as assign_backing_tensorboard_mock:
+ yield assign_backing_tensorboard_mock
+
+
+@pytest.fixture
+def get_or_create_default_tb_none_mock():
+ with patch.object(
+ metadata, "_get_or_create_default_tensorboard"
+ ) as get_or_create_default_tb_none_mock:
+ get_or_create_default_tb_none_mock.return_value = None
+ yield get_or_create_default_tb_none_mock
+
+
+@pytest.fixture
+def get_tensorboard_experiment_not_found_mock():
+ with patch.object(
+ TensorboardServiceClient, "get_tensorboard_experiment"
+ ) as get_tensorboard_experiment_mock:
+ get_tensorboard_experiment_mock.side_effect = [
+ exceptions.NotFound(""),
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_EXPERIMENT,
+ ]
+ yield get_tensorboard_experiment_mock
+
+
+@pytest.fixture
+def get_tensorboard_time_series_not_found_mock():
+ with patch.object(
+ TensorboardServiceClient, "get_tensorboard_time_series"
+ ) as get_tensorboard_time_series_mock:
+ get_tensorboard_time_series_mock.side_effect = [
+ exceptions.NotFound(""),
+ # test_tensorboard._TEST_TENSORBOARD_TIME_SERIES # change to time series
+ ]
+ yield get_tensorboard_time_series_mock
+
+
+@pytest.fixture
+def query_execution_inputs_and_outputs_mock():
+ with patch.object(
+ MetadataServiceClient, "query_execution_inputs_and_outputs"
+ ) as query_execution_inputs_and_outputs_mock:
+ query_execution_inputs_and_outputs_mock.side_effect = [
+ LineageSubgraph(
+ artifacts=[
+ GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_ARTIFACT_ID,
+ schema_title=constants.SYSTEM_METRICS,
+ schema_version=constants.SCHEMA_VERSIONS[
+ constants.SYSTEM_METRICS
+ ],
+ metadata=_TEST_METRICS,
+ )
+ ],
+ events=[
+ gca_event.Event(
+ artifact=_TEST_ARTIFACT_NAME,
+ execution=_TEST_EXECUTION_NAME,
+ type_=gca_event.Event.Type.OUTPUT,
+ )
+ ],
+ ),
+ LineageSubgraph(
+ artifacts=[
+ GapicArtifact(
+ name=_TEST_OTHER_ARTIFACT_NAME,
+ display_name=_TEST_OTHER_ARTIFACT_ID,
+ schema_title=constants.SYSTEM_METRICS,
+ schema_version=constants.SCHEMA_VERSIONS[
+ constants.SYSTEM_METRICS
+ ],
+ metadata=_TEST_OTHER_METRICS,
+ ),
+ ],
+ events=[
+ gca_event.Event(
+ artifact=_TEST_OTHER_ARTIFACT_NAME,
+ execution=_TEST_OTHER_EXECUTION_NAME,
+ type_=gca_event.Event.Type.OUTPUT,
+ )
+ ],
+ ),
+ ]
+ yield query_execution_inputs_and_outputs_mock
+
+
+_TEST_CLASSIFICATION_METRICS_METADATA = {
+ "confusionMatrix": {
+ "annotationSpecs": [{"displayName": "cat"}, {"displayName": "dog"}],
+ "rows": [[9, 1], [1, 9]],
+ },
+ "confidenceMetrics": [
+ {"confidenceThreshold": 0.9, "recall": 0.1, "falsePositiveRate": 0.1},
+ {"confidenceThreshold": 0.5, "recall": 0.7, "falsePositiveRate": 0.5},
+ {"confidenceThreshold": 0.1, "recall": 0.9, "falsePositiveRate": 0.9},
+ ],
+}
+
+_TEST_CLASSIFICATION_METRICS_ARTIFACT = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_CLASSIFICATION_METRICS["display_name"],
+ schema_title=constants.GOOGLE_CLASSIFICATION_METRICS,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ metadata=_TEST_CLASSIFICATION_METRICS_METADATA,
+ state=GapicArtifact.State.LIVE,
+)
+
+
+@pytest.fixture
+def create_classification_metrics_artifact_mock():
+ with patch.object(
+ MetadataServiceClient, "create_artifact"
+ ) as create_classification_metrics_artifact_mock:
+ create_classification_metrics_artifact_mock.return_value = (
+ _TEST_CLASSIFICATION_METRICS_ARTIFACT
+ )
+ yield create_classification_metrics_artifact_mock
+
+
+@pytest.fixture
+def get_classification_metrics_artifact_mock():
+ with patch.object(
+ MetadataServiceClient, "get_artifact"
+ ) as get_classification_metrics_artifact_mock:
+ get_classification_metrics_artifact_mock.return_value = (
+ _TEST_CLASSIFICATION_METRICS_ARTIFACT
+ )
+ yield get_classification_metrics_artifact_mock
+
+
+@pytest.fixture
+def get_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_ARTIFACT_ID,
+ schema_title=constants.SYSTEM_METRICS,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_METRICS],
+ )
+ yield get_artifact_mock
+
+
+@pytest.fixture
+def get_artifact_mock_with_metadata():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_ARTIFACT_ID,
+ schema_title=constants.SYSTEM_METRICS,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_METRICS],
+ metadata={
+ google.cloud.aiplatform.metadata.constants._VERTEX_EXPERIMENT_TRACKING_LABEL: True,
+ constants.GCP_ARTIFACT_RESOURCE_NAME_KEY: test_constants.TensorboardConstants._TEST_TENSORBOARD_RUN_NAME,
+ constants._STATE_KEY: gca_execution.Execution.State.RUNNING,
+ },
+ )
+ yield get_artifact_mock
+
+
+@pytest.fixture
+def get_artifact_not_found_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.side_effect = exceptions.NotFound("")
+ yield get_artifact_mock
+
+
+@pytest.fixture
+def get_artifact_wrong_schema_mock():
+ with patch.object(
+ MetadataServiceClient, "get_artifact"
+ ) as get_artifact_wrong_schema_mock:
+ get_artifact_wrong_schema_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_ARTIFACT_ID,
+ schema_title=_TEST_WRONG_SCHEMA_TITLE,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_METRICS],
+ )
+ yield get_artifact_wrong_schema_mock
+
+
+@pytest.fixture
+def update_artifact_mock():
+ with patch.object(MetadataServiceClient, "update_artifact") as update_artifact_mock:
+ update_artifact_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_ARTIFACT_ID,
+ schema_title=constants.SYSTEM_METRICS,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_METRICS],
+ metadata=_TEST_METRICS,
+ )
+ yield update_artifact_mock
+
+
+def _assert_frame_equal_with_sorted_columns(dataframe_1, dataframe_2):
+ try:
+ import pandas as pd
+ except ImportError:
+ raise ImportError(
+ "Pandas is not installed and is required to test the get_experiment_df/pipeline_df method. "
+ 'Please install the SDK using "pip install google-cloud-aiplatform[full]"'
+ )
+
+ pd.testing.assert_frame_equal(
+ dataframe_1.sort_index(axis=1), dataframe_2.sort_index(axis=1), check_names=True
+ )
+
+
+@pytest.fixture
+def mock_storage_blob_upload_from_filename():
+ with patch(
+ "google.cloud.storage.Blob.upload_from_filename"
+ ) as mock_blob_upload_from_filename, patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_upload_from_filename
+
+
+_TEST_EXPERIMENT_MODEL_ARTIFACT = GapicArtifact(
+ name=_TEST_MODEL_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ state=GapicArtifact.State.LIVE,
+)
+
+
+@pytest.fixture
+def create_experiment_model_artifact_mock():
+ with patch.object(
+ MetadataServiceClient, "create_artifact"
+ ) as create_experiment_model_artifact_mock:
+ create_experiment_model_artifact_mock.return_value = (
+ _TEST_EXPERIMENT_MODEL_ARTIFACT
+ )
+ yield create_experiment_model_artifact_mock
+
+
+@pytest.fixture
+def get_experiment_model_artifact_mock():
+ with patch.object(
+ MetadataServiceClient, "get_artifact"
+ ) as get_experiment_model_artifact_mock:
+ get_experiment_model_artifact_mock.return_value = (
+ _TEST_EXPERIMENT_MODEL_ARTIFACT
+ )
+ yield get_experiment_model_artifact_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadata:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures("get_pipeline_context_mock")
+ def test_get_pipeline_df(
+ self, list_executions_mock, query_execution_inputs_and_outputs_mock
+ ):
+ try:
+ import pandas as pd
+ except ImportError:
+ raise ImportError(
+ "Pandas is not installed and is required to test the get_pipeline_df method. "
+ 'Please install the SDK using "pip install google-cloud-aiplatform[full]"'
+ )
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ pipeline_df = aiplatform.get_pipeline_df(_TEST_PIPELINE)
+
+ expected_filter = metadata_utils._make_filter_string(
+ schema_title=constants.SYSTEM_RUN, in_context=[_TEST_CONTEXT_NAME]
+ )
+
+ list_executions_mock.assert_called_once_with(
+ request={"parent": _TEST_PARENT, "filter": expected_filter}
+ )
+ query_execution_inputs_and_outputs_mock.assert_has_calls(
+ [
+ call(execution=_TEST_EXECUTION_NAME),
+ call(execution=_TEST_OTHER_EXECUTION_NAME),
+ ]
+ )
+ pipeline_df_truth = pd.DataFrame(
+ [
+ {
+ "pipeline_name": _TEST_PIPELINE,
+ "run_name": _TEST_RUN,
+ "param.%s" % _TEST_PARAM_KEY_1: 0.01,
+ "param.%s" % _TEST_PARAM_KEY_2: 0.2,
+ "metric.%s" % _TEST_METRIC_KEY_1: 222,
+ "metric.%s" % _TEST_METRIC_KEY_2: 1,
+ },
+ {
+ "pipeline_name": _TEST_PIPELINE,
+ "run_name": _TEST_OTHER_RUN,
+ "param.%s" % _TEST_PARAM_KEY_1: 0.02,
+ "param.%s" % _TEST_PARAM_KEY_2: 0.3,
+ "metric.%s" % _TEST_METRIC_KEY_2: 0.9,
+ },
+ ]
+ )
+
+ _assert_frame_equal_with_sorted_columns(pipeline_df, pipeline_df_truth)
+
+ @pytest.mark.usefixtures("get_context_not_found_mock")
+ def test_get_pipeline_df_not_exist(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ with pytest.raises(exceptions.NotFound):
+ aiplatform.get_pipeline_df(_TEST_PIPELINE)
+
+ @pytest.mark.usefixtures("get_context_mock")
+ def test_get_pipeline_df_wrong_schema(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ with pytest.raises(ValueError):
+ aiplatform.get_pipeline_df(_TEST_PIPELINE)
+
+
+_TEST_EXPERIMENT_RUN_CONTEXT_NAME = f"{_TEST_PARENT}/contexts/{_TEST_EXECUTION_ID}"
+_TEST_OTHER_EXPERIMENT_RUN_CONTEXT_NAME = (
+ f"{_TEST_PARENT}/contexts/{_TEST_OTHER_EXECUTION_ID}"
+)
+
+_EXPERIMENT_MOCK = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ description=_TEST_EXPERIMENT_DESCRIPTION,
+ schema_title=constants.SYSTEM_EXPERIMENT,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata={**constants.EXPERIMENT_METADATA},
+)
+
+_EXPERIMENT_RUN_MOCK = GapicContext(
+ name=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_EXPERIMENT_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT_RUN],
+ metadata={
+ constants._PARAM_KEY: {},
+ constants._METRIC_KEY: {},
+ constants._STATE_KEY: gca_execution.Execution.State.RUNNING.name,
+ },
+)
+
+_EXPERIMENT_RUN_EMPTY_METADATA_MOCK = GapicContext(
+ name=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_EXPERIMENT_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT_RUN],
+ metadata={},
+)
+
+_EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT = copy.deepcopy(_EXPERIMENT_RUN_MOCK)
+_EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT.parent_contexts = [_TEST_CONTEXT_NAME]
+_EXPERIMENT_RUN_EMPTY_METADATA_MOCK_WITH_PARENT_EXPERIMENT = copy.deepcopy(
+ _EXPERIMENT_RUN_EMPTY_METADATA_MOCK
+)
+_EXPERIMENT_RUN_EMPTY_METADATA_MOCK_WITH_PARENT_EXPERIMENT.parent_contexts = [
+ _TEST_CONTEXT_NAME
+]
+
+_TEST_CUSTOM_JOB_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/customJobs/12345"
+)
+_TEST_CUSTOM_JOB_CONSOLE_URI = "test-custom-job-console-uri"
+
+_EXPERIMENT_RUN_MOCK_WITH_CUSTOM_JOBS = copy.deepcopy(
+ _EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT
+)
+_EXPERIMENT_RUN_MOCK_WITH_CUSTOM_JOBS.metadata[constants._CUSTOM_JOB_KEY] = [
+ {
+ constants._CUSTOM_JOB_RESOURCE_NAME: _TEST_CUSTOM_JOB_NAME,
+ constants._CUSTOM_JOB_CONSOLE_URI: _TEST_CUSTOM_JOB_CONSOLE_URI,
+ },
+]
+
+
+@pytest.fixture
+def get_experiment_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.return_value = _EXPERIMENT_MOCK
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_not_found_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = exceptions.NotFound("test: not found")
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_run_run_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ _EXPERIMENT_RUN_MOCK,
+ _EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT,
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_run_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ _EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT,
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_empty_experiment_run_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ _EXPERIMENT_RUN_EMPTY_METADATA_MOCK_WITH_PARENT_EXPERIMENT,
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_run_with_custom_jobs_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ _EXPERIMENT_RUN_MOCK_WITH_CUSTOM_JOBS,
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_experiment_run_not_found_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.side_effect = [
+ _EXPERIMENT_MOCK,
+ exceptions.NotFound("test: not found"),
+ ]
+
+ yield get_context_mock
+
+
+@pytest.fixture
+def create_experiment_context_mock():
+ with patch.object(MetadataServiceClient, "create_context") as create_context_mock:
+ create_context_mock.side_effect = [_TEST_EXPERIMENT_CONTEXT]
+ yield create_context_mock
+
+
+@pytest.fixture
+def create_experiment_run_context_mock():
+ with patch.object(MetadataServiceClient, "create_context") as create_context_mock:
+ create_context_mock.side_effect = [_EXPERIMENT_RUN_MOCK]
+ yield create_context_mock
+
+
+@pytest.fixture
+def update_experiment_run_context_to_running():
+ with patch.object(MetadataServiceClient, "update_context") as update_context_mock:
+ update_context_mock.side_effect = [_EXPERIMENT_RUN_MOCK]
+ yield update_context_mock
+
+
+@pytest.fixture
+def create_execution_mock():
+ with patch.object(
+ MetadataServiceClient, "create_execution"
+ ) as create_execution_mock:
+ create_execution_mock.side_effect = [_TEST_EXECUTION]
+ yield create_execution_mock
+
+
+@pytest.fixture
+def update_context_mock_v2():
+ with patch.object(MetadataServiceClient, "update_context") as update_context_mock:
+ update_context_mock.side_effect = [
+ # experiment run
+ GapicContext(
+ name=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_EXPERIMENT_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[
+ constants.SYSTEM_EXPERIMENT_RUN
+ ],
+ metadata={**constants.EXPERIMENT_METADATA},
+ ),
+ # experiment run
+ GapicContext(
+ name=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_EXPERIMENT_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[
+ constants.SYSTEM_EXPERIMENT_RUN
+ ],
+ metadata=constants.EXPERIMENT_METADATA,
+ parent_contexts=[_TEST_CONTEXT_NAME],
+ ),
+ ]
+
+ yield update_context_mock
+
+
+@pytest.fixture
+def list_contexts_mock():
+ with patch.object(MetadataServiceClient, "list_contexts") as list_contexts_mock:
+ list_contexts_mock.return_value = [
+ GapicContext(
+ name=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_EXPERIMENT_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[
+ constants.SYSTEM_EXPERIMENT_RUN
+ ],
+ metadata=constants.EXPERIMENT_METADATA,
+ parent_contexts=[_TEST_CONTEXT_NAME],
+ ),
+ GapicContext(
+ name=_TEST_OTHER_EXPERIMENT_RUN_CONTEXT_NAME,
+ display_name=_TEST_OTHER_RUN,
+ schema_title=constants.SYSTEM_EXPERIMENT_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[
+ constants.SYSTEM_EXPERIMENT_RUN
+ ],
+ metadata=constants.EXPERIMENT_METADATA,
+ parent_contexts=[_TEST_CONTEXT_NAME],
+ ),
+ ]
+ yield list_contexts_mock
+
+
+@pytest.fixture
+def add_context_children_mock():
+ with patch.object(
+ MetadataServiceClient, "add_context_children"
+ ) as add_context_children_mock:
+ yield add_context_children_mock
+
+
+@pytest.fixture
+def get_custom_job_mock():
+ with patch.object(JobServiceClient, "get_custom_job") as get_custom_job_mock:
+ yield get_custom_job_mock
+
+
+_EXPERIMENT_RUN_MOCK_POPULATED_1 = copy.deepcopy(
+ _EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT
+)
+_EXPERIMENT_RUN_MOCK_POPULATED_1.metadata[constants._PARAM_KEY].update(_TEST_PARAMS)
+_EXPERIMENT_RUN_MOCK_POPULATED_1.metadata[constants._METRIC_KEY].update(_TEST_METRICS)
+_EXPERIMENT_RUN_MOCK_POPULATED_2 = copy.deepcopy(
+ _EXPERIMENT_RUN_MOCK_WITH_PARENT_EXPERIMENT
+)
+_EXPERIMENT_RUN_MOCK_POPULATED_2.display_name = _TEST_OTHER_RUN
+_EXPERIMENT_RUN_MOCK_POPULATED_2.metadata[constants._PARAM_KEY].update(
+ _TEST_OTHER_PARAMS
+)
+_EXPERIMENT_RUN_MOCK_POPULATED_2.metadata[constants._METRIC_KEY].update(
+ _TEST_OTHER_METRICS
+)
+
+_TEST_PIPELINE_RUN_ID = "test-pipeline-run"
+_TEST_PIPELINE_RUN_CONTEXT_NAME = f"{_TEST_PARENT}/contexts/{_TEST_PIPELINE_RUN_ID}"
+
+_TEST_PIPELINE_CONTEXT = GapicContext(
+ name=_TEST_PIPELINE_RUN_CONTEXT_NAME,
+ display_name=_TEST_PIPELINE_RUN_ID,
+ schema_title=constants.SYSTEM_PIPELINE_RUN,
+ parent_contexts=[_TEST_CONTEXT_NAME],
+)
+
+
+@pytest.fixture()
+def list_context_mock_for_experiment_dataframe_mock():
+ with patch.object(MetadataServiceClient, "list_contexts") as list_context_mock:
+ list_context_mock.side_effect = [
+ # experiment runs
+ [
+ _EXPERIMENT_RUN_MOCK_POPULATED_1,
+ _EXPERIMENT_RUN_MOCK_POPULATED_2,
+ _TEST_PIPELINE_CONTEXT,
+ ],
+ # pipeline runs
+ [],
+ ]
+ yield list_context_mock
+
+
+_TEST_LEGACY_METRIC_ARTIFACT = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ schema_title=constants.SYSTEM_METRICS,
+ metadata=_TEST_METRICS,
+)
+
+_TEST_PIPELINE_METRIC_ARTIFACT = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ schema_title=constants.SYSTEM_METRICS,
+ metadata={key: value + 1 for key, value in _TEST_METRICS.items()},
+)
+
+
+@pytest.fixture()
+def list_artifact_mock_for_experiment_dataframe():
+ with patch.object(MetadataServiceClient, "list_artifacts") as list_artifacts_mock:
+ list_artifacts_mock.side_effect = [
+ # pipeline run metric artifact
+ [_TEST_PIPELINE_METRIC_ARTIFACT],
+ ]
+ yield list_artifacts_mock
+
+
+_TEST_PIPELINE_SYSTEM_RUN_EXECUTION = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ schema_title=constants.SYSTEM_RUN,
+ state=gca_execution.Execution.State.RUNNING,
+ metadata={
+ f"input:{_TEST_PARAM_KEY_1}": _TEST_PARAMS[_TEST_PARAM_KEY_1] + 1,
+ f"input:{_TEST_PARAM_KEY_2}": _TEST_PARAMS[_TEST_PARAM_KEY_2] + 1,
+ # This is automatically logged by the pipeline run but will not be
+ # shown in experiment
+ "vertex-ai-pipelines-artifact-argument-binding": {
+ "output:trainer-metrics": ["artifact-path"]
+ },
+ },
+)
+
+_TEST_LEGACY_SYSTEM_RUN_EXECUTION = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata=_TEST_PARAMS,
+)
+
+
+# backward compatibility
+@pytest.fixture()
+def list_executions_mock_for_experiment_dataframe():
+ with patch.object(MetadataServiceClient, "list_executions") as list_executions_mock:
+ list_executions_mock.side_effect = [
+ # legacy system.run execution
+ [_TEST_LEGACY_SYSTEM_RUN_EXECUTION],
+ # pipeline system.run execution
+ [_TEST_PIPELINE_SYSTEM_RUN_EXECUTION],
+ ]
+ yield list_executions_mock
+
+
+@pytest.fixture
+def get_tensorboard_run_artifact_not_found_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.side_effect = exceptions.NotFound("")
+ yield get_artifact_mock
+
+
+_TEST_LEGACY_METRIC_ARTIFACT
+
+_TEST_TENSORBOARD_RUN_ARTIFACT = GapicArtifact(
+ name=experiment_run_resource.ExperimentRun._tensorboard_run_id(
+ _TEST_EXPERIMENT_RUN_CONTEXT_NAME
+ ),
+ uri="https://us-central1-aiplatform.googleapis.com/v1/projects/test-project/locations/us-central1/tensorboards/1028944691210842416/experiments/test-experiment/runs/test-run",
+ schema_title=google.cloud.aiplatform.metadata.constants._TENSORBOARD_RUN_REFERENCE_ARTIFACT.schema_title,
+ schema_version=google.cloud.aiplatform.metadata.constants._TENSORBOARD_RUN_REFERENCE_ARTIFACT.schema_version,
+ state=GapicArtifact.State.LIVE,
+ metadata={
+ google.cloud.aiplatform.metadata.constants._VERTEX_EXPERIMENT_TRACKING_LABEL: True,
+ constants.GCP_ARTIFACT_RESOURCE_NAME_KEY: test_constants.TensorboardConstants._TEST_TENSORBOARD_RUN_NAME,
+ },
+)
+
+
+@pytest.fixture
+def list_tensorboard_time_series_mock_empty():
+ with patch.object(
+ TensorboardServiceClient,
+ "list_tensorboard_time_series",
+ ) as list_tensorboard_time_series_mock:
+ list_tensorboard_time_series_mock.side_effect = [
+ [], # initially empty
+ [],
+ [test_constants.TensorboardConstants._TEST_TENSORBOARD_TIME_SERIES],
+ ]
+ yield list_tensorboard_time_series_mock
+
+
+@pytest.fixture
+def create_tensorboard_run_artifact_mock():
+ with patch.object(MetadataServiceClient, "create_artifact") as create_artifact_mock:
+ create_artifact_mock.side_effect = [_TEST_TENSORBOARD_RUN_ARTIFACT]
+ yield create_artifact_mock
+
+
+@pytest.fixture
+def get_tensorboard_run_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.side_effect = [
+ _TEST_TENSORBOARD_RUN_ARTIFACT,
+ exceptions.NotFound(""),
+ _TEST_LEGACY_METRIC_ARTIFACT,
+ ]
+ yield get_artifact_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestExperiments:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_run_mock",
+ )
+ def test_init_experiment_with_ipython_environment(
+ self,
+ list_default_tensorboard_mock,
+ assign_backing_tensorboard_mock,
+ ipython_is_available_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_run_mock",
+ )
+ def test_init_experiment_with_no_ipython_environment(
+ self,
+ list_default_tensorboard_mock,
+ assign_backing_tensorboard_mock,
+ ipython_is_not_available_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ @pytest.mark.usefixtures("get_or_create_default_tb_none_mock")
+ def test_init_experiment_with_existing_metadataStore_and_context(
+ self, get_metadata_store_mock, get_experiment_run_run_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ get_metadata_store_mock.assert_called_once_with(
+ name=_TEST_METADATASTORE, retry=base._DEFAULT_RETRY
+ )
+ get_experiment_run_run_mock.assert_called_once_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_run_mock",
+ )
+ def test_init_experiment_with_default_tensorboard(
+ self, list_default_tensorboard_mock, assign_backing_tensorboard_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ list_default_tensorboard_mock.assert_called_once_with(
+ request={
+ "parent": f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}",
+ "filter": "is_default=true",
+ }
+ )
+ assign_backing_tensorboard_mock.assert_called_once()
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_run_mock",
+ )
+ def test_init_experiment_tensorboard_false_doesNotSet_backing_tensorboard(
+ self, list_default_tensorboard_mock, assign_backing_tensorboard_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=False,
+ )
+
+ list_default_tensorboard_mock.assert_not_called()
+ assign_backing_tensorboard_mock.assert_not_called()
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_run_mock",
+ )
+ def test_init_experiment_tensorboard_true_sets_backing_tensorboard(
+ self, list_default_tensorboard_mock, assign_backing_tensorboard_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=True,
+ )
+
+ list_default_tensorboard_mock.assert_called()
+ assign_backing_tensorboard_mock.assert_called()
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_run_mock",
+ )
+ def test_init_experiment_tensorboard_none_sets_backing_tensorboard(
+ self, list_default_tensorboard_mock, assign_backing_tensorboard_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=None,
+ )
+
+ list_default_tensorboard_mock.assert_called()
+ assign_backing_tensorboard_mock.assert_called()
+
+ @pytest.mark.usefixtures("get_metadata_store_mock")
+ def test_create_experiment(self, create_experiment_context_mock):
+ exp = aiplatform.Experiment.create(
+ experiment_name=_TEST_EXPERIMENT,
+ description=_TEST_EXPERIMENT_DESCRIPTION,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ _TRUE_CONTEXT = copy.deepcopy(_TEST_EXPERIMENT_CONTEXT)
+ _TRUE_CONTEXT.name = None
+ _TRUE_CONTEXT.metadata.pop("backing_tensorboard_resource")
+
+ create_experiment_context_mock.assert_called_once_with(
+ parent=_TEST_PARENT, context=_TRUE_CONTEXT, context_id=_TEST_EXPERIMENT
+ )
+
+ assert exp._metadata_context.gca_resource == _TEST_EXPERIMENT_CONTEXT
+
+ @pytest.mark.usefixtures(
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_init_experiment_with_credentials(
+ self,
+ get_metadata_store_mock,
+ get_experiment_run_run_mock,
+ ):
+ creds = credentials.AnonymousCredentials()
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ credentials=creds,
+ )
+
+ assert (
+ metadata._experiment_tracker._experiment._metadata_context.api_client._transport._credentials
+ == creds
+ )
+
+ get_metadata_store_mock.assert_called_once_with(
+ name=_TEST_METADATASTORE, retry=base._DEFAULT_RETRY
+ )
+ get_experiment_run_run_mock.assert_called_once_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_and_get_metadata_store_with_credentials(
+ self, get_metadata_store_mock
+ ):
+ creds = credentials.AnonymousCredentials()
+
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, credentials=creds
+ )
+
+ store = metadata_store._MetadataStore.get_or_create()
+
+ assert store.api_client._transport._credentials == creds
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock_raise_not_found_exception",
+ "create_metadata_store_mock",
+ )
+ def test_init_and_get_then_create_metadata_store_with_credentials(
+ self,
+ ):
+ creds = credentials.AnonymousCredentials()
+
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, credentials=creds
+ )
+
+ store = metadata_store._MetadataStore.get_or_create()
+
+ assert store.api_client._transport._credentials == creds
+
+ @pytest.mark.usefixtures("get_or_create_default_tb_none_mock")
+ def test_init_experiment_with_existing_description(
+ self, get_metadata_store_mock, get_experiment_run_run_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_description=_TEST_EXPERIMENT_DESCRIPTION,
+ )
+
+ get_metadata_store_mock.assert_called_once_with(
+ name=_TEST_METADATASTORE, retry=base._DEFAULT_RETRY
+ )
+ get_experiment_run_run_mock.assert_called_once_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_run_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_init_experiment_without_existing_description(
+ self,
+ update_context_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_description=_TEST_OTHER_EXPERIMENT_DESCRIPTION,
+ )
+
+ experiment_context = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_EXPERIMENT,
+ description=_TEST_OTHER_EXPERIMENT_DESCRIPTION,
+ schema_title=constants.SYSTEM_EXPERIMENT,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata=constants.EXPERIMENT_METADATA,
+ )
+
+ update_context_mock.assert_called_once_with(context=experiment_context)
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_mock",
+ "update_experiment_run_context_to_running",
+ "get_tensorboard_run_artifact_not_found_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_init_experiment_reset(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+ aiplatform.start_run(_TEST_RUN, resume=True)
+
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ assert metadata._experiment_tracker.experiment_name == _TEST_EXPERIMENT
+ assert metadata._experiment_tracker.experiment_run.name == _TEST_RUN
+
+ aiplatform.init(project=_TEST_OTHER_PROJECT, location=_TEST_LOCATION)
+
+ assert metadata._experiment_tracker.experiment_name is None
+ assert metadata._experiment_tracker.experiment_run is None
+
+ @pytest.mark.usefixtures("get_metadata_store_mock", "get_context_wrong_schema_mock")
+ def test_init_experiment_wrong_schema(self):
+ with pytest.raises(ValueError):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ @pytest.mark.usefixtures("get_metadata_store_mock", "get_experiment_mock")
+ def test_init_experiment_from_env(self):
+ os.environ["AIP_EXPERIMENT_NAME"] = _TEST_EXPERIMENT
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ exp = metadata._experiment_tracker.experiment
+ assert exp.name == _TEST_EXPERIMENT
+
+ del os.environ["AIP_EXPERIMENT_NAME"]
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ )
+ def test_start_run_from_env_experiment(
+ self,
+ get_experiment_mock,
+ create_experiment_run_context_mock,
+ add_context_children_mock,
+ ):
+ os.environ["AIP_EXPERIMENT_NAME"] = _TEST_EXPERIMENT
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ aiplatform.start_run(_TEST_RUN)
+
+ get_experiment_mock.assert_called_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ _TRUE_CONTEXT = copy.deepcopy(_EXPERIMENT_RUN_MOCK)
+ _TRUE_CONTEXT.name = None
+
+ create_experiment_run_context_mock.assert_called_with(
+ parent=_TEST_METADATASTORE,
+ context=_TRUE_CONTEXT,
+ context_id=_EXPERIMENT_RUN_MOCK.name.split("/")[-1],
+ )
+
+ add_context_children_mock.assert_called_with(
+ context=_EXPERIMENT_MOCK.name, child_contexts=[_EXPERIMENT_RUN_MOCK.name]
+ )
+
+ del os.environ["AIP_EXPERIMENT_NAME"]
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_mock",
+ "get_tensorboard_run_artifact_not_found_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_init_experiment_run_from_env_run_name(self):
+ os.environ["AIP_EXPERIMENT_RUN_NAME"] = _TEST_RUN
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ run = metadata._experiment_tracker.experiment_run
+ assert run.name == _TEST_RUN
+
+ del os.environ["AIP_EXPERIMENT_RUN_NAME"]
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_run_mock",
+ "get_tensorboard_run_artifact_not_found_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_init_experiment_run_from_env_run_resource_name(self):
+ os.environ["AIP_EXPERIMENT_RUN_NAME"] = _TEST_EXPERIMENT_RUN_CONTEXT_NAME
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ run = metadata._experiment_tracker.experiment_run
+ assert run.name == _TEST_RUN
+
+ del os.environ["AIP_EXPERIMENT_RUN_NAME"]
+
+ def test_get_experiment(self, get_experiment_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ exp = aiplatform.Experiment.get(_TEST_EXPERIMENT)
+
+ assert exp.name == _TEST_EXPERIMENT
+ get_experiment_mock.assert_called_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_get_experiment_not_found(self, get_experiment_not_found_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ exp = aiplatform.Experiment.get(_TEST_EXPERIMENT)
+
+ assert exp is None
+ get_experiment_not_found_mock.assert_called_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock", "get_tensorboard_run_artifact_not_found_mock"
+ )
+ def test_get_experiment_run(self, get_experiment_run_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ run = aiplatform.ExperimentRun.get(_TEST_RUN, experiment=_TEST_EXPERIMENT)
+
+ assert run.name == _TEST_RUN
+ get_experiment_run_mock.assert_called_with(
+ name=f"{_TEST_CONTEXT_NAME}-{_TEST_RUN}", retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_tensorboard_run_artifact_not_found_mock",
+ "get_execution_not_found_mock",
+ )
+ def test_get_experiment_run_not_found(self, get_experiment_run_not_found_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ run = aiplatform.ExperimentRun.get(_TEST_RUN, experiment=_TEST_EXPERIMENT)
+
+ assert run is None
+ get_experiment_run_not_found_mock.assert_called_with(
+ name=f"{_TEST_CONTEXT_NAME}-{_TEST_RUN}", retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock", "get_or_create_default_tb_none_mock"
+ )
+ def test_start_run(
+ self,
+ get_experiment_mock,
+ create_experiment_run_context_mock,
+ add_context_children_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+ aiplatform.start_run(_TEST_RUN)
+
+ get_experiment_mock.assert_called_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ _TRUE_CONTEXT = copy.deepcopy(_EXPERIMENT_RUN_MOCK)
+ _TRUE_CONTEXT.name = None
+
+ create_experiment_run_context_mock.assert_called_with(
+ parent=_TEST_METADATASTORE,
+ context=_TRUE_CONTEXT,
+ context_id=_EXPERIMENT_RUN_MOCK.name.split("/")[-1],
+ )
+
+ add_context_children_mock.assert_called_with(
+ context=_EXPERIMENT_MOCK.name, child_contexts=[_EXPERIMENT_RUN_MOCK.name]
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_start_run_fails_when_run_name_too_long(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ run_name_too_long = "".join(
+ "a"
+ for _ in range(
+ constants._EXPERIMENT_RUN_MAX_LENGTH + 2 - len(_TEST_EXPERIMENT)
+ )
+ )
+
+ with pytest.raises(ValueError):
+ aiplatform.start_run(run_name_too_long)
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "add_context_children_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_log_params(
+ self,
+ update_context_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+ aiplatform.start_run(_TEST_RUN)
+ aiplatform.log_params(_TEST_PARAMS)
+
+ _TRUE_CONTEXT = copy.deepcopy(_EXPERIMENT_RUN_MOCK)
+ _TRUE_CONTEXT.metadata[constants._PARAM_KEY].update(_TEST_PARAMS)
+
+ update_context_mock.assert_called_once_with(context=_TRUE_CONTEXT)
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "add_context_children_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_log_metrics(self, update_context_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+ aiplatform.start_run(_TEST_RUN)
+ aiplatform.log_metrics(_TEST_METRICS)
+
+ _TRUE_CONTEXT = copy.deepcopy(_EXPERIMENT_RUN_MOCK)
+ _TRUE_CONTEXT.metadata[constants._METRIC_KEY].update(_TEST_METRICS)
+
+ update_context_mock.assert_called_once_with(context=_TRUE_CONTEXT)
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "add_context_children_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_log_classification_metrics(
+ self,
+ create_classification_metrics_artifact_mock,
+ get_classification_metrics_artifact_mock,
+ add_context_artifacts_and_executions_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+ aiplatform.start_run(_TEST_RUN)
+ classification_metrics = aiplatform.log_classification_metrics(
+ display_name=_TEST_CLASSIFICATION_METRICS["display_name"],
+ labels=_TEST_CLASSIFICATION_METRICS["labels"],
+ matrix=_TEST_CLASSIFICATION_METRICS["matrix"],
+ fpr=_TEST_CLASSIFICATION_METRICS["fpr"],
+ tpr=_TEST_CLASSIFICATION_METRICS["tpr"],
+ threshold=_TEST_CLASSIFICATION_METRICS["threshold"],
+ )
+
+ expected_artifact = GapicArtifact(
+ display_name=_TEST_CLASSIFICATION_METRICS["display_name"],
+ schema_title=constants.GOOGLE_CLASSIFICATION_METRICS,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ metadata=_TEST_CLASSIFICATION_METRICS_METADATA,
+ state=GapicArtifact.State.LIVE,
+ )
+ create_classification_metrics_artifact_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ artifact=expected_artifact,
+ artifact_id=None,
+ )
+
+ get_classification_metrics_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert isinstance(
+ classification_metrics, google_artifact_schema.ClassificationMetrics
+ )
+
+ add_context_artifacts_and_executions_mock.assert_called_once_with(
+ context=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ artifacts=[_TEST_ARTIFACT_NAME],
+ executions=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "add_context_children_mock",
+ "mock_storage_blob_upload_from_filename",
+ "create_experiment_model_artifact_mock",
+ "get_experiment_model_artifact_mock",
+ "get_metadata_store_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_log_model(
+ self,
+ add_context_artifacts_and_executions_mock,
+ ):
+ train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
+ train_y = np.dot(train_x, np.array([1, 2])) + 3
+ model = LinearRegression()
+ model.fit(train_x, train_y)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ experiment=_TEST_EXPERIMENT,
+ )
+ aiplatform.start_run(_TEST_RUN)
+ aiplatform.log_model(model, _TEST_MODEL_ID)
+
+ add_context_artifacts_and_executions_mock.assert_called_once_with(
+ context=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ artifacts=[_TEST_MODEL_NAME],
+ executions=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "add_context_children_mock",
+ "get_tensorboard_mock",
+ "get_tensorboard_run_not_found_mock",
+ "get_tensorboard_experiment_not_found_mock",
+ "get_artifact_not_found_mock",
+ "get_tensorboard_time_series_not_found_mock",
+ "list_tensorboard_time_series_mock_empty",
+ )
+ def test_log_time_series_metrics(
+ self,
+ update_context_mock,
+ create_tensorboard_experiment_mock,
+ create_tensorboard_run_mock,
+ create_tensorboard_run_artifact_mock,
+ add_context_artifacts_and_executions_mock,
+ create_tensorboard_time_series_mock,
+ batch_read_tensorboard_time_series_mock,
+ write_tensorboard_run_data_mock,
+ ):
+ tb = aiplatform.Tensorboard(
+ test_constants.TensorboardConstants._TEST_TENSORBOARD_NAME
+ )
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ experiment_tensorboard=tb,
+ )
+
+ update_context_mock.assert_called_once_with(context=_TEST_EXPERIMENT_CONTEXT)
+
+ aiplatform.start_run(_TEST_RUN)
+ timestamp = utils.get_timestamp_proto()
+ aiplatform.log_time_series_metrics(_TEST_OTHER_METRICS, wall_time=timestamp)
+
+ create_tensorboard_experiment_mock.assert_called_once_with(
+ parent=test_constants.TensorboardConstants._TEST_TENSORBOARD_NAME,
+ tensorboard_experiment_id=_TEST_CONTEXT_ID,
+ tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(
+ display_name=_TEST_CONTEXT_ID,
+ labels=constants._VERTEX_EXPERIMENT_TB_EXPERIMENT_LABEL,
+ ),
+ metadata=(),
+ timeout=None,
+ )
+
+ create_tensorboard_run_mock.assert_called_once_with(
+ parent=test_constants.TensorboardConstants._TEST_TENSORBOARD_EXPERIMENT_NAME,
+ tensorboard_run_id=_TEST_RUN,
+ tensorboard_run=gca_tensorboard_run.TensorboardRun(
+ display_name=_TEST_RUN,
+ ),
+ metadata=(),
+ timeout=None,
+ )
+
+ true_tb_run_artifact = copy.deepcopy(_TEST_TENSORBOARD_RUN_ARTIFACT)
+ true_tb_run_artifact.name = None
+
+ create_tensorboard_run_artifact_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ artifact=true_tb_run_artifact,
+ artifact_id=experiment_run_resource.ExperimentRun._tensorboard_run_id(
+ _TEST_EXECUTION_ID
+ ),
+ )
+
+ add_context_artifacts_and_executions_mock.assert_called_once_with(
+ context=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ artifacts=[_TEST_TENSORBOARD_RUN_ARTIFACT.name],
+ executions=None,
+ )
+
+ create_tensorboard_time_series_mock.assert_called_with(
+ parent=test_constants.TensorboardConstants._TEST_TENSORBOARD_RUN_NAME,
+ tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
+ display_name=list(_TEST_OTHER_METRICS.keys())[0],
+ value_type="SCALAR",
+ plugin_name="scalars",
+ ),
+ )
+
+ ts_data = [
+ gca_tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id=test_constants.TensorboardConstants._TEST_TENSORBOARD_TIME_SERIES_ID,
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ gca_tensorboard_data.TimeSeriesDataPoint(
+ scalar=gca_tensorboard_data.Scalar(value=value),
+ wall_time=timestamp,
+ step=2,
+ )
+ ],
+ )
+ for value in _TEST_OTHER_METRICS.values()
+ ]
+
+ write_tensorboard_run_data_mock.assert_called_once_with(
+ tensorboard_run=test_constants.TensorboardConstants._TEST_TENSORBOARD_RUN_NAME,
+ time_series_data=ts_data,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "add_context_children_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_log_metrics_nest_value_raises_error(self):
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
+ )
+ aiplatform.start_run(_TEST_RUN)
+ with pytest.raises(TypeError):
+ aiplatform.log_metrics({"test": {"nested": "string"}})
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "add_context_children_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_log_params_nest_value_raises_error(self):
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
+ )
+ aiplatform.start_run(_TEST_RUN)
+ with pytest.raises(TypeError):
+ aiplatform.log_params({"test": {"nested": "string"}})
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "add_context_children_mock",
+ "get_artifact_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_start_execution_and_assign_artifact(
+ self,
+ create_execution_mock,
+ add_execution_events_mock,
+ add_context_artifacts_and_executions_mock,
+ update_execution_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, experiment=_TEST_EXPERIMENT
+ )
+ aiplatform.start_run(_TEST_RUN)
+
+ in_artifact = aiplatform.Artifact(_TEST_ARTIFACT_ID)
+ out_artifact = aiplatform.Artifact(_TEST_ARTIFACT_ID)
+
+ with aiplatform.start_execution(
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ metadata=_TEST_METADATA,
+ ) as exc:
+ exc.assign_input_artifacts([in_artifact])
+ exc.assign_output_artifacts([out_artifact])
+
+ _created_execution = copy.deepcopy(_TEST_EXECUTION)
+ _created_execution.name = None
+
+ create_execution_mock.assert_called_once_with(
+ parent=_TEST_PARENT, execution=_created_execution, execution_id=None
+ )
+
+ in_event = gca_event.Event(
+ artifact=_TEST_ARTIFACT_NAME,
+ type_=gca_event.Event.Type.INPUT,
+ )
+
+ out_event = gca_event.Event(
+ artifact=_TEST_ARTIFACT_NAME,
+ type_=gca_event.Event.Type.OUTPUT,
+ )
+
+ add_execution_events_mock.assert_has_calls(
+ [
+ call(execution=_TEST_EXECUTION.name, events=[in_event]),
+ call(execution=_TEST_EXECUTION.name, events=[out_event]),
+ ]
+ )
+
+ add_context_artifacts_and_executions_mock.assert_has_calls(
+ [
+ call(
+ context=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ artifacts=None,
+ executions=[_TEST_EXECUTION.name],
+ ),
+ call(
+ context=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ artifacts=[_TEST_ARTIFACT_NAME],
+ executions=[],
+ ),
+ call(
+ context=_TEST_EXPERIMENT_RUN_CONTEXT_NAME,
+ artifacts=[_TEST_ARTIFACT_NAME],
+ executions=[],
+ ),
+ ]
+ )
+
+ updated_execution = copy.deepcopy(_TEST_EXECUTION)
+ updated_execution.state = GapicExecution.State.COMPLETE
+
+ update_execution_mock.assert_called_once_with(execution=updated_execution)
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "add_context_children_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_end_run(
+ self,
+ update_context_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+ aiplatform.start_run(_TEST_RUN)
+ aiplatform.end_run()
+
+ _TRUE_CONTEXT = copy.deepcopy(_EXPERIMENT_RUN_MOCK)
+ _TRUE_CONTEXT.metadata[
+ constants._STATE_KEY
+ ] = gca_execution.Execution.State.COMPLETE.name
+
+ update_context_mock.assert_called_once_with(context=_TRUE_CONTEXT)
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "create_experiment_run_context_mock",
+ "get_pipeline_job_mock",
+ "get_or_create_default_tb_none_mock",
+ )
+ def test_log_pipeline_job(
+ self,
+ add_context_children_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ experiment=_TEST_EXPERIMENT,
+ )
+ aiplatform.start_run(_TEST_RUN)
+
+ pipeline_job = aiplatform.PipelineJob.get(
+ test_constants.PipelineJobConstants._TEST_PIPELINE_JOB_ID
+ )
+ pipeline_job.wait()
+
+ aiplatform.log(pipeline_job=pipeline_job)
+
+ add_context_children_mock.assert_called_with(
+ context=_EXPERIMENT_RUN_MOCK.name,
+ child_contexts=[
+ pipeline_job.gca_resource.job_detail.pipeline_run_context.name
+ ],
+ )
+
+ @pytest.mark.usefixtures(
+ "get_experiment_mock",
+ )
+ def test_get_experiment_df_passes_experiment_variable(
+ self,
+ list_context_mock_for_experiment_dataframe_mock,
+ list_artifact_mock_for_experiment_dataframe,
+ list_executions_mock_for_experiment_dataframe,
+ get_tensorboard_run_artifact_mock,
+ get_tensorboard_run_mock,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with patch.object(
+ experiment_run_resource.ExperimentRun, "_query_experiment_row"
+ ) as query_experiment_row_mock:
+ row = experiment_resources._ExperimentRow(
+ experiment_run_type=constants.SYSTEM_EXPERIMENT_RUN,
+ name=_TEST_EXPERIMENT,
+ )
+ query_experiment_row_mock.return_value = row
+
+ aiplatform.get_experiment_df(_TEST_EXPERIMENT)
+ _, kwargs = query_experiment_row_mock.call_args_list[0]
+ TestCase.assertTrue(self, kwargs["experiment"].name == _TEST_EXPERIMENT)
+
+ @pytest.mark.usefixtures(
+ "get_experiment_mock",
+ "list_tensorboard_time_series_mock",
+ "batch_read_tensorboard_time_series_mock",
+ )
+ def test_get_experiment_df(
+ self,
+ list_context_mock_for_experiment_dataframe_mock,
+ list_artifact_mock_for_experiment_dataframe,
+ list_executions_mock_for_experiment_dataframe,
+ get_tensorboard_run_artifact_mock,
+ get_tensorboard_run_mock,
+ ):
+ import pandas as pd
+
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ experiment_df = aiplatform.get_experiment_df(_TEST_EXPERIMENT)
+
+ expected_filter = metadata_utils._make_filter_string(
+ parent_contexts=[_TEST_CONTEXT_NAME],
+ schema_title=[
+ constants.SYSTEM_EXPERIMENT_RUN,
+ constants.SYSTEM_PIPELINE_RUN,
+ ],
+ )
+
+ list_context_mock_for_experiment_dataframe_mock.assert_called_once_with(
+ request=dict(parent=_TEST_PARENT, filter=expected_filter)
+ )
+
+ expected_legacy_filter = metadata_utils._make_filter_string(
+ in_context=[_TEST_CONTEXT_NAME], schema_title=[constants.SYSTEM_RUN]
+ )
+ expected_pipeline_filter = metadata_utils._make_filter_string(
+ in_context=[_TEST_PIPELINE_CONTEXT.name], schema_title=constants.SYSTEM_RUN
+ )
+
+ list_executions_mock_for_experiment_dataframe.assert_has_calls(
+ calls=[
+ call(request=dict(parent=_TEST_PARENT, filter=expected_legacy_filter)),
+ call(
+ request=dict(parent=_TEST_PARENT, filter=expected_pipeline_filter)
+ ),
+ ],
+ any_order=False,
+ )
+
+ expected_filter = metadata_utils._make_filter_string(
+ in_context=[_TEST_PIPELINE_CONTEXT.name],
+ schema_title=[
+ constants.SYSTEM_METRICS,
+ constants.GOOGLE_CLASSIFICATION_METRICS,
+ constants.GOOGLE_REGRESSION_METRICS,
+ constants.GOOGLE_FORECASTING_METRICS,
+ ],
+ )
+
+ list_artifact_mock_for_experiment_dataframe.assert_has_calls(
+ calls=[call(request=dict(parent=_TEST_PARENT, filter=expected_filter))],
+ any_order=False,
+ )
+
+ experiment_df_truth = pd.DataFrame(
+ [
+ {
+ "experiment_name": _TEST_EXPERIMENT,
+ "run_type": constants.SYSTEM_EXPERIMENT_RUN,
+ "state": gca_execution.Execution.State.RUNNING.name,
+ "run_name": _TEST_RUN,
+ "param.%s" % _TEST_PARAM_KEY_1: _TEST_PARAMS[_TEST_PARAM_KEY_1],
+ "param.%s" % _TEST_PARAM_KEY_2: _TEST_PARAMS[_TEST_PARAM_KEY_2],
+ "metric.%s" % _TEST_METRIC_KEY_1: _TEST_METRICS[_TEST_METRIC_KEY_1],
+ "metric.%s" % _TEST_METRIC_KEY_2: _TEST_METRICS[_TEST_METRIC_KEY_2],
+ "time_series_metric.accuracy": test_constants.TensorboardConstants._TEST_TENSORBOARD_TIME_SERIES_DATA.values[
+ 0
+ ].scalar.value,
+ },
+ {
+ "experiment_name": _TEST_EXPERIMENT,
+ "run_type": constants.SYSTEM_EXPERIMENT_RUN,
+ "state": gca_execution.Execution.State.RUNNING.name,
+ "run_name": _TEST_OTHER_RUN,
+ "param.%s"
+ % _TEST_PARAM_KEY_1: _TEST_OTHER_PARAMS[_TEST_PARAM_KEY_1],
+ "param.%s"
+ % _TEST_PARAM_KEY_2: _TEST_OTHER_PARAMS[_TEST_PARAM_KEY_2],
+ "metric.%s"
+ % _TEST_METRIC_KEY_2: _TEST_OTHER_METRICS[_TEST_METRIC_KEY_2],
+ },
+ {
+ "experiment_name": _TEST_EXPERIMENT,
+ "run_type": constants.SYSTEM_PIPELINE_RUN,
+ "state": gca_execution.Execution.State.RUNNING.name,
+ "run_name": _TEST_PIPELINE_RUN_ID,
+ "param.%s" % _TEST_PARAM_KEY_1: _TEST_PARAMS[_TEST_PARAM_KEY_1] + 1,
+ "param.%s" % _TEST_PARAM_KEY_2: _TEST_PARAMS[_TEST_PARAM_KEY_2] + 1,
+ "metric.%s" % _TEST_METRIC_KEY_1: _TEST_METRICS[_TEST_METRIC_KEY_1]
+ + 1,
+ "metric.%s" % _TEST_METRIC_KEY_2: _TEST_METRICS[_TEST_METRIC_KEY_2]
+ + 1,
+ },
+ {
+ "experiment_name": _TEST_EXPERIMENT,
+ "run_type": constants.SYSTEM_RUN,
+ "state": gca_execution.Execution.State.STATE_UNSPECIFIED.name,
+ "run_name": _TEST_RUN,
+ "param.%s" % _TEST_PARAM_KEY_1: _TEST_PARAMS[_TEST_PARAM_KEY_1],
+ "param.%s" % _TEST_PARAM_KEY_2: _TEST_PARAMS[_TEST_PARAM_KEY_2],
+ "metric.%s" % _TEST_METRIC_KEY_1: _TEST_METRICS[_TEST_METRIC_KEY_1],
+ "metric.%s" % _TEST_METRIC_KEY_2: _TEST_METRICS[_TEST_METRIC_KEY_2],
+ },
+ ]
+ )
+
+ _assert_frame_equal_with_sorted_columns(experiment_df, experiment_df_truth)
+
+ @pytest.mark.usefixtures("get_context_not_found_mock")
+ def test_get_experiment_df_not_exist(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ with pytest.raises(exceptions.NotFound):
+ aiplatform.get_experiment_df(_TEST_EXPERIMENT)
+
+ @pytest.mark.usefixtures("get_pipeline_context_mock")
+ def test_get_experiment_df_wrong_schema(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ with pytest.raises(ValueError):
+ aiplatform.get_experiment_df(_TEST_EXPERIMENT)
+
+ @pytest.mark.usefixtures(
+ "get_tensorboard_run_artifact_not_found_mock", "get_metadata_store_mock"
+ )
+ def test_run_metadata_not_set(self, get_empty_experiment_run_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ run = aiplatform.ExperimentRun.get(_TEST_RUN, experiment=_TEST_EXPERIMENT)
+
+ params = run.get_params()
+ metrics = run.get_metrics()
+ state = run.get_state()
+
+ assert params == {}
+ assert metrics == {}
+ assert state == gca_execution.Execution.State.STATE_UNSPECIFIED.name
+
+ @pytest.mark.usefixtures(
+ "get_experiment_run_with_custom_jobs_mock",
+ "get_metadata_store_mock",
+ "get_tensorboard_run_artifact_not_found_mock",
+ )
+ def test_experiment_run_get_logged_custom_jobs(self, get_custom_job_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ run = aiplatform.ExperimentRun(_TEST_RUN, experiment=_TEST_EXPERIMENT)
+ jobs = run.get_logged_custom_jobs()
+
+ assert len(jobs) == 1
+ get_custom_job_mock.assert_called_once_with(
+ name=_TEST_CUSTOM_JOB_NAME,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_metadata_store_mock",
+ "get_experiment_mock",
+ "get_experiment_run_mock",
+ "get_context_mock",
+ "list_contexts_mock",
+ "list_executions_mock",
+ "get_artifact_mock_with_metadata",
+ "update_context_mock",
+ )
+ def test_update_experiment_run_after_list(
+ self,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ experiment_run_list = aiplatform.ExperimentRun.list(experiment=_TEST_EXPERIMENT)
+ experiment_run_list[0].update_state(gca_execution.Execution.State.FAILED)
+
+
+class TestTensorboard:
+ def test_get_or_create_default_tb_with_existing_default(
+ self, list_default_tensorboard_mock
+ ):
+ tensorboard = metadata._get_or_create_default_tensorboard()
+
+ list_default_tensorboard_mock.assert_called_once_with(
+ request={
+ "parent": f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}",
+ "filter": "is_default=true",
+ }
+ )
+ assert tensorboard.name == _TEST_DEFAULT_TENSORBOARD_NAME
+
+ def test_get_or_create_default_tb_no_existing_default(
+ self,
+ list_default_tensorboard_empty_mock,
+ create_default_tensorboard_mock,
+ ):
+ tensorboard = metadata._get_or_create_default_tensorboard()
+
+ list_default_tensorboard_empty_mock.assert_called_once_with(
+ request={
+ "parent": f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}",
+ "filter": "is_default=true",
+ }
+ )
+ create_default_tensorboard_mock.assert_called_once()
+ assert tensorboard.name == _TEST_DEFAULT_TENSORBOARD_NAME
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_models.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..df970949c0f96f27b146f949f740b77ce1b6f7bf
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_models.py
@@ -0,0 +1,890 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import datetime
+import os
+import pickle
+from importlib import reload
+from unittest import mock
+from unittest.mock import patch
+import uuid
+
+from google.auth import credentials as auth_credentials
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.metadata import constants
+from google.cloud.aiplatform.metadata import metadata
+from google.cloud.aiplatform.metadata import _models
+from google.cloud.aiplatform.models import Model
+from google.cloud.aiplatform_v1 import Artifact as GapicArtifact
+from google.cloud.aiplatform_v1 import MetadataStore as GapicMetadataStore
+from google.cloud.aiplatform_v1 import MetadataServiceClient
+import numpy as np
+import pytest
+import sklearn
+from sklearn.datasets import make_classification
+from sklearn.linear_model import LinearRegression
+import tensorflow as tf
+import xgboost as xgb
+
+
+# project
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_BUCKET_NAME = "gs://test-bucket"
+_TEST_PARENT = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+_TEST_CREDENTIALS = mock.Mock(
+ spec=auth_credentials.AnonymousCredentials(),
+ universe_domain="googleapis.com",
+)
+
+
+# artifact
+_TEST_ARTIFACT_ID = "test-model-id"
+_TEST_URI = "gs://test-uri"
+_TEST_DISPLAY_NAME = "test-model-display-name"
+
+_TEST_ARTIFACT_ID = "test-model-id"
+_TEST_ARTIFACT_NAME = f"{_TEST_PARENT}/artifacts/{_TEST_ARTIFACT_ID}"
+
+_TEST_TIMESTAMP = "2022-11-30-00-00-00"
+_TEST_DATETIME = datetime.datetime.strptime(_TEST_TIMESTAMP, "%Y-%m-%d-%H-%M-%S")
+
+_TEST_UUID = uuid.UUID("fa2db23f-1b13-412d-beea-94602448e4ce")
+
+_TEST_INPUT_EXAMPLE = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
+
+_TEST_MODEL_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_ARTIFACT_ID}"
+)
+
+
+@pytest.fixture
+def mock_datetime_now(monkeypatch):
+ class DateTime(datetime.datetime):
+ @classmethod
+ def now(cls):
+ return _TEST_DATETIME
+
+ monkeypatch.setattr(datetime, "datetime", DateTime)
+
+
+@pytest.fixture
+def mock_uuid():
+ with patch.object(uuid, "uuid4", return_value=_TEST_UUID) as mock_uuid:
+ yield mock_uuid
+
+
+@pytest.fixture
+def mock_keras_save_model():
+ with patch.object(tf.keras.models.Sequential, "save") as mock_keras_save_model:
+ yield mock_keras_save_model
+
+
+@pytest.fixture
+def mock_tf_save_model():
+ with patch("tensorflow.saved_model.save") as mock_tf_save_model:
+ yield mock_tf_save_model
+
+
+@pytest.fixture
+def mock_storage_blob_upload_from_filename():
+ with patch(
+ "google.cloud.storage.Blob.upload_from_filename"
+ ) as mock_blob_upload_from_filename, patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_upload_from_filename
+
+
+@pytest.fixture
+def mock_storage_blob_download_sklearn_model_file():
+ def create_model_file(filename):
+ train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
+ train_y = np.dot(train_x, np.array([1, 2])) + 3
+ model = LinearRegression()
+ model.fit(train_x, train_y)
+ with open(filename, "wb") as model_file:
+ pickle.dump(model, model_file)
+
+ with patch(
+ "google.cloud.storage.Blob.download_to_filename", wraps=create_model_file
+ ) as mock_blob_download_to_filename, patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_download_to_filename
+
+
+@pytest.fixture
+def mock_storage_blob_download_xgboost_booster_file():
+ def create_model_file(filename):
+ x, y = make_classification()
+ dtrain = xgb.DMatrix(data=x, label=y)
+ booster = xgb.train(
+ params={"num_parallel_tree": 4, "subsample": 0.5, "num_class": 2},
+ dtrain=dtrain,
+ )
+ booster.save_model(filename)
+
+ with patch(
+ "google.cloud.storage.Blob.download_to_filename", wraps=create_model_file
+ ) as mock_blob_download_to_filename, patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_download_to_filename
+
+
+@pytest.fixture
+def mock_storage_blob_download_xgboost_xgbmodel_file():
+ def create_model_file(filename):
+ x, y = make_classification()
+ model = xgb.XGBClassifier()
+ model.fit(x, y)
+ model.save_model(filename)
+
+ with patch(
+ "google.cloud.storage.Blob.download_to_filename", wraps=create_model_file
+ ) as mock_blob_download_to_filename, patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_download_to_filename
+
+
+@pytest.fixture
+def mock_storage_blob_download_input_example():
+ def create_input_example_file(filename):
+ filepath, _ = os.path.split(filename)
+ _models._save_input_example(_TEST_INPUT_EXAMPLE, filepath)
+
+ with patch(
+ "google.cloud.storage.Blob.download_to_filename",
+ wraps=create_input_example_file,
+ ) as mock_blob_download_to_filename, patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_download_to_filename
+
+
+@pytest.fixture
+def mock_load_tensorflow_keras_model():
+ with patch("tensorflow.keras.models.load_model") as load_tensorflow_keras_model:
+ yield load_tensorflow_keras_model
+
+
+@pytest.fixture
+def mock_load_tensorflow_module_model():
+ with patch("tensorflow.saved_model.load") as load_tensorflow_keras_model:
+ yield load_tensorflow_keras_model
+
+
+_TEST_SKLEARN_MODEL_ARTIFACT = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ state=GapicArtifact.State.LIVE,
+ metadata={
+ "frameworkName": "sklearn",
+ "frameworkVersion": "1.0",
+ "modelFile": "model.pkl",
+ "modelClass": "sklearn.linear_model._base.LinearRegression",
+ },
+)
+
+
+@pytest.fixture
+def create_experiment_model_artifact_mock():
+ with patch.object(MetadataServiceClient, "create_artifact") as create_artifact_mock:
+ create_artifact_mock.return_value = _TEST_SKLEARN_MODEL_ARTIFACT
+ yield create_artifact_mock
+
+
+@pytest.fixture
+def get_sklearn_model_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = _TEST_SKLEARN_MODEL_ARTIFACT
+ yield get_artifact_mock
+
+
+_TEST_XGBOOST_BOOSTER_ARTIFACT = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ state=GapicArtifact.State.LIVE,
+ metadata={
+ "frameworkName": "xgboost",
+ "frameworkVersion": "1.5",
+ "modelFile": "model.bst",
+ "modelClass": "xgboost.core.Booster",
+ },
+)
+
+
+@pytest.fixture
+def get_xgboost_booster_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = _TEST_XGBOOST_BOOSTER_ARTIFACT
+ yield get_artifact_mock
+
+
+_TEST_XGBOOST_XGBMODEL_ARTIFACT = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ state=GapicArtifact.State.LIVE,
+ metadata={
+ "frameworkName": "xgboost",
+ "frameworkVersion": "1.5",
+ "modelFile": "model.bst",
+ "modelClass": "xgboost.sklearn.XGBClassifier",
+ },
+)
+
+
+@pytest.fixture
+def get_xgboost_xgbmodel_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = _TEST_XGBOOST_XGBMODEL_ARTIFACT
+ yield get_artifact_mock
+
+
+_TEST_TENSORFLOW_KERAS_ARTIFACT = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ state=GapicArtifact.State.LIVE,
+ metadata={
+ "frameworkName": "tensorflow",
+ "frameworkVersion": "2.8",
+ "modelFile": "saved_model",
+ "modelClass": "tensorflow.keras.Model",
+ },
+)
+
+
+@pytest.fixture
+def get_tensorflow_keras_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = _TEST_TENSORFLOW_KERAS_ARTIFACT
+ yield get_artifact_mock
+
+
+_TEST_TENSORFLOW_MODULE_ARTIFACT = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ state=GapicArtifact.State.LIVE,
+ metadata={
+ "frameworkName": "tensorflow",
+ "frameworkVersion": "2.8",
+ "modelFile": "saved_model",
+ "modelClass": "tensorflow.Module",
+ },
+)
+
+
+@pytest.fixture
+def get_tensorflow_module_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = _TEST_TENSORFLOW_MODULE_ARTIFACT
+ yield get_artifact_mock
+
+
+@pytest.fixture
+def model_upload_mock():
+ with patch.object(Model, "upload") as upload_model_mock:
+ yield upload_model_mock
+
+
+@pytest.fixture
+def get_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.return_value = GapicMetadataStore(name=_TEST_PARENT)
+ yield get_metadata_store_mock
+
+
+class TestModels:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures(
+ "mock_datetime_now",
+ "mock_uuid",
+ "get_metadata_store_mock",
+ )
+ def test_save_model_sklearn(
+ self,
+ mock_storage_blob_upload_from_filename,
+ create_experiment_model_artifact_mock,
+ get_sklearn_model_artifact_mock,
+ ):
+ train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
+ train_y = np.dot(train_x, np.array([1, 2])) + 3
+ model = LinearRegression()
+ model.fit(train_x, train_y)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ aiplatform.save_model(model, _TEST_ARTIFACT_ID)
+
+ # Verify that the model file is correctly uploaded to gcs
+ upload_file_path = mock_storage_blob_upload_from_filename.call_args[1][
+ "filename"
+ ]
+ assert upload_file_path.endswith("model.pkl")
+
+ # Verify the model artifact is created correctly
+ expected_artifact = GapicArtifact(
+ uri=f"{_TEST_BUCKET_NAME}/{_TEST_TIMESTAMP}-{_TEST_UUID.hex[:5]}-sklearn-model",
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ metadata={
+ "frameworkName": "sklearn",
+ "frameworkVersion": sklearn.__version__,
+ "modelFile": "model.pkl",
+ "modelClass": "sklearn.linear_model._base.LinearRegression",
+ },
+ state=GapicArtifact.State.LIVE,
+ )
+ create_experiment_model_artifact_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ artifact=expected_artifact,
+ artifact_id=_TEST_ARTIFACT_ID,
+ )
+
+ get_sklearn_model_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_storage_blob_upload_from_filename",
+ "get_sklearn_model_artifact_mock",
+ "get_metadata_store_mock",
+ )
+ def test_save_model_with_all_args(
+ self,
+ create_experiment_model_artifact_mock,
+ ):
+ train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
+ train_y = np.dot(train_x, np.array([1, 2])) + 3
+ model = LinearRegression()
+ model.fit(train_x, train_y)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ aiplatform.save_model(
+ model=model,
+ artifact_id=_TEST_ARTIFACT_ID,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ input_example=_TEST_INPUT_EXAMPLE,
+ )
+
+ # Verify the model artifact is created correctly
+ expected_artifact = GapicArtifact(
+ display_name=_TEST_DISPLAY_NAME,
+ uri=_TEST_URI,
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ metadata={
+ "frameworkName": "sklearn",
+ "frameworkVersion": sklearn.__version__,
+ "modelFile": "model.pkl",
+ "modelClass": "sklearn.linear_model._base.LinearRegression",
+ "predictSchemata": {"instanceSchemaUri": f"{_TEST_URI}/instance.yaml"},
+ },
+ state=GapicArtifact.State.LIVE,
+ )
+ create_experiment_model_artifact_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ artifact=expected_artifact,
+ artifact_id=_TEST_ARTIFACT_ID,
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_datetime_now",
+ "mock_uuid",
+ "get_metadata_store_mock",
+ )
+ def test_save_model_xgboost_booster(
+ self,
+ mock_storage_blob_upload_from_filename,
+ create_experiment_model_artifact_mock,
+ get_xgboost_booster_artifact_mock,
+ ):
+ # Fix the bug that xgb.__version__ in third_party returns a byte not string
+ xgb.__version__ = "1.5.1"
+
+ x, y = make_classification()
+ dtrain = xgb.DMatrix(data=x, label=y)
+ booster = xgb.train(
+ params={"num_parallel_tree": 4, "subsample": 0.5, "num_class": 2},
+ dtrain=dtrain,
+ )
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ aiplatform.save_model(booster, _TEST_ARTIFACT_ID)
+
+ # Verify that the model file is correctly uploaded to gcs
+ upload_file_path = mock_storage_blob_upload_from_filename.call_args[1][
+ "filename"
+ ]
+ assert upload_file_path.endswith("model.bst")
+
+ # Verify the model artifact is created correctly
+ expected_artifact = GapicArtifact(
+ uri=f"{_TEST_BUCKET_NAME}/{_TEST_TIMESTAMP}-{_TEST_UUID.hex[:5]}-xgboost-model",
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ metadata={
+ "frameworkName": "xgboost",
+ "frameworkVersion": xgb.__version__,
+ "modelFile": "model.bst",
+ "modelClass": "xgboost.core.Booster",
+ },
+ state=GapicArtifact.State.LIVE,
+ )
+ create_experiment_model_artifact_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ artifact=expected_artifact,
+ artifact_id=_TEST_ARTIFACT_ID,
+ )
+
+ get_xgboost_booster_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_datetime_now",
+ "mock_uuid",
+ "get_metadata_store_mock",
+ )
+ def test_save_model_xgboost_xgbmodel(
+ self,
+ mock_storage_blob_upload_from_filename,
+ create_experiment_model_artifact_mock,
+ get_xgboost_xgbmodel_artifact_mock,
+ ):
+ # Fix the bug that xgb.__version__ in third_party returns a byte not string
+ xgb.__version__ = "1.5.1"
+
+ x, y = make_classification()
+ xgb_model = xgb.XGBClassifier()
+ xgb_model.fit(x, y)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ aiplatform.save_model(xgb_model, _TEST_ARTIFACT_ID)
+
+ # Verify that the model file is correctly uploaded to gcs
+ upload_file_path = mock_storage_blob_upload_from_filename.call_args[1][
+ "filename"
+ ]
+ assert upload_file_path.endswith("model.bst")
+
+ # Verify the model artifact is created correctly
+ expected_artifact = GapicArtifact(
+ uri=f"{_TEST_BUCKET_NAME}/{_TEST_TIMESTAMP}-{_TEST_UUID.hex[:5]}-xgboost-model",
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ metadata={
+ "frameworkName": "xgboost",
+ "frameworkVersion": xgb.__version__,
+ "modelFile": "model.bst",
+ "modelClass": "xgboost.sklearn.XGBClassifier",
+ },
+ state=GapicArtifact.State.LIVE,
+ )
+ create_experiment_model_artifact_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ artifact=expected_artifact,
+ artifact_id=_TEST_ARTIFACT_ID,
+ )
+
+ get_xgboost_xgbmodel_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_datetime_now",
+ "mock_uuid",
+ "get_metadata_store_mock",
+ )
+ def test_save_model_tensorflow_keras(
+ self,
+ mock_keras_save_model,
+ create_experiment_model_artifact_mock,
+ get_tensorflow_keras_artifact_mock,
+ ):
+ x = np.random.random((100, 3))
+ y = np.random.random((100, 1))
+ model = tf.keras.Sequential(
+ [tf.keras.layers.Dense(5, input_shape=(3,)), tf.keras.layers.Softmax()]
+ )
+ model.compile(optimizer="adam", loss="mean_squared_error")
+ model.fit(x, y)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ aiplatform.save_model(model, _TEST_ARTIFACT_ID)
+
+ mock_keras_save_model.assert_called_once_with(
+ f"{_TEST_BUCKET_NAME}/{_TEST_TIMESTAMP}-{_TEST_UUID.hex[:5]}"
+ + "-tensorflow-model/saved_model",
+ )
+
+ # Verify the model artifact is created correctly
+ expected_artifact = GapicArtifact(
+ uri=f"{_TEST_BUCKET_NAME}/{_TEST_TIMESTAMP}-{_TEST_UUID.hex[:5]}-tensorflow-model",
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ metadata={
+ "frameworkName": "tensorflow",
+ "frameworkVersion": tf.__version__,
+ "modelFile": "saved_model",
+ "modelClass": "tensorflow.keras.Model",
+ },
+ state=GapicArtifact.State.LIVE,
+ )
+ create_experiment_model_artifact_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ artifact=expected_artifact,
+ artifact_id=_TEST_ARTIFACT_ID,
+ )
+
+ get_tensorflow_keras_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_datetime_now",
+ "mock_uuid",
+ "get_metadata_store_mock",
+ )
+ def test_save_model_tensorflow_module(
+ self,
+ mock_tf_save_model,
+ create_experiment_model_artifact_mock,
+ get_tensorflow_module_artifact_mock,
+ ):
+ class Adder(tf.Module):
+ @tf.function(input_signature=[tf.TensorSpec(shape=[], dtype=tf.float32)])
+ def add(self, x):
+ return x + x
+
+ model = Adder()
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ aiplatform.save_model(model, _TEST_ARTIFACT_ID)
+
+ mock_tf_save_model.assert_called_once_with(
+ model,
+ f"{_TEST_BUCKET_NAME}/{_TEST_TIMESTAMP}-{_TEST_UUID.hex[:5]}"
+ + "-tensorflow-model/saved_model",
+ )
+
+ # Verify the model artifact is created correctly
+ expected_artifact = GapicArtifact(
+ uri=f"{_TEST_BUCKET_NAME}/{_TEST_TIMESTAMP}-{_TEST_UUID.hex[:5]}-tensorflow-model",
+ schema_title=constants.GOOGLE_EXPERIMENT_MODEL,
+ schema_version=constants._DEFAULT_SCHEMA_VERSION,
+ metadata={
+ "frameworkName": "tensorflow",
+ "frameworkVersion": tf.__version__,
+ "modelFile": "saved_model",
+ "modelClass": "tensorflow.Module",
+ },
+ state=GapicArtifact.State.LIVE,
+ )
+ create_experiment_model_artifact_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ artifact=expected_artifact,
+ artifact_id=_TEST_ARTIFACT_ID,
+ )
+
+ get_tensorflow_module_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_load_model_sklearn(
+ self,
+ mock_storage_blob_download_sklearn_model_file,
+ get_sklearn_model_artifact_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ experiment_model = aiplatform.get_experiment_model(_TEST_ARTIFACT_ID)
+ model = experiment_model.load_model()
+
+ # Verify that the correct model artifact is retrieved by its ID
+ get_sklearn_model_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ # Verify that the model file is downloaded correctly
+ download_file_path = mock_storage_blob_download_sklearn_model_file.call_args[1][
+ "filename"
+ ]
+ assert download_file_path.endswith("model.pkl")
+
+ # Verify the loaded model
+ assert model.__class__.__name__ == "LinearRegression"
+
+ def test_load_model_xgboost_booster(
+ self,
+ mock_storage_blob_download_xgboost_booster_file,
+ get_xgboost_booster_artifact_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ # Fix the bug that xgb.__version__ in third_party returns a byte not string
+ xgb.__version__ = "1.5.1"
+
+ experiment_model = aiplatform.get_experiment_model(_TEST_ARTIFACT_ID)
+ model = experiment_model.load_model()
+
+ # Verify that the correct model artifact is retrieved by its ID
+ get_xgboost_booster_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ # Verify that the model file is downloaded correctly
+ download_file_path = mock_storage_blob_download_xgboost_booster_file.call_args[
+ 1
+ ]["filename"]
+ assert download_file_path.endswith("model.bst")
+
+ # Verify the loaded model
+ assert model.__class__.__name__ == "Booster"
+
+ def test_load_model_xgboost_xgbmodel(
+ self,
+ mock_storage_blob_download_xgboost_xgbmodel_file,
+ get_xgboost_xgbmodel_artifact_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ # Fix the bug that xgb.__version__ in third_party returns a byte not string
+ xgb.__version__ = "1.5.1"
+
+ experiment_model = aiplatform.get_experiment_model(_TEST_ARTIFACT_ID)
+ model = experiment_model.load_model()
+
+ # Verify that the correct model artifact is retrieved by its ID
+ get_xgboost_xgbmodel_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ # Verify that the model file is downloaded correctly
+ download_file_path = mock_storage_blob_download_xgboost_xgbmodel_file.call_args[
+ 1
+ ]["filename"]
+ assert download_file_path.endswith("model.bst")
+
+ # Verify the loaded model
+ assert model.__class__.__name__ == "XGBClassifier"
+
+ def test_load_model_tensorflow_keras(
+ self,
+ mock_load_tensorflow_keras_model,
+ get_tensorflow_keras_artifact_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ experiment_model = aiplatform.get_experiment_model(_TEST_ARTIFACT_ID)
+ experiment_model.load_model()
+
+ # Verify that the correct model artifact is retrieved by its ID
+ get_tensorflow_keras_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ # Verify that the model file is loaded correctly
+ mock_load_tensorflow_keras_model.assert_called_once_with(
+ f"{_TEST_URI}/saved_model",
+ )
+
+ def test_load_model_tensorflow_module(
+ self,
+ mock_load_tensorflow_module_model,
+ get_tensorflow_module_artifact_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+ experiment_model = aiplatform.get_experiment_model(_TEST_ARTIFACT_ID)
+ experiment_model.load_model()
+
+ # Verify that the correct model artifact is retrieved by its ID
+ get_tensorflow_module_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ # Verify that the model file is loaded correctly
+ mock_load_tensorflow_module_model.assert_called_once_with(
+ f"{_TEST_URI}/saved_model",
+ )
+
+ def test_register_model_sklearn(
+ self, model_upload_mock, get_sklearn_model_artifact_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ experiment_model = aiplatform.get_experiment_model(_TEST_ARTIFACT_ID)
+ experiment_model.register_model(display_name=_TEST_DISPLAY_NAME)
+
+ # Verify that the correct model artifact is retrieved by its ID
+ get_sklearn_model_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+ # register_model API calls Model.upload internally to register the model
+ # Since Model.upload is tested in "test_models.py", here we only need to
+ # make sure register_model is sending the right args to Model.upload
+ model_upload_mock.assert_called_once_with(
+ serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest",
+ artifact_uri=_TEST_URI,
+ model_id=None,
+ parent_model=None,
+ is_default_version=True,
+ version_aliases=None,
+ version_description=None,
+ display_name=_TEST_DISPLAY_NAME,
+ description=None,
+ labels=None,
+ serving_container_predict_route=None,
+ serving_container_health_route=None,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables=None,
+ serving_container_ports=None,
+ instance_schema_uri=None,
+ parameters_schema_uri=None,
+ prediction_schema_uri=None,
+ explanation_metadata=None,
+ explanation_parameters=None,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ encryption_spec_key_name=None,
+ staging_bucket=None,
+ sync=True,
+ upload_request_timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_storage_blob_download_input_example",
+ "get_sklearn_model_artifact_mock",
+ )
+ def test_get_experiment_model_info(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+ experiment_model = aiplatform.get_experiment_model(_TEST_ARTIFACT_ID)
+ model_info = experiment_model.get_model_info()
+
+ expected_model_info = {
+ "model_class": "sklearn.linear_model._base.LinearRegression",
+ "framework_name": "sklearn",
+ "framework_version": "1.0",
+ "input_example": {
+ "type": "numpy.ndarray",
+ "data": _TEST_INPUT_EXAMPLE.tolist(),
+ },
+ }
+ assert model_info == expected_model_info
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_resources.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_resources.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c3f5269f8b1c9b8f452a3f083ecd586070784d8
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_resources.py
@@ -0,0 +1,1171 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from importlib import reload
+from unittest.mock import patch
+
+import pytest
+from google.api_core import exceptions
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.compat.types import event as gca_event
+from google.cloud.aiplatform.metadata import artifact
+from google.cloud.aiplatform.metadata import context
+from google.cloud.aiplatform.metadata import execution
+from google.cloud.aiplatform.metadata import utils as metadata_utils
+from google.cloud.aiplatform_v1 import (
+ MetadataServiceClient,
+ AddExecutionEventsResponse,
+ Event,
+ LineageSubgraph,
+ Execution as GapicExecution,
+ Context as GapicContext,
+ Artifact as GapicArtifact,
+ MetadataStore as GapicMetadataStore,
+ AddContextArtifactsAndExecutionsResponse,
+)
+
+import constants as test_constants
+
+# project
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_METADATA_STORE = "test-metadata-store"
+_TEST_ALT_LOCATION = "europe-west4"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/{_TEST_METADATA_STORE}"
+
+# resource attributes
+_TEST_DISPLAY_NAME = "test-display-name"
+_TEST_SCHEMA_TITLE = "test.Example"
+_TEST_SCHEMA_VERSION = "0.0.1"
+_TEST_DESCRIPTION = "test description"
+_TEST_METADATA = {"test-param1": 1, "test-param2": "test-value", "test-param3": True}
+_TEST_UPDATED_METADATA = {
+ "test-param1": 2,
+ "test-param2": "test-value-1",
+ "test-param3": False,
+}
+_TEST_ORDER_BY = "test_order_by"
+
+# context
+_TEST_CONTEXT_ID = "test-context-id"
+_TEST_CONTEXT_NAME = f"{_TEST_PARENT}/contexts/{_TEST_CONTEXT_ID}"
+
+# artifact
+_TEST_ARTIFACT_ID = "test-artifact-id"
+_TEST_ARTIFACT_NAME = f"{_TEST_PARENT}/artifacts/{_TEST_ARTIFACT_ID}"
+
+# execution
+_TEST_EXECUTION_ID = "test-execution-id"
+_TEST_EXECUTION_NAME = f"{_TEST_PARENT}/executions/{_TEST_EXECUTION_ID}"
+
+
+@pytest.fixture
+def get_context_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.return_value = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ yield get_context_mock
+
+
+@pytest.fixture
+def get_context_for_get_or_create_mock():
+ with patch.object(
+ MetadataServiceClient, "get_context"
+ ) as get_context_for_get_or_create_mock:
+ get_context_for_get_or_create_mock.side_effect = exceptions.NotFound(
+ "test: Context Not Found"
+ )
+ yield get_context_for_get_or_create_mock
+
+
+@pytest.fixture
+def create_context_mock():
+ with patch.object(MetadataServiceClient, "create_context") as create_context_mock:
+ create_context_mock.return_value = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ yield create_context_mock
+
+
+@pytest.fixture
+def list_contexts_mock():
+ with patch.object(MetadataServiceClient, "list_contexts") as list_contexts_mock:
+ list_contexts_mock.return_value = [
+ GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ ),
+ GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ ),
+ ]
+ yield list_contexts_mock
+
+
+@pytest.fixture
+def update_context_mock():
+ with patch.object(MetadataServiceClient, "update_context") as update_context_mock:
+ update_context_mock.return_value = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ yield update_context_mock
+
+
+@pytest.fixture
+def update_context_with_errors_mock():
+ with patch.object(
+ MetadataServiceClient, "update_context"
+ ) as update_context_with_errors_mock:
+ update_context_with_errors_mock.side_effect = [
+ exceptions.Aborted(
+ "Specified Context `etag`: `1` does not match server `etag`: `2`"
+ ),
+ GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ ),
+ ]
+ yield update_context_with_errors_mock
+
+
+@pytest.fixture
+def update_context_with_errors_mock_2():
+ with patch.object(
+ MetadataServiceClient, "update_context"
+ ) as update_context_with_errors_mock_2:
+ update_context_with_errors_mock_2.side_effect = [
+ exceptions.Aborted(
+ "Specified Context `etag`: `2` does not match server `etag`: `1`"
+ )
+ ]
+ yield update_context_with_errors_mock_2
+
+
+@pytest.fixture
+def update_context_with_errors_mock_3():
+ with patch.object(
+ MetadataServiceClient, "update_context"
+ ) as update_context_with_errors_mock_3:
+ update_context_with_errors_mock_3.side_effect = [
+ exceptions.Aborted(
+ "Specified Context `etag`: `1` does not match server `etag`: `2`"
+ )
+ ] * 6
+ yield update_context_with_errors_mock_2
+
+
+@pytest.fixture
+def add_context_artifacts_and_executions_mock():
+ with patch.object(
+ MetadataServiceClient, "add_context_artifacts_and_executions"
+ ) as add_context_artifacts_and_executions_mock:
+ add_context_artifacts_and_executions_mock.return_value = (
+ AddContextArtifactsAndExecutionsResponse()
+ )
+ yield add_context_artifacts_and_executions_mock
+
+
+@pytest.fixture
+def get_execution_mock():
+ with patch.object(MetadataServiceClient, "get_execution") as get_execution_mock:
+ get_execution_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ yield get_execution_mock
+
+
+@pytest.fixture
+def get_execution_for_get_or_create_mock():
+ with patch.object(
+ MetadataServiceClient, "get_execution"
+ ) as get_execution_for_get_or_create_mock:
+ get_execution_for_get_or_create_mock.side_effect = exceptions.NotFound(
+ "test: Execution Not Found"
+ )
+ yield get_execution_for_get_or_create_mock
+
+
+@pytest.fixture
+def create_execution_mock():
+ with patch.object(
+ MetadataServiceClient, "create_execution"
+ ) as create_execution_mock:
+ create_execution_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ state=GapicExecution.State.RUNNING,
+ )
+ yield create_execution_mock
+
+
+@pytest.fixture
+def list_executions_mock():
+ with patch.object(MetadataServiceClient, "list_executions") as list_executions_mock:
+ list_executions_mock.return_value = [
+ GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ ),
+ GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ ),
+ ]
+ yield list_executions_mock
+
+
+@pytest.fixture
+def query_execution_inputs_and_outputs_mock():
+ with patch.object(
+ MetadataServiceClient, "query_execution_inputs_and_outputs"
+ ) as query_execution_inputs_and_outputs_mock:
+ query_execution_inputs_and_outputs_mock.return_value = LineageSubgraph(
+ artifacts=[
+ GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ ),
+ ],
+ events=[
+ gca_event.Event(
+ artifact=_TEST_ARTIFACT_NAME,
+ execution=_TEST_EXECUTION_NAME,
+ type_=gca_event.Event.Type.OUTPUT,
+ )
+ ],
+ )
+ yield query_execution_inputs_and_outputs_mock
+
+
+@pytest.fixture
+def update_execution_mock():
+ with patch.object(
+ MetadataServiceClient, "update_execution"
+ ) as update_execution_mock:
+ update_execution_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ state=GapicExecution.State.RUNNING,
+ )
+ yield update_execution_mock
+
+
+@pytest.fixture
+def add_execution_events_mock():
+ with patch.object(
+ MetadataServiceClient, "add_execution_events"
+ ) as add_execution_events_mock:
+ add_execution_events_mock.return_value = AddExecutionEventsResponse()
+ yield add_execution_events_mock
+
+
+@pytest.fixture
+def get_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ yield get_artifact_mock
+
+
+@pytest.fixture
+def get_artifact_for_get_or_create_mock():
+ with patch.object(
+ MetadataServiceClient, "get_artifact"
+ ) as get_artifact_for_get_or_create_mock:
+ get_artifact_for_get_or_create_mock.side_effect = exceptions.NotFound(
+ "test: Artifact Not Found"
+ )
+ yield get_artifact_for_get_or_create_mock
+
+
+@pytest.fixture
+def create_artifact_mock():
+ with patch.object(MetadataServiceClient, "create_artifact") as create_artifact_mock:
+ create_artifact_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ state=GapicArtifact.State.LIVE,
+ )
+ yield create_artifact_mock
+
+
+@pytest.fixture
+def list_artifacts_mock():
+ with patch.object(MetadataServiceClient, "list_artifacts") as list_artifacts_mock:
+ list_artifacts_mock.return_value = [
+ GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ ),
+ GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ ),
+ ]
+ yield list_artifacts_mock
+
+
+@pytest.fixture
+def update_artifact_mock():
+ with patch.object(MetadataServiceClient, "update_artifact") as update_artifact_mock:
+ update_artifact_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ state=GapicArtifact.State.LIVE,
+ )
+ yield update_artifact_mock
+
+
+@pytest.fixture
+def get_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.return_value = GapicMetadataStore(
+ name=_TEST_METADATA_STORE
+ )
+ yield get_metadata_store_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestContext:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_context(self, get_context_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ context.Context(resource_name=_TEST_CONTEXT_NAME)
+ get_context_mock.assert_called_once_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_context_with_id(self, get_context_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ context.Context(
+ resource_name=_TEST_CONTEXT_ID, metadata_store_id=_TEST_METADATA_STORE
+ )
+ get_context_mock.assert_called_once_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_get_or_create_context(
+ self, get_context_for_get_or_create_mock, create_context_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_context = context.Context.get_or_create(
+ resource_id=_TEST_CONTEXT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ expected_context = GapicContext(
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ get_context_for_get_or_create_mock.assert_called_once_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+ create_context_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ context_id=_TEST_CONTEXT_ID,
+ context=expected_context,
+ )
+
+ expected_context.name = _TEST_CONTEXT_NAME
+ assert my_context._gca_resource == expected_context
+
+ def test_get_context(self, get_context_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_context = context.Context.get(
+ resource_id=_TEST_CONTEXT_ID,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ expected_context = GapicContext(
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ get_context_mock.assert_called_once_with(
+ name=_TEST_CONTEXT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ expected_context.name = _TEST_CONTEXT_NAME
+ assert my_context._gca_resource == expected_context
+
+ @pytest.mark.usefixtures("get_context_mock")
+ @pytest.mark.usefixtures("create_context_mock")
+ def test_update_context(self, update_context_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_context = context.Context._create(
+ resource_id=_TEST_CONTEXT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+ my_context.update(_TEST_UPDATED_METADATA)
+
+ updated_context = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+
+ update_context_mock.assert_called_once_with(context=updated_context)
+ assert my_context._gca_resource == updated_context
+
+ @pytest.mark.usefixtures("get_context_mock")
+ @pytest.mark.usefixtures("create_context_mock")
+ def test_update_context_with_retry_success(self, update_context_with_errors_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_context = context.Context._create(
+ resource_id=_TEST_CONTEXT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+ my_context.update(_TEST_UPDATED_METADATA)
+
+ updated_context = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+
+ update_context_with_errors_mock.assert_called_with(context=updated_context)
+ assert my_context._gca_resource == updated_context
+
+ @pytest.mark.usefixtures("get_context_mock")
+ @pytest.mark.usefixtures("create_context_mock")
+ @pytest.mark.usefixtures("update_context_with_errors_mock_2")
+ def test_update_context_with_retry_etag_order_failure(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_context = context.Context._create(
+ resource_id=_TEST_CONTEXT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+ with pytest.raises(exceptions.Aborted):
+ my_context.update(_TEST_UPDATED_METADATA)
+
+ @pytest.mark.usefixtures("get_context_mock")
+ @pytest.mark.usefixtures("create_context_mock")
+ @pytest.mark.usefixtures("update_context_with_errors_mock_3")
+ def test_update_context_with_retry_too_many_error_failure(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_context = context.Context._create(
+ resource_id=_TEST_CONTEXT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+ with pytest.raises(exceptions.Aborted):
+ my_context.update(_TEST_UPDATED_METADATA)
+
+ @pytest.mark.usefixtures("get_context_mock")
+ def test_list_contexts(self, list_contexts_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ filter_query = "test-filter"
+ context_list = context.Context.list(
+ filter=filter_query,
+ metadata_store_id=_TEST_METADATA_STORE,
+ order_by=_TEST_ORDER_BY,
+ )
+
+ expected_context = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+
+ list_contexts_mock.assert_called_once_with(
+ request={
+ "parent": _TEST_PARENT,
+ "filter": filter_query,
+ "order_by": _TEST_ORDER_BY,
+ }
+ )
+ assert len(context_list) == 2
+ # pylint: disable-next=protected-access
+ assert context_list[0]._gca_resource == expected_context
+ # pylint: disable-next=protected-access
+ assert context_list[1]._gca_resource == expected_context
+
+ @pytest.mark.usefixtures("get_context_mock")
+ def test_add_artifacts_and_executions(
+ self, add_context_artifacts_and_executions_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ my_context = context.Context.get_or_create(
+ resource_id=_TEST_CONTEXT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ my_context.add_artifacts_and_executions(
+ artifact_resource_names=[_TEST_ARTIFACT_NAME],
+ execution_resource_names=[_TEST_EXECUTION_NAME],
+ )
+ add_context_artifacts_and_executions_mock.assert_called_once_with(
+ context=_TEST_CONTEXT_NAME,
+ artifacts=[_TEST_ARTIFACT_NAME],
+ executions=[_TEST_EXECUTION_NAME],
+ )
+
+ @pytest.mark.usefixtures("get_context_mock")
+ def test_add_artifacts_only(self, add_context_artifacts_and_executions_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ my_context = context.Context.get_or_create(
+ resource_id=_TEST_CONTEXT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ my_context.add_artifacts_and_executions(
+ artifact_resource_names=[_TEST_ARTIFACT_NAME]
+ )
+ add_context_artifacts_and_executions_mock.assert_called_once_with(
+ context=_TEST_CONTEXT_NAME,
+ artifacts=[_TEST_ARTIFACT_NAME],
+ executions=None,
+ )
+
+ @pytest.mark.usefixtures("get_context_mock")
+ def test_add_executions_only(self, add_context_artifacts_and_executions_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ my_context = context.Context.get_or_create(
+ resource_id=_TEST_CONTEXT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ my_context.add_artifacts_and_executions(
+ execution_resource_names=[_TEST_EXECUTION_NAME]
+ )
+ add_context_artifacts_and_executions_mock.assert_called_once_with(
+ context=_TEST_CONTEXT_NAME,
+ artifacts=None,
+ executions=[_TEST_EXECUTION_NAME],
+ )
+
+
+_VERTEX_MODEL_ARTIFACT_URI = f"https://{_TEST_LOCATION}-aiplatform.googleapis.com/v1/{test_constants.ModelConstants._TEST_MODEL_OBJ_WITH_VERSION.name}"
+
+
+@pytest.fixture
+def list_vertex_model_artifact_mock():
+ with patch.object(MetadataServiceClient, "list_artifacts") as list_artifacts_mock:
+ list_artifacts_mock.return_value = [
+ GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ uri=_VERTEX_MODEL_ARTIFACT_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ ]
+ yield list_artifacts_mock
+
+
+@pytest.fixture
+def list_artifact_empty_mock():
+ with patch.object(MetadataServiceClient, "list_artifacts") as list_artifacts_mock:
+ list_artifacts_mock.return_value = []
+ yield list_artifacts_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestExecution:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_execution(self, get_execution_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ execution.Execution(execution_name=_TEST_EXECUTION_NAME)
+ get_execution_mock.assert_called_once_with(
+ name=_TEST_EXECUTION_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_execution_with_id(self, get_execution_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ execution.Execution(
+ execution_name=_TEST_EXECUTION_ID, metadata_store_id=_TEST_METADATA_STORE
+ )
+ get_execution_mock.assert_called_once_with(
+ name=_TEST_EXECUTION_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_get_or_create_execution(
+ self, get_execution_for_get_or_create_mock, create_execution_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_execution = execution.Execution.get_or_create(
+ resource_id=_TEST_EXECUTION_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ expected_execution = GapicExecution(
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ state=GapicExecution.State.RUNNING,
+ )
+ get_execution_for_get_or_create_mock.assert_called_once_with(
+ name=_TEST_EXECUTION_NAME, retry=base._DEFAULT_RETRY
+ )
+ create_execution_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ execution_id=_TEST_EXECUTION_ID,
+ execution=expected_execution,
+ )
+
+ expected_execution.name = _TEST_EXECUTION_NAME
+ assert my_execution._gca_resource == expected_execution
+
+ def test_get_execution(self, get_execution_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_execution = execution.Execution.get(
+ resource_id=_TEST_EXECUTION_ID,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ expected_execution = GapicExecution(
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ get_execution_mock.assert_called_once_with(
+ name=_TEST_EXECUTION_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ expected_execution.name = _TEST_EXECUTION_NAME
+ assert my_execution._gca_resource == expected_execution
+
+ @pytest.mark.usefixtures("get_execution_mock")
+ @pytest.mark.usefixtures("create_execution_mock")
+ def test_update_execution(self, update_execution_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_execution = execution.Execution._create(
+ resource_id=_TEST_EXECUTION_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+ my_execution.update(metadata=_TEST_UPDATED_METADATA)
+
+ updated_execution = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ state=GapicExecution.State.RUNNING,
+ )
+
+ update_execution_mock.assert_called_once_with(execution=updated_execution)
+ assert my_execution._gca_resource == updated_execution
+
+ @pytest.mark.usefixtures("get_execution_mock")
+ def test_list_executions(self, list_executions_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ filter_query = "test-filter"
+ execution_list = execution.Execution.list(
+ filter=filter_query,
+ metadata_store_id=_TEST_METADATA_STORE,
+ order_by=_TEST_ORDER_BY,
+ )
+
+ expected_execution = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+
+ list_executions_mock.assert_called_once_with(
+ request={
+ "parent": _TEST_PARENT,
+ "filter": filter_query,
+ "order_by": _TEST_ORDER_BY,
+ }
+ )
+ assert len(execution_list) == 2
+ # pylint: disable-next=protected-access
+ assert execution_list[0]._gca_resource == expected_execution
+ # pylint: disable-next=protected-access
+ assert execution_list[1]._gca_resource == expected_execution
+
+ @pytest.mark.usefixtures("get_execution_mock", "get_artifact_mock")
+ def test_add_artifact(self, add_execution_events_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ my_execution = execution.Execution.get_or_create(
+ resource_id=_TEST_EXECUTION_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ my_artifact = aiplatform.Artifact(_TEST_ARTIFACT_ID)
+ my_execution.assign_output_artifacts(artifacts=[my_artifact])
+ add_execution_events_mock.assert_called_once_with(
+ execution=_TEST_EXECUTION_NAME,
+ events=[Event(artifact=_TEST_ARTIFACT_NAME, type_=Event.Type.OUTPUT)],
+ )
+
+ @pytest.mark.usefixtures("get_execution_mock", "get_model_with_version_mock")
+ def test_add_vertex_model(
+ self, add_execution_events_mock, list_vertex_model_artifact_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ my_execution = execution.Execution.get_or_create(
+ resource_id=_TEST_EXECUTION_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ my_model = aiplatform.Model(test_constants.ModelConstants._TEST_MODEL_NAME)
+ my_execution.assign_output_artifacts(artifacts=[my_model])
+
+ list_vertex_model_artifact_mock.assert_called_once_with(
+ request=dict(
+ parent="projects/test-project/locations/us-central1/metadataStores/default",
+ filter=metadata_utils._make_filter_string(
+ schema_title="google.VertexModel", uri=_VERTEX_MODEL_ARTIFACT_URI
+ ),
+ )
+ )
+
+ add_execution_events_mock.assert_called_once_with(
+ execution=_TEST_EXECUTION_NAME,
+ events=[Event(artifact=_TEST_ARTIFACT_NAME, type_=Event.Type.OUTPUT)],
+ )
+
+ @pytest.mark.usefixtures(
+ "get_execution_mock", "get_model_with_version_mock", "get_metadata_store_mock"
+ )
+ def test_add_vertex_model_not_resolved(
+ self, add_execution_events_mock, list_artifact_empty_mock, create_artifact_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ my_execution = execution.Execution.get_or_create(
+ resource_id=_TEST_EXECUTION_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ my_model = aiplatform.Model(test_constants.ModelConstants._TEST_MODEL_NAME)
+ my_execution.assign_output_artifacts(artifacts=[my_model])
+
+ list_artifact_empty_mock.assert_called_once_with(
+ request=dict(
+ parent="projects/test-project/locations/us-central1/metadataStores/default",
+ filter=metadata_utils._make_filter_string(
+ schema_title="google.VertexModel", uri=_VERTEX_MODEL_ARTIFACT_URI
+ ),
+ )
+ )
+
+ expected_artifact = GapicArtifact(
+ schema_title="google.VertexModel",
+ display_name=test_constants.ModelConstants._TEST_MODEL_OBJ_WITH_VERSION.display_name,
+ uri=_VERTEX_MODEL_ARTIFACT_URI,
+ metadata={
+ "resourceName": test_constants.ModelConstants._TEST_MODEL_OBJ_WITH_VERSION.name
+ },
+ state=GapicArtifact.State.LIVE,
+ )
+
+ create_artifact_mock.assert_called_once_with(
+ parent="projects/test-project/locations/us-central1/metadataStores/default",
+ artifact=expected_artifact,
+ artifact_id=None,
+ )
+
+ add_execution_events_mock.assert_called_once_with(
+ execution=_TEST_EXECUTION_NAME,
+ events=[Event(artifact=_TEST_ARTIFACT_NAME, type_=Event.Type.OUTPUT)],
+ )
+
+ @pytest.mark.usefixtures("get_execution_mock")
+ def test_query_input_and_output_artifacts(
+ self, query_execution_inputs_and_outputs_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ my_execution = execution.Execution.get_or_create(
+ resource_id=_TEST_EXECUTION_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ artifact_list = my_execution.get_output_artifacts()
+
+ expected_artifact = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+
+ query_execution_inputs_and_outputs_mock.assert_called_once_with(
+ execution=_TEST_EXECUTION_NAME,
+ )
+ assert len(artifact_list) == 1
+ assert artifact_list[0]._gca_resource == expected_artifact
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestArtifact:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_artifact(self, get_artifact_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ artifact.Artifact(artifact_name=_TEST_ARTIFACT_NAME)
+ get_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_artifact_with_id(self, get_artifact_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ artifact.Artifact(
+ artifact_name=_TEST_ARTIFACT_ID, metadata_store_id=_TEST_METADATA_STORE
+ )
+ get_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_extract_metadata_store_id_with_valid_resource_name(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ assert "store_id" == artifact.Artifact._extract_metadata_store_id(
+ resource_name="projects/project/locations/location/metadataStores/store_id/resource_type/resource_id",
+ resource_noun="resource_type",
+ )
+ assert "store_id" == artifact.Artifact._extract_metadata_store_id(
+ resource_name="projects/project/locations/location/metadataStores/store_id/resource_type/resource_id@version",
+ resource_noun="resource_type",
+ )
+
+ def test_extract_metadata_store_id_with_invalid_resource_name(self):
+ invalid_resouce_name = (
+ "projects/project/locations/location/resource_type/resource_id/"
+ )
+ with pytest.raises(ValueError):
+ artifact.Artifact._extract_metadata_store_id(
+ resource_name=invalid_resouce_name, resource_noun="resource_type"
+ )
+
+ def test_get_or_create_artifact(
+ self, get_artifact_for_get_or_create_mock, create_artifact_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_artifact = artifact.Artifact.get_or_create(
+ resource_id=_TEST_ARTIFACT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ expected_artifact = GapicArtifact(
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ state=GapicArtifact.State.LIVE,
+ )
+ get_artifact_for_get_or_create_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+ create_artifact_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ artifact_id=_TEST_ARTIFACT_ID,
+ artifact=expected_artifact,
+ )
+
+ expected_artifact.name = _TEST_ARTIFACT_NAME
+ assert my_artifact._gca_resource == expected_artifact
+
+ def test_get_artifact(self, get_artifact_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_artifact = artifact.Artifact.get(
+ resource_id=_TEST_ARTIFACT_ID,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+
+ expected_artifact = GapicArtifact(
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ get_artifact_mock.assert_called_once_with(
+ name=_TEST_ARTIFACT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ expected_artifact.name = _TEST_ARTIFACT_NAME
+ assert my_artifact._gca_resource == expected_artifact
+
+ @pytest.mark.usefixtures("get_artifact_mock")
+ @pytest.mark.usefixtures("create_artifact_mock")
+ def test_update_artifact(self, update_artifact_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_artifact = artifact.Artifact._create(
+ resource_id=_TEST_ARTIFACT_ID,
+ schema_title=_TEST_SCHEMA_TITLE,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ metadata_store_id=_TEST_METADATA_STORE,
+ )
+ my_artifact.update(metadata=_TEST_UPDATED_METADATA)
+
+ updated_artifact = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ state=GapicArtifact.State.LIVE,
+ )
+
+ update_artifact_mock.assert_called_once_with(artifact=updated_artifact)
+ assert my_artifact._gca_resource == updated_artifact
+
+ @pytest.mark.usefixtures("get_artifact_mock")
+ def test_list_artifacts(self, list_artifacts_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ filter_query = "test-filter"
+ artifact_list = artifact.Artifact.list(
+ filter=filter_query,
+ metadata_store_id=_TEST_METADATA_STORE,
+ order_by=_TEST_ORDER_BY,
+ )
+
+ expected_artifact = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+
+ list_artifacts_mock.assert_called_once_with(
+ request={
+ "parent": _TEST_PARENT,
+ "filter": filter_query,
+ "order_by": _TEST_ORDER_BY,
+ }
+ )
+ assert len(artifact_list) == 2
+ # pylint: disable-next=protected-access
+ assert artifact_list[0]._gca_resource == expected_artifact
+ # pylint: disable-next=protected-access
+ assert artifact_list[1]._gca_resource == expected_artifact
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_schema.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..8251e91332d89e876d1b7b77f0f58866e5f4eb52
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_schema.py
@@ -0,0 +1,1587 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import json
+import pytest
+
+from importlib import reload
+from unittest import mock
+from unittest.mock import patch
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.compat.types import artifact as gca_artifact
+from google.cloud.aiplatform.compat.types import execution as gca_execution
+from google.cloud.aiplatform.metadata import metadata
+from google.cloud.aiplatform.metadata.schema import base_artifact
+from google.cloud.aiplatform.metadata.schema import base_execution
+from google.cloud.aiplatform.metadata.schema import base_context
+from google.cloud.aiplatform.metadata.schema.google import (
+ artifact_schema as google_artifact_schema,
+)
+from google.cloud.aiplatform.metadata.schema.system import (
+ artifact_schema as system_artifact_schema,
+)
+from google.cloud.aiplatform.metadata.schema.system import (
+ context_schema as system_context_schema,
+)
+from google.cloud.aiplatform.metadata.schema.system import (
+ execution_schema as system_execution_schema,
+)
+from google.cloud.aiplatform.metadata.schema import utils
+from google.cloud.aiplatform_v1 import MetadataServiceClient
+from google.cloud.aiplatform_v1 import Artifact as GapicArtifact
+from google.cloud.aiplatform_v1 import Execution as GapicExecution
+
+
+# project
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_METADATA_STORE = "test-metadata-store"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+
+# resource attributes
+_TEST_ARTIFACT_STATE = gca_artifact.Artifact.State.STATE_UNSPECIFIED
+_TEST_EXECUTION_STATE = gca_execution.Execution.State.STATE_UNSPECIFIED
+_TEST_URI = "test-uri"
+_TEST_DISPLAY_NAME = "test-display-name"
+_TEST_SCHEMA_TITLE = "test.Example"
+_TEST_SCHEMA_VERSION = "0.0.1"
+_TEST_DESCRIPTION = "test description"
+_TEST_METADATA = {"test-param1": 1, "test-param2": "test-value", "test-param3": True}
+_TEST_UPDATED_METADATA = {
+ "test-param1": 2.0,
+ "test-param2": "test-value-1",
+ "test-param3": False,
+}
+
+# artifact
+_TEST_ARTIFACT_ID = "test-artifact-id"
+_TEST_ARTIFACT_NAME = f"{_TEST_PARENT}/metadataStores/{_TEST_METADATA_STORE}/artifacts/{_TEST_ARTIFACT_ID}"
+
+# execution
+_TEST_EXECUTION_ID = "test-execution-id"
+_TEST_EXECUTION_NAME = f"{_TEST_PARENT}/metadataStores/{_TEST_METADATA_STORE}/executions/{_TEST_EXECUTION_ID}"
+
+# context
+_TEST_CONTEXT_ID = "test-context-id"
+_TEST_CONTEXT_NAME = (
+ f"{_TEST_PARENT}/metadataStores/{_TEST_METADATA_STORE}/contexts/{_TEST_CONTEXT_ID}"
+)
+
+
+@pytest.fixture
+def get_artifact_mock():
+ with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock:
+ get_artifact_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ state=GapicArtifact.State.STATE_UNSPECIFIED,
+ )
+ yield get_artifact_mock
+
+
+@pytest.fixture
+def initializer_create_client_mock():
+ with patch.object(
+ initializer.global_config, "create_client"
+ ) as initializer_create_client_mock:
+ yield initializer_create_client_mock
+
+
+@pytest.fixture
+def base_artifact_init_with_resouce_name_mock():
+ with patch.object(
+ base_artifact.BaseArtifactSchema, "_init_with_resource_name"
+ ) as base_artifact_init_with_resouce_name_mock:
+ yield base_artifact_init_with_resouce_name_mock
+
+
+@pytest.fixture
+def base_execution_init_with_resouce_name_mock():
+ with patch.object(
+ base_execution.BaseExecutionSchema, "_init_with_resource_name"
+ ) as base_execution_init_with_resouce_name_mock:
+ yield base_execution_init_with_resouce_name_mock
+
+
+@pytest.fixture
+def base_context_init_with_resouce_name_mock():
+ with patch.object(
+ base_context.BaseContextSchema, "_init_with_resource_name"
+ ) as base_context_init_with_resouce_name_mock:
+ yield base_context_init_with_resouce_name_mock
+
+
+@pytest.fixture
+def get_execution_mock():
+ with patch.object(MetadataServiceClient, "get_execution") as get_execution_mock:
+ get_execution_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ state=GapicExecution.State.RUNNING,
+ )
+ yield get_execution_mock
+
+
+@pytest.fixture
+def get_context_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.return_value = GapicExecution(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ yield get_context_mock
+
+
+@pytest.fixture
+def create_artifact_mock():
+ with patch.object(MetadataServiceClient, "create_artifact") as create_artifact_mock:
+ create_artifact_mock.return_value = GapicArtifact(
+ name=_TEST_ARTIFACT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ state=GapicArtifact.State.STATE_UNSPECIFIED,
+ )
+ yield create_artifact_mock
+
+
+@pytest.fixture
+def create_execution_mock():
+ with patch.object(
+ MetadataServiceClient, "create_execution"
+ ) as create_execution_mock:
+ create_execution_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ state=GapicExecution.State.RUNNING,
+ )
+ yield create_execution_mock
+
+
+@pytest.fixture
+def create_context_mock():
+ with patch.object(MetadataServiceClient, "create_context") as create_context_mock:
+ create_context_mock.return_value = GapicExecution(
+ name=_TEST_CONTEXT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_title=_TEST_SCHEMA_TITLE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_METADATA,
+ )
+ yield create_context_mock
+
+
+@pytest.fixture
+def list_artifacts_mock():
+ with patch.object(MetadataServiceClient, "list_artifacts") as list_artifacts_mock:
+ list_artifacts_mock.return_value = []
+ yield list_artifacts_mock
+
+
+@pytest.fixture
+def list_executions_mock():
+ with patch.object(MetadataServiceClient, "list_executions") as list_executions_mock:
+ list_executions_mock.return_value = []
+ yield list_executions_mock
+
+
+@pytest.fixture
+def list_contexts_mock():
+ with patch.object(MetadataServiceClient, "list_contexts") as list_contexts_mock:
+ list_contexts_mock.return_value = []
+ yield list_contexts_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadataBaseArtifactSchema:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_base_class_instatiated_uses_schema_title(self):
+ class TestArtifact(base_artifact.BaseArtifactSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ artifact = TestArtifact()
+ assert artifact.schema_title == _TEST_SCHEMA_TITLE
+
+ def test_base_class_print_output(self, capsys):
+ class TestArtifact(base_artifact.BaseArtifactSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ artifact = TestArtifact()
+ print(artifact)
+ captured = capsys.readouterr()
+ assert (
+ captured.out
+ == f"{object.__repr__(artifact)}\n"
+ + f"schema_title: {_TEST_SCHEMA_TITLE}\n"
+ )
+
+ def test_base_class_inherited_methods_error(self):
+ class TestArtifact(base_artifact.BaseArtifactSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ artifact = TestArtifact()
+
+ with pytest.raises(RuntimeError) as exception:
+ artifact.resource_name
+ assert str(exception.value) == "TestArtifact resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ artifact.lineage_console_uri
+ assert str(exception.value) == "TestArtifact resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ artifact.sync_resource()
+ assert str(exception.value) == "TestArtifact resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ artifact.update(
+ metadata=_TEST_UPDATED_METADATA,
+ description=_TEST_DESCRIPTION,
+ )
+ assert str(exception.value) == "TestArtifact resource has not been created."
+
+ def test_base_class_parameters_overrides_default_values(self):
+ class TestArtifact(base_artifact.BaseArtifactSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ artifact = TestArtifact(
+ state=_TEST_ARTIFACT_STATE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ artifact_id=_TEST_ARTIFACT_ID,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ assert artifact.state == _TEST_ARTIFACT_STATE
+ assert artifact.state == _TEST_ARTIFACT_STATE
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.schema_title == _TEST_SCHEMA_TITLE
+ assert artifact.uri == _TEST_URI
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert artifact.metadata == _TEST_UPDATED_METADATA
+
+ def test_base_class_without_schema_title_raises_error(self):
+ with pytest.raises(TypeError):
+ base_artifact.BaseArtifactSchema()
+
+ @pytest.mark.usefixtures("create_artifact_mock", "get_artifact_mock")
+ def test_create_is_called_with_default_parameters(self, create_artifact_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestArtifact(base_artifact.BaseArtifactSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ artifact = TestArtifact(
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ state=_TEST_ARTIFACT_STATE,
+ )
+ artifact.create(metadata_store_id=_TEST_METADATA_STORE)
+ create_artifact_mock.assert_called_once_with(
+ parent=f"{_TEST_PARENT}/metadataStores/{_TEST_METADATA_STORE}",
+ artifact=mock.ANY,
+ artifact_id=None,
+ )
+ _, _, kwargs = create_artifact_mock.mock_calls[0]
+ assert kwargs["artifact"].schema_title == _TEST_SCHEMA_TITLE
+ assert kwargs["artifact"].uri == _TEST_URI
+ assert kwargs["artifact"].display_name == _TEST_DISPLAY_NAME
+ assert kwargs["artifact"].description == _TEST_DESCRIPTION
+ assert kwargs["artifact"].metadata == _TEST_UPDATED_METADATA
+ assert kwargs["artifact"].state == _TEST_ARTIFACT_STATE
+
+ @pytest.mark.usefixtures(
+ "base_artifact_init_with_resouce_name_mock",
+ "initializer_create_client_mock",
+ "create_artifact_mock",
+ "get_artifact_mock",
+ )
+ def test_artifact_create_call_sets_the_user_agent_header(
+ self, initializer_create_client_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestArtifact(base_artifact.BaseArtifactSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ artifact = TestArtifact(
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ state=_TEST_ARTIFACT_STATE,
+ )
+ artifact.create()
+ _, _, kwargs = initializer_create_client_mock.mock_calls[0]
+ assert kwargs["appended_user_agent"] == [
+ "sdk_command/aiplatform.metadata.schema.base_artifact.BaseArtifactSchema.create"
+ ]
+
+ @pytest.mark.usefixtures(
+ "initializer_create_client_mock",
+ "create_artifact_mock",
+ "get_artifact_mock",
+ )
+ def test_artifact_init_call_sets_the_user_agent_header(
+ self, initializer_create_client_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestArtifact(base_artifact.BaseArtifactSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ artifact = TestArtifact(
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ state=_TEST_ARTIFACT_STATE,
+ )
+ artifact._init_with_resource_name(artifact_name=_TEST_ARTIFACT_NAME)
+ _, _, kwargs = initializer_create_client_mock.mock_calls[0]
+ assert kwargs["appended_user_agent"] == [
+ "sdk_command/aiplatform.metadata.schema.base_artifact.BaseArtifactSchema._init_with_resource_name"
+ ]
+
+ def test_list_artifacts(self, list_artifacts_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ class TestArtifact(base_artifact.BaseArtifactSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ TestArtifact.list()
+ list_artifacts_mock.assert_called_once_with(
+ request={
+ "parent": f"{_TEST_PARENT}/metadataStores/default",
+ "filter": f'schema_title="{_TEST_SCHEMA_TITLE}"',
+ }
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadataBaseExecutionSchema:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_base_class_overrides_default_schema_title(self):
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ execution = TestExecution()
+ assert execution.schema_title == _TEST_SCHEMA_TITLE
+
+ def test_base_class_print_output(self, capsys):
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ execution = TestExecution()
+ print(execution)
+ captured = capsys.readouterr()
+ assert (
+ captured.out
+ == f"{object.__repr__(execution)}\n"
+ + f"schema_title: {_TEST_SCHEMA_TITLE}\n"
+ )
+
+ def test_base_class_inherited_methods_error(self):
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ execution = TestExecution()
+
+ with pytest.raises(RuntimeError) as exception:
+ execution.resource_name
+ assert str(exception.value) == "TestExecution resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ execution.assign_input_artifacts(artifacts=[_TEST_ARTIFACT_NAME])
+ assert str(exception.value) == "TestExecution resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ execution.assign_output_artifacts(artifacts=[_TEST_ARTIFACT_NAME])
+ assert str(exception.value) == "TestExecution resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ execution.get_input_artifacts()
+ assert str(exception.value) == "TestExecution resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ execution.get_output_artifacts()
+ assert str(exception.value) == "TestExecution resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ execution.update(
+ state=_TEST_EXECUTION_STATE,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ assert str(exception.value) == "TestExecution resource has not been created."
+
+ def test_base_class_parameters_overrides_default_values(self):
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ execution = TestExecution(
+ state=_TEST_EXECUTION_STATE,
+ schema_version=_TEST_SCHEMA_VERSION,
+ execution_id=_TEST_EXECUTION_ID,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ assert execution.state == _TEST_EXECUTION_STATE
+ assert execution.schema_version == _TEST_SCHEMA_VERSION
+ assert execution.execution_id == _TEST_EXECUTION_ID
+ assert execution.schema_title == _TEST_SCHEMA_TITLE
+ assert execution.display_name == _TEST_DISPLAY_NAME
+ assert execution.description == _TEST_DESCRIPTION
+ assert execution.metadata == _TEST_UPDATED_METADATA
+
+ def test_base_class_without_schema_title_raises_error(self):
+ with pytest.raises(TypeError):
+ base_execution.BaseExecutionSchema()
+
+ @pytest.mark.usefixtures("create_execution_mock", "get_execution_mock")
+ def test_create_method_calls_gapic_library_with_correct_parameters(
+ self, create_execution_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ execution = TestExecution(
+ state=_TEST_EXECUTION_STATE,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ execution.create(metadata_store_id=_TEST_METADATA_STORE)
+ create_execution_mock.assert_called_once_with(
+ parent=f"{_TEST_PARENT}/metadataStores/{_TEST_METADATA_STORE}",
+ execution=mock.ANY,
+ execution_id=None,
+ )
+ _, _, kwargs = create_execution_mock.mock_calls[0]
+ assert kwargs["execution"].schema_title == _TEST_SCHEMA_TITLE
+ assert kwargs["execution"].state == _TEST_EXECUTION_STATE
+ assert kwargs["execution"].display_name == _TEST_DISPLAY_NAME
+ assert kwargs["execution"].description == _TEST_DESCRIPTION
+ assert kwargs["execution"].metadata == _TEST_UPDATED_METADATA
+
+ @pytest.mark.usefixtures(
+ "base_execution_init_with_resouce_name_mock",
+ "initializer_create_client_mock",
+ "create_execution_mock",
+ "get_execution_mock",
+ )
+ def test_execution_create_call_sets_the_user_agent_header(
+ self, initializer_create_client_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ execution = TestExecution(
+ state=_TEST_EXECUTION_STATE,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ execution.create(metadata_store_id=_TEST_METADATA_STORE)
+ _, _, kwargs = initializer_create_client_mock.mock_calls[0]
+ assert kwargs["appended_user_agent"] == [
+ "sdk_command/aiplatform.metadata.schema.base_execution.BaseExecutionSchema.create"
+ ]
+
+ @pytest.mark.usefixtures(
+ "base_execution_init_with_resouce_name_mock",
+ "initializer_create_client_mock",
+ "create_execution_mock",
+ "get_execution_mock",
+ )
+ def test_execution_start_execution_call_sets_the_user_agent_header(
+ self, initializer_create_client_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ execution = TestExecution(
+ state=_TEST_EXECUTION_STATE,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ execution.start_execution()
+ _, _, kwargs = initializer_create_client_mock.mock_calls[0]
+ assert kwargs["appended_user_agent"] == [
+ "sdk_command/aiplatform.metadata.schema.base_execution.BaseExecutionSchema.start_execution"
+ ]
+
+ @pytest.mark.usefixtures(
+ "initializer_create_client_mock",
+ "create_execution_mock",
+ "get_execution_mock",
+ )
+ def test_execution_init_call_sets_the_user_agent_header(
+ self, initializer_create_client_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ execution = TestExecution(
+ state=_TEST_EXECUTION_STATE,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ execution._init_with_resource_name(execution_name=_TEST_EXECUTION_NAME)
+ _, _, kwargs = initializer_create_client_mock.mock_calls[0]
+ assert kwargs["appended_user_agent"] == [
+ "sdk_command/aiplatform.metadata.schema.base_execution.BaseExecutionSchema._init_with_resource_name"
+ ]
+
+ def test_list_executions(self, list_executions_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ TestExecution.list()
+ list_executions_mock.assert_called_once_with(
+ request={
+ "parent": f"{_TEST_PARENT}/metadataStores/default",
+ "filter": f'schema_title="{_TEST_SCHEMA_TITLE}"',
+ }
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadataBaseContextSchema:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_base_context_class_instatiated_uses_schema_title(self):
+ class TestContext(base_context.BaseContextSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ context = TestContext()
+ assert context.schema_title == _TEST_SCHEMA_TITLE
+
+ def test_base_class_print_output(self, capsys):
+ class TestContext(base_context.BaseContextSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ context = TestContext()
+ print(context)
+ captured = capsys.readouterr()
+ assert (
+ captured.out
+ == f"{object.__repr__(context)}\n" + f"schema_title: {_TEST_SCHEMA_TITLE}\n"
+ )
+
+ def test_base_class_inherited_methods_error(self):
+ class TestContext(base_context.BaseContextSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ context = TestContext()
+
+ with pytest.raises(RuntimeError) as exception:
+ context.resource_name
+ assert str(exception.value) == "TestContext resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ context.add_artifacts_and_executions(
+ artifact_resource_names=[_TEST_ARTIFACT_NAME],
+ )
+ assert str(exception.value) == "TestContext resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ context.get_artifacts()
+ assert str(exception.value) == "TestContext resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ context.add_context_children(
+ contexts=[_TEST_CONTEXT_NAME],
+ )
+ assert str(exception.value) == "TestContext resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ context.query_lineage_subgraph()
+ assert str(exception.value) == "TestContext resource has not been created."
+
+ with pytest.raises(RuntimeError) as exception:
+ context.get_executions()
+ assert str(exception.value) == "TestContext resource has not been created."
+
+ def test_base_context_class_parameters_overrides_default_values(self):
+ class TestContext(base_context.BaseContextSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ context = TestContext(
+ schema_version=_TEST_SCHEMA_VERSION,
+ context_id=_TEST_CONTEXT_ID,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ assert context.schema_version == _TEST_SCHEMA_VERSION
+ assert context.context_id == _TEST_CONTEXT_ID
+ assert context.schema_title == _TEST_SCHEMA_TITLE
+ assert context.display_name == _TEST_DISPLAY_NAME
+ assert context.description == _TEST_DESCRIPTION
+ assert context.metadata == _TEST_UPDATED_METADATA
+
+ def test_base_context_class_without_schema_title_raises_error(self):
+ with pytest.raises(TypeError):
+ base_context.BaseContextSchema()
+
+ @pytest.mark.usefixtures("create_context_mock", "get_context_mock")
+ def test_base_context_create_is_called_with_default_parameters(
+ self, create_context_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestContext(base_context.BaseContextSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ context = TestContext(
+ schema_version=_TEST_SCHEMA_VERSION,
+ context_id=_TEST_CONTEXT_ID,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ context.create(metadata_store_id=_TEST_METADATA_STORE)
+ create_context_mock.assert_called_once_with(
+ parent=f"{_TEST_PARENT}/metadataStores/{_TEST_METADATA_STORE}",
+ context=mock.ANY,
+ context_id=_TEST_CONTEXT_ID,
+ )
+ _, _, kwargs = create_context_mock.mock_calls[0]
+ assert kwargs["context"].schema_title == _TEST_SCHEMA_TITLE
+ assert kwargs["context"].display_name == _TEST_DISPLAY_NAME
+ assert kwargs["context"].description == _TEST_DESCRIPTION
+ assert kwargs["context"].metadata == _TEST_UPDATED_METADATA
+
+ @pytest.mark.usefixtures(
+ "base_context_init_with_resouce_name_mock",
+ "initializer_create_client_mock",
+ "create_context_mock",
+ "get_context_mock",
+ )
+ def test_base_context_create_call_sets_the_user_agent_header(
+ self, initializer_create_client_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestContext(base_context.BaseContextSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ context = TestContext(
+ schema_version=_TEST_SCHEMA_VERSION,
+ context_id=_TEST_CONTEXT_ID,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ context.create()
+ _, _, kwargs = initializer_create_client_mock.mock_calls[0]
+ assert kwargs["appended_user_agent"] == [
+ "sdk_command/aiplatform.metadata.schema.base_context.BaseContextSchema.create"
+ ]
+
+ @pytest.mark.usefixtures(
+ "initializer_create_client_mock",
+ "create_context_mock",
+ "get_context_mock",
+ )
+ def test_base_context_init_call_sets_the_user_agent_header(
+ self, initializer_create_client_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestContext(base_context.BaseContextSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ context = TestContext(
+ schema_version=_TEST_SCHEMA_VERSION,
+ context_id=_TEST_CONTEXT_ID,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ context._init_with_resource_name(context_name=_TEST_CONTEXT_NAME)
+ _, _, kwargs = initializer_create_client_mock.mock_calls[0]
+ assert kwargs["appended_user_agent"] == [
+ "sdk_command/aiplatform.metadata.schema.base_context.BaseContextSchema._init_with_resource_name"
+ ]
+
+ def test_list_contexts(self, list_contexts_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ class TestContext(base_context.BaseContextSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ TestContext.list()
+ list_contexts_mock.assert_called_once_with(
+ request={
+ "parent": f"{_TEST_PARENT}/metadataStores/default",
+ "filter": f'schema_title="{_TEST_SCHEMA_TITLE}"',
+ }
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadataGoogleArtifactSchema:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_vertex_dataset_schema_title_is_set_correctly(self):
+ artifact = google_artifact_schema.VertexDataset(
+ vertex_dataset_name=_TEST_ARTIFACT_NAME,
+ )
+ assert artifact.schema_title == "google.VertexDataset"
+
+ def test_vertex_dataset_constructor_parameters_are_set_correctly(self):
+ artifact = google_artifact_schema.VertexDataset(
+ vertex_dataset_name=f"{_TEST_PARENT}/datasets/dataset-id",
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata={},
+ )
+ assert (
+ artifact.uri
+ == "https://us-central1-aiplatform.googleapis.com/v1/projects/test-project/locations/us-central1/datasets/dataset-id"
+ )
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert artifact.metadata == {
+ "resourceName": "projects/test-project/locations/us-central1/datasets/dataset-id"
+ }
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_vertex_model_schema_title_is_set_correctly(self):
+ artifact = google_artifact_schema.VertexModel(
+ vertex_model_name=_TEST_ARTIFACT_NAME,
+ )
+ assert artifact.schema_title == "google.VertexModel"
+
+ def test_vertex_model_constructor_parameters_are_set_correctly(self):
+ artifact = google_artifact_schema.VertexModel(
+ vertex_model_name=f"{_TEST_PARENT}/models/model-id",
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata={},
+ )
+ assert (
+ artifact.uri
+ == "https://us-central1-aiplatform.googleapis.com/v1/projects/test-project/locations/us-central1/models/model-id"
+ )
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert artifact.metadata == {
+ "resourceName": "projects/test-project/locations/us-central1/models/model-id"
+ }
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_vertex_endpoint_schema_title_is_set_correctly(self):
+ artifact = google_artifact_schema.VertexEndpoint(
+ vertex_endpoint_name=_TEST_ARTIFACT_NAME,
+ )
+ assert artifact.schema_title == "google.VertexEndpoint"
+
+ def test_vertex_endpoint_constructor_parameters_are_set_correctly(self):
+ artifact = google_artifact_schema.VertexEndpoint(
+ vertex_endpoint_name=f"{_TEST_PARENT}/endpoints/endpoint-id",
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata={},
+ )
+ assert (
+ artifact.uri
+ == "https://us-central1-aiplatform.googleapis.com/v1/projects/test-project/locations/us-central1/endpoints/endpoint-id"
+ )
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert artifact.metadata == {
+ "resourceName": "projects/test-project/locations/us-central1/endpoints/endpoint-id"
+ }
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_unmanaged_container_model_title_is_set_correctly(self):
+ predict_schemata = utils.PredictSchemata(
+ instance_schema_uri="instance_uri",
+ prediction_schema_uri="prediction_uri",
+ parameters_schema_uri="parameters_uri",
+ )
+
+ container_spec = utils.ContainerSpec(
+ image_uri="gcr.io/test_container_image_uri"
+ )
+ artifact = google_artifact_schema.UnmanagedContainerModel(
+ predict_schemata=predict_schemata,
+ container_spec=container_spec,
+ )
+ assert artifact.schema_title == "google.UnmanagedContainerModel"
+
+ def test_unmanaged_container_model_constructor_parameters_are_set_correctly(self):
+ predict_schemata = utils.PredictSchemata(
+ instance_schema_uri="instance_uri",
+ prediction_schema_uri="prediction_uri",
+ parameters_schema_uri="parameters_uri",
+ )
+
+ container_spec = utils.ContainerSpec(
+ image_uri="gcr.io/test_container_image_uri"
+ )
+
+ artifact = google_artifact_schema.UnmanagedContainerModel(
+ predict_schemata=predict_schemata,
+ container_spec=container_spec,
+ artifact_id=_TEST_ARTIFACT_ID,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ expected_metadata = {
+ "test-param1": 2.0,
+ "test-param2": "test-value-1",
+ "test-param3": False,
+ "predictSchemata": {
+ "instanceSchemaUri": "instance_uri",
+ "parametersSchemaUri": "parameters_uri",
+ "predictionSchemaUri": "prediction_uri",
+ },
+ "containerSpec": {"imageUri": "gcr.io/test_container_image_uri"},
+ }
+
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.uri == _TEST_URI
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
+ expected_metadata, sort_keys=True
+ )
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_classification_metrics_title_is_set_correctly(self):
+ artifact = google_artifact_schema.ClassificationMetrics()
+ assert artifact.schema_title == "google.ClassificationMetrics"
+
+ def test_classification_metrics_constructor_parameters_are_set_correctly(self):
+ aggregation_type = "MACRO_AVERAGE"
+ aggregation_threshold = 0.5
+ recall = 0.5
+ precision = 0.5
+ f1_score = 0.5
+ accuracy = 0.5
+ au_prc = 1.0
+ au_roc = 2.0
+ log_loss = 0.5
+ confusion_matrix = utils.ConfusionMatrix(
+ matrix=[[9.0, 1.0], [1.0, 9.0]],
+ annotation_specs=[
+ utils.AnnotationSpec(display_name="cat"),
+ utils.AnnotationSpec(display_name="dog"),
+ ],
+ )
+ confidence_metrics = [
+ utils.ConfidenceMetric(
+ confidence_threshold=0.9, recall=0.1, false_positive_rate=0.1
+ ),
+ utils.ConfidenceMetric(
+ confidence_threshold=0.5, recall=0.5, false_positive_rate=0.7
+ ),
+ utils.ConfidenceMetric(
+ confidence_threshold=0.1, recall=0.9, false_positive_rate=0.9
+ ),
+ ]
+
+ artifact = google_artifact_schema.ClassificationMetrics(
+ aggregation_type=aggregation_type,
+ aggregation_threshold=aggregation_threshold,
+ recall=recall,
+ precision=precision,
+ f1_score=f1_score,
+ accuracy=accuracy,
+ au_prc=au_prc,
+ au_roc=au_roc,
+ log_loss=log_loss,
+ confusion_matrix=confusion_matrix,
+ confidence_metrics=confidence_metrics,
+ artifact_id=_TEST_ARTIFACT_ID,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ expected_metadata = {
+ "test-param1": _TEST_UPDATED_METADATA["test-param1"],
+ "test-param2": _TEST_UPDATED_METADATA["test-param2"],
+ "test-param3": _TEST_UPDATED_METADATA["test-param3"],
+ "aggregationType": aggregation_type,
+ "aggregationThreshold": aggregation_threshold,
+ "recall": recall,
+ "precision": precision,
+ "f1Score": f1_score,
+ "accuracy": accuracy,
+ "auPrc": au_prc,
+ "auRoc": au_roc,
+ "logLoss": log_loss,
+ "confusionMatrix": confusion_matrix.to_dict(),
+ "confidenceMetrics": [
+ confidence_metric.to_dict() for confidence_metric in confidence_metrics
+ ],
+ }
+
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.uri == _TEST_URI
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
+ expected_metadata, sort_keys=True
+ )
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_classification_metrics_wrong_aggregation_type(self):
+ with pytest.raises(ValueError) as exception:
+ google_artifact_schema.ClassificationMetrics(
+ aggregation_type="unspecified_type"
+ )
+ assert (
+ str(exception.value)
+ == "aggregation_type can only be 'AGGREGATION_TYPE_UNSPECIFIED', 'MACRO_AVERAGE', or 'MICRO_AVERAGE'."
+ )
+
+ def test_regression_metrics_title_is_set_correctly(self):
+ artifact = google_artifact_schema.RegressionMetrics()
+ assert artifact.schema_title == "google.RegressionMetrics"
+
+ def test_regression_metrics_constructor_parameters_are_set_correctly(self):
+ root_mean_squared_error = 1.0
+ mean_absolute_error = 2.0
+ mean_absolute_percentage_error = 0.2
+ r_squared = 0.5
+ root_mean_squared_log_error = 0.9
+
+ artifact = google_artifact_schema.RegressionMetrics(
+ root_mean_squared_error=root_mean_squared_error,
+ mean_absolute_error=mean_absolute_error,
+ mean_absolute_percentage_error=mean_absolute_percentage_error,
+ r_squared=r_squared,
+ root_mean_squared_log_error=root_mean_squared_log_error,
+ artifact_id=_TEST_ARTIFACT_ID,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ expected_metadata = {
+ "test-param1": 2.0,
+ "test-param2": "test-value-1",
+ "test-param3": False,
+ "rootMeanSquaredError": 1.0,
+ "meanAbsoluteError": 2.0,
+ "meanAbsolutePercentageError": 0.2,
+ "rSquared": 0.5,
+ "rootMeanSquaredLogError": 0.9,
+ }
+
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.uri == _TEST_URI
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
+ expected_metadata, sort_keys=True
+ )
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_forecasting_metrics_title_is_set_correctly(self):
+ artifact = google_artifact_schema.ForecastingMetrics()
+ assert artifact.schema_title == "google.ForecastingMetrics"
+
+ def test_forecasting_metrics_constructor_parameters_are_set_correctly(self):
+ root_mean_squared_error = 1.0
+ mean_absolute_error = 2.0
+ mean_absolute_percentage_error = 0.2
+ r_squared = 0.5
+ root_mean_squared_log_error = 0.9
+ weighted_absolute_percentage_error = 4.0
+ root_mean_squared_percentage_error = 0.7
+ symmetric_mean_absolute_percentage_error = 0.8
+
+ artifact = google_artifact_schema.ForecastingMetrics(
+ root_mean_squared_error=root_mean_squared_error,
+ mean_absolute_error=mean_absolute_error,
+ mean_absolute_percentage_error=mean_absolute_percentage_error,
+ r_squared=r_squared,
+ root_mean_squared_log_error=root_mean_squared_log_error,
+ weighted_absolute_percentage_error=weighted_absolute_percentage_error,
+ root_mean_squared_percentage_error=root_mean_squared_percentage_error,
+ symmetric_mean_absolute_percentage_error=symmetric_mean_absolute_percentage_error,
+ artifact_id=_TEST_ARTIFACT_ID,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ expected_metadata = {
+ "test-param1": 2.0,
+ "test-param2": "test-value-1",
+ "test-param3": False,
+ "rootMeanSquaredError": 1.0,
+ "meanAbsoluteError": 2.0,
+ "meanAbsolutePercentageError": 0.2,
+ "rSquared": 0.5,
+ "rootMeanSquaredLogError": 0.9,
+ "weightedAbsolutePercentageError": 4.0,
+ "rootMeanSquaredPercentageError": 0.7,
+ "symmetricMeanAbsolutePercentageError": 0.8,
+ }
+
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.uri == _TEST_URI
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
+ expected_metadata, sort_keys=True
+ )
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_experiment_model_title_is_set_correctly(self):
+ artifact = google_artifact_schema.ExperimentModel(
+ framework_name="sklearn",
+ framework_version="1.0.0",
+ model_file="model.pkl",
+ uri=_TEST_URI,
+ )
+ assert artifact.schema_title == "google.ExperimentModel"
+ assert artifact.framework_name == "sklearn"
+ assert artifact.framework_version == "1.0.0"
+ assert artifact.uri == _TEST_URI
+
+ def test_experiment_model_wrong_metadata_key(self):
+ with pytest.raises(ValueError) as exception:
+ google_artifact_schema.ExperimentModel(
+ framework_name="sklearn",
+ framework_version="1.0.0",
+ model_file="model.pkl",
+ uri=_TEST_URI,
+ metadata={"modelFile": "abc"},
+ )
+ assert (
+ str(exception.value) == "'modelFile' is a system reserved key in metadata."
+ )
+
+ def test_experiment_model_constructor_parameters_are_set_correctly(self):
+ predict_schemata = utils.PredictSchemata(
+ instance_schema_uri="instance_uri",
+ prediction_schema_uri="prediction_uri",
+ parameters_schema_uri="parameters_uri",
+ )
+
+ artifact = google_artifact_schema.ExperimentModel(
+ framework_name="sklearn",
+ framework_version="1.0.0",
+ model_file="model.pkl",
+ model_class="sklearn.linear_model._base.LinearRegression",
+ predict_schemata=predict_schemata,
+ artifact_id=_TEST_ARTIFACT_ID,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ expected_metadata = {
+ "test-param1": 2.0,
+ "test-param2": "test-value-1",
+ "test-param3": False,
+ "frameworkName": "sklearn",
+ "frameworkVersion": "1.0.0",
+ "modelFile": "model.pkl",
+ "modelClass": "sklearn.linear_model._base.LinearRegression",
+ "predictSchemata": {
+ "instanceSchemaUri": "instance_uri",
+ "parametersSchemaUri": "parameters_uri",
+ "predictionSchemaUri": "prediction_uri",
+ },
+ }
+
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.uri == _TEST_URI
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
+ expected_metadata, sort_keys=True
+ )
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadataSystemArtifactSchema:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_system_dataset_schema_title_is_set_correctly(self):
+ artifact = system_artifact_schema.Dataset()
+ assert artifact.schema_title == "system.Dataset"
+
+ def test_system_dataset_constructor_parameters_are_set_correctly(self):
+ artifact = system_artifact_schema.Dataset(
+ uri=_TEST_URI,
+ artifact_id=_TEST_ARTIFACT_ID,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ assert artifact.uri == _TEST_URI
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert artifact.metadata == _TEST_UPDATED_METADATA
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_system_artifact_schema_title_is_set_correctly(self):
+ artifact = system_artifact_schema.Artifact()
+ assert artifact.schema_title == "system.Artifact"
+
+ def test_system_artifact_constructor_parameters_are_set_correctly(self):
+ artifact = system_artifact_schema.Artifact(
+ uri=_TEST_URI,
+ artifact_id=_TEST_ARTIFACT_ID,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ assert artifact.uri == _TEST_URI
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert artifact.metadata == _TEST_UPDATED_METADATA
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_system_model_schema_title_is_set_correctly(self):
+ artifact = system_artifact_schema.Model()
+ assert artifact.schema_title == "system.Model"
+
+ def test_system_model_constructor_parameters_are_set_correctly(self):
+ artifact = system_artifact_schema.Model(
+ uri=_TEST_URI,
+ artifact_id=_TEST_ARTIFACT_ID,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ assert artifact.uri == _TEST_URI
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert artifact.metadata == _TEST_UPDATED_METADATA
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+
+ def test_system_metrics_schema_title_is_set_correctly(self):
+ artifact = system_artifact_schema.Metrics()
+ assert artifact.schema_title == "system.Metrics"
+
+ def test_system_metrics_values_default_to_none(self):
+ artifact = system_artifact_schema.Metrics()
+ assert artifact._gca_resource.metadata is None
+
+ def test_system_metrics_constructor_parameters_are_set_correctly(self):
+ artifact = system_artifact_schema.Metrics(
+ accuracy=0.1,
+ precision=0.2,
+ recall=0.3,
+ f1score=0.4,
+ mean_absolute_error=0.5,
+ mean_squared_error=0.6,
+ artifact_id=_TEST_ARTIFACT_ID,
+ uri=_TEST_URI,
+ display_name=_TEST_DISPLAY_NAME,
+ schema_version=_TEST_SCHEMA_VERSION,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ assert artifact.uri == _TEST_URI
+ assert artifact.artifact_id == _TEST_ARTIFACT_ID
+ assert artifact.display_name == _TEST_DISPLAY_NAME
+ assert artifact.description == _TEST_DESCRIPTION
+ assert artifact.schema_version == _TEST_SCHEMA_VERSION
+ assert artifact.metadata["accuracy"] == 0.1
+ assert artifact.metadata["precision"] == 0.2
+ assert artifact.metadata["recall"] == 0.3
+ assert artifact.metadata["f1score"] == 0.4
+ assert artifact.metadata["mean_absolute_error"] == 0.5
+ assert artifact.metadata["mean_squared_error"] == 0.6
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadataSystemSchemaExecution:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ # Test system.Execution Schemas
+ def test_system_container_execution_schema_title_is_set_correctly(self):
+ execution = system_execution_schema.ContainerExecution()
+ assert execution.schema_title == "system.ContainerExecution"
+
+ def test_system_custom_job_execution_schema_title_is_set_correctly(self):
+ execution = system_execution_schema.CustomJobExecution()
+ assert execution.schema_title == "system.CustomJobExecution"
+
+ def test_system_run_execution_schema_title_is_set_correctly(self):
+ execution = system_execution_schema.Run()
+ assert execution.schema_title == "system.Run"
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadataSystemSchemaContext:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ # Test system.Context Schemas
+ @pytest.mark.usefixtures("create_context_mock", "get_context_mock")
+ def test_create_is_called_with_default_parameters(self, create_context_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestContext(base_context.BaseContextSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ context = TestContext(
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ context.create(metadata_store_id=_TEST_METADATA_STORE)
+ create_context_mock.assert_called_once_with(
+ parent=f"{_TEST_PARENT}/metadataStores/{_TEST_METADATA_STORE}",
+ context=mock.ANY,
+ context_id=None,
+ )
+ _, _, kwargs = create_context_mock.mock_calls[0]
+ assert kwargs["context"].schema_title == _TEST_SCHEMA_TITLE
+ assert kwargs["context"].display_name == _TEST_DISPLAY_NAME
+ assert kwargs["context"].description == _TEST_DESCRIPTION
+ assert kwargs["context"].metadata == _TEST_UPDATED_METADATA
+
+ def test_system_experiment_schema_title_is_set_correctly(self):
+ context = system_context_schema.Experiment()
+ assert context.schema_title == "system.Experiment"
+
+ def test_system_experiment_run_schema_title_is_set_correctly(self):
+ context = system_context_schema.ExperimentRun()
+ assert context.schema_title == "system.ExperimentRun"
+
+ def test_system_experiment_run_parameters_are_set_correctly(self):
+ context = system_context_schema.ExperimentRun(experiment_id=_TEST_CONTEXT_ID)
+ assert context.metadata["experiment_id"] == _TEST_CONTEXT_ID
+
+ def test_system_pipeline_schema_title_is_set_correctly(self):
+ context = system_context_schema.Pipeline()
+ assert context.schema_title == "system.Pipeline"
+
+ def test_system_pipeline_run_schema_title_is_set_correctly(self):
+ context = system_context_schema.PipelineRun()
+ assert context.schema_title == "system.PipelineRun"
+
+ def test_system_pipeline_run_parameters_are_set_correctly(self):
+ context = system_context_schema.PipelineRun(pipeline_id=_TEST_CONTEXT_ID)
+ assert context.metadata["pipeline_id"] == _TEST_CONTEXT_ID
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadataUtils:
+ def setup_method(self):
+ reload(initializer)
+ reload(metadata)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_predict_schemata_to_dict_method_returns_correct_schema(self):
+ predict_schemata = utils.PredictSchemata(
+ instance_schema_uri="instance_uri",
+ prediction_schema_uri="prediction_uri",
+ parameters_schema_uri="parameters_uri",
+ )
+ expected_results = {
+ "instanceSchemaUri": "instance_uri",
+ "parametersSchemaUri": "parameters_uri",
+ "predictionSchemaUri": "prediction_uri",
+ }
+
+ assert json.dumps(predict_schemata.to_dict()) == json.dumps(expected_results)
+
+ def test_create_uri_from_resource_name_for_valid_resouce_names(self):
+ valid_resouce_names = [
+ "projects/project/locations/location/resource_type/resource_id",
+ "projects/project/locations/location/resource_type/resource_id@version",
+ "projects/project/locations/location/metadataStores/store_id/resource_type/resource_id",
+ "projects/project/locations/location/metadataStores/store_id/resource_type/resource_id@version",
+ ]
+ for resouce_name in valid_resouce_names:
+ uri = utils.create_uri_from_resource_name(resource_name=resouce_name)
+ assert (
+ uri == f"https://location-aiplatform.googleapis.com/v1/{resouce_name}"
+ )
+
+ def test_create_uri_from_resource_name_for_invalid_resouce_names(self):
+ invalid_resouce_name = (
+ "projects/project/locations/location/resource_type/resource_id/"
+ )
+ with pytest.raises(ValueError):
+ utils.create_uri_from_resource_name(resource_name=invalid_resouce_name)
+
+ def test_container_spec_to_dict_method_returns_correct_schema(self):
+ container_spec = utils.ContainerSpec(
+ image_uri="gcr.io/some_container_image_uri",
+ command=["test_command"],
+ args=["test_args"],
+ env=[{"env_var_name": "env_var_value"}],
+ ports=[1],
+ predict_route="test_prediction_rout",
+ health_route="test_health_rout",
+ )
+
+ expected_results = {
+ "imageUri": "gcr.io/some_container_image_uri",
+ "command": ["test_command"],
+ "args": ["test_args"],
+ "env": [{"env_var_name": "env_var_value"}],
+ "ports": [1],
+ "predictRoute": "test_prediction_rout",
+ "healthRoute": "test_health_rout",
+ }
+
+ assert json.dumps(container_spec.to_dict()) == json.dumps(expected_results)
+
+ def test_annotation_spec_to_dict_method_returns_correct_schema(self):
+ annotation_spec = utils.AnnotationSpec(
+ display_name="test_display_name",
+ id="test_annotation_id",
+ )
+
+ expected_results = {
+ "displayName": "test_display_name",
+ "id": "test_annotation_id",
+ }
+
+ assert json.dumps(annotation_spec.to_dict(), sort_keys=True) == json.dumps(
+ expected_results, sort_keys=True
+ )
+
+ def test_confusion_matrix_to_dict_method_returns_correct_schema(self):
+ confusion_matrix = utils.ConfusionMatrix(
+ matrix=[[9, 1], [1, 9]],
+ annotation_specs=[
+ utils.AnnotationSpec(display_name="cat"),
+ utils.AnnotationSpec(display_name="dog"),
+ ],
+ )
+
+ expected_results = {
+ "rows": [[9, 1], [1, 9]],
+ "annotationSpecs": [
+ {"displayName": "cat"},
+ {"displayName": "dog"},
+ ],
+ }
+
+ assert json.dumps(confusion_matrix.to_dict(), sort_keys=True) == json.dumps(
+ expected_results, sort_keys=True
+ )
+
+ def test_confusion_matrix_to_dict_method_length_error(self):
+ confusion_matrix = utils.ConfusionMatrix(
+ matrix=[[9, 1], [1, 9]],
+ annotation_specs=[
+ utils.AnnotationSpec(display_name="cat"),
+ utils.AnnotationSpec(display_name="dog"),
+ utils.AnnotationSpec(display_name="bird"),
+ ],
+ )
+
+ with pytest.raises(ValueError) as exception:
+ confusion_matrix.to_dict()
+ assert (
+ str(exception.value)
+ == "Length of annotation_specs and matrix must be the same. Got lengths 3 and 2 respectively."
+ )
+
+ def test_confidence_metric_to_dict_method_returns_correct_schema(self):
+ confidence_metric = utils.ConfidenceMetric(
+ confidence_threshold=0.5,
+ recall=0.5,
+ precision=0.5,
+ f1_score=0.5,
+ max_predictions=1,
+ false_positive_rate=0.5,
+ accuracy=0.5,
+ true_positive_count=1,
+ false_positive_count=1,
+ false_negative_count=1,
+ true_negative_count=1,
+ recall_at_1=0.5,
+ precision_at_1=0.5,
+ false_positive_rate_at_1=0.5,
+ f1_score_at_1=0.5,
+ confusion_matrix=utils.ConfusionMatrix(
+ matrix=[[9, 1], [1, 9]],
+ annotation_specs=[
+ utils.AnnotationSpec(display_name="cat"),
+ utils.AnnotationSpec(display_name="dog"),
+ ],
+ ),
+ )
+
+ expected_results = {
+ "confidenceThreshold": 0.5,
+ "recall": 0.5,
+ "precision": 0.5,
+ "f1Score": 0.5,
+ "maxPredictions": 1,
+ "falsePositiveRate": 0.5,
+ "accuracy": 0.5,
+ "truePositiveCount": 1,
+ "falsePositiveCount": 1,
+ "falseNegativeCount": 1,
+ "trueNegativeCount": 1,
+ "recallAt1": 0.5,
+ "precisionAt1": 0.5,
+ "falsePositiveRateAt1": 0.5,
+ "f1ScoreAt1": 0.5,
+ "confusionMatrix": {
+ "rows": [[9, 1], [1, 9]],
+ "annotationSpecs": [
+ {"displayName": "cat"},
+ {"displayName": "dog"},
+ ],
+ },
+ }
+
+ assert json.dumps(confidence_metric.to_dict(), sort_keys=True) == json.dumps(
+ expected_results, sort_keys=True
+ )
+
+ @pytest.mark.usefixtures("create_execution_mock", "get_execution_mock")
+ def test_start_execution_method_calls_gapic_library_with_correct_parameters(
+ self, create_execution_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ class TestExecution(base_execution.BaseExecutionSchema):
+ schema_title = _TEST_SCHEMA_TITLE
+
+ execution = TestExecution(
+ state=_TEST_EXECUTION_STATE,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ metadata=_TEST_UPDATED_METADATA,
+ )
+ execution.start_execution()
+ create_execution_mock.assert_called_once_with(
+ parent=f"{_TEST_PARENT}/metadataStores/default",
+ execution=mock.ANY,
+ execution_id=None,
+ )
+ _, _, kwargs = create_execution_mock.mock_calls[0]
+ assert kwargs["execution"].schema_title == _TEST_SCHEMA_TITLE
+ assert kwargs["execution"].display_name == _TEST_DISPLAY_NAME
+ assert kwargs["execution"].description == _TEST_DESCRIPTION
+ assert kwargs["execution"].metadata == _TEST_UPDATED_METADATA
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_store.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_store.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3585b519e8812ba1c1f327bd7c4f613d3ec353d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_metadata_store.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from importlib import reload
+from unittest import mock
+from unittest.mock import patch
+
+import pytest
+from google.api_core import operation
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.metadata import metadata_store
+from google.cloud.aiplatform_v1 import MetadataServiceClient
+from google.cloud.aiplatform_v1 import MetadataStore as GapicMetadataStore
+from google.cloud.aiplatform.compat.types import (
+ encryption_spec as gca_encryption_spec,
+)
+from google.cloud.aiplatform.compat.types import metadata_service
+
+# project
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_ALT_LOCATION = "europe-west4"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+
+# metadata_store
+_TEST_ID = "test-id"
+_TEST_DEFAULT_ID = "default"
+
+_TEST_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/{_TEST_ID}"
+)
+_TEST_ALT_LOC_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_ALT_LOCATION}/metadataStores/{_TEST_ID}"
+)
+_TEST_DEFAULT_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/{_TEST_DEFAULT_ID}"
+
+_TEST_INVALID_NAME = f"prj/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/{_TEST_ID}"
+
+# CMEK encryption
+_TEST_ENCRYPTION_KEY_NAME = "key_1234"
+_TEST_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_KEY_NAME
+)
+
+
+@pytest.fixture
+def get_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.return_value = GapicMetadataStore(
+ name=_TEST_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ yield get_metadata_store_mock
+
+
+@pytest.fixture
+def get_default_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.return_value = GapicMetadataStore(
+ name=_TEST_DEFAULT_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ yield get_metadata_store_mock
+
+
+@pytest.fixture
+def get_metadata_store_without_name_mock():
+ with patch.object(
+ MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.return_value = GapicMetadataStore(
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ yield get_metadata_store_mock
+
+
+@pytest.fixture
+def create_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "create_metadata_store"
+ ) as create_metadata_store_mock:
+ create_metadata_store_lro_mock = mock.Mock(operation.Operation)
+ create_metadata_store_lro_mock.result.return_value = GapicMetadataStore(
+ name=_TEST_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ create_metadata_store_mock.return_value = create_metadata_store_lro_mock
+ yield create_metadata_store_mock
+
+
+@pytest.fixture
+def create_default_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "create_metadata_store"
+ ) as create_metadata_store_mock:
+ create_metadata_store_lro_mock = mock.Mock(operation.Operation)
+ create_metadata_store_lro_mock.result.return_value = GapicMetadataStore(
+ name=_TEST_DEFAULT_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ create_metadata_store_mock.return_value = create_metadata_store_lro_mock
+ yield create_metadata_store_mock
+
+
+@pytest.fixture
+def delete_metadata_store_mock():
+ with mock.patch.object(
+ MetadataServiceClient, "delete_metadata_store"
+ ) as delete_metadata_store_mock:
+ delete_metadata_store_lro_mock = mock.Mock(operation.Operation)
+ delete_metadata_store_lro_mock.result.return_value = (
+ metadata_service.DeleteMetadataStoreRequest()
+ )
+ delete_metadata_store_mock.return_value = delete_metadata_store_lro_mock
+ yield delete_metadata_store_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestMetadataStore:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures("google_auth_mock")
+ def test_init_metadata_store(self, get_metadata_store_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ metadata_store._MetadataStore(metadata_store_name=_TEST_NAME)
+ get_metadata_store_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_metadata_store_with_id(self, get_metadata_store_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ metadata_store._MetadataStore(metadata_store_name=_TEST_ID)
+ get_metadata_store_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_metadata_store_with_default_id(self, get_metadata_store_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ metadata_store._MetadataStore()
+ get_metadata_store_mock.assert_called_once_with(
+ name=_TEST_DEFAULT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_metadata_store_with_location_override(self, get_metadata_store_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ metadata_store._MetadataStore(
+ metadata_store_name=_TEST_ID, location=_TEST_ALT_LOCATION
+ )
+ get_metadata_store_mock.assert_called_once_with(
+ name=_TEST_ALT_LOC_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("get_metadata_store_mock")
+ def test_init_metadata_store_with_invalid_name(self):
+ with pytest.raises(ValueError):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ metadata_store._MetadataStore(metadata_store_name=_TEST_INVALID_NAME)
+
+ @pytest.mark.usefixtures("get_default_metadata_store_mock")
+ def test_init_aiplatform_with_encryption_key_name_and_create_default_metadata_store(
+ self, create_default_metadata_store_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+
+ my_metadata_store = metadata_store._MetadataStore._create(
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+
+ expected_metadata_store = GapicMetadataStore(
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ create_default_metadata_store_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ metadata_store_id=_TEST_DEFAULT_ID,
+ metadata_store=expected_metadata_store,
+ )
+
+ expected_metadata_store.name = _TEST_DEFAULT_NAME
+ assert my_metadata_store._gca_resource == expected_metadata_store
+
+ @pytest.mark.usefixtures("get_metadata_store_mock")
+ def test_create_non_default_metadata_store(self, create_metadata_store_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_metadata_store = metadata_store._MetadataStore._create(
+ metadata_store_id=_TEST_ID,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+
+ expected_metadata_store = GapicMetadataStore(
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ create_metadata_store_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ metadata_store_id=_TEST_ID,
+ metadata_store=expected_metadata_store,
+ )
+
+ expected_metadata_store.name = _TEST_NAME
+ assert my_metadata_store._gca_resource == expected_metadata_store
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_model_evaluation.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_model_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..01c038d0ecd9e448282e1b7e1fba936fb4faf7fa
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_model_evaluation.py
@@ -0,0 +1,1482 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import datetime
+import importlib
+import pytest
+import yaml
+import json
+from google.protobuf import json_format
+from google.protobuf import struct_pb2
+
+from unittest import mock
+from urllib import request
+from google.api_core import datetime_helpers
+from google.auth import credentials as auth_credentials
+from google.cloud.aiplatform.metadata import constants
+
+from google.cloud import storage
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform.utils import gcs_utils
+
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ metadata_service_client_v1 as metadata_service_client,
+ job_service_client_v1 as job_service_client,
+)
+from google.cloud.aiplatform.model_evaluation import model_evaluation_job
+
+from google.cloud.aiplatform_v1.services.pipeline_service import (
+ client as pipeline_service_client_v1,
+)
+
+from google.cloud.aiplatform.compat.types import model as gca_model
+
+from google.cloud.aiplatform_v1 import Execution as GapicExecution
+from google.cloud.aiplatform_v1 import MetadataServiceClient
+
+from google.cloud.aiplatform.compat.types import (
+ pipeline_job as gca_pipeline_job,
+ pipeline_state as gca_pipeline_state,
+ model_evaluation as gca_model_evaluation,
+ context as gca_context,
+ artifact as gca_artifact,
+ batch_prediction_job as gca_batch_prediction_job,
+)
+
+import constants as test_constants
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_MODEL_NAME = "test-model"
+_TEST_MODEL_ID = test_constants.ModelConstants._TEST_ID
+_TEST_EVAL_ID = "1028944691210842622"
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_BATCH_PREDICTION_JOB_ID = "614161631630327111"
+_TEST_COMPONENT_IDENTIFIER = "fpc-model-evaluation"
+
+_TEST_MODEL_RESOURCE_NAME = test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+
+_TEST_MODEL_EVAL_RESOURCE_NAME = (
+ model_service_client.ModelServiceClient.model_evaluation_path(
+ _TEST_PROJECT,
+ _TEST_LOCATION,
+ _TEST_MODEL_ID,
+ _TEST_EVAL_ID,
+ )
+)
+
+_TEST_BATCH_PREDICTION_RESOURCE_NAME = (
+ job_service_client.JobServiceClient.batch_prediction_job_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_BATCH_PREDICTION_JOB_ID
+ )
+)
+
+_TEST_MODEL_EVAL_METRICS = test_constants.ModelConstants._TEST_MODEL_EVAL_METRICS
+
+_TEST_INVALID_MODEL_RESOURCE_NAME = (
+ f"prj/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_MODEL_ID}"
+)
+
+# pipeline job
+_TEST_ID = "1028944691210842416"
+_TEST_PIPELINE_JOB_DISPLAY_NAME = "sample-pipeline-job-display-name"
+_TEST_PIPELINE_JOB_ID = "sample-test-pipeline-202111111"
+_TEST_GCS_BUCKET_NAME = "my-bucket"
+_TEST_CREDENTIALS = auth_credentials.AnonymousCredentials()
+_TEST_SERVICE_ACCOUNT = "abcde@my-project.iam.gserviceaccount.com"
+_TEST_PIPELINE_ROOT = f"gs://{_TEST_GCS_BUCKET_NAME}/pipeline_root"
+_TEST_PIPELINE_CREATE_TIME = datetime.datetime.now()
+
+_TEST_KFP_TEMPLATE_URI = "https://us-kfp.pkg.dev/vertex-evaluation/pipeline-templates/evaluation-automl-tabular-classification-pipeline/1.0.0"
+
+_TEST_TEMPLATE_REF = {
+ "base_uri": "https://us-kfp.pkg.dev/vertex-evaluation/pipeline-templates/evaluation",
+ "tag": "20230713_1737",
+}
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_NETWORK = f"projects/{_TEST_PROJECT}/global/networks/{_TEST_PIPELINE_JOB_ID}"
+
+_TEST_PIPELINE_JOB_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/pipelineJobs/{_TEST_PIPELINE_JOB_ID}"
+_TEST_INVALID_PIPELINE_JOB_NAME = (
+ f"prj/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/{_TEST_PIPELINE_JOB_ID}"
+)
+_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME = "test-eval-job"
+_TEST_EVAL_RESOURCE_DISPLAY_NAME = "my-eval-resource-display-name"
+
+_TEST_MODEL_EVAL_METADATA = {"pipeline_job_resource_name": _TEST_PIPELINE_JOB_NAME}
+
+_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES = {
+ "batch_predict_gcs_source_uris": ["gs://my-bucket/my-prediction-data.csv"],
+ "dataflow_service_account": _TEST_SERVICE_ACCOUNT,
+ "batch_predict_instances_format": "csv",
+ "model_name": _TEST_MODEL_RESOURCE_NAME,
+ "evaluation_display_name": _TEST_EVAL_RESOURCE_DISPLAY_NAME,
+ "project": _TEST_PROJECT,
+ "location": _TEST_LOCATION,
+ "batch_predict_gcs_destination_output_uri": _TEST_GCS_BUCKET_NAME,
+ "target_field_name": "predict_class",
+}
+
+_TEST_MODEL_EVAL_PREDICTION_TYPE = "classification"
+
+_TEST_JSON_FORMATTED_MODEL_EVAL_PIPELINE_PARAMETER_VALUES = {
+ "batch_predict_gcs_source_uris": '["gs://sdk-model-eval/batch-pred-heart.csv"]',
+ "dataflow_service_account": _TEST_SERVICE_ACCOUNT,
+ "batch_predict_instances_format": "csv",
+ "model_name": _TEST_MODEL_RESOURCE_NAME,
+ "project": _TEST_PROJECT,
+ "location": _TEST_LOCATION,
+ "batch_predict_gcs_destination_output_uri": _TEST_GCS_BUCKET_NAME,
+ "target_field_name": "predict_class",
+}
+
+_TEST_MODEL_EVAL_PIPELINE_SPEC = {
+ "pipelineInfo": {"name": "evaluation-default-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_gcs_source_uris": {"type": "STRING"},
+ "dataflow_service_account": _TEST_SERVICE_ACCOUNT,
+ "batch_predict_instances_format": {"type": "STRING"},
+ "batch_predict_machine_type": {"type": "STRING"},
+ "location": {"type": "STRING"},
+ "model_name": {"type": "STRING"},
+ "project": {"type": "STRING"},
+ "batch_predict_gcs_destination_output_uri": {"type": "STRING"},
+ "target_field_name": {"type": "STRING"},
+ }
+ },
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ "components": {},
+}
+
+_TEST_MODEL_EVAL_PIPELINE_SPEC_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "evaluation-default-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_gcs_source_uris": {"type": "STRING"},
+ "dataflow_service_account": {"type": "STRING"},
+ "batch_predict_instances_format": {"type": "STRING"},
+ "batch_predict_machine_type": {"type": "STRING"},
+ "evaluation_class_labels": {"type": "STRING"},
+ "location": {"type": "STRING"},
+ "model_name": {"type": "STRING"},
+ "project": {"type": "STRING"},
+ "batch_predict_gcs_destination_output_uri": {"type": "STRING"},
+ "target_field_name": {"type": "STRING"},
+ }
+ },
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ "components": {},
+ }
+)
+
+_TEST_MODEL_EVAL_PIPELINE_SPEC_WITH_CACHING_OPTIONS_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "evaluation-default-pipeline"},
+ "root": {
+ "dag": {
+ "tasks": {
+ "model-evaluation-text-generation": {
+ "taskInfo": {"name": "model-evaluation-text-generation"},
+ "cachingOptions": {"enableCache": False},
+ }
+ }
+ },
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_gcs_source_uris": {"type": "STRING"},
+ "dataflow_service_account": {"type": "STRING"},
+ "batch_predict_instances_format": {"type": "STRING"},
+ "batch_predict_machine_type": {"type": "STRING"},
+ "evaluation_class_labels": {"type": "STRING"},
+ "location": {"type": "STRING"},
+ "model_name": {"type": "STRING"},
+ "project": {"type": "STRING"},
+ "batch_predict_gcs_destination_output_uri": {"type": "STRING"},
+ "target_field_name": {"type": "STRING"},
+ }
+ },
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ "components": {},
+ }
+)
+
+_TEST_INVALID_MODEL_EVAL_PIPELINE_SPEC = json.dumps(
+ {
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_gcs_source_uris": {"type": "STRING"},
+ "dataflow_service_account": {"type": "STRING"},
+ "batch_predict_instances_format": {"type": "STRING"},
+ "model_name": {"type": "STRING"},
+ "project": {"type": "STRING"},
+ "location": {"type": "STRING"},
+ "batch_predict_gcs_destination_output_uri": {"type": "STRING"},
+ "target_field_name": {"type": "STRING"},
+ }
+ },
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ "components": {},
+ }
+)
+
+_TEST_MODEL_EVAL_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {"parameters": _TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES},
+ "pipelineInfo": {"name": "evaluation-default-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_gcs_source_uris": {"type": "STRING"},
+ "dataflow_service_account": {"type": "STRING"},
+ "batch_predict_instances_format": {"type": "STRING"},
+ "batch_predict_machine_type": {"type": "STRING"},
+ "evaluation_class_labels": {"type": "STRING"},
+ "location": {"type": "STRING"},
+ "model_name": {"type": "STRING"},
+ "project": {"type": "STRING"},
+ "batch_predict_gcs_destination_output_uri": {"type": "STRING"},
+ "target_field_name": {"type": "STRING"},
+ }
+ },
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ "components": {},
+ }
+)
+
+_TEST_INVALID_MODEL_EVAL_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {
+ "parameterValues": _TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES
+ },
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_gcs_source_uris": {"type": "STRING"},
+ "batch_predict_instances_format": {"type": "STRING"},
+ "model_name": {"type": "STRING"},
+ "project": {"type": "STRING"},
+ "location": {"type": "STRING"},
+ "batch_predict_gcs_destination_output_uri": {"type": "STRING"},
+ "target_field_name": {"type": "STRING"},
+ }
+ },
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ "components": {"test_component": {}},
+ }
+)
+
+_EVAL_GCP_RESOURCES_STR = (
+ '{\n "resources": [\n {\n "resourceType": "ModelEvaluation",\n "resourceUri": "https://us-central1-aiplatform.googleapis.com/v1/'
+ + _TEST_MODEL_EVAL_RESOURCE_NAME
+ + '"\n }\n ]\n}'
+)
+
+_BP_JOB_GCP_RESOURCES_STR = (
+ '{\n "resources": [\n {\n "resourceType": "BatchPredictionJob",\n "resourceUri": "https://us-central1-aiplatform.googleapis.com/v1/'
+ + _TEST_BATCH_PREDICTION_RESOURCE_NAME
+ + '"\n }\n ]\n}'
+)
+
+_TEST_PIPELINE_JOB_DETAIL_EVAL = {
+ "output:evaluation_resource_name": _TEST_MODEL_EVAL_RESOURCE_NAME
+}
+
+_TEST_PIPELINE_JOB_DETAIL_BP = {
+ "output:gcp_resources": _BP_JOB_GCP_RESOURCES_STR,
+}
+
+_TEST_EVAL_METRICS_ARTIFACT_NAME = (
+ "projects/123/locations/us-central1/metadataStores/default/artifacts/456"
+)
+_TEST_EVAL_METRICS_ARTIFACT_URI = "gs://test-bucket/eval_pipeline_root/123/evaluation-default-pipeline-20220615135923/model-evaluation-2_-789/evaluation_metrics"
+
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_METADATASTORE = f"{_TEST_PARENT}/metadataStores/default"
+_TEST_CONTEXT_NAME = f"{_TEST_METADATASTORE}/contexts/{_TEST_EXPERIMENT}"
+
+# executions: this is used in test_list_pipeline_based_service
+_TEST_EXECUTION_PARENT = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+
+_TEST_RUN = "run-1"
+_TEST_OTHER_RUN = "run-2"
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_RUN}"
+_TEST_EXECUTION_NAME = f"{_TEST_EXECUTION_PARENT}/executions/{_TEST_EXECUTION_ID}"
+
+
+_TEST_OTHER_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_OTHER_RUN}"
+_TEST_OTHER_EXECUTION_NAME = (
+ f"{_TEST_EXECUTION_PARENT}/executions/{_TEST_OTHER_EXECUTION_ID}"
+)
+
+# execution metadata parameters: used in test_list_pipeline_based_service
+_TEST_PARAM_KEY_1 = "learning_rate"
+_TEST_PARAM_KEY_2 = "dropout"
+_TEST_PIPELINE_PARAM_KEY = "pipeline_job_resource_name"
+_TEST_PARAMS = {
+ _TEST_PARAM_KEY_1: 0.01,
+ _TEST_PARAM_KEY_2: 0.2,
+ _TEST_PIPELINE_PARAM_KEY: _TEST_PIPELINE_JOB_NAME,
+}
+_TEST_OTHER_PARAMS = {_TEST_PARAM_KEY_1: 0.02, _TEST_PARAM_KEY_2: 0.3}
+
+_TEST_MODEL_EVAL_CLASS_LABELS = ["0", "1", "2"]
+_TEST_TARGET_FIELD_NAME = "species"
+
+
+# model eval mocks
+@pytest.fixture
+def get_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME,
+ )
+
+ yield get_model_mock
+
+
+@pytest.fixture
+def mock_model():
+ model = mock.MagicMock(models.Model)
+ model.name = _TEST_MODEL_ID
+ model._latest_future = None
+ model._exception = None
+ model._gca_resource = gca_model.Model(
+ display_name="test-eval-model",
+ description="This is the mock Model's description",
+ name=_TEST_MODEL_NAME,
+ )
+ yield model
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+ yield mock_create_pipeline_job
+
+
+@pytest.fixture
+def mock_model_eval_get():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model_evaluation"
+ ) as mock_get_model_eval:
+ mock_get_model_eval.return_value = gca_model_evaluation.ModelEvaluation(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ metrics=_TEST_MODEL_EVAL_METRICS,
+ metadata=_TEST_MODEL_EVAL_METADATA,
+ )
+ yield mock_get_model_eval
+
+
+_TEST_MODEL_EVAL_LIST = [
+ gca_model_evaluation.ModelEvaluation(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ create_time=datetime_helpers.DatetimeWithNanoseconds(
+ 2023, 5, 14, 16, 24, 3, 299558, tzinfo=datetime.timezone.utc
+ ),
+ ),
+ gca_model_evaluation.ModelEvaluation(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ create_time=datetime_helpers.DatetimeWithNanoseconds(
+ 2023, 6, 14, 16, 24, 3, 299558, tzinfo=datetime.timezone.utc
+ ),
+ ),
+ gca_model_evaluation.ModelEvaluation(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ create_time=datetime_helpers.DatetimeWithNanoseconds(
+ 2023, 7, 14, 16, 24, 3, 299558, tzinfo=datetime.timezone.utc
+ ),
+ ),
+]
+
+
+@pytest.fixture
+def list_model_evaluations_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "list_model_evaluations"
+ ) as list_model_evaluations_mock:
+ list_model_evaluations_mock.return_value = _TEST_MODEL_EVAL_LIST
+ yield list_model_evaluations_mock
+
+
+@pytest.fixture
+def mock_pipeline_bucket_exists():
+ def mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist(
+ output_artifacts_gcs_dir=None,
+ service_account=None,
+ project=None,
+ location=None,
+ credentials=None,
+ ):
+ output_artifacts_gcs_dir = (
+ output_artifacts_gcs_dir
+ or gcs_utils.generate_gcs_directory_for_pipeline_artifacts(
+ project=project,
+ location=location,
+ )
+ )
+ return output_artifacts_gcs_dir
+
+ with mock.patch(
+ "google.cloud.aiplatform.utils.gcs_utils.create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist",
+ wraps=mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist,
+ ) as mock_context:
+ yield mock_context
+
+
+@pytest.fixture
+def mock_artifact():
+ artifact = mock.MagicMock(aiplatform.Artifact)
+ artifact._gca_resource = gca_artifact.Artifact(
+ display_name="evaluation_metrics",
+ name=_TEST_EVAL_METRICS_ARTIFACT_NAME,
+ uri=_TEST_EVAL_METRICS_ARTIFACT_URI,
+ )
+ yield artifact
+
+
+@pytest.fixture
+def get_artifact_mock():
+ with mock.patch.object(
+ metadata_service_client.MetadataServiceClient, "get_artifact"
+ ) as get_artifact_mock:
+ get_artifact_mock.return_value = gca_artifact.Artifact(
+ display_name="evaluation_metrics",
+ name=_TEST_EVAL_METRICS_ARTIFACT_NAME,
+ uri=_TEST_EVAL_METRICS_ARTIFACT_URI,
+ )
+
+ yield get_artifact_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_batch_prediction_job"
+ ) as get_bp_job_mock:
+ get_bp_job_mock.return_value = gca_batch_prediction_job.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_RESOURCE_NAME,
+ )
+ yield get_bp_job_mock
+
+
+def make_pipeline_job(state):
+ return gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=state,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ job_detail=gca_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=gca_context.Context(
+ name=_TEST_PIPELINE_JOB_NAME,
+ ),
+ task_details=[
+ gca_pipeline_job.PipelineTaskDetail(
+ task_id=123,
+ task_name=_TEST_PIPELINE_JOB_ID,
+ state=gca_pipeline_job.PipelineTaskDetail.State.SUCCEEDED,
+ execution={
+ "metadata": struct_pb2.Struct(
+ fields={
+ key: struct_pb2.Value(string_value=value)
+ for key, value in _TEST_PIPELINE_JOB_DETAIL_EVAL.items()
+ },
+ ),
+ },
+ ),
+ gca_pipeline_job.PipelineTaskDetail(
+ task_id=123,
+ execution=GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata={"component_type": _TEST_COMPONENT_IDENTIFIER},
+ ),
+ ),
+ ],
+ ),
+ )
+
+
+@pytest.fixture
+def mock_pipeline_service_get():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_get_pending():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_load_json(job_spec_json):
+ with mock.patch.object(storage.Blob, "download_as_bytes") as mock_load_json:
+ mock_load_json.return_value = json.dumps(job_spec_json).encode()
+ yield mock_load_json
+
+
+@pytest.fixture
+def mock_load_yaml_and_json(job_spec):
+ with mock.patch.object(
+ storage.Blob, "download_as_bytes"
+ ) as mock_load_yaml_and_json:
+ mock_load_yaml_and_json.return_value = job_spec.encode()
+ yield mock_load_yaml_and_json
+
+
+@pytest.fixture
+def mock_invalid_model_eval_job_get():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_model_eval_job:
+ mock_get_model_eval_job.return_value = gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ # pipeline_spec=_TEST_INVALID_MODEL_EVAL_PIPELINE_SPEC,
+ )
+ yield mock_get_model_eval_job
+
+
+@pytest.fixture
+def mock_model_eval_job_create():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_model_eval_job:
+ mock_create_model_eval_job.return_value = gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ pipeline_spec=_TEST_MODEL_EVAL_PIPELINE_SPEC,
+ )
+ yield mock_create_model_eval_job
+
+
+@pytest.fixture
+def mock_model_eval_job_get():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_model_eval_job:
+ mock_get_model_eval_job.return_value = make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_get_model_eval_job
+
+
+@pytest.fixture
+def mock_successfully_completed_eval_job():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_model_eval_job:
+ mock_get_model_eval_job.return_value = make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_get_model_eval_job
+
+
+@pytest.fixture
+def mock_failed_completed_eval_job():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_model_eval_job:
+ mock_get_model_eval_job.return_value = make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED
+ )
+ yield mock_get_model_eval_job
+
+
+@pytest.fixture
+def mock_pending_eval_job():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_model_eval_job:
+ mock_get_model_eval_job.return_value = make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING
+ )
+ yield mock_get_model_eval_job
+
+
+def make_failed_eval_job():
+ model_evaluation_job._ModelEvaluationJob._template_ref = _TEST_TEMPLATE_REF
+
+ eval_job_resource = model_evaluation_job._ModelEvaluationJob(
+ evaluation_pipeline_run_name=_TEST_PIPELINE_JOB_NAME
+ )
+ eval_job_resource.backing_pipeline_job = gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ pipeline_spec=_TEST_MODEL_EVAL_PIPELINE_SPEC,
+ )
+ return eval_job_resource
+
+
+@pytest.fixture
+def get_execution_mock():
+ with mock.patch.object(
+ MetadataServiceClient, "get_execution"
+ ) as get_execution_mock:
+ get_execution_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata={"component_type": _TEST_COMPONENT_IDENTIFIER},
+ )
+ yield get_execution_mock
+
+
+@pytest.fixture
+def list_executions_mock():
+ with mock.patch.object(
+ MetadataServiceClient, "list_executions"
+ ) as list_executions_mock:
+ list_executions_mock.return_value = [
+ GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata=_TEST_PARAMS,
+ ),
+ GapicExecution(
+ name=_TEST_OTHER_EXECUTION_NAME,
+ display_name=_TEST_OTHER_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata=_TEST_OTHER_PARAMS,
+ ),
+ ]
+ yield list_executions_mock
+
+
+@pytest.fixture
+def mock_request_urlopen(job_spec):
+ with mock.patch.object(request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = job_spec.encode()
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield mock_urlopen
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestModelEvaluation:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ )
+
+ def test_init_model_evaluation_with_only_resource_name(self, mock_model_eval_get):
+ aiplatform.ModelEvaluation(evaluation_name=_TEST_MODEL_EVAL_RESOURCE_NAME)
+
+ mock_model_eval_get.assert_called_once_with(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_model_evaluation_with_eval_id_and_model_id(self, mock_model_eval_get):
+ aiplatform.ModelEvaluation(
+ evaluation_name=_TEST_EVAL_ID, model_id=_TEST_MODEL_ID
+ )
+
+ mock_model_eval_get.assert_called_once_with(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_model_evaluatin_with_id_project_and_location(
+ self, mock_model_eval_get
+ ):
+ aiplatform.ModelEvaluation(
+ evaluation_name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ mock_model_eval_get.assert_called_once_with(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_model_evaluation_with_invalid_evaluation_resource_raises(
+ self, mock_model_eval_get
+ ):
+ with pytest.raises(ValueError):
+ aiplatform.ModelEvaluation(evaluation_name=_TEST_MODEL_RESOURCE_NAME)
+
+ def test_get_model_evaluation_metrics(self, mock_model_eval_get):
+ eval_metrics = aiplatform.ModelEvaluation(
+ evaluation_name=_TEST_MODEL_EVAL_RESOURCE_NAME
+ ).metrics
+ assert eval_metrics == _TEST_MODEL_EVAL_METRICS
+
+ def test_no_delete_model_evaluation_method(self, mock_model_eval_get):
+ my_eval = aiplatform.ModelEvaluation(
+ evaluation_name=_TEST_MODEL_EVAL_RESOURCE_NAME
+ )
+
+ with pytest.raises(NotImplementedError):
+ my_eval.delete()
+
+ def test_list_model_evaluations(
+ self,
+ mock_model_eval_get,
+ get_model_mock,
+ list_model_evaluations_mock,
+ ):
+ metrics_list = aiplatform.ModelEvaluation.list(model=_TEST_MODEL_RESOURCE_NAME)
+
+ assert isinstance(metrics_list[0], aiplatform.ModelEvaluation)
+
+ def test_list_model_evaluations_with_order_by(
+ self,
+ mock_model_eval_get,
+ get_model_mock,
+ list_model_evaluations_mock,
+ ):
+ metrics_list = aiplatform.ModelEvaluation.list(
+ model=_TEST_MODEL_RESOURCE_NAME, order_by="create_time desc"
+ )
+
+ assert metrics_list[0].create_time > metrics_list[1].create_time
+
+ def test_get_model_evaluation_pipeline_job(
+ self, mock_model_eval_get, mock_pipeline_service_get
+ ):
+ eval_pipeline_job = aiplatform.ModelEvaluation(
+ evaluation_name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )._backing_pipeline_job
+
+ assert eval_pipeline_job.resource_name == _TEST_PIPELINE_JOB_NAME
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_MODEL_EVAL_PIPELINE_SPEC_JSON],
+ )
+ def test_get_model_evaluation_bp_job(
+ self,
+ mock_pipeline_service_create,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_model,
+ mock_artifact,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_model_eval_job_get,
+ mock_pipeline_service_get,
+ mock_model_eval_job_create,
+ mock_successfully_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ get_artifact_mock,
+ get_batch_prediction_job_mock,
+ mock_request_urlopen,
+ ):
+ test_model_eval_job = model_evaluation_job._ModelEvaluationJob.submit(
+ model_name=_TEST_MODEL_RESOURCE_NAME,
+ prediction_type=_TEST_MODEL_EVAL_PREDICTION_TYPE,
+ instances_format=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_instances_format"
+ ],
+ model_type="automl_tabular",
+ pipeline_root=_TEST_GCS_BUCKET_NAME,
+ target_field_name=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "target_field_name"
+ ],
+ evaluation_pipeline_display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ gcs_source_uris=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_gcs_source_uris"
+ ],
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ test_model_eval_job.wait()
+
+ eval_resource = test_model_eval_job.get_model_evaluation()
+
+ assert isinstance(eval_resource, aiplatform.ModelEvaluation)
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_MODEL_EVAL_PIPELINE_SPEC_JSON],
+ )
+ def test_get_model_evaluation_mlmd_resource(
+ self,
+ mock_pipeline_service_create,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_model,
+ mock_artifact,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_model_eval_job_get,
+ mock_pipeline_service_get,
+ mock_model_eval_job_create,
+ mock_successfully_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ get_artifact_mock,
+ mock_request_urlopen,
+ ):
+ test_model_eval_job = model_evaluation_job._ModelEvaluationJob.submit(
+ model_name=_TEST_MODEL_RESOURCE_NAME,
+ prediction_type=_TEST_MODEL_EVAL_PREDICTION_TYPE,
+ instances_format=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_instances_format"
+ ],
+ model_type="automl_tabular",
+ pipeline_root=_TEST_GCS_BUCKET_NAME,
+ target_field_name=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "target_field_name"
+ ],
+ evaluation_pipeline_display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ gcs_source_uris=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_gcs_source_uris"
+ ],
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ test_model_eval_job.wait()
+
+ eval_resource = test_model_eval_job.get_model_evaluation()
+
+ assert isinstance(eval_resource, aiplatform.ModelEvaluation)
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestModelEvaluationJob:
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_MODEL_EVAL_PIPELINE_JOB],
+ )
+ def test_init_model_evaluation_job(
+ self,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_model_eval_job_get,
+ get_execution_mock,
+ ):
+ model_evaluation_job._ModelEvaluationJob(
+ evaluation_pipeline_run_name=_TEST_PIPELINE_JOB_NAME
+ )
+
+ mock_model_eval_job_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert mock_model_eval_job_get.call_count == 2
+
+ get_execution_mock.assert_called_once
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_INVALID_MODEL_EVAL_PIPELINE_JOB],
+ )
+ def test_init_model_evaluation_job_with_non_eval_pipeline_raises(
+ self,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_invalid_model_eval_job_get,
+ ):
+ """This should fail because we're passing in `mock_invalid_model_eval_job_get`.
+
+ That mock uses a pipeline template that doesn't have the _component_identifier
+ defined in the ModelEvaluationJob class.
+ """
+ with pytest.raises(ValueError):
+ model_evaluation_job._ModelEvaluationJob(
+ evaluation_pipeline_run_name=_TEST_PIPELINE_JOB_NAME
+ )
+
+ def test_init_model_evaluation_job_with_invalid_pipeline_job_name_raises(
+ self,
+ mock_pipeline_service_get,
+ ):
+ with pytest.raises(ValueError):
+ model_evaluation_job._ModelEvaluationJob(
+ evaluation_pipeline_run_name=_TEST_INVALID_PIPELINE_JOB_NAME,
+ )
+
+ @pytest.mark.skip(reason="flaky")
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_MODEL_EVAL_PIPELINE_SPEC_JSON],
+ )
+ @pytest.mark.usefixtures("mock_pipeline_service_create")
+ def test_model_evaluation_job_submit(
+ self,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_model,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_model_eval_job_get,
+ mock_pipeline_service_get,
+ mock_model_eval_job_create,
+ mock_pipeline_bucket_exists,
+ mock_request_urlopen,
+ ):
+ test_model_eval_job = model_evaluation_job._ModelEvaluationJob.submit(
+ model_name=_TEST_MODEL_RESOURCE_NAME,
+ prediction_type=_TEST_MODEL_EVAL_PREDICTION_TYPE,
+ instances_format=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_instances_format"
+ ],
+ model_type="automl_tabular",
+ pipeline_root=_TEST_GCS_BUCKET_NAME,
+ target_field_name=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "target_field_name"
+ ],
+ evaluation_pipeline_display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ gcs_source_uris=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_gcs_source_uris"
+ ],
+ job_id=_TEST_PIPELINE_JOB_ID,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ test_model_eval_job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameters": {
+ "batch_predict_gcs_source_uris": {
+ "stringValue": '["gs://my-bucket/my-prediction-data.csv"]'
+ },
+ "dataflow_service_account": {"stringValue": _TEST_SERVICE_ACCOUNT},
+ "batch_predict_instances_format": {"stringValue": "csv"},
+ "model_name": {"stringValue": _TEST_MODEL_RESOURCE_NAME},
+ "project": {"stringValue": _TEST_PROJECT},
+ "location": {"stringValue": _TEST_LOCATION},
+ "batch_predict_gcs_destination_output_uri": {
+ "stringValue": _TEST_GCS_BUCKET_NAME
+ },
+ "target_field_name": {"stringValue": "predict_class"},
+ },
+ }
+
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ template_uri=_TEST_KFP_TEMPLATE_URI,
+ )
+
+ mock_model_eval_job_create.assert_called_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ assert mock_model_eval_job_get.called_once
+
+ assert mock_pipeline_service_get.called_once
+
+ assert mock_model_eval_job_get.called_once
+
+ @pytest.mark.skip(reason="flaky")
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_MODEL_EVAL_PIPELINE_SPEC_WITH_CACHING_OPTIONS_JSON],
+ )
+ @pytest.mark.usefixtures("mock_pipeline_service_create")
+ def test_model_evaluation_job_submit_with_caching_disabled(
+ self,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_model,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_model_eval_job_get,
+ mock_pipeline_service_get,
+ mock_model_eval_job_create,
+ mock_pipeline_bucket_exists,
+ mock_request_urlopen,
+ ):
+ test_model_eval_job = model_evaluation_job._ModelEvaluationJob.submit(
+ model_name=_TEST_MODEL_RESOURCE_NAME,
+ prediction_type=_TEST_MODEL_EVAL_PREDICTION_TYPE,
+ instances_format=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_instances_format"
+ ],
+ model_type="automl_tabular",
+ pipeline_root=_TEST_GCS_BUCKET_NAME,
+ target_field_name=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "target_field_name"
+ ],
+ evaluation_pipeline_display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ gcs_source_uris=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_gcs_source_uris"
+ ],
+ job_id=_TEST_PIPELINE_JOB_ID,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ enable_caching=False,
+ )
+
+ test_model_eval_job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameters": {
+ "batch_predict_gcs_source_uris": {
+ "stringValue": '["gs://my-bucket/my-prediction-data.csv"]'
+ },
+ "dataflow_service_account": {"stringValue": _TEST_SERVICE_ACCOUNT},
+ "batch_predict_instances_format": {"stringValue": "csv"},
+ "model_name": {"stringValue": _TEST_MODEL_RESOURCE_NAME},
+ "project": {"stringValue": _TEST_PROJECT},
+ "location": {"stringValue": _TEST_LOCATION},
+ "batch_predict_gcs_destination_output_uri": {
+ "stringValue": _TEST_GCS_BUCKET_NAME
+ },
+ "target_field_name": {"stringValue": "predict_class"},
+ },
+ }
+
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ template_uri=_TEST_KFP_TEMPLATE_URI,
+ )
+
+ mock_model_eval_job_create.assert_called_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ assert mock_model_eval_job_get.called_once
+
+ assert mock_pipeline_service_get.called_once
+
+ assert mock_model_eval_job_get.called_once
+
+ @pytest.mark.skip(reason="flaky")
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_MODEL_EVAL_PIPELINE_SPEC_JSON],
+ )
+ def test_model_evaluation_job_submit_with_experiment(
+ self,
+ mock_pipeline_service_create,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_model,
+ get_model_mock,
+ get_experiment_mock,
+ mock_model_eval_get,
+ mock_model_eval_job_get,
+ mock_pipeline_service_get,
+ mock_model_eval_job_create,
+ add_context_children_mock,
+ get_metadata_store_mock,
+ get_context_mock,
+ mock_pipeline_bucket_exists,
+ mock_request_urlopen,
+ ):
+ test_experiment = aiplatform.Experiment(_TEST_EXPERIMENT)
+
+ test_model_eval_job = model_evaluation_job._ModelEvaluationJob.submit(
+ model_name=_TEST_MODEL_RESOURCE_NAME,
+ prediction_type=_TEST_MODEL_EVAL_PREDICTION_TYPE,
+ pipeline_root=_TEST_GCS_BUCKET_NAME,
+ target_field_name=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "target_field_name"
+ ],
+ model_type="automl_tabular",
+ evaluation_pipeline_display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ gcs_source_uris=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_gcs_source_uris"
+ ],
+ job_id=_TEST_PIPELINE_JOB_ID,
+ instances_format=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_instances_format"
+ ],
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ experiment=test_experiment,
+ )
+
+ test_model_eval_job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameters": {
+ "batch_predict_gcs_source_uris": {
+ "stringValue": '["gs://my-bucket/my-prediction-data.csv"]'
+ },
+ "dataflow_service_account": {"stringValue": _TEST_SERVICE_ACCOUNT},
+ "batch_predict_instances_format": {"stringValue": "csv"},
+ "model_name": {"stringValue": _TEST_MODEL_RESOURCE_NAME},
+ "project": {"stringValue": _TEST_PROJECT},
+ "location": {"stringValue": _TEST_LOCATION},
+ "batch_predict_gcs_destination_output_uri": {
+ "stringValue": _TEST_GCS_BUCKET_NAME
+ },
+ "target_field_name": {"stringValue": "predict_class"},
+ },
+ }
+
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ template_uri=_TEST_KFP_TEMPLATE_URI,
+ )
+
+ mock_model_eval_job_create.assert_called_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ get_context_mock.assert_called_with(
+ name=_TEST_CONTEXT_NAME,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ @pytest.mark.skip(reason="flaky")
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_MODEL_EVAL_PIPELINE_SPEC_JSON],
+ )
+ def test_get_model_evaluation_with_successful_pipeline_run_returns_resource(
+ self,
+ mock_pipeline_service_create,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_model,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_model_eval_job_get,
+ mock_pipeline_service_get,
+ mock_model_eval_job_create,
+ mock_successfully_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ mock_request_urlopen,
+ ):
+ test_model_eval_job = model_evaluation_job._ModelEvaluationJob.submit(
+ model_name=_TEST_MODEL_RESOURCE_NAME,
+ prediction_type=_TEST_MODEL_EVAL_PREDICTION_TYPE,
+ pipeline_root=_TEST_GCS_BUCKET_NAME,
+ target_field_name=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "target_field_name"
+ ],
+ model_type="automl_tabular",
+ evaluation_pipeline_display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ gcs_source_uris=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_gcs_source_uris"
+ ],
+ instances_format=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_instances_format"
+ ],
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ test_model_eval_job.wait()
+
+ assert (
+ test_model_eval_job.backing_pipeline_job.resource_name
+ == _TEST_PIPELINE_JOB_NAME
+ )
+
+ assert isinstance(
+ test_model_eval_job.backing_pipeline_job, aiplatform.PipelineJob
+ )
+
+ test_eval = test_model_eval_job.get_model_evaluation()
+
+ assert isinstance(test_eval, aiplatform.ModelEvaluation)
+
+ assert test_eval.metrics == _TEST_MODEL_EVAL_METRICS
+
+ mock_model_eval_get.assert_called_with(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert isinstance(test_eval._backing_pipeline_job, aiplatform.PipelineJob)
+
+ @pytest.mark.skip(reason="flaky")
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_MODEL_EVAL_PIPELINE_SPEC_JSON],
+ )
+ def test_model_evaluation_job_get_model_evaluation_with_failed_pipeline_run_raises(
+ self,
+ mock_pipeline_service_create,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_model,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_model_eval_job_get,
+ mock_pipeline_service_get,
+ mock_model_eval_job_create,
+ mock_failed_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ mock_request_urlopen,
+ ):
+ test_model_eval_job = model_evaluation_job._ModelEvaluationJob.submit(
+ model_name=_TEST_MODEL_RESOURCE_NAME,
+ prediction_type=_TEST_MODEL_EVAL_PREDICTION_TYPE,
+ pipeline_root=_TEST_GCS_BUCKET_NAME,
+ target_field_name=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "target_field_name"
+ ],
+ model_type="automl_tabular",
+ evaluation_pipeline_display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ gcs_source_uris=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_gcs_source_uris"
+ ],
+ instances_format=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_instances_format"
+ ],
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ with pytest.raises(RuntimeError):
+ test_model_eval_job.get_model_evaluation()
+
+ @pytest.mark.skip(reason="flaky")
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_MODEL_EVAL_PIPELINE_SPEC_JSON],
+ )
+ def test_model_evaluation_job_get_model_evaluation_with_pending_pipeline_run_returns_none(
+ self,
+ mock_pipeline_service_create,
+ job_spec,
+ mock_load_yaml_and_json,
+ mock_model,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_model_eval_job_get,
+ mock_pipeline_service_get,
+ mock_model_eval_job_create,
+ mock_pending_eval_job,
+ mock_pipeline_bucket_exists,
+ mock_request_urlopen,
+ ):
+ test_model_eval_job = model_evaluation_job._ModelEvaluationJob.submit(
+ model_name=_TEST_MODEL_RESOURCE_NAME,
+ prediction_type=_TEST_MODEL_EVAL_PREDICTION_TYPE,
+ pipeline_root=_TEST_GCS_BUCKET_NAME,
+ target_field_name=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "target_field_name"
+ ],
+ model_type="automl_tabular",
+ evaluation_pipeline_display_name=_TEST_MODEL_EVAL_PIPELINE_JOB_DISPLAY_NAME,
+ gcs_source_uris=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_gcs_source_uris"
+ ],
+ instances_format=_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES[
+ "batch_predict_instances_format"
+ ],
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ assert test_model_eval_job.get_model_evaluation() is None
+
+ def test_get_template_url(
+ self,
+ ):
+ template_url = model_evaluation_job._ModelEvaluationJob._get_template_url(
+ model_type="automl_tabular",
+ feature_attributions=False,
+ prediction_type=_TEST_MODEL_EVAL_PREDICTION_TYPE,
+ )
+
+ assert template_url == _TEST_KFP_TEMPLATE_URI
+
+ regression_template_url = (
+ model_evaluation_job._ModelEvaluationJob._get_template_url(
+ model_type="other",
+ feature_attributions=True,
+ prediction_type="regression",
+ )
+ )
+
+ assert (
+ regression_template_url
+ == "https://us-kfp.pkg.dev/vertex-evaluation/pipeline-templates/evaluation-feature-attribution-regression-pipeline/1.0.0"
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_model_garden_models.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_model_garden_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..0678e566915924753bc41a4226103ea81890444e
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_model_garden_models.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+from importlib import reload
+from unittest import mock
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+import constants as test_constants
+
+from google.cloud.aiplatform.compat.services import (
+ model_garden_service_client_v1,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ publisher_model as gca_publisher_model,
+)
+
+from vertexai._model_garden import _model_garden_models
+
+_TEXT_BISON_PUBLISHER_MODEL_DICT = {
+ "name": "publishers/google/models/text-bison",
+ "version_id": "001",
+ "open_source_category": "PROPRIETARY",
+ "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.PUBLIC_PREVIEW,
+ "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/text-bison@001",
+ "predict_schemata": {
+ "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/text_generation_1.0.0.yaml",
+ "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/text_generation_1.0.0.yaml",
+ "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/text_generation_1.0.0.yaml",
+ },
+}
+
+_EMBEDDING_GECKO_PUBLISHER_MODEL_DICT = {
+ "name": "publishers/google/models/textembedding-gecko",
+ "version_id": "003",
+ "open_source_category": "PROPRIETARY",
+ "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA,
+ "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/textembedding-gecko@003",
+ "predict_schemata": {
+ "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/text_embedding_1.0.0.yaml",
+ "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/text_embedding_1.0.0.yaml",
+ "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/text_embedding_1.0.0.yaml",
+ },
+}
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestModelGardenModels:
+ """Unit tests for the _ModelGardenModel base class."""
+
+ class FakeModelGardenBisonModel(_model_garden_models._ModelGardenModel):
+
+ _INSTANCE_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/predict/instance/text_generation_1.0.0.yaml"
+
+ class FakeModelGardenGeckoModel(_model_garden_models._ModelGardenModel):
+
+ _INSTANCE_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/predict/instance/text_embedding_1.0.0.yaml"
+
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_model_garden_bison_model_with_from_pretrained(self):
+ """Tests the text generation model."""
+ aiplatform.init(
+ project=test_constants.ProjectConstants._TEST_PROJECT,
+ location=test_constants.ProjectConstants._TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client_v1.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _TEXT_BISON_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ self.FakeModelGardenBisonModel.from_pretrained("text-bison@001")
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/text-bison@001",
+ retry=base._DEFAULT_RETRY,
+ )
+
+ def test_init_model_garden_gecko_model_with_from_pretrained(self):
+ """Tests the text generation model."""
+ aiplatform.init(
+ project=test_constants.ProjectConstants._TEST_PROJECT,
+ location=test_constants.ProjectConstants._TEST_LOCATION,
+ )
+ with mock.patch.object(
+ target=model_garden_service_client_v1.ModelGardenServiceClient,
+ attribute="get_publisher_model",
+ return_value=gca_publisher_model.PublisherModel(
+ _EMBEDDING_GECKO_PUBLISHER_MODEL_DICT
+ ),
+ ) as mock_get_publisher_model:
+ self.FakeModelGardenGeckoModel.from_pretrained("textembedding-gecko@003")
+
+ mock_get_publisher_model.assert_called_once_with(
+ name="publishers/google/models/textembedding-gecko@003",
+ retry=base._DEFAULT_RETRY,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_model_monitoring.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_model_monitoring.py
new file mode 100644
index 0000000000000000000000000000000000000000..d74c012a19367e59282b07d3a0f1c14eb6626387
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_model_monitoring.py
@@ -0,0 +1,202 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from google.cloud.aiplatform import model_monitoring
+
+from google.cloud.aiplatform_v1.types import (
+ io as gca_io,
+ model_monitoring as gca_model_monitoring,
+)
+
+_TEST_TARGET_FIELD = "target"
+_TEST_BQ_DATASOURCE = "bq://test/data"
+_TEST_GCS_DATASOURCE = "gs://test/data"
+_TEST_OTHER_DATASOURCE = ""
+_TEST_DRIFT_TRESHOLD = {"key": 0.2}
+_TEST_EMAIL1 = "test1"
+_TEST_EMAIL2 = "test2"
+_TEST_NOTIFICATION_CHANNEL = "projects/123/notificationChannels/456"
+_TEST_VALID_DATA_FORMATS = ["tf-record", "csv", "jsonl"]
+_TEST_SAMPLING_RATE = 0.8
+_TEST_MONITORING_INTERVAL = 1
+_TEST_SKEW_THRESHOLDS = [None, 0.2, {"key": 0.1}]
+_TEST_ATTRIBUTE_SKEW_THRESHOLDS = [None, {"key": 0.1}]
+
+
+class TestModelMonitoringConfigs:
+ """Tests for model monitoring configs."""
+
+ @pytest.mark.parametrize(
+ "data_source",
+ [_TEST_BQ_DATASOURCE, _TEST_GCS_DATASOURCE, _TEST_OTHER_DATASOURCE],
+ )
+ @pytest.mark.parametrize("data_format", _TEST_VALID_DATA_FORMATS)
+ @pytest.mark.parametrize("skew_thresholds", _TEST_SKEW_THRESHOLDS)
+ def test_skew_config_proto_value(self, data_source, data_format, skew_thresholds):
+ """Tests if skew config can be constrctued properly to gapic proto."""
+ attribute_skew_thresholds = {"key": 0.1}
+ skew_config = model_monitoring.SkewDetectionConfig(
+ data_source=data_source,
+ skew_thresholds=skew_thresholds,
+ target_field=_TEST_TARGET_FIELD,
+ attribute_skew_thresholds=attribute_skew_thresholds,
+ data_format=data_format,
+ )
+ # data_format and data source are not used at
+ # TrainingPredictionSkewDetectionConfig.
+ if isinstance(skew_thresholds, dict):
+ expected_gapic_proto = gca_model_monitoring.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig(
+ skew_thresholds={
+ key: gca_model_monitoring.ThresholdConfig(value=val)
+ for key, val in skew_thresholds.items()
+ },
+ attribution_score_skew_thresholds={
+ key: gca_model_monitoring.ThresholdConfig(value=val)
+ for key, val in attribute_skew_thresholds.items()
+ },
+ )
+ else:
+ expected_gapic_proto = gca_model_monitoring.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig(
+ default_skew_threshold=gca_model_monitoring.ThresholdConfig(
+ value=skew_thresholds
+ )
+ if skew_thresholds is not None
+ else None,
+ attribution_score_skew_thresholds={
+ key: gca_model_monitoring.ThresholdConfig(value=val)
+ for key, val in attribute_skew_thresholds.items()
+ },
+ )
+ assert skew_config.as_proto() == expected_gapic_proto
+
+ @pytest.mark.parametrize(
+ "data_source",
+ [_TEST_BQ_DATASOURCE, _TEST_GCS_DATASOURCE, _TEST_OTHER_DATASOURCE],
+ )
+ @pytest.mark.parametrize("data_format", _TEST_VALID_DATA_FORMATS)
+ @pytest.mark.parametrize("skew_thresholds", _TEST_SKEW_THRESHOLDS)
+ @pytest.mark.parametrize(
+ "attribute_skew_thresholds", _TEST_ATTRIBUTE_SKEW_THRESHOLDS
+ )
+ def test_valid_configs(
+ self, data_source, data_format, skew_thresholds, attribute_skew_thresholds
+ ):
+ """Test config creation validity."""
+ random_sample_config = model_monitoring.RandomSampleConfig(
+ sample_rate=_TEST_SAMPLING_RATE
+ )
+
+ schedule_config = model_monitoring.ScheduleConfig(
+ monitor_interval=_TEST_MONITORING_INTERVAL
+ )
+
+ email_alert_config = model_monitoring.EmailAlertConfig(
+ user_emails=[_TEST_EMAIL1, _TEST_EMAIL2]
+ )
+
+ alert_config = model_monitoring.AlertConfig(
+ user_emails=[_TEST_EMAIL1, _TEST_EMAIL2],
+ enable_logging=True,
+ notification_channels=[_TEST_NOTIFICATION_CHANNEL],
+ )
+
+ prediction_drift_config = model_monitoring.DriftDetectionConfig(
+ drift_thresholds=_TEST_DRIFT_TRESHOLD
+ )
+
+ skew_config = model_monitoring.SkewDetectionConfig(
+ data_source=data_source,
+ skew_thresholds=skew_thresholds,
+ target_field=_TEST_TARGET_FIELD,
+ attribute_skew_thresholds=attribute_skew_thresholds,
+ data_format=data_format,
+ )
+ expected_training_dataset = (
+ gca_model_monitoring.ModelMonitoringObjectiveConfig.TrainingDataset(
+ bigquery_source=gca_io.BigQuerySource(input_uri=_TEST_BQ_DATASOURCE),
+ target_field=_TEST_TARGET_FIELD,
+ )
+ )
+
+ xai_config = model_monitoring.ExplanationConfig()
+
+ objective_config = model_monitoring.ObjectiveConfig(
+ skew_detection_config=skew_config,
+ drift_detection_config=prediction_drift_config,
+ explanation_config=xai_config,
+ )
+
+ if data_source == _TEST_BQ_DATASOURCE:
+ assert (
+ objective_config.as_proto().training_dataset
+ == expected_training_dataset
+ )
+ assert (
+ objective_config.as_proto().training_prediction_skew_detection_config
+ == skew_config.as_proto()
+ )
+ assert (
+ objective_config.as_proto().prediction_drift_detection_config
+ == prediction_drift_config.as_proto()
+ )
+ assert objective_config.as_proto().explanation_config == xai_config.as_proto()
+ assert (
+ _TEST_EMAIL1 in email_alert_config.as_proto().email_alert_config.user_emails
+ )
+ assert (
+ _TEST_EMAIL2 in email_alert_config.as_proto().email_alert_config.user_emails
+ )
+ assert _TEST_EMAIL1 in alert_config.as_proto().email_alert_config.user_emails
+ assert _TEST_EMAIL2 in alert_config.as_proto().email_alert_config.user_emails
+ assert (
+ _TEST_NOTIFICATION_CHANNEL in alert_config.as_proto().notification_channels
+ )
+ assert (
+ random_sample_config.as_proto().random_sample_config.sample_rate
+ == _TEST_SAMPLING_RATE
+ )
+ assert (
+ schedule_config.as_proto().monitor_interval.seconds
+ == _TEST_MONITORING_INTERVAL * 3600
+ )
+
+ @pytest.mark.parametrize("data_source", [_TEST_GCS_DATASOURCE])
+ @pytest.mark.parametrize("data_format", ["other"])
+ @pytest.mark.parametrize("skew_thresholds", _TEST_SKEW_THRESHOLDS)
+ @pytest.mark.parametrize(
+ "attribute_skew_thresholds", _TEST_ATTRIBUTE_SKEW_THRESHOLDS
+ )
+ def test_invalid_data_format(
+ self, data_source, data_format, skew_thresholds, attribute_skew_thresholds
+ ):
+ if data_format == "other":
+ with pytest.raises(ValueError) as e:
+ model_monitoring.ObjectiveConfig(
+ skew_detection_config=model_monitoring.SkewDetectionConfig(
+ data_source=data_source,
+ skew_thresholds=skew_thresholds,
+ target_field=_TEST_TARGET_FIELD,
+ attribute_skew_thresholds=attribute_skew_thresholds,
+ data_format=data_format,
+ )
+ ).as_proto()
+ assert (
+ "Unsupported value in skew detection config. `data_format` must be one of tf-record, csv, or jsonl"
+ in str(e.value)
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_models.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..edde450308a56e15c1315f2c05fba039ff097ccb
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_models.py
@@ -0,0 +1,4327 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import importlib
+from concurrent import futures
+import json
+import pathlib
+import pytest
+import requests
+from datetime import datetime
+from unittest import mock
+from unittest.mock import patch
+from urllib import request
+
+from google.api_core import operation as ga_operation
+from google.api_core import exceptions as api_exceptions
+from google.auth import credentials as auth_credentials
+from google.cloud import storage
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base, explain
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import models
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform.utils import gcs_utils
+from google.cloud.aiplatform.metadata import constants as metadata_constants
+from google.cloud.aiplatform import constants
+
+from google.cloud.aiplatform.preview import models as preview_models
+
+from google.cloud.aiplatform.compat.services import (
+ deployment_resource_pool_service_client,
+ deployment_resource_pool_service_client_v1beta1,
+ endpoint_service_client,
+ endpoint_service_client_v1beta1,
+ model_service_client,
+ job_service_client,
+ pipeline_service_client,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ batch_prediction_job as gca_batch_prediction_job,
+ deployment_resource_pool as gca_deployment_resource_pool,
+ deployment_resource_pool_v1beta1 as gca_deployment_resource_pool_v1beta1,
+ encryption_spec as gca_encryption_spec,
+ endpoint as gca_endpoint,
+ endpoint_service as gca_endpoint_service,
+ endpoint_v1beta1 as gca_endpoint_v1beta1,
+ endpoint_service_v1beta1 as gca_endpoint_service_v1beta1,
+ env_var as gca_env_var,
+ explanation as gca_explanation,
+ io as gca_io,
+ job_state as gca_job_state,
+ machine_resources as gca_machine_resources,
+ machine_resources_v1beta1 as gca_machine_resources_v1beta1,
+ reservation_affinity_v1 as gca_reservation_affinity_v1,
+ manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters_compat,
+ model as gca_model,
+ model_evaluation as gca_model_evaluation,
+ model_service as gca_model_service,
+ pipeline_job as gca_pipeline_job,
+ pipeline_state as gca_pipeline_state,
+ context as gca_context,
+)
+
+from google.cloud.aiplatform.prediction import LocalModel
+from google.cloud.aiplatform_v1 import Execution as GapicExecution
+from google.cloud.aiplatform.model_evaluation import model_evaluation_job
+
+from google.protobuf import (
+ field_mask_pb2,
+ struct_pb2,
+ timestamp_pb2,
+ duration_pb2,
+)
+
+import constants as test_constants
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_PROJECT_2 = "test-project-2"
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_LOCATION_2 = "europe-west4"
+_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT
+_TEST_MODEL_NAME = test_constants.ModelConstants._TEST_MODEL_NAME
+_TEST_MODEL_NAME_ALT = "456"
+_TEST_MODEL_ID = "my-model"
+_TEST_MODEL_PARENT = test_constants.ModelConstants._TEST_MODEL_PARENT
+_TEST_MODEL_PARENT_ALT = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_MODEL_NAME_ALT}"
+)
+_TEST_ARTIFACT_URI = "gs://test/artifact/uri"
+_TEST_SERVING_CONTAINER_IMAGE = (
+ test_constants.ModelConstants._TEST_SERVING_CONTAINER_IMAGE
+)
+_TEST_SERVING_CONTAINER_PREDICTION_ROUTE = "predict"
+_TEST_SERVING_CONTAINER_HEALTH_ROUTE = "metadata"
+_TEST_DESCRIPTION = "test description"
+_TEST_SERVING_CONTAINER_COMMAND = ["python3", "run_my_model.py"]
+_TEST_SERVING_CONTAINER_ARGS = ["--test", "arg"]
+_TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES = {
+ "learning_rate": 0.01,
+ "loss_fn": "mse",
+}
+_TEST_SERVING_CONTAINER_PORTS = [8888, 10000]
+_TEST_SERVING_CONTAINER_GRPC_PORTS = [7777, 7000]
+_TEST_SERVING_CONTAINER_DEPLOYMENT_TIMEOUT = 100
+_TEST_SERVING_CONTAINER_SHARED_MEMORY_SIZE_MB = 1000
+_TEST_SERVING_CONTAINER_STARTUP_PROBE_EXEC = ["a", "b"]
+_TEST_SERVING_CONTAINER_STARTUP_PROBE_PERIOD_SECONDS = 5
+_TEST_SERVING_CONTAINER_STARTUP_PROBE_TIMEOUT_SECONDS = 100
+_TEST_SERVING_CONTAINER_HEALTH_PROBE_EXEC = ["c", "d"]
+_TEST_SERVING_CONTAINER_HEALTH_PROBE_PERIOD_SECONDS = 20
+_TEST_SERVING_CONTAINER_HEALTH_PROBE_TIMEOUT_SECONDS = 200
+_TEST_ID = "1028944691210842416"
+_TEST_LABEL = test_constants.ProjectConstants._TEST_LABELS
+_TEST_APPENDED_USER_AGENT = ["fake_user_agent", "another_fake_user_agent"]
+
+_TEST_MACHINE_TYPE = "n1-standard-4"
+_TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_P100"
+_TEST_ACCELERATOR_COUNT = 2
+_TEST_STARTING_REPLICA_COUNT = 2
+_TEST_MAX_REPLICA_COUNT = 12
+
+_TEST_SPOT = True
+_TEST_RESERVATION_AFFINITY_TYPE = "SPECIFIC_RESERVATION"
+_TEST_RESERVATION_AFFINITY_KEY = "compute.googleapis.com/reservation-name"
+_TEST_RESERVATION_AFFINITY_VALUES = [
+ "projects/fake-project-id/zones/fake-zone/reservations/fake-reservation-name"
+]
+
+_TEST_TPU_MACHINE_TYPE = "ct5lp-hightpu-4t"
+_TEST_TPU_TOPOLOGY = "2x2"
+
+_TEST_BATCH_SIZE = 16
+
+_TEST_PIPELINE_RESOURCE_NAME = (
+ "projects/my-project/locations/us-central1/trainingPipeline/12345"
+)
+
+_TEST_BATCH_PREDICTION_GCS_SOURCE = "gs://example-bucket/folder/instance.jsonl"
+_TEST_BATCH_PREDICTION_GCS_SOURCE_LIST = [
+ "gs://example-bucket/folder/instance1.jsonl",
+ "gs://example-bucket/folder/instance2.jsonl",
+]
+_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX = "gs://example-bucket/folder/output"
+_TEST_BATCH_PREDICTION_BQ_PREFIX = "ucaip-sample-tests"
+_TEST_BATCH_PREDICTION_BQ_DEST_PREFIX_WITH_PROTOCOL = (
+ f"bq://{_TEST_BATCH_PREDICTION_BQ_PREFIX}"
+)
+_TEST_BATCH_PREDICTION_DISPLAY_NAME = "test-batch-prediction-job"
+_TEST_BATCH_PREDICTION_JOB_NAME = (
+ job_service_client.JobServiceClient.batch_prediction_job_path(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, batch_prediction_job=_TEST_ID
+ )
+)
+
+_TEST_INSTANCE_SCHEMA_URI = "gs://test/schema/instance.yaml"
+_TEST_PARAMETERS_SCHEMA_URI = "gs://test/schema/parameters.yaml"
+_TEST_PREDICTION_SCHEMA_URI = "gs://test/schema/predictions.yaml"
+
+_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials())
+_TEST_SERVICE_ACCOUNT = "vinnys@my-project.iam.gserviceaccount.com"
+_TEST_MODEL_GARDEN_SOURCE_MODEL_NAME = "publishers/meta/models/llama3_1"
+
+
+_TEST_EXPLANATION_METADATA = explain.ExplanationMetadata(
+ inputs={
+ "features": {
+ "input_tensor_name": "dense_input",
+ "encoding": "BAG_OF_FEATURES",
+ "modality": "numeric",
+ "index_feature_mapping": ["abc", "def", "ghj"],
+ }
+ },
+ outputs={"medv": {"output_tensor_name": "dense_2"}},
+)
+_TEST_EXPLANATION_PARAMETERS = (
+ test_constants.ModelConstants._TEST_EXPLANATION_PARAMETERS
+)
+_TEST_EXPLANATION_METADATA_EXAMPLES = explain.ExplanationMetadata(
+ outputs={"embedding": {"output_tensor_name": "embedding"}},
+ inputs={
+ "my_input": {
+ "input_tensor_name": "bytes_inputs",
+ "encoding": "IDENTITY",
+ "modality": "image",
+ },
+ "id": {"input_tensor_name": "id", "encoding": "IDENTITY"},
+ },
+)
+_TEST_EXPLANATION_PARAMETERS_EXAMPLES_PRESETS = explain.ExplanationParameters(
+ {
+ "examples": {
+ "example_gcs_source": {
+ "gcs_source": {
+ "uris": ["gs://example-bucket/folder/instance1.jsonl"],
+ },
+ },
+ "neighbor_count": 10,
+ "presets": {"query": "FAST", "modality": "TEXT"},
+ }
+ }
+)
+_TEST_EXPLANATION_PARAMETERS_EXAMPLES_FULL_CONFIG = explain.ExplanationParameters(
+ {
+ "examples": {
+ "example_gcs_source": {
+ "gcs_source": {
+ "uris": ["gs://example-bucket/folder/instance1.jsonl"],
+ },
+ },
+ "neighbor_count": 10,
+ "nearest_neighbor_search_config": [
+ {
+ "contentsDeltaUri": "",
+ "config": {
+ "dimensions": 50,
+ "approximateNeighborsCount": 10,
+ "distanceMeasureType": "SQUARED_L2_DISTANCE",
+ "featureNormType": "NONE",
+ "algorithmConfig": {
+ "treeAhConfig": {
+ "leafNodeEmbeddingCount": 1000,
+ "leafNodesToSearchPercent": 100,
+ }
+ },
+ },
+ }
+ ],
+ }
+ }
+)
+
+# CMEK encryption
+_TEST_ENCRYPTION_KEY_NAME = "key_1234"
+_TEST_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_KEY_NAME
+)
+
+_TEST_MODEL_RESOURCE_NAME = test_constants.ModelConstants._TEST_MODEL_RESOURCE_NAME
+_TEST_MODEL_RESOURCE_NAME_CUSTOM_PROJECT = (
+ model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID
+ )
+)
+_TEST_MODEL_RESOURCE_NAME_CUSTOM_LOCATION = (
+ model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION_2, _TEST_ID
+ )
+)
+
+_TEST_OUTPUT_DIR = "gs://my-output-bucket"
+_TEST_CONTAINER_REGISTRY_DESTINATION = (
+ "us-central1-docker.pkg.dev/projectId/repoName/imageName"
+)
+
+_TEST_EXPORT_FORMAT_ID_IMAGE = "custom-trained"
+_TEST_EXPORT_FORMAT_ID_ARTIFACT = "tf-saved-model"
+
+_TEST_SUPPORTED_EXPORT_FORMATS_IMAGE = [
+ gca_model.Model.ExportFormat(
+ id=_TEST_EXPORT_FORMAT_ID_IMAGE,
+ exportable_contents=[gca_model.Model.ExportFormat.ExportableContent.IMAGE],
+ )
+]
+
+_TEST_SUPPORTED_EXPORT_FORMATS_ARTIFACT = [
+ gca_model.Model.ExportFormat(
+ id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ exportable_contents=[gca_model.Model.ExportFormat.ExportableContent.ARTIFACT],
+ )
+]
+
+_TEST_SUPPORTED_EXPORT_FORMATS_BOTH = [
+ gca_model.Model.ExportFormat(
+ id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ exportable_contents=[
+ gca_model.Model.ExportFormat.ExportableContent.ARTIFACT,
+ gca_model.Model.ExportFormat.ExportableContent.IMAGE,
+ ],
+ )
+]
+
+_TEST_SUPPORTED_EXPORT_FORMATS_UNSUPPORTED = []
+
+# Model Evaluation
+_TEST_MODEL_EVAL_RESOURCE_NAME = f"{_TEST_MODEL_RESOURCE_NAME}/evaluations/{_TEST_ID}"
+_TEST_MODEL_EVAL_METRICS = test_constants.ModelConstants._TEST_MODEL_EVAL_METRICS
+
+_TEST_MODEL_EVAL_LIST = [
+ gca_model_evaluation.ModelEvaluation(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ ),
+ gca_model_evaluation.ModelEvaluation(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ ),
+ gca_model_evaluation.ModelEvaluation(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ ),
+]
+
+# model.evaluate
+_TEST_PIPELINE_JOB_ID = "sample-test-pipeline-202111111"
+_TEST_PIPELINE_JOB_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/pipelineJobs/{_TEST_PIPELINE_JOB_ID}"
+_TEST_PIPELINE_CREATE_TIME = datetime.now()
+_TEST_NETWORK = f"projects/{_TEST_PROJECT}/global/networks/{_TEST_PIPELINE_JOB_ID}"
+_TEST_MODEL_EVAL_CLASS_LABELS = ["dog", "cat", "rabbit"]
+_TEST_BIGQUERY_EVAL_INPUT_URI = "bq://my-project.my-dataset.my-table"
+_TEST_BIGQUERY_EVAL_DESTINATION_URI = "bq://my-project.my-dataset"
+_TEST_EVAL_RESOURCE_DISPLAY_NAME = "my-eval-resource-display-name"
+_TEST_GCS_BUCKET_NAME = "my-bucket"
+
+_TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES = {
+ "batch_predict_gcs_source_uris": ["gs://my-bucket/my-prediction-data.csv"],
+ "dataflow_service_account": _TEST_SERVICE_ACCOUNT,
+ "batch_predict_instances_format": "csv",
+ "model_name": _TEST_MODEL_RESOURCE_NAME,
+ "evaluation_display_name": _TEST_EVAL_RESOURCE_DISPLAY_NAME,
+ "prediction_type": "classification",
+ "project": _TEST_PROJECT,
+ "location": _TEST_LOCATION,
+ "batch_predict_gcs_destination_output_uri": _TEST_GCS_BUCKET_NAME,
+ "target_field_name": "predict_class",
+}
+
+
+_TEST_MODEL_EVAL_PIPELINE_SPEC_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "evaluation-default-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_gcs_source_uris": {"type": "STRING"},
+ "dataflow_service_account": {"type": "STRING"},
+ "batch_predict_instances_format": {"type": "STRING"},
+ "batch_predict_machine_type": {"type": "STRING"},
+ "location": {"type": "STRING"},
+ "model_name": {"type": "STRING"},
+ "prediction_type": {"type": "STRING"},
+ "project": {"type": "STRING"},
+ "batch_predict_gcs_destination_output_uri": {"type": "STRING"},
+ "target_field_name": {"type": "STRING"},
+ }
+ },
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ "components": {},
+ }
+)
+
+_TEST_MODEL_EVAL_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {"parameters": _TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES},
+ "pipelineInfo": {"name": "evaluation-default-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_gcs_source_uris": {"type": "STRING"},
+ "dataflow_service_account": {"type": "STRING"},
+ "evaluation_class_labels": {"type": "STRING"},
+ "batch_predict_instances_format": {"type": "STRING"},
+ "batch_predict_machine_type": {"type": "STRING"},
+ "location": {"type": "STRING"},
+ "model_name": {"type": "STRING"},
+ "prediction_type": {"type": "STRING"},
+ "project": {"type": "STRING"},
+ "batch_predict_gcs_destination_output_uri": {"type": "STRING"},
+ "target_field_name": {"type": "STRING"},
+ }
+ },
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ "components": {},
+ }
+)
+
+_TEST_MODEL_EVAL_PIPELINE_JOB_WITH_BQ_INPUT = json.dumps(
+ {
+ "runtimeConfig": {"parameters": _TEST_MODEL_EVAL_PIPELINE_PARAMETER_VALUES},
+ "pipelineInfo": {"name": "evaluation-default-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "batch_predict_gcs_source_uris": {"type": "STRING"},
+ "dataflow_service_account": {"type": "STRING"},
+ "evaluation_class_labels": {"type": "STRING"},
+ "batch_predict_instances_format": {"type": "STRING"},
+ "batch_predict_predictions_format": {"type": "STRING"},
+ "batch_predict_bigquery_source_uri": {"type": "STRING"},
+ "batch_predict_bigquery_destination_output_uri": {"type": "STRING"},
+ "batch_predict_machine_type": {"type": "STRING"},
+ "location": {"type": "STRING"},
+ "model_name": {"type": "STRING"},
+ "prediction_type": {"type": "STRING"},
+ "project": {"type": "STRING"},
+ "batch_predict_gcs_destination_output_uri": {"type": "STRING"},
+ "target_field_name": {"type": "STRING"},
+ }
+ },
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "kfp-1.8.12",
+ "components": {},
+ }
+)
+
+_TEST_LOCAL_MODEL = LocalModel(
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+)
+
+_TEST_VERSION_ID = test_constants.ModelConstants._TEST_VERSION_ID
+_TEST_VERSION_ALIAS_1 = test_constants.ModelConstants._TEST_VERSION_ALIAS_1
+_TEST_VERSION_ALIAS_2 = test_constants.ModelConstants._TEST_VERSION_ALIAS_2
+_TEST_MODEL_VERSION_DESCRIPTION_1 = "My version 1 description"
+_TEST_MODEL_VERSION_DESCRIPTION_2 = (
+ test_constants.ModelConstants._TEST_MODEL_VERSION_DESCRIPTION_2
+)
+_TEST_MODEL_VERSION_DESCRIPTION_3 = "My version 3 description"
+
+_TEST_MODEL_VERSIONS_LIST = [
+ gca_model.Model(
+ version_id="1",
+ create_time=timestamp_pb2.Timestamp(),
+ update_time=timestamp_pb2.Timestamp(),
+ display_name=_TEST_MODEL_NAME,
+ name=f"{_TEST_MODEL_PARENT}@1",
+ version_aliases=["default"],
+ version_description=_TEST_MODEL_VERSION_DESCRIPTION_1,
+ ),
+ gca_model.Model(
+ version_id="2",
+ create_time=timestamp_pb2.Timestamp(),
+ update_time=timestamp_pb2.Timestamp(),
+ display_name=_TEST_MODEL_NAME,
+ name=f"{_TEST_MODEL_PARENT}@2",
+ version_aliases=[_TEST_VERSION_ALIAS_1, _TEST_VERSION_ALIAS_2],
+ version_description=_TEST_MODEL_VERSION_DESCRIPTION_2,
+ ),
+ gca_model.Model(
+ version_id="3",
+ create_time=timestamp_pb2.Timestamp(),
+ update_time=timestamp_pb2.Timestamp(),
+ display_name=_TEST_MODEL_NAME,
+ name=f"{_TEST_MODEL_PARENT}@3",
+ version_aliases=[],
+ version_description=_TEST_MODEL_VERSION_DESCRIPTION_3,
+ labels=_TEST_LABEL,
+ ),
+]
+_TEST_MODEL_VERSIONS_WITH_FILTER_LIST = [_TEST_MODEL_VERSIONS_LIST[2]]
+
+_TEST_MODELS_LIST = _TEST_MODEL_VERSIONS_LIST + [
+ gca_model.Model(
+ version_id="1",
+ create_time=timestamp_pb2.Timestamp(),
+ update_time=timestamp_pb2.Timestamp(),
+ display_name=_TEST_MODEL_NAME_ALT,
+ name=_TEST_MODEL_PARENT_ALT,
+ version_aliases=["default"],
+ version_description=_TEST_MODEL_VERSION_DESCRIPTION_1,
+ ),
+]
+
+_TEST_MODEL_OBJ_WITH_VERSION = (
+ test_constants.ModelConstants._TEST_MODEL_OBJ_WITH_VERSION
+)
+
+_TEST_NETWORK = f"projects/{_TEST_PROJECT}/global/networks/{_TEST_ID}"
+
+_TEST_RAW_PREDICT_URL = f"https://{_TEST_LOCATION}-{constants.base.API_BASE_PATH}/v1/projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}:rawPredict"
+_TEST_RAW_PREDICT_DATA = b""
+_TEST_RAW_PREDICT_HEADER = {"Content-Type": "application/json"}
+
+_TEST_DRP_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/deploymentResourcePools/{_TEST_ID}"
+
+_TEST_METRIC_NAME_CPU_UTILIZATION = (
+ "aiplatform.googleapis.com/prediction/online/cpu/utilization"
+)
+_TEST_METRIC_NAME_GPU_UTILIZATION = (
+ "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle"
+)
+
+_TEST_LABELS = {"label1": "value1", "label2": "value2"}
+
+
+@pytest.fixture
+def mock_model():
+ model = mock.MagicMock(models.Model)
+ model.name = _TEST_ID
+ model._latest_future = None
+ model._exception = None
+ model._gca_resource = gca_model.Model(
+ display_name=_TEST_MODEL_NAME, description=_TEST_DESCRIPTION, labels=_TEST_LABEL
+ )
+ yield model
+
+
+@pytest.fixture
+def update_model_mock(mock_model):
+ with patch.object(model_service_client.ModelServiceClient, "update_model") as mock:
+ mock.return_value = mock_model
+ yield mock
+
+
+@pytest.fixture
+def authorized_session_mock():
+ with patch(
+ "google.auth.transport.requests.AuthorizedSession"
+ ) as MockAuthorizedSession:
+ mock_auth_session = MockAuthorizedSession(_TEST_CREDENTIALS)
+ yield mock_auth_session
+
+
+@pytest.fixture
+def raw_predict_mock(authorized_session_mock):
+ with patch.object(authorized_session_mock, "post") as mock_post:
+ mock_post.return_value = requests.models.Response()
+ yield mock_post
+
+
+@pytest.fixture
+def get_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "get_endpoint"
+ ) as get_endpoint_mock:
+ test_endpoint_resource_name = (
+ endpoint_service_client.EndpointServiceClient.endpoint_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_ID
+ )
+ )
+ get_endpoint_mock.return_value = gca_endpoint.Endpoint(
+ display_name=_TEST_MODEL_NAME,
+ name=test_endpoint_resource_name,
+ )
+ yield get_endpoint_mock
+
+
+@pytest.fixture
+def create_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "create_endpoint"
+ ) as create_endpoint_mock:
+ create_endpoint_lro_mock = mock.Mock(ga_operation.Operation)
+ create_endpoint_lro_mock.result.return_value = gca_endpoint.Endpoint(
+ name=test_constants.EndpointConstants._TEST_ENDPOINT_NAME,
+ display_name=test_constants.EndpointConstants._TEST_DISPLAY_NAME,
+ )
+ create_endpoint_mock.return_value = create_endpoint_lro_mock
+ yield create_endpoint_mock
+
+
+@pytest.fixture
+def get_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_custom_location_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME_CUSTOM_LOCATION,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_custom_project_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME_CUSTOM_PROJECT,
+ artifact_uri=_TEST_ARTIFACT_URI,
+ description=_TEST_DESCRIPTION,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_training_job():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME_CUSTOM_PROJECT,
+ training_pipeline=_TEST_PIPELINE_RESOURCE_NAME,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_supported_export_formats_image():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME,
+ supported_export_formats=_TEST_SUPPORTED_EXPORT_FORMATS_IMAGE,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_supported_export_formats_artifact():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME,
+ supported_export_formats=_TEST_SUPPORTED_EXPORT_FORMATS_ARTIFACT,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_supported_export_formats_artifact_and_version():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME,
+ supported_export_formats=_TEST_SUPPORTED_EXPORT_FORMATS_ARTIFACT,
+ version_id=_TEST_VERSION_ID,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_both_supported_export_formats():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME,
+ supported_export_formats=_TEST_SUPPORTED_EXPORT_FORMATS_BOTH,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_unsupported_export_formats():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ name=_TEST_MODEL_RESOURCE_NAME,
+ supported_export_formats=_TEST_SUPPORTED_EXPORT_FORMATS_UNSUPPORTED,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_model_with_version():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = _TEST_MODEL_OBJ_WITH_VERSION
+ yield get_model_mock
+
+
+@pytest.fixture
+def upload_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "upload_model"
+ ) as upload_model_mock:
+ mock_lro = mock.Mock(ga_operation.Operation)
+ mock_lro.result.return_value = gca_model_service.UploadModelResponse(
+ model=_TEST_MODEL_RESOURCE_NAME
+ )
+ upload_model_mock.return_value = mock_lro
+ yield upload_model_mock
+
+
+@pytest.fixture
+def upload_model_with_version_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "upload_model"
+ ) as upload_model_mock:
+ mock_lro = mock.Mock(ga_operation.Operation)
+ mock_lro.result.return_value = gca_model_service.UploadModelResponse(
+ model=_TEST_MODEL_RESOURCE_NAME, model_version_id=_TEST_VERSION_ID
+ )
+ upload_model_mock.return_value = mock_lro
+ yield upload_model_mock
+
+
+@pytest.fixture
+def upload_model_with_custom_project_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "upload_model"
+ ) as upload_model_mock:
+ mock_lro = mock.Mock(ga_operation.Operation)
+ mock_lro.result.return_value = gca_model_service.UploadModelResponse(
+ model=_TEST_MODEL_RESOURCE_NAME_CUSTOM_PROJECT
+ )
+ upload_model_mock.return_value = mock_lro
+ yield upload_model_mock
+
+
+@pytest.fixture
+def upload_model_with_custom_location_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "upload_model"
+ ) as upload_model_mock:
+ mock_lro = mock.Mock(ga_operation.Operation)
+ mock_lro.result.return_value = gca_model_service.UploadModelResponse(
+ model=_TEST_MODEL_RESOURCE_NAME_CUSTOM_LOCATION
+ )
+ upload_model_mock.return_value = mock_lro
+ yield upload_model_mock
+
+
+@pytest.fixture
+def export_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "export_model"
+ ) as export_model_mock:
+ export_model_lro_mock = mock.Mock(ga_operation.Operation)
+ export_model_lro_mock.metadata = gca_model_service.ExportModelOperationMetadata(
+ output_info=gca_model_service.ExportModelOperationMetadata.OutputInfo(
+ artifact_output_uri=_TEST_OUTPUT_DIR
+ )
+ )
+ export_model_lro_mock.result.return_value = None
+ export_model_mock.return_value = export_model_lro_mock
+ yield export_model_mock
+
+
+@pytest.fixture
+def delete_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "delete_model"
+ ) as delete_model_mock:
+ delete_model_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_model_lro_mock.result.return_value = (
+ gca_model_service.DeleteModelRequest()
+ )
+ delete_model_mock.return_value = delete_model_lro_mock
+ yield delete_model_mock
+
+
+@pytest.fixture
+def copy_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "copy_model"
+ ) as copy_model_mock:
+ mock_lro = mock.Mock(ga_operation.Operation)
+ mock_lro.result.return_value = gca_model_service.CopyModelResponse(
+ model=_TEST_MODEL_RESOURCE_NAME_CUSTOM_LOCATION
+ )
+ copy_model_mock.return_value = mock_lro
+ yield copy_model_mock
+
+
+@pytest.fixture
+def deploy_model_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "deploy_model"
+ ) as deploy_model_mock:
+ deployed_model = gca_endpoint.DeployedModel(
+ model=_TEST_MODEL_RESOURCE_NAME,
+ display_name=_TEST_MODEL_NAME,
+ )
+ deploy_model_lro_mock = mock.Mock(ga_operation.Operation)
+ deploy_model_lro_mock.result.return_value = (
+ gca_endpoint_service.DeployModelResponse(
+ deployed_model=deployed_model,
+ )
+ )
+ deploy_model_mock.return_value = deploy_model_lro_mock
+ yield deploy_model_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_batch_prediction_job"
+ ) as get_batch_prediction_job_mock:
+ batch_prediction_mock = mock.Mock(
+ spec=gca_batch_prediction_job.BatchPredictionJob
+ )
+ batch_prediction_mock.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
+ batch_prediction_mock.name = _TEST_BATCH_PREDICTION_JOB_NAME
+ get_batch_prediction_job_mock.return_value = batch_prediction_mock
+ yield get_batch_prediction_job_mock
+
+
+@pytest.fixture
+def create_batch_prediction_job_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_batch_prediction_job"
+ ) as create_batch_prediction_job_mock:
+ batch_prediction_job_mock = mock.Mock(
+ spec=gca_batch_prediction_job.BatchPredictionJob
+ )
+ batch_prediction_job_mock.name = _TEST_BATCH_PREDICTION_JOB_NAME
+ create_batch_prediction_job_mock.return_value = batch_prediction_job_mock
+ yield create_batch_prediction_job_mock
+
+
+@pytest.fixture
+def get_training_job_non_existent_mock():
+ with patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as get_training_job_non_existent_mock:
+ get_training_job_non_existent_mock.side_effect = api_exceptions.NotFound("404")
+
+ yield get_training_job_non_existent_mock
+
+
+@pytest.fixture
+def create_client_mock():
+ with mock.patch.object(
+ initializer.global_config, "create_client"
+ ) as create_client_mock:
+ api_client_mock = mock.Mock(spec=model_service_client.ModelServiceClient)
+ api_client_mock.get_model.return_value = _TEST_MODEL_OBJ_WITH_VERSION
+ create_client_mock.return_value = api_client_mock
+ yield create_client_mock
+
+
+@pytest.fixture
+def mock_storage_blob_upload_from_filename():
+ with patch(
+ "google.cloud.storage.Blob.upload_from_filename"
+ ) as mock_blob_upload_from_filename, patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_upload_from_filename
+
+
+# ModelEvaluation mocks
+@pytest.fixture
+def mock_model_eval_get():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model_evaluation"
+ ) as mock_get_model_eval:
+ mock_get_model_eval.return_value = gca_model_evaluation.ModelEvaluation(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME,
+ metrics=_TEST_MODEL_EVAL_METRICS,
+ metadata={"pipeline_job_resource_name": _TEST_PIPELINE_JOB_NAME},
+ )
+ yield mock_get_model_eval
+
+
+@pytest.fixture
+def mock_get_model_evaluation():
+ with mock.patch.object(
+ aiplatform.model_evaluation._ModelEvaluationJob, "get_model_evaluation"
+ ) as mock_get_model_eval:
+ mock_get_model_eval.return_value = aiplatform.ModelEvaluation(
+ evaluation_name=_TEST_MODEL_EVAL_RESOURCE_NAME
+ )
+ yield mock_get_model_eval
+
+
+@pytest.fixture
+def list_model_evaluations_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "list_model_evaluations"
+ ) as list_model_evaluations_mock:
+ list_model_evaluations_mock.return_value = _TEST_MODEL_EVAL_LIST
+ yield list_model_evaluations_mock
+
+
+@pytest.fixture
+def list_model_versions_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "list_model_versions"
+ ) as list_model_versions_mock:
+ list_model_versions_mock.return_value = _TEST_MODEL_VERSIONS_LIST
+ yield list_model_versions_mock
+
+
+@pytest.fixture
+def list_model_versions_with_filter_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "list_model_versions"
+ ) as list_model_versions_mock:
+ list_model_versions_mock.return_value = _TEST_MODEL_VERSIONS_WITH_FILTER_LIST
+ yield list_model_versions_mock
+
+
+@pytest.fixture
+def list_models_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "list_models"
+ ) as list_models_mock:
+ list_models_mock.return_value = _TEST_MODELS_LIST
+ yield list_models_mock
+
+
+@pytest.fixture
+def delete_model_version_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "delete_model_version"
+ ) as delete_model_version_mock:
+ mock_lro = mock.Mock(ga_operation.Operation)
+ delete_model_version_mock.return_value = mock_lro
+ yield delete_model_version_mock
+
+
+@pytest.fixture
+def merge_version_aliases_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "merge_version_aliases"
+ ) as merge_version_aliases_mock:
+ merge_version_aliases_mock.return_value = _TEST_MODEL_OBJ_WITH_VERSION
+ yield merge_version_aliases_mock
+
+
+# model.evaluate fixtures
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+ yield mock_create_pipeline_job
+
+
+_TEST_COMPONENT_IDENTIFIER = "fpc-model-evaluation"
+_TEST_BATCH_PREDICTION_JOB_ID = "614161631630327111"
+
+_TEST_BATCH_PREDICTION_RESOURCE_NAME = (
+ job_service_client.JobServiceClient.batch_prediction_job_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_BATCH_PREDICTION_JOB_ID
+ )
+)
+
+_EVAL_GCP_RESOURCES_STR = (
+ '{\n "resources": [\n {\n "resourceType": "ModelEvaluation",\n "resourceUri": "https://us-central1-aiplatform.googleapis.com/v1/'
+ + _TEST_MODEL_EVAL_RESOURCE_NAME
+ + '"\n }\n ]\n}'
+)
+
+_BP_JOB_GCP_RESOURCES_STR = (
+ '{\n "resources": [\n {\n "resourceType": "BatchPredictionJob",\n "resourceUri": "https://us-central1-aiplatform.googleapis.com/v1/'
+ + _TEST_BATCH_PREDICTION_RESOURCE_NAME
+ + '"\n }\n ]\n}'
+)
+
+_TEST_PIPELINE_JOB_DETAIL_EVAL = {
+ "output:gcp_resources": _EVAL_GCP_RESOURCES_STR,
+ "component_type": _TEST_COMPONENT_IDENTIFIER,
+}
+
+_TEST_PIPELINE_JOB_DETAIL_BP = {
+ "output:gcp_resources": _BP_JOB_GCP_RESOURCES_STR,
+}
+
+_TEST_EVAL_METRICS_ARTIFACT_NAME = (
+ "projects/123/locations/us-central1/metadataStores/default/artifacts/456"
+)
+_TEST_EVAL_METRICS_ARTIFACT_URI = "gs://test-bucket/eval_pipeline_root/123/evaluation-default-pipeline-20220615135923/model-evaluation-2_-789/evaluation_metrics"
+
+
+# executions: this is used in test_list_pipeline_based_service
+_TEST_EXECUTION_PARENT = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+
+_TEST_RUN = "run-1"
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_RUN}"
+_TEST_EXECUTION_NAME = f"{_TEST_EXECUTION_PARENT}/executions/{_TEST_EXECUTION_ID}"
+
+
+def make_pipeline_job(state):
+ return gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=state,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ job_detail=gca_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=gca_context.Context(
+ name=_TEST_PIPELINE_JOB_NAME,
+ ),
+ task_details=[
+ gca_pipeline_job.PipelineTaskDetail(
+ task_id=123,
+ task_name=_TEST_PIPELINE_JOB_ID,
+ state=gca_pipeline_job.PipelineTaskDetail.State.SUCCEEDED,
+ execution={
+ "metadata": struct_pb2.Struct(
+ fields={
+ key: struct_pb2.Value(string_value=value)
+ for key, value in _TEST_PIPELINE_JOB_DETAIL_EVAL.items()
+ },
+ ),
+ },
+ ),
+ gca_pipeline_job.PipelineTaskDetail(
+ task_id=123,
+ execution=GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=metadata_constants.SYSTEM_RUN,
+ schema_version=metadata_constants.SCHEMA_VERSIONS[
+ metadata_constants.SYSTEM_RUN
+ ],
+ metadata={"component_type": _TEST_COMPONENT_IDENTIFIER},
+ ),
+ ),
+ ],
+ ),
+ )
+
+
+@pytest.fixture
+def mock_pipeline_service_get():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_successfully_completed_eval_job():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_model_eval_job:
+ mock_get_model_eval_job.return_value = make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_get_model_eval_job
+
+
+@pytest.fixture
+def mock_pipeline_bucket_exists():
+ def mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist(
+ output_artifacts_gcs_dir=None,
+ service_account=None,
+ project=None,
+ location=None,
+ credentials=None,
+ ):
+ output_artifacts_gcs_dir = (
+ output_artifacts_gcs_dir
+ or gcs_utils.generate_gcs_directory_for_pipeline_artifacts(
+ project=project,
+ location=location,
+ )
+ )
+ return output_artifacts_gcs_dir
+
+ with mock.patch(
+ "google.cloud.aiplatform.utils.gcs_utils.create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist",
+ wraps=mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist,
+ ) as mock_context:
+ yield mock_context
+
+
+@pytest.fixture
+def mock_load_yaml_and_json(job_spec_json):
+ with patch.object(storage.Blob, "download_as_bytes") as mock_load_yaml_and_json:
+ mock_load_yaml_and_json.return_value = job_spec_json.encode()
+ yield mock_load_yaml_and_json
+
+
+@pytest.fixture
+def mock_request_urlopen(job_spec_json):
+ with mock.patch.object(request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = job_spec_json.encode()
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield mock_urlopen
+
+
+@pytest.fixture
+def preview_get_drp_mock():
+ with mock.patch.object(
+ deployment_resource_pool_service_client_v1beta1.DeploymentResourcePoolServiceClient,
+ "get_deployment_resource_pool",
+ ) as get_drp_mock:
+ machine_spec = gca_machine_resources_v1beta1.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+
+ autoscaling_metric_specs = [
+ gca_machine_resources_v1beta1.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_CPU_UTILIZATION, target=70
+ ),
+ gca_machine_resources_v1beta1.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_GPU_UTILIZATION, target=70
+ ),
+ ]
+
+ dedicated_resources = gca_machine_resources_v1beta1.DedicatedResources(
+ machine_spec=machine_spec,
+ min_replica_count=10,
+ max_replica_count=20,
+ autoscaling_metric_specs=autoscaling_metric_specs,
+ )
+
+ get_drp_mock.return_value = (
+ gca_deployment_resource_pool_v1beta1.DeploymentResourcePool(
+ name=_TEST_DRP_NAME,
+ dedicated_resources=dedicated_resources,
+ )
+ )
+ yield get_drp_mock
+
+
+@pytest.fixture
+def get_drp_mock():
+ with mock.patch.object(
+ deployment_resource_pool_service_client.DeploymentResourcePoolServiceClient,
+ "get_deployment_resource_pool",
+ ) as get_drp_mock:
+ machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+
+ autoscaling_metric_specs = [
+ gca_machine_resources.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_CPU_UTILIZATION, target=70
+ ),
+ gca_machine_resources.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_GPU_UTILIZATION, target=70
+ ),
+ ]
+
+ dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=machine_spec,
+ min_replica_count=10,
+ max_replica_count=20,
+ autoscaling_metric_specs=autoscaling_metric_specs,
+ )
+
+ get_drp_mock.return_value = gca_deployment_resource_pool.DeploymentResourcePool(
+ name=_TEST_DRP_NAME,
+ dedicated_resources=dedicated_resources,
+ )
+ yield get_drp_mock
+
+
+@pytest.fixture
+def preview_deploy_model_mock():
+ with mock.patch.object(
+ endpoint_service_client_v1beta1.EndpointServiceClient, "deploy_model"
+ ) as preview_deploy_model_mock:
+ deployed_model = gca_endpoint_v1beta1.DeployedModel(model=_TEST_MODEL_NAME)
+ deploy_model_lro_mock = mock.Mock(ga_operation.Operation)
+ deploy_model_lro_mock.result.return_value = (
+ gca_endpoint_service_v1beta1.DeployModelResponse(
+ deployed_model=deployed_model,
+ )
+ )
+ preview_deploy_model_mock.return_value = deploy_model_lro_mock
+ yield preview_deploy_model_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestModel:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_constructor_creates_client(self, create_client_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ models.Model(_TEST_ID)
+ create_client_mock.assert_any_call(
+ client_class=utils.ModelClientWithOverride,
+ credentials=initializer.global_config.credentials,
+ location_override=_TEST_LOCATION,
+ appended_user_agent=None,
+ )
+
+ def test_constructor_create_client_with_custom_location(self, create_client_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ models.Model(_TEST_ID, location=_TEST_LOCATION_2)
+ create_client_mock.assert_any_call(
+ client_class=utils.ModelClientWithOverride,
+ credentials=initializer.global_config.credentials,
+ location_override=_TEST_LOCATION_2,
+ appended_user_agent=None,
+ )
+
+ def test_constructor_creates_client_with_custom_credentials(
+ self, create_client_mock
+ ):
+ creds = auth_credentials.AnonymousCredentials()
+ models.Model(_TEST_ID, credentials=creds)
+ create_client_mock.assert_any_call(
+ client_class=utils.ModelClientWithOverride,
+ credentials=creds,
+ location_override=_TEST_LOCATION,
+ appended_user_agent=None,
+ )
+
+ def test_constructor_gets_model(self, get_model_mock):
+ models.Model(_TEST_ID)
+ get_model_mock.assert_called_once_with(
+ name=_TEST_MODEL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_constructor_gets_model_with_custom_project(self, get_model_mock):
+ models.Model(_TEST_ID, project=_TEST_PROJECT_2)
+ test_model_resource_name = model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID
+ )
+ get_model_mock.assert_called_once_with(
+ name=test_model_resource_name, retry=base._DEFAULT_RETRY
+ )
+
+ def test_constructor_gets_model_with_custom_location(self, get_model_mock):
+ models.Model(_TEST_ID, location=_TEST_LOCATION_2)
+ test_model_resource_name = model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION_2, _TEST_ID
+ )
+ get_model_mock.assert_called_once_with(
+ name=test_model_resource_name, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_uploads_and_gets_model(
+ self, upload_model_mock, get_model_mock, sync
+ ):
+
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ sync=sync,
+ upload_request_timeout=None,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=container_spec,
+ version_aliases=["default"],
+ )
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ get_model_mock.assert_called_once_with(
+ name=_TEST_MODEL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_upload_without_serving_container_image_uri_throw_error(
+ self, upload_model_mock, get_model_mock
+ ):
+ expected_message = (
+ "The parameter `serving_container_image_uri` is required "
+ "if no `local_model` is provided."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ artifact_uri=_TEST_ARTIFACT_URI,
+ )
+
+ assert str(exception.value) == expected_message
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_with_local_model(self, upload_model_mock, get_model_mock, sync):
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=_TEST_LOCAL_MODEL.get_serving_container_spec(),
+ version_aliases=["default"],
+ )
+
+ my_model = models.Model.upload(
+ local_model=_TEST_LOCAL_MODEL,
+ display_name=_TEST_MODEL_NAME,
+ sync=sync,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_with_local_model_overwrite_all_serving_container_parameters(
+ self, upload_model_mock, get_model_mock, sync
+ ):
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri="another-image-uri",
+ predict_route="another-predict-route",
+ health_route="another-health-route",
+ )
+ local_model = LocalModel(serving_container_spec=container_spec)
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=container_spec,
+ version_aliases=["default"],
+ )
+
+ my_model = models.Model.upload(
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ local_model=local_model,
+ display_name=_TEST_MODEL_NAME,
+ sync=sync,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_with_timeout(self, upload_model_mock, get_model_mock, sync):
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ sync=sync,
+ upload_request_timeout=180.0,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=container_spec,
+ version_aliases=["default"],
+ )
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=180.0,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_with_timeout_not_explicitly_set(
+ self, upload_model_mock, get_model_mock, sync
+ ):
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ sync=sync,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=container_spec,
+ version_aliases=["default"],
+ )
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_uploads_and_gets_model_with_labels(
+ self, upload_model_mock, get_model_mock, sync
+ ):
+
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ labels=_TEST_LABEL,
+ upload_request_timeout=None,
+ sync=sync,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=container_spec,
+ labels=_TEST_LABEL,
+ version_aliases=["default"],
+ )
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ get_model_mock.assert_called_once_with(
+ name=_TEST_MODEL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_upload_raises_with_impartial_explanation_spec(self):
+
+ with pytest.raises(ValueError) as e:
+ models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ artifact_uri=_TEST_ARTIFACT_URI,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ explanation_metadata=_TEST_EXPLANATION_METADATA
+ # Missing the required explanations_parameters field
+ )
+
+ assert e.match(
+ regexp=r"To get model explanation, `explanation_parameters` "
+ "must be specified."
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_with_parameters_without_metadata(
+ self, upload_model_mock, get_model_mock, sync
+ ):
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ # No explanation_metadata provided
+ sync=sync,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=container_spec,
+ explanation_spec=gca_model.explanation.ExplanationSpec(
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ version_aliases=["default"],
+ )
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_with_parameters_for_examples_presets(
+ self, upload_model_mock, get_model_mock, sync
+ ):
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS_EXAMPLES_PRESETS,
+ explanation_metadata=_TEST_EXPLANATION_METADATA_EXAMPLES,
+ sync=sync,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=container_spec,
+ explanation_spec=gca_model.explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA_EXAMPLES,
+ parameters=_TEST_EXPLANATION_PARAMETERS_EXAMPLES_PRESETS,
+ ),
+ version_aliases=["default"],
+ )
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_with_parameters_for_examples_full_config(
+ self, upload_model_mock, get_model_mock, sync
+ ):
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS_EXAMPLES_FULL_CONFIG,
+ explanation_metadata=_TEST_EXPLANATION_METADATA_EXAMPLES,
+ sync=sync,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=container_spec,
+ explanation_spec=gca_model.explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA_EXAMPLES,
+ parameters=_TEST_EXPLANATION_PARAMETERS_EXAMPLES_FULL_CONFIG,
+ ),
+ version_aliases=["default"],
+ )
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_uploads_and_gets_model_with_all_args(
+ self, upload_model_mock, get_model_mock, sync
+ ):
+
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ artifact_uri=_TEST_ARTIFACT_URI,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ instance_schema_uri=_TEST_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_PREDICTION_SCHEMA_URI,
+ description=_TEST_DESCRIPTION,
+ serving_container_command=_TEST_SERVING_CONTAINER_COMMAND,
+ serving_container_args=_TEST_SERVING_CONTAINER_ARGS,
+ serving_container_environment_variables=_TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ serving_container_ports=_TEST_SERVING_CONTAINER_PORTS,
+ serving_container_grpc_ports=_TEST_SERVING_CONTAINER_GRPC_PORTS,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ labels=_TEST_LABEL,
+ sync=sync,
+ upload_request_timeout=None,
+ serving_container_deployment_timeout=_TEST_SERVING_CONTAINER_DEPLOYMENT_TIMEOUT,
+ serving_container_shared_memory_size_mb=_TEST_SERVING_CONTAINER_SHARED_MEMORY_SIZE_MB,
+ serving_container_startup_probe_exec=_TEST_SERVING_CONTAINER_STARTUP_PROBE_EXEC,
+ serving_container_startup_probe_period_seconds=_TEST_SERVING_CONTAINER_STARTUP_PROBE_PERIOD_SECONDS,
+ serving_container_startup_probe_timeout_seconds=_TEST_SERVING_CONTAINER_STARTUP_PROBE_TIMEOUT_SECONDS,
+ serving_container_health_probe_exec=_TEST_SERVING_CONTAINER_HEALTH_PROBE_EXEC,
+ serving_container_health_probe_period_seconds=_TEST_SERVING_CONTAINER_HEALTH_PROBE_PERIOD_SECONDS,
+ serving_container_health_probe_timeout_seconds=_TEST_SERVING_CONTAINER_HEALTH_PROBE_TIMEOUT_SECONDS,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_SERVING_CONTAINER_PORTS
+ ]
+
+ grpc_ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_SERVING_CONTAINER_GRPC_PORTS
+ ]
+
+ deployment_timeout = duration_pb2.Duration(
+ seconds=_TEST_SERVING_CONTAINER_DEPLOYMENT_TIMEOUT
+ )
+
+ startup_probe = gca_model.Probe(
+ exec=gca_model.Probe.ExecAction(
+ command=_TEST_SERVING_CONTAINER_STARTUP_PROBE_EXEC
+ ),
+ period_seconds=_TEST_SERVING_CONTAINER_STARTUP_PROBE_PERIOD_SECONDS,
+ timeout_seconds=_TEST_SERVING_CONTAINER_STARTUP_PROBE_TIMEOUT_SECONDS,
+ )
+
+ health_probe = gca_model.Probe(
+ exec=gca_model.Probe.ExecAction(
+ command=_TEST_SERVING_CONTAINER_HEALTH_PROBE_EXEC
+ ),
+ period_seconds=_TEST_SERVING_CONTAINER_HEALTH_PROBE_PERIOD_SECONDS,
+ timeout_seconds=_TEST_SERVING_CONTAINER_HEALTH_PROBE_TIMEOUT_SECONDS,
+ )
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_SERVING_CONTAINER_COMMAND,
+ args=_TEST_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ grpc_ports=grpc_ports,
+ deployment_timeout=deployment_timeout,
+ shared_memory_size_mb=_TEST_SERVING_CONTAINER_SHARED_MEMORY_SIZE_MB,
+ startup_probe=startup_probe,
+ health_probe=health_probe,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ description=_TEST_DESCRIPTION,
+ artifact_uri=_TEST_ARTIFACT_URI,
+ container_spec=container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_PREDICTION_SCHEMA_URI,
+ ),
+ explanation_spec=gca_model.explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ labels=_TEST_LABEL,
+ version_aliases=["default"],
+ )
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+ get_model_mock.assert_called_once_with(
+ name=_TEST_MODEL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("get_model_with_custom_project_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_uploads_and_gets_model_with_custom_project(
+ self,
+ upload_model_with_custom_project_mock,
+ get_model_with_custom_project_mock,
+ sync,
+ ):
+
+ test_model_resource_name = model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID
+ )
+
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ artifact_uri=_TEST_ARTIFACT_URI,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ project=_TEST_PROJECT_2,
+ sync=sync,
+ upload_request_timeout=None,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ artifact_uri=_TEST_ARTIFACT_URI,
+ container_spec=container_spec,
+ version_aliases=["default"],
+ )
+
+ upload_model_with_custom_project_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=f"projects/{_TEST_PROJECT_2}/locations/{_TEST_LOCATION}",
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ get_model_with_custom_project_mock.assert_called_once_with(
+ name=test_model_resource_name, retry=base._DEFAULT_RETRY
+ )
+
+ assert my_model.uri == _TEST_ARTIFACT_URI
+ assert my_model.supported_export_formats == {}
+ assert my_model.supported_deployment_resources_types == []
+ assert my_model.supported_input_storage_formats == []
+ assert my_model.supported_output_storage_formats == []
+ assert my_model.description == _TEST_DESCRIPTION
+
+ @pytest.mark.usefixtures("get_model_with_custom_project_mock")
+ def test_accessing_properties_with_no_resource_raises(
+ self,
+ ):
+
+ test_model_resource_name = model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID
+ )
+
+ my_model = models.Model(test_model_resource_name)
+ my_model._gca_resource = None
+
+ with pytest.raises(RuntimeError) as e:
+ my_model.uri
+ e.match(regexp=r"Model resource has not been created.")
+
+ with pytest.raises(RuntimeError) as e:
+ my_model.supported_export_formats
+ e.match(regexp=r"Model resource has not been created.")
+
+ with pytest.raises(RuntimeError) as e:
+ my_model.supported_deployment_resources_types
+ e.match(regexp=r"Model resource has not been created.")
+
+ with pytest.raises(RuntimeError) as e:
+ my_model.supported_input_storage_formats
+ e.match(regexp=r"Model resource has not been created.")
+
+ with pytest.raises(RuntimeError) as e:
+ my_model.supported_output_storage_formats
+ e.match(regexp=r"Model resource has not been created.")
+
+ with pytest.raises(RuntimeError) as e:
+ my_model.description
+ e.match(regexp=r"Model resource has not been created.")
+
+ @pytest.mark.usefixtures("get_model_with_custom_location_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_uploads_and_gets_model_with_custom_location(
+ self,
+ upload_model_with_custom_location_mock,
+ get_model_with_custom_location_mock,
+ sync,
+ ):
+ test_model_resource_name = model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION_2, _TEST_ID
+ )
+
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ artifact_uri=_TEST_ARTIFACT_URI,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ location=_TEST_LOCATION_2,
+ sync=sync,
+ upload_request_timeout=None,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ artifact_uri=_TEST_ARTIFACT_URI,
+ container_spec=container_spec,
+ version_aliases=["default"],
+ )
+
+ upload_model_with_custom_location_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION_2}",
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ get_model_with_custom_location_mock.assert_called_once_with(
+ name=test_model_resource_name, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_with_model_garden_source(
+ self, upload_model_mock, get_model_mock, sync
+ ):
+
+ my_model = models.Model.upload(
+ display_name=_TEST_MODEL_NAME,
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ sync=sync,
+ upload_request_timeout=None,
+ model_garden_source_model_name=_TEST_MODEL_GARDEN_SOURCE_MODEL_NAME,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ container_spec=container_spec,
+ version_aliases=["default"],
+ base_model_source=gca_model.Model.BaseModelSource(
+ model_garden_source=gca_model.ModelGardenSource(
+ public_model_name=_TEST_MODEL_GARDEN_SOURCE_MODEL_NAME
+ )
+ ),
+ )
+
+ upload_model_mock.assert_called_once_with(
+ request=gca_model_service.UploadModelRequest(
+ parent=initializer.global_config.common_location_path(),
+ model=managed_model,
+ ),
+ timeout=None,
+ )
+
+ get_model_mock.assert_called_once_with(
+ name=_TEST_MODEL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy(self, deploy_model_mock, sync):
+
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+
+ test_endpoint = models.Endpoint(_TEST_ID)
+
+ assert (
+ test_model.deploy(
+ test_endpoint,
+ sync=sync,
+ )
+ == test_endpoint
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_timeout(self, deploy_model_mock, sync):
+
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+
+ test_endpoint = models.Endpoint(_TEST_ID)
+
+ test_model.deploy(test_endpoint, sync=sync, deploy_request_timeout=180.0)
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_timeout_not_explicitly_set(self, deploy_model_mock, sync):
+
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+
+ test_endpoint = models.Endpoint(_TEST_ID)
+
+ test_model.deploy(
+ test_endpoint,
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_with_version")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_version(self, deploy_model_mock, sync):
+
+ test_model = models.Model(_TEST_MODEL_NAME)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ version = _TEST_MODEL_OBJ_WITH_VERSION.version_id
+
+ test_endpoint = models.Endpoint(_TEST_ID)
+
+ test_endpoint = test_model.deploy(
+ test_endpoint,
+ sync=sync,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=f"{test_model.resource_name}@{version}",
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_no_endpoint(self, deploy_model_mock, sync):
+
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint = test_model.deploy(sync=sync)
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_no_endpoint_dedicated_resources(self, deploy_model_mock, sync):
+
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ spot=False,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_no_endpoint_with_tpu_topology(self, deploy_model_mock, sync):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_TPU_MACHINE_TYPE,
+ tpu_topology=_TEST_TPU_TOPOLOGY,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_TPU_MACHINE_TYPE,
+ tpu_topology=_TEST_TPU_TOPOLOGY,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ spot=False,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_no_endpoint_with_spot(self, deploy_model_mock, sync):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ spot=_TEST_SPOT,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ spot=True,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_no_endpoint_with_specific_reservation_affinity(
+ self, deploy_model_mock, sync
+ ):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ reservation_affinity_type=_TEST_RESERVATION_AFFINITY_TYPE,
+ reservation_affinity_key=_TEST_RESERVATION_AFFINITY_KEY,
+ reservation_affinity_values=_TEST_RESERVATION_AFFINITY_VALUES,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_reservation_affinity = gca_reservation_affinity_v1.ReservationAffinity(
+ reservation_affinity_type=_TEST_RESERVATION_AFFINITY_TYPE,
+ key=_TEST_RESERVATION_AFFINITY_KEY,
+ values=_TEST_RESERVATION_AFFINITY_VALUES,
+ )
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ reservation_affinity=expected_reservation_affinity,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ spot=False,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_no_endpoint_with_any_reservation_affinity(
+ self, deploy_model_mock, sync
+ ):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ reservation_affinity_type="ANY_RESERVATION",
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_reservation_affinity = gca_reservation_affinity_v1.ReservationAffinity(
+ reservation_affinity_type="ANY_RESERVATION",
+ )
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ reservation_affinity=expected_reservation_affinity,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ spot=False,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_no_endpoint_with_explanations(self, deploy_model_mock, sync):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ spot=False,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ explanation_spec=gca_endpoint.explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ def test_deploy_raises_with_impartial_explanation_spec(self):
+
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+
+ with pytest.raises(ValueError) as e:
+ test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ # Missing required `explanation_parameters` argument
+ )
+
+ assert e.match(
+ regexp=r"To get model explanation, `explanation_parameters` "
+ "must be specified."
+ )
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock", "get_model_mock", "create_endpoint_mock"
+ )
+ def test_deploy_no_endpoint_with_network(self, deploy_model_mock):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+
+ test_endpoint = test_model.deploy(network=_TEST_NETWORK)
+ # Ensure endpoint created with `network` is a PrivateEndpoint
+ assert isinstance(test_endpoint, models.PrivateEndpoint)
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+
+ # Ensure traffic_split is set to `None` for PrivateEndpoint
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split=None,
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_disable_container_logging(self, deploy_model_mock, sync):
+
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+
+ test_endpoint = models.Endpoint(_TEST_ID)
+
+ assert (
+ test_model.deploy(
+ test_endpoint,
+ disable_container_logging=True,
+ sync=sync,
+ )
+ == test_endpoint
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ disable_container_logging=True,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_model_mock",
+ "create_endpoint_mock",
+ "get_endpoint_mock",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_preview_deploy_with_fast_tryout_enabled(
+ self, preview_deploy_model_mock, create_endpoint_mock, sync
+ ):
+ test_model = models.Model(_TEST_ID).preview
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ disable_container_logging=True,
+ sync=sync,
+ deploy_request_timeout=None,
+ fast_tryout_enabled=True,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_MODEL_NAME + "_endpoint",
+ dedicated_endpoint_enabled=True,
+ )
+
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ metadata=(),
+ timeout=None,
+ endpoint_id=None,
+ )
+
+ expected_machine_spec = gca_machine_resources_v1beta1.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources_v1beta1.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint_v1beta1.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ faster_deployment_config=gca_endpoint_v1beta1.FasterDeploymentConfig(
+ fast_tryout_enabled=True
+ ),
+ )
+ preview_deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_model_mock",
+ "create_endpoint_mock",
+ "get_endpoint_mock",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_fast_tryout_enabled(
+ self, deploy_model_mock, create_endpoint_mock, sync
+ ):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ disable_container_logging=True,
+ sync=sync,
+ deploy_request_timeout=None,
+ fast_tryout_enabled=True,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ display_name=_TEST_MODEL_NAME + "_endpoint",
+ dedicated_endpoint_enabled=True,
+ )
+
+ create_endpoint_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ endpoint=expected_endpoint,
+ metadata=(),
+ timeout=None,
+ endpoint_id=None,
+ )
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ disable_container_logging=True,
+ faster_deployment_config=gca_endpoint.FasterDeploymentConfig(
+ fast_tryout_enabled=True
+ ),
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_model_mock",
+ "create_endpoint_mock",
+ "get_endpoint_mock",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_preview_deploy_with_system_labels(self, preview_deploy_model_mock, sync):
+ test_model = models.Model(_TEST_ID).preview
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ system_labels=_TEST_LABELS,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources_v1beta1.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources_v1beta1.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint_v1beta1.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ enable_container_logging=True,
+ faster_deployment_config=gca_endpoint_v1beta1.FasterDeploymentConfig(),
+ system_labels=_TEST_LABELS,
+ )
+ preview_deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_model_mock",
+ "create_endpoint_mock",
+ "get_endpoint_mock",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_system_labels(self, deploy_model_mock, sync):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+
+ test_endpoint = test_model.deploy(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ system_labels=_TEST_LABELS,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ system_labels=_TEST_LABELS,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_model_mock",
+ "preview_get_drp_mock",
+ "create_endpoint_mock",
+ "get_endpoint_mock",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_preview_deploy_with_deployment_resource_pool(
+ self, preview_deploy_model_mock, sync
+ ):
+ test_model = models.Model(_TEST_ID).preview
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.SHARED_RESOURCES,
+ )
+ test_drp = preview_models.DeploymentResourcePool(_TEST_DRP_NAME)
+
+ test_endpoint = test_model.deploy(
+ deployment_resource_pool=test_drp,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+ if not sync:
+ test_endpoint.wait()
+
+ deployed_model = gca_endpoint_v1beta1.DeployedModel(
+ shared_resources=_TEST_DRP_NAME,
+ model=test_model.resource_name,
+ display_name=None,
+ enable_container_logging=True,
+ )
+ preview_deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_model_mock", "get_drp_mock", "create_endpoint_mock", "get_endpoint_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_deployment_resource_pool(self, deploy_model_mock, sync):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.SHARED_RESOURCES,
+ )
+ test_drp = models.DeploymentResourcePool(_TEST_DRP_NAME)
+
+ test_endpoint = test_model.deploy(
+ deployment_resource_pool=test_drp,
+ sync=sync,
+ deploy_request_timeout=None,
+ )
+ if not sync:
+ test_endpoint.wait()
+
+ deployed_model = gca_endpoint.DeployedModel(
+ shared_resources=_TEST_DRP_NAME,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_init_aiplatform_with_encryption_key_name_and_batch_predict_gcs_source_and_dest(
+ self, create_batch_prediction_job_mock, sync
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+ test_model = models.Model(_TEST_ID)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=sync,
+ create_request_timeout=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ if not sync:
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = (
+ gca_batch_prediction_job.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ model=model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_ID
+ ),
+ input_config=gca_batch_prediction_job.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
+ ),
+ predictions_format="jsonl",
+ ),
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ )
+
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_init_aiplatform_with_service_account_and_batch_predict_gcs_source_and_dest(
+ self, create_batch_prediction_job_mock, sync
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ test_model = models.Model(_TEST_ID)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = (
+ gca_batch_prediction_job.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ model=model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_ID
+ ),
+ input_config=gca_batch_prediction_job.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
+ ),
+ predictions_format="jsonl",
+ ),
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ )
+
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_batch_predict_gcs_source_and_dest(
+ self, create_batch_prediction_job_mock, sync
+ ):
+
+ test_model = models.Model(_TEST_ID)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=sync,
+ create_request_timeout=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ if not sync:
+ batch_prediction_job.wait()
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_with_version", "get_batch_prediction_job_mock")
+ def test_batch_predict_with_version(self, sync, create_batch_prediction_job_mock):
+
+ test_model = models.Model(_TEST_MODEL_NAME, version=_TEST_VERSION_ALIAS_1)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ sync=sync,
+ create_request_timeout=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ if not sync:
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = (
+ gca_batch_prediction_job.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ model=f"{_TEST_MODEL_PARENT}@{_TEST_VERSION_ID}",
+ input_config=gca_batch_prediction_job.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
+ ),
+ predictions_format="jsonl",
+ ),
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ )
+
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_batch_predict_gcs_source_bq_dest(
+ self, create_batch_prediction_job_mock, sync
+ ):
+
+ test_model = models.Model(_TEST_ID)
+
+ # Make SDK batch_predict method call
+ batch_prediction_job = test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ sync=sync,
+ create_request_timeout=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ if not sync:
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = (
+ gca_batch_prediction_job.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ model=model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_ID
+ ),
+ input_config=gca_batch_prediction_job.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io.GcsSource(
+ uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
+ ),
+ ),
+ output_config=gca_batch_prediction_job.BatchPredictionJob.OutputConfig(
+ bigquery_destination=gca_io.BigQueryDestination(
+ output_uri=_TEST_BATCH_PREDICTION_BQ_DEST_PREFIX_WITH_PROTOCOL
+ ),
+ predictions_format="bigquery",
+ ),
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ )
+
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_batch_predict_with_all_args(self, create_batch_prediction_job_mock, sync):
+ test_model = models.Model(_TEST_ID)
+ creds = auth_credentials.AnonymousCredentials()
+
+ # Make SDK batch_predict method call passing all arguments
+ batch_prediction_job = test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX,
+ predictions_format="csv",
+ model_parameters={},
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ starting_replica_count=_TEST_STARTING_REPLICA_COUNT,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ generate_explanation=True,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ labels=_TEST_LABEL,
+ credentials=creds,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ batch_size=_TEST_BATCH_SIZE,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ if not sync:
+ batch_prediction_job.wait()
+
+ # Construct expected request
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ model=model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_ID
+ ),
+ input_config=gca_batch_prediction_job.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io.GcsSource(uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]),
+ ),
+ output_config=gca_batch_prediction_job.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
+ ),
+ predictions_format="csv",
+ ),
+ dedicated_resources=gca_machine_resources.BatchDedicatedResources(
+ machine_spec=gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ ),
+ starting_replica_count=_TEST_STARTING_REPLICA_COUNT,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ ),
+ manual_batch_tuning_parameters=gca_manual_batch_tuning_parameters_compat.ManualBatchTuningParameters(
+ batch_size=_TEST_BATCH_SIZE
+ ),
+ generate_explanation=True,
+ explanation_spec=gca_explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ labels=_TEST_LABEL,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}",
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_batch_predict_no_source(self, create_batch_prediction_job_mock):
+
+ test_model = models.Model(_TEST_ID)
+
+ # Make SDK batch_predict method call without source
+ with pytest.raises(ValueError) as e:
+ test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ )
+
+ assert e.match(regexp=r"source")
+
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_batch_predict_two_sources(self, create_batch_prediction_job_mock):
+
+ test_model = models.Model(_TEST_ID)
+
+ # Make SDK batch_predict method call with two sources
+ with pytest.raises(ValueError) as e:
+ test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ bigquery_source=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ )
+
+ assert e.match(regexp=r"source")
+
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_batch_predict_no_destination(self):
+
+ test_model = models.Model(_TEST_ID)
+
+ # Make SDK batch_predict method call without destination
+ with pytest.raises(ValueError) as e:
+ test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ )
+
+ assert e.match(regexp=r"destination")
+
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_batch_predict_wrong_instance_format(self):
+
+ test_model = models.Model(_TEST_ID)
+
+ # Make SDK batch_predict method call
+ with pytest.raises(ValueError) as e:
+ test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ instances_format="wrong",
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ )
+
+ assert e.match(regexp=r"accepted instances format")
+
+ @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock")
+ def test_batch_predict_wrong_prediction_format(self):
+
+ test_model = models.Model(_TEST_ID)
+
+ # Make SDK batch_predict method call
+ with pytest.raises(ValueError) as e:
+ test_model.batch_predict(
+ job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE,
+ predictions_format="wrong",
+ bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX,
+ )
+
+ assert e.match(regexp=r"accepted prediction format")
+
+ @pytest.mark.usefixtures("get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_delete_model(self, delete_model_mock, sync):
+
+ test_model = models.Model(_TEST_ID)
+ test_model.delete(sync=sync)
+
+ if not sync:
+ test_model.wait()
+
+ delete_model_mock.assert_called_once_with(name=test_model.resource_name)
+
+ @pytest.mark.usefixtures("get_model_mock")
+ def test_print_model(self):
+ test_model = models.Model(_TEST_ID)
+ assert (
+ repr(test_model)
+ == f"{object.__repr__(test_model)} \nresource name: {test_model.resource_name}"
+ )
+
+ @pytest.mark.usefixtures("get_model_mock")
+ def test_print_model_if_waiting(self):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource = None
+ test_model._latest_future = futures.Future()
+ assert (
+ repr(test_model)
+ == f"{object.__repr__(test_model)} is waiting for upstream dependencies to complete."
+ )
+
+ @pytest.mark.usefixtures("get_model_mock")
+ def test_print_model_if_exception(self):
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource = None
+ mock_exception = Exception("mock exception")
+ test_model._exception = mock_exception
+ assert (
+ repr(test_model)
+ == f"{object.__repr__(test_model)} failed with {str(mock_exception)}"
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_with_supported_export_formats_artifact")
+ def test_export_model_as_artifact(self, export_model_mock, sync):
+ test_model = models.Model(_TEST_ID)
+
+ if not sync:
+ test_model.wait()
+
+ test_model.export_model(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ artifact_destination=_TEST_OUTPUT_DIR,
+ )
+
+ expected_output_config = gca_model_service.ExportModelRequest.OutputConfig(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ artifact_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_OUTPUT_DIR
+ ),
+ )
+
+ export_model_mock.assert_called_once_with(
+ name=f"{_TEST_PARENT}/models/{_TEST_ID}",
+ output_config=expected_output_config,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures(
+ "get_model_with_supported_export_formats_artifact_and_version"
+ )
+ def test_export_model_with_version(self, export_model_mock, sync):
+ test_model = models.Model(f"{_TEST_ID}@{_TEST_VERSION_ID}")
+
+ if not sync:
+ test_model.wait()
+
+ test_model.export_model(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ artifact_destination=_TEST_OUTPUT_DIR,
+ )
+
+ expected_output_config = gca_model_service.ExportModelRequest.OutputConfig(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ artifact_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_OUTPUT_DIR
+ ),
+ )
+
+ export_model_mock.assert_called_once_with(
+ name=f"{_TEST_PARENT}/models/{_TEST_ID}@{_TEST_VERSION_ID}",
+ output_config=expected_output_config,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_with_supported_export_formats_image")
+ def test_export_model_as_image(self, export_model_mock, sync):
+ test_model = models.Model(_TEST_ID)
+
+ test_model.export_model(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_IMAGE,
+ image_destination=_TEST_CONTAINER_REGISTRY_DESTINATION,
+ )
+
+ if not sync:
+ test_model.wait()
+
+ expected_output_config = gca_model_service.ExportModelRequest.OutputConfig(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_IMAGE,
+ image_destination=gca_io.ContainerRegistryDestination(
+ output_uri=_TEST_CONTAINER_REGISTRY_DESTINATION
+ ),
+ )
+
+ export_model_mock.assert_called_once_with(
+ name=f"{_TEST_PARENT}/models/{_TEST_ID}",
+ output_config=expected_output_config,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_with_both_supported_export_formats")
+ def test_export_model_as_both_formats(self, export_model_mock, sync):
+ """Exports a 'tf-saved-model' as both an artifact and an image"""
+
+ test_model = models.Model(_TEST_ID)
+
+ test_model.export_model(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ image_destination=_TEST_CONTAINER_REGISTRY_DESTINATION,
+ artifact_destination=_TEST_OUTPUT_DIR,
+ )
+
+ if not sync:
+ test_model.wait()
+
+ expected_output_config = gca_model_service.ExportModelRequest.OutputConfig(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ image_destination=gca_io.ContainerRegistryDestination(
+ output_uri=_TEST_CONTAINER_REGISTRY_DESTINATION
+ ),
+ artifact_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_OUTPUT_DIR
+ ),
+ )
+
+ export_model_mock.assert_called_once_with(
+ name=f"{_TEST_PARENT}/models/{_TEST_ID}",
+ output_config=expected_output_config,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_with_unsupported_export_formats")
+ def test_export_model_not_supported(self, export_model_mock, sync):
+ test_model = models.Model(_TEST_ID)
+
+ with pytest.raises(ValueError) as e:
+ test_model.export_model(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_IMAGE,
+ image_destination=_TEST_CONTAINER_REGISTRY_DESTINATION,
+ )
+
+ if not sync:
+ test_model.wait()
+
+ assert e.match(
+ regexp=f"The model `{_TEST_PARENT}/models/{_TEST_ID}` is not exportable."
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_with_supported_export_formats_image")
+ def test_export_model_as_image_with_invalid_args(self, export_model_mock, sync):
+
+ # Passing an artifact destination on an image-only Model
+ with pytest.raises(ValueError) as dest_type_err:
+ test_model = models.Model(_TEST_ID)
+
+ test_model.export_model(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_IMAGE,
+ artifact_destination=_TEST_OUTPUT_DIR,
+ sync=sync,
+ )
+
+ if not sync:
+ test_model.wait()
+
+ # Passing no destination type
+ with pytest.raises(ValueError) as no_dest_err:
+ test_model = models.Model(_TEST_ID)
+
+ test_model.export_model(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_IMAGE,
+ sync=sync,
+ )
+
+ if not sync:
+ test_model.wait()
+
+ # Passing an invalid export format ID
+ with pytest.raises(ValueError) as format_err:
+ test_model = models.Model(_TEST_ID)
+ test_model.export_model(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ image_destination=_TEST_CONTAINER_REGISTRY_DESTINATION,
+ sync=sync,
+ )
+
+ if not sync:
+ test_model.wait()
+
+ assert dest_type_err.match(
+ regexp=r"This model can not be exported as an artifact."
+ )
+ assert no_dest_err.match(regexp=r"Please provide an")
+ assert format_err.match(
+ regexp=f"'{_TEST_EXPORT_FORMAT_ID_ARTIFACT}' is not a supported export format"
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.usefixtures("get_model_with_supported_export_formats_artifact")
+ def test_export_model_as_artifact_with_invalid_args(self, export_model_mock, sync):
+ test_model = models.Model(_TEST_ID)
+
+ # Passing an image destination on an artifact-only Model
+ with pytest.raises(ValueError) as e:
+ test_model.export_model(
+ export_format_id=_TEST_EXPORT_FORMAT_ID_ARTIFACT,
+ image_destination=_TEST_CONTAINER_REGISTRY_DESTINATION,
+ sync=sync,
+ )
+
+ if not sync:
+ test_model.wait()
+
+ assert e.match(
+ regexp=r"This model can not be exported as a container image."
+ )
+
+ @pytest.mark.usefixtures(
+ "get_training_job_non_existent_mock", "get_model_with_training_job"
+ )
+ def test_get_and_return_subclass_not_found(self):
+ test_model = models.Model(_TEST_ID)
+
+ # Attempt to access Model's training job that no longer exists
+ with pytest.raises(api_exceptions.NotFound) as e:
+ test_model.training_job
+
+ assert e.match(
+ regexp=(
+ r"The training job used to create this model could not be found: "
+ rf"{_TEST_PIPELINE_RESOURCE_NAME}"
+ )
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize(
+ "model_file_name",
+ ["my_model.xgb", "my_model.pkl", "my_model.joblib", "my_model.bst"],
+ )
+ def test_upload_xgboost_model_file_uploads_and_gets_model(
+ self,
+ tmp_path: pathlib.Path,
+ model_file_name: str,
+ mock_storage_blob_upload_from_filename,
+ upload_model_mock,
+ get_model_mock,
+ sync: bool,
+ ):
+ model_file_path = tmp_path / model_file_name
+ model_file_path.touch()
+
+ my_model = models.Model.upload_xgboost_model_file(
+ model_file_path=str(model_file_path),
+ xgboost_version="1.4",
+ display_name=_TEST_MODEL_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ sync=sync,
+ upload_request_timeout=None,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ upload_model_mock.assert_called_once()
+ upload_model_call_kwargs = upload_model_mock.call_args[1]
+ upload_model_model = upload_model_call_kwargs["request"].model
+
+ # Verifying the container image selection
+ assert (
+ upload_model_model.container_spec.image_uri
+ == "us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-4:latest"
+ )
+
+ # Verifying the staging bucket name generation
+ assert upload_model_model.artifact_uri.startswith(
+ f"gs://{_TEST_PROJECT}-vertex-staging-{_TEST_LOCATION}"
+ )
+ assert "/vertex_ai_auto_staging/" in upload_model_model.artifact_uri
+
+ # Verifying that the model was renamed to a file name that is acceptable for Model.upload
+ staged_model_file_path = mock_storage_blob_upload_from_filename.call_args[1][
+ "filename"
+ ]
+ staged_model_file_name = staged_model_file_path.split("/")[-1]
+ assert staged_model_file_name in ["model.bst", "model.pkl", "model.joblib"]
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize(
+ "model_file_name",
+ [
+ "model.bst",
+ "model.pkl",
+ "model.joblib",
+ "saved_model.pb",
+ "saved_model.pbtxt",
+ ],
+ )
+ def test_upload_stages_data_uploads_and_gets_model(
+ self,
+ tmp_path: pathlib.Path,
+ model_file_name: str,
+ mock_storage_blob_upload_from_filename,
+ upload_model_mock,
+ get_model_mock,
+ sync: bool,
+ ):
+ model_file_path = tmp_path / model_file_name
+ model_file_path.touch()
+
+ my_model = models.Model.upload(
+ artifact_uri=str(tmp_path),
+ serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-4:latest",
+ display_name=_TEST_MODEL_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ sync=sync,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ upload_model_mock.assert_called_once()
+ upload_model_call_kwargs = upload_model_mock.call_args[1]
+ upload_model_model = upload_model_call_kwargs["request"].model
+
+ # Verifying the staging bucket name generation
+ assert upload_model_model.artifact_uri.startswith(
+ f"gs://{_TEST_PROJECT}-vertex-staging-{_TEST_LOCATION}"
+ )
+ assert "/vertex_ai_auto_staging/" in upload_model_model.artifact_uri
+
+ # Verifying that the model was renamed to a file name that is acceptable for Model.upload
+ staged_model_file_path = mock_storage_blob_upload_from_filename.call_args[1][
+ "filename"
+ ]
+ staged_model_file_name = staged_model_file_path.split("/")[-1]
+ assert staged_model_file_name in [
+ "model.bst",
+ "model.pkl",
+ "model.joblib",
+ "saved_model.pb",
+ "saved_model.pbtxt",
+ ]
+
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize(
+ "model_file_name",
+ ["my_model.pkl", "my_model.joblib"],
+ )
+ def test_upload_scikit_learn_model_file_uploads_and_gets_model(
+ self,
+ tmp_path: pathlib.Path,
+ model_file_name: str,
+ mock_storage_blob_upload_from_filename,
+ upload_model_mock,
+ get_model_mock,
+ sync: bool,
+ ):
+ model_file_path = tmp_path / model_file_name
+ model_file_path.touch()
+
+ my_model = models.Model.upload_scikit_learn_model_file(
+ model_file_path=str(model_file_path),
+ sklearn_version="0.24",
+ display_name=_TEST_MODEL_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ sync=sync,
+ upload_request_timeout=None,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ upload_model_mock.assert_called_once()
+ upload_model_call_kwargs = upload_model_mock.call_args[1]
+ upload_model_model = upload_model_call_kwargs["request"].model
+
+ # Verifying the container image selection
+ assert (
+ upload_model_model.container_spec.image_uri
+ == "us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-24:latest"
+ )
+
+ # Verifying the staging bucket name generation
+ assert upload_model_model.artifact_uri.startswith(
+ f"gs://{_TEST_PROJECT}-vertex-staging-{_TEST_LOCATION}"
+ )
+ assert "/vertex_ai_auto_staging/" in upload_model_model.artifact_uri
+
+ # Verifying that the model was renamed to a file name that is acceptable for Model.upload
+ staged_model_file_path = mock_storage_blob_upload_from_filename.call_args[1][
+ "filename"
+ ]
+ staged_model_file_name = staged_model_file_path.split("/")[-1]
+ assert staged_model_file_name in ["model.pkl", "model.joblib"]
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_upload_tensorflow_saved_model_uploads_and_gets_model(
+ self,
+ tmp_path: pathlib.Path,
+ mock_storage_blob_upload_from_filename,
+ upload_model_mock,
+ get_model_mock,
+ sync: bool,
+ ):
+ saved_model_dir = tmp_path / "saved_model"
+ saved_model_dir.mkdir()
+ (saved_model_dir / "saved_model.pb").touch()
+
+ my_model = models.Model.upload_tensorflow_saved_model(
+ saved_model_dir=str(saved_model_dir),
+ tensorflow_version="2.6",
+ use_gpu=True,
+ display_name=_TEST_MODEL_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ sync=sync,
+ upload_request_timeout=None,
+ )
+
+ if not sync:
+ my_model.wait()
+
+ upload_model_mock.assert_called_once()
+ upload_model_call_kwargs = upload_model_mock.call_args[1]
+ upload_model_model = upload_model_call_kwargs["request"].model
+
+ # Verifying the container image selection
+ assert (
+ upload_model_model.container_spec.image_uri
+ == "us-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-6:latest"
+ )
+
+ # Verifying the staging bucket name generation
+ assert upload_model_model.artifact_uri.startswith(
+ f"gs://{_TEST_PROJECT}-vertex-staging-{_TEST_LOCATION}"
+ )
+ assert "/vertex_ai_auto_staging/" in upload_model_model.artifact_uri
+
+ # Verifying that the model files were uploaded
+ staged_model_file_path = mock_storage_blob_upload_from_filename.call_args[1][
+ "filename"
+ ]
+ staged_model_file_name = staged_model_file_path.split("/")[-1]
+ assert staged_model_file_name in ["saved_model.pb", "saved_model.pbtxt"]
+
+ def test_copy_as_new_model(self, copy_model_mock, get_model_mock):
+
+ test_model = models.Model(_TEST_ID)
+ test_model.copy(destination_location=_TEST_LOCATION_2)
+
+ copy_model_mock.assert_called_once_with(
+ request=gca_model_service.CopyModelRequest(
+ parent=initializer.global_config.common_location_path(
+ location=_TEST_LOCATION_2
+ ),
+ source_model=_TEST_MODEL_RESOURCE_NAME,
+ ),
+ timeout=None,
+ )
+
+ def test_copy_as_new_version(self, copy_model_mock, get_model_mock):
+ test_model = models.Model(_TEST_ID)
+ test_model.copy(
+ destination_location=_TEST_LOCATION_2,
+ destination_parent_model=_TEST_MODEL_NAME_ALT,
+ )
+
+ copy_model_mock.assert_called_once_with(
+ request=gca_model_service.CopyModelRequest(
+ parent=initializer.global_config.common_location_path(
+ location=_TEST_LOCATION_2
+ ),
+ source_model=_TEST_MODEL_RESOURCE_NAME,
+ parent_model=model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION_2, _TEST_MODEL_NAME_ALT
+ ),
+ ),
+ timeout=None,
+ )
+
+ def test_copy_as_new_model_custom_id(self, copy_model_mock, get_model_mock):
+ test_model = models.Model(_TEST_ID)
+ test_model.copy(
+ destination_location=_TEST_LOCATION_2, destination_model_id=_TEST_MODEL_ID
+ )
+
+ copy_model_mock.assert_called_once_with(
+ request=gca_model_service.CopyModelRequest(
+ parent=initializer.global_config.common_location_path(
+ location=_TEST_LOCATION_2
+ ),
+ source_model=_TEST_MODEL_RESOURCE_NAME,
+ model_id=_TEST_MODEL_ID,
+ ),
+ timeout=None,
+ )
+
+ def test_copy_with_invalid_params(self, copy_model_mock, get_model_mock):
+ with pytest.raises(ValueError) as e:
+ test_model = models.Model(_TEST_ID)
+ test_model.copy(
+ destination_location=_TEST_LOCATION,
+ destination_model_id=_TEST_MODEL_ID,
+ destination_parent_model=_TEST_MODEL_RESOURCE_NAME,
+ )
+
+ assert e.match(
+ regexp=r"`destination_model_id` and `destination_parent_model` can not be set together."
+ )
+
+ @pytest.mark.usefixtures("get_model_mock")
+ def test_update(self, update_model_mock, get_model_mock):
+
+ test_model = models.Model(_TEST_ID)
+
+ test_model.update(
+ display_name=_TEST_MODEL_NAME,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABEL,
+ )
+
+ current_model_proto = gca_model.Model(
+ display_name=_TEST_MODEL_NAME,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABEL,
+ name=_TEST_MODEL_RESOURCE_NAME,
+ )
+
+ update_mask = field_mask_pb2.FieldMask(
+ paths=["display_name", "description", "labels"]
+ )
+
+ update_model_mock.assert_called_once_with(
+ model=current_model_proto, update_mask=update_mask
+ )
+
+ def test_get_model_evaluation_with_evaluation_id(
+ self,
+ mock_model_eval_get,
+ get_model_mock,
+ list_model_evaluations_mock,
+ ):
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ test_model.get_model_evaluation(evaluation_id=_TEST_ID)
+
+ mock_model_eval_get.assert_called_once_with(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_get_model_evaluation_with_evaluation_and_instantiated_version(
+ self,
+ mock_model_eval_get,
+ get_model_mock,
+ list_model_evaluations_mock,
+ ):
+ test_model = models.Model(
+ model_name=f"{_TEST_MODEL_RESOURCE_NAME}@{_TEST_VERSION_ID}"
+ )
+
+ test_model.get_model_evaluation(evaluation_id=_TEST_ID)
+
+ mock_model_eval_get.assert_called_once_with(
+ name=_TEST_MODEL_EVAL_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ list_model_evaluations_mock.assert_called_once_with(
+ request={"parent": test_model.versioned_resource_name}
+ )
+
+ def test_get_model_evaluation_without_id(
+ self,
+ mock_model_eval_get,
+ get_model_mock,
+ list_model_evaluations_mock,
+ ):
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ test_model.get_model_evaluation()
+
+ list_model_evaluations_mock.assert_called_once_with(
+ request={"parent": _TEST_MODEL_RESOURCE_NAME}
+ )
+
+ def test_list_model_evaluations(
+ self,
+ get_model_mock,
+ mock_model_eval_get,
+ list_model_evaluations_mock,
+ ):
+
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ eval_list = test_model.list_model_evaluations()
+
+ list_model_evaluations_mock.assert_called_once_with(
+ request={"parent": _TEST_MODEL_RESOURCE_NAME}
+ )
+
+ assert len(eval_list) == len(_TEST_MODEL_EVAL_LIST)
+
+ def test_list_model_evaluations_with_version(
+ self,
+ get_model_mock,
+ mock_model_eval_get,
+ list_model_evaluations_mock,
+ ):
+
+ test_model = models.Model(
+ model_name=f"{_TEST_MODEL_RESOURCE_NAME}@{_TEST_VERSION_ID}"
+ )
+
+ test_model.list_model_evaluations()
+
+ list_model_evaluations_mock.assert_called_once_with(
+ request={"parent": test_model.versioned_resource_name}
+ )
+
+ def test_init_with_version_in_resource_name(self, get_model_with_version):
+ model = models.Model(
+ model_name=models.ModelRegistry._get_versioned_name(
+ _TEST_MODEL_NAME, _TEST_VERSION_ALIAS_1
+ )
+ )
+
+ assert model.version_aliases == [_TEST_VERSION_ALIAS_1, _TEST_VERSION_ALIAS_2]
+ assert model.display_name == _TEST_MODEL_NAME
+ assert model.resource_name == _TEST_MODEL_PARENT
+ assert model.version_id == _TEST_VERSION_ID
+ assert model.version_description == _TEST_MODEL_VERSION_DESCRIPTION_2
+ # The Model yielded from upload should not have a version in resource name
+ assert "@" not in model.resource_name
+ # The Model yielded from upload SHOULD have a version in the versioned resource name
+ assert model.versioned_resource_name.endswith(f"@{_TEST_VERSION_ID}")
+
+ def test_init_with_version_arg(self, get_model_with_version):
+ model = models.Model(model_name=_TEST_MODEL_NAME, version=_TEST_VERSION_ID)
+
+ assert model.version_aliases == [_TEST_VERSION_ALIAS_1, _TEST_VERSION_ALIAS_2]
+ assert model.display_name == _TEST_MODEL_NAME
+ assert model.resource_name == _TEST_MODEL_PARENT
+ assert model.version_id == _TEST_VERSION_ID
+ assert model.version_description == _TEST_MODEL_VERSION_DESCRIPTION_2
+ # The Model yielded from upload should not have a version in resource name
+ assert "@" not in model.resource_name
+ # The Model yielded from upload SHOULD have a version in the versioned resource name
+ assert model.versioned_resource_name.endswith(f"@{_TEST_VERSION_ID}")
+
+ @pytest.mark.parametrize(
+ "parent,location,project",
+ [
+ (_TEST_MODEL_NAME, _TEST_LOCATION, _TEST_PROJECT),
+ (_TEST_MODEL_PARENT, None, None),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "aliases,default,goal",
+ [
+ (["alias1", "alias2"], True, ["alias1", "alias2", "default"]),
+ (None, True, ["default"]),
+ (["alias1", "alias2", "default"], True, ["alias1", "alias2", "default"]),
+ (["alias1", "alias2", "default"], False, ["alias1", "alias2", "default"]),
+ (["alias1", "alias2"], False, ["alias1", "alias2"]),
+ (None, False, []),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "callable, model_file_path, saved_model",
+ [
+ (models.Model.upload, None, None),
+ (models.Model.upload_scikit_learn_model_file, "my_model.pkl", None),
+ (models.Model.upload_tensorflow_saved_model, None, "saved_model.pb"),
+ (models.Model.upload_xgboost_model_file, "my_model.xgb", None),
+ ],
+ )
+ def test_upload_new_version(
+ self,
+ upload_model_with_version_mock,
+ get_model_with_version,
+ mock_storage_blob_upload_from_filename,
+ parent,
+ location,
+ project,
+ aliases,
+ default,
+ goal,
+ callable,
+ model_file_path,
+ saved_model,
+ tmp_path: pathlib.Path,
+ ):
+ args = {
+ "display_name": _TEST_MODEL_NAME,
+ "location": location,
+ "project": project,
+ "sync": True,
+ "upload_request_timeout": None,
+ "model_id": _TEST_ID,
+ "parent_model": parent,
+ "version_description": _TEST_MODEL_VERSION_DESCRIPTION_2,
+ "version_aliases": aliases,
+ "is_default_version": default,
+ }
+ if model_file_path:
+ model_file_path = tmp_path / model_file_path
+ model_file_path.touch()
+ args["model_file_path"] = str(model_file_path)
+ elif saved_model:
+ saved_model_dir = tmp_path / "saved_model"
+ saved_model_dir.mkdir()
+ (saved_model_dir / saved_model).touch()
+ args["saved_model_dir"] = str(saved_model_dir)
+ else:
+ args["serving_container_image_uri"] = _TEST_SERVING_CONTAINER_IMAGE
+
+ _ = callable(**args)
+
+ upload_model_with_version_mock.assert_called_once()
+ upload_model_call_kwargs = upload_model_with_version_mock.call_args[1]
+ upload_model_request = upload_model_call_kwargs["request"]
+
+ assert upload_model_request.model.display_name == _TEST_MODEL_NAME
+ assert upload_model_request.model.version_aliases == goal
+ assert (
+ upload_model_request.model.version_description
+ == _TEST_MODEL_VERSION_DESCRIPTION_2
+ )
+ assert upload_model_request.parent_model == _TEST_MODEL_PARENT
+ assert upload_model_request.model_id == _TEST_ID
+
+ def test_get_model_instance_from_registry(self, get_model_with_version):
+ registry = models.ModelRegistry(_TEST_MODEL_PARENT)
+ model = registry.get_model(_TEST_VERSION_ALIAS_1)
+ assert model.version_aliases == [_TEST_VERSION_ALIAS_1, _TEST_VERSION_ALIAS_2]
+ assert model.display_name == _TEST_MODEL_NAME
+ assert model.resource_name == _TEST_MODEL_PARENT
+ assert model.version_id == _TEST_VERSION_ID
+ assert model.version_description == _TEST_MODEL_VERSION_DESCRIPTION_2
+
+ def test_list_versions(self, list_model_versions_mock, get_model_with_version):
+ my_model = models.Model(_TEST_MODEL_NAME, _TEST_PROJECT, _TEST_LOCATION)
+ versions = my_model.versioning_registry.list_versions()
+
+ assert len(versions) == len(_TEST_MODEL_VERSIONS_LIST)
+
+ for i in range(len(versions)):
+ ver = versions[i]
+ model = _TEST_MODEL_VERSIONS_LIST[i]
+ assert ver.version_id == model.version_id
+ assert ver.version_create_time == model.version_create_time
+ assert ver.version_update_time == model.version_update_time
+ assert ver.model_display_name == model.display_name
+ assert ver.version_aliases == model.version_aliases
+ assert ver.version_description == model.version_description
+
+ assert model.name.startswith(ver.model_resource_name)
+ assert model.name.endswith(ver.version_id)
+
+ def test_list_versions_with_filter(
+ self, list_model_versions_with_filter_mock, get_model_with_version
+ ):
+ my_model = models.Model(_TEST_MODEL_NAME, _TEST_PROJECT, _TEST_LOCATION)
+ versions = my_model.versioning_registry.list_versions(
+ filter='labels.team="experimentation"'
+ )
+
+ assert len(versions) == len(_TEST_MODEL_VERSIONS_WITH_FILTER_LIST)
+
+ ver = versions[0]
+ model = _TEST_MODEL_VERSIONS_WITH_FILTER_LIST[0]
+ assert ver.version_id == "3"
+ assert ver.version_create_time == model.version_create_time
+ assert ver.version_update_time == model.version_update_time
+ assert ver.model_display_name == model.display_name
+ assert ver.version_aliases == model.version_aliases
+ assert ver.version_description == model.version_description
+
+ assert model.name.startswith(ver.model_resource_name)
+ assert model.name.endswith(ver.version_id)
+
+ def test_get_version_info(self, get_model_with_version):
+ my_model = models.Model(_TEST_MODEL_NAME, _TEST_PROJECT, _TEST_LOCATION)
+ ver = my_model.versioning_registry.get_version_info("2")
+ model = _TEST_MODEL_OBJ_WITH_VERSION
+
+ assert ver.version_id == model.version_id
+ assert ver.version_create_time == model.version_create_time
+ assert ver.version_update_time == model.version_update_time
+ assert ver.model_display_name == model.display_name
+ assert ver.version_aliases == model.version_aliases
+ assert ver.version_description == model.version_description
+
+ assert model.name.startswith(ver.model_resource_name)
+ assert model.name.endswith(ver.version_id)
+
+ def test_delete_version(self, delete_model_version_mock, get_model_with_version):
+ my_model = models.Model(_TEST_MODEL_NAME, _TEST_PROJECT, _TEST_LOCATION)
+ my_model.versioning_registry.delete_version(_TEST_VERSION_ALIAS_1)
+
+ delete_model_version_mock.assert_called_once_with(
+ name=models.ModelRegistry._get_versioned_name(
+ _TEST_MODEL_PARENT, _TEST_VERSION_ALIAS_1
+ )
+ )
+
+ @pytest.mark.usefixtures("get_model_mock")
+ def test_update_version(
+ self, update_model_mock, get_model_mock, get_model_with_version
+ ):
+ my_model = models.Model(_TEST_MODEL_NAME, _TEST_PROJECT, _TEST_LOCATION)
+ my_model.versioning_registry.update_version(
+ _TEST_VERSION_ALIAS_1,
+ version_description="update version",
+ labels=_TEST_LABEL,
+ )
+
+ model_to_update = _TEST_MODEL_OBJ_WITH_VERSION
+ model_to_update.version_description = "update version"
+ model_to_update.labels = _TEST_LABEL
+
+ update_mask = field_mask_pb2.FieldMask(paths=["version_description", "labels"])
+
+ update_model_mock.assert_called_once_with(
+ model=model_to_update, update_mask=update_mask
+ )
+
+ def test_add_versions(self, merge_version_aliases_mock, get_model_with_version):
+ my_model = models.Model(_TEST_MODEL_NAME, _TEST_PROJECT, _TEST_LOCATION)
+ my_model.versioning_registry.add_version_aliases(
+ ["new-alias", "other-new-alias"], _TEST_VERSION_ALIAS_1
+ )
+
+ merge_version_aliases_mock.assert_called_once_with(
+ name=models.ModelRegistry._get_versioned_name(
+ _TEST_MODEL_PARENT, _TEST_VERSION_ALIAS_1
+ ),
+ version_aliases=["new-alias", "other-new-alias"],
+ )
+
+ def test_remove_versions(self, merge_version_aliases_mock, get_model_with_version):
+ my_model = models.Model(_TEST_MODEL_NAME, _TEST_PROJECT, _TEST_LOCATION)
+ my_model.versioning_registry.remove_version_aliases(
+ ["old-alias", "other-old-alias"], _TEST_VERSION_ALIAS_1
+ )
+
+ merge_version_aliases_mock.assert_called_once_with(
+ name=models.ModelRegistry._get_versioned_name(
+ _TEST_MODEL_PARENT, _TEST_VERSION_ALIAS_1
+ ),
+ version_aliases=["-old-alias", "-other-old-alias"],
+ )
+
+ @pytest.mark.parametrize(
+ "resource",
+ [
+ "abc",
+ "abc@1",
+ "abc@my-alias",
+ pytest.param("@5", marks=pytest.mark.xfail),
+ pytest.param("abc@", marks=pytest.mark.xfail),
+ pytest.param("abc#alias", marks=pytest.mark.xfail),
+ ],
+ )
+ def test_model_resource_id_validator(self, resource):
+ models.Model._revisioned_resource_id_validator(resource)
+
+ def test_list(self, list_models_mock):
+ models_list = models.Model.list()
+
+ assert len(models_list) == len(_TEST_MODELS_LIST)
+
+ for i in range(len(models_list)):
+ listed_model = models_list[i]
+ ideal_model = _TEST_MODELS_LIST[i]
+ assert listed_model.version_id == ideal_model.version_id
+ assert listed_model.version_create_time == ideal_model.version_create_time
+ assert listed_model.version_update_time == ideal_model.version_update_time
+ assert listed_model.display_name == ideal_model.display_name
+ assert listed_model.version_aliases == ideal_model.version_aliases
+ assert listed_model.version_description == ideal_model.version_description
+
+ assert ideal_model.name.startswith(listed_model.resource_name)
+ if "@" in ideal_model.name:
+ assert ideal_model.name.endswith(listed_model.version_id)
+
+ assert listed_model.versioning_registry
+ assert listed_model._revisioned_resource_id_validator
+
+ @pytest.mark.usefixtures(
+ "get_endpoint_mock",
+ "get_model_mock",
+ "create_endpoint_mock",
+ "raw_predict_mock",
+ )
+ def test_raw_predict(self, raw_predict_mock):
+ test_endpoint = models.Endpoint(_TEST_ID)
+ test_endpoint.raw_predict(_TEST_RAW_PREDICT_DATA, _TEST_RAW_PREDICT_HEADER)
+ raw_predict_mock.assert_called_once_with(
+ url=_TEST_RAW_PREDICT_URL,
+ data=_TEST_RAW_PREDICT_DATA,
+ headers=_TEST_RAW_PREDICT_HEADER,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec_json",
+ [_TEST_MODEL_EVAL_PIPELINE_JOB],
+ )
+ def test_model_evaluate_with_gcs_input_uris(
+ self,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_get_model_evaluation,
+ list_model_evaluations_mock,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_successfully_completed_eval_job,
+ mock_pipeline_bucket_exists,
+ mock_load_yaml_and_json,
+ job_spec_json,
+ mock_request_urlopen,
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ eval_job = test_model.evaluate(
+ prediction_type="classification",
+ target_field_name="class",
+ class_labels=_TEST_MODEL_EVAL_CLASS_LABELS,
+ staging_bucket="gs://my-eval-staging-path",
+ gcs_source_uris=["gs://test-bucket/test-file.csv"],
+ )
+
+ assert isinstance(eval_job, model_evaluation_job._ModelEvaluationJob)
+
+ assert mock_pipeline_service_create.called_once
+
+ assert mock_pipeline_service_get.called_once
+
+ eval_job.wait()
+
+ eval_resource = eval_job.get_model_evaluation()
+
+ assert isinstance(eval_resource, aiplatform.ModelEvaluation)
+
+ assert eval_resource.metrics == _TEST_MODEL_EVAL_METRICS
+
+ assert isinstance(eval_resource._backing_pipeline_job, aiplatform.PipelineJob)
+
+ @pytest.mark.parametrize(
+ "job_spec_json",
+ [_TEST_MODEL_EVAL_PIPELINE_JOB_WITH_BQ_INPUT],
+ )
+ def test_model_evaluate_with_bigquery_input(
+ self,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_load_yaml_and_json,
+ mock_pipeline_bucket_exists,
+ job_spec_json,
+ mock_request_urlopen,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket="gs://my-bucket")
+
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ eval_job = test_model.evaluate(
+ prediction_type="classification",
+ target_field_name="class",
+ class_labels=_TEST_MODEL_EVAL_CLASS_LABELS,
+ bigquery_source_uri=_TEST_BIGQUERY_EVAL_INPUT_URI,
+ bigquery_destination_output_uri=_TEST_BIGQUERY_EVAL_DESTINATION_URI,
+ )
+
+ assert isinstance(eval_job, model_evaluation_job._ModelEvaluationJob)
+
+ assert mock_pipeline_service_create.called_once
+
+ assert mock_pipeline_service_get.called_once
+
+ @pytest.mark.parametrize(
+ "job_spec_json",
+ [_TEST_MODEL_EVAL_PIPELINE_JOB],
+ )
+ def test_model_evaluate_using_initialized_staging_bucket(
+ self,
+ get_model_mock,
+ mock_model_eval_get,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ mock_load_yaml_and_json,
+ job_spec_json,
+ mock_request_urlopen,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket="gs://my-bucket")
+
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ eval_job = test_model.evaluate(
+ prediction_type="classification",
+ target_field_name="class",
+ class_labels=_TEST_MODEL_EVAL_CLASS_LABELS,
+ gcs_source_uris=["gs://test-bucket/test-file.csv"],
+ )
+
+ assert isinstance(eval_job, model_evaluation_job._ModelEvaluationJob)
+
+ assert mock_pipeline_service_create.called_once
+
+ assert mock_pipeline_service_get.called_once
+
+ def test_model_evaluate_with_no_staging_path_or_initialized_staging_bucket_raises(
+ self,
+ get_model_mock,
+ mock_model_eval_get,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ with pytest.raises(ValueError):
+ test_model.evaluate(
+ prediction_type="classification",
+ target_field_name="class",
+ class_labels=_TEST_MODEL_EVAL_CLASS_LABELS,
+ gcs_source_uris=["gs://test-bucket/test-file.csv"],
+ )
+
+ def test_model_evaluate_with_invalid_prediction_type_raises(
+ self,
+ get_model_mock,
+ mock_model_eval_get,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ with pytest.raises(ValueError):
+ test_model.evaluate(
+ prediction_type="invalid_prediction_type",
+ target_field_name="class",
+ gcs_source_uris=["gs://test-bucket/test-file.csv"],
+ )
+
+ def test_model_evaluate_with_invalid_gcs_uri_raises(
+ self,
+ get_model_mock,
+ mock_model_eval_get,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ with pytest.raises(ValueError):
+ test_model.evaluate(
+ prediction_type="classification",
+ target_field_name="class",
+ gcs_source_uris=["storage.googleapis.com/test-bucket/test-file.csv"],
+ )
+
+ def test_model_evaluate_with_invalid_bq_uri_raises(
+ self,
+ get_model_mock,
+ mock_model_eval_get,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ test_model = models.Model(model_name=_TEST_MODEL_RESOURCE_NAME)
+
+ with pytest.raises(ValueError):
+ test_model.evaluate(
+ prediction_type="classification",
+ target_field_name="class",
+ bigquery_source_uri="my-project.my-dataset.my-table",
+ bigquery_destination_output_uri="bq://my-project.my-dataset.my-table",
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_persistent_resource_preview.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_persistent_resource_preview.py
new file mode 100644
index 0000000000000000000000000000000000000000..f189af2a8b191e247f85f275e577f0deb14d7203
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_persistent_resource_preview.py
@@ -0,0 +1,368 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import copy
+import importlib
+from unittest import mock
+
+from google.api_core import operation as ga_operation
+from google.cloud import aiplatform
+from google.cloud.aiplatform.compat.services import (
+ persistent_resource_service_client_v1,
+)
+from google.cloud.aiplatform.compat.services import (
+ persistent_resource_service_client_v1beta1 as persistent_resource_service_client_compat,
+)
+from google.cloud.aiplatform.compat.types import (
+ encryption_spec_v1beta1 as encryption_spec_compat,
+)
+from google.cloud.aiplatform.compat.types import (
+ persistent_resource_service_v1beta1 as persistent_resource_service_compat,
+)
+from google.cloud.aiplatform.compat.types import (
+ persistent_resource_v1beta1 as persistent_resource_compat,
+)
+from google.cloud.aiplatform.preview import persistent_resource
+import constants as test_constants
+import pytest
+
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT
+
+_TEST_PERSISTENT_RESOURCE_ID = (
+ test_constants.PersistentResourceConstants._TEST_PERSISTENT_RESOURCE_ID
+)
+_TEST_PERSISTENT_RESOURCE_DISPLAY_NAME = (
+ test_constants.PersistentResourceConstants._TEST_PERSISTENT_RESOURCE_DISPLAY_NAME
+)
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+_TEST_NETWORK = test_constants.TrainingJobConstants._TEST_NETWORK
+_TEST_RESERVED_IP_RANGES = test_constants.TrainingJobConstants._TEST_RESERVED_IP_RANGES
+_TEST_KEY_NAME = test_constants.TrainingJobConstants._TEST_DEFAULT_ENCRYPTION_KEY_NAME
+_TEST_SERVICE_ACCOUNT = test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT
+
+_TEST_PERSISTENT_RESOURCE_PROTO = persistent_resource_compat.PersistentResource(
+ name=_TEST_PERSISTENT_RESOURCE_ID,
+ resource_pools=[
+ test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL,
+ ],
+)
+
+
+def _get_persistent_resource_proto(
+ state=None, name=None, error=None
+) -> persistent_resource_compat.PersistentResource:
+ persistent_resource_proto = copy.deepcopy(_TEST_PERSISTENT_RESOURCE_PROTO)
+ persistent_resource_proto.name = name
+ persistent_resource_proto.state = state
+ persistent_resource_proto.error = error
+ return persistent_resource_proto
+
+
+def _get_resource_name(name=None, project=_TEST_PROJECT, location=_TEST_LOCATION):
+ return "projects/{}/locations/{}/persistentResources/{}".format(
+ project, location, name
+ )
+
+
+@pytest.fixture
+def create_preview_persistent_resource_mock():
+ with mock.patch.object(
+ (persistent_resource_service_client_compat.PersistentResourceServiceClient),
+ "create_persistent_resource",
+ ) as create_persistent_resource_mock:
+ create_lro = mock.Mock(ga_operation.Operation)
+ create_lro.result.return_value = None
+
+ create_persistent_resource_mock.return_value = create_lro
+ yield create_persistent_resource_mock
+
+
+@pytest.fixture
+def get_persistent_resource_mock():
+ with mock.patch.object(
+ (persistent_resource_service_client_v1.PersistentResourceServiceClient),
+ "get_persistent_resource",
+ ) as get_persistent_resource_mock:
+ get_persistent_resource_mock.side_effect = [
+ _get_persistent_resource_proto(
+ name=_TEST_PERSISTENT_RESOURCE_ID,
+ state=(persistent_resource_compat.PersistentResource.State.RUNNING),
+ ),
+ ]
+
+ yield get_persistent_resource_mock
+
+
+_TEST_LIST_RESOURCE_1 = _get_persistent_resource_proto(
+ name="resource_1",
+ state=(persistent_resource_compat.PersistentResource.State.RUNNING),
+)
+_TEST_LIST_RESOURCE_2 = _get_persistent_resource_proto(
+ name="resource_2",
+ state=(persistent_resource_compat.PersistentResource.State.PROVISIONING),
+)
+_TEST_LIST_RESOURCE_3 = _get_persistent_resource_proto(
+ name="resource_3",
+ state=(persistent_resource_compat.PersistentResource.State.STOPPING),
+)
+_TEST_LIST_RESOURCE_4 = _get_persistent_resource_proto(
+ name="resource_4",
+ state=(persistent_resource_compat.PersistentResource.State.ERROR),
+)
+
+_TEST_PERSISTENT_RESOURCE_LIST = [
+ _TEST_LIST_RESOURCE_1,
+ _TEST_LIST_RESOURCE_2,
+ _TEST_LIST_RESOURCE_3,
+ _TEST_LIST_RESOURCE_4,
+]
+
+
+@pytest.fixture
+def list_persistent_resources_mock():
+ with mock.patch.object(
+ (persistent_resource_service_client_v1.PersistentResourceServiceClient),
+ "list_persistent_resources",
+ ) as list_persistent_resources_mock:
+ list_persistent_resources_mock.return_value = _TEST_PERSISTENT_RESOURCE_LIST
+
+ yield list_persistent_resources_mock
+
+
+@pytest.fixture
+def delete_persistent_resource_mock():
+ with mock.patch.object(
+ (persistent_resource_service_client_v1.PersistentResourceServiceClient),
+ "delete_persistent_resource",
+ ) as delete_persistent_resource_mock:
+ delete_lro = mock.Mock(ga_operation.Operation)
+ delete_lro.result.return_value = (
+ persistent_resource_service_compat.DeletePersistentResourceRequest()
+ )
+ delete_persistent_resource_mock.return_value = delete_lro
+ yield delete_persistent_resource_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestPersistentResourcePreview:
+ def setup_method(self):
+ importlib.reload(aiplatform.initializer)
+ importlib.reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_persistent_resource(
+ self,
+ create_preview_persistent_resource_mock,
+ get_persistent_resource_mock,
+ sync,
+ ):
+ my_test_resource = persistent_resource.PersistentResource.create(
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ display_name=_TEST_PERSISTENT_RESOURCE_DISPLAY_NAME,
+ resource_pools=[
+ test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL,
+ ],
+ labels=_TEST_LABELS,
+ sync=sync,
+ )
+
+ if not sync:
+ my_test_resource.wait()
+
+ expected_persistent_resource_arg = _get_persistent_resource_proto(
+ name=_TEST_PERSISTENT_RESOURCE_ID,
+ )
+
+ expected_persistent_resource_arg.display_name = (
+ _TEST_PERSISTENT_RESOURCE_DISPLAY_NAME
+ )
+ expected_persistent_resource_arg.labels = _TEST_LABELS
+
+ create_preview_persistent_resource_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ persistent_resource=expected_persistent_resource_arg,
+ timeout=None,
+ )
+
+ get_persistent_resource_mock.assert_called_once()
+ _, mock_kwargs = get_persistent_resource_mock.call_args
+ assert mock_kwargs["name"] == _get_resource_name(
+ name=_TEST_PERSISTENT_RESOURCE_ID
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_persistent_resource_with_network(
+ self,
+ create_preview_persistent_resource_mock,
+ get_persistent_resource_mock,
+ sync,
+ ):
+ my_test_resource = persistent_resource.PersistentResource.create(
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ resource_pools=[
+ test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL,
+ ],
+ network=_TEST_NETWORK,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ sync=sync,
+ )
+
+ if not sync:
+ my_test_resource.wait()
+
+ expected_persistent_resource_arg = _get_persistent_resource_proto(
+ name=_TEST_PERSISTENT_RESOURCE_ID,
+ )
+
+ expected_persistent_resource_arg.network = _TEST_NETWORK
+ expected_persistent_resource_arg.reserved_ip_ranges = _TEST_RESERVED_IP_RANGES
+
+ create_preview_persistent_resource_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ persistent_resource=expected_persistent_resource_arg,
+ timeout=None,
+ )
+ get_persistent_resource_mock.assert_called_once()
+ _, mock_kwargs = get_persistent_resource_mock.call_args
+ assert mock_kwargs["name"] == _get_resource_name(
+ name=_TEST_PERSISTENT_RESOURCE_ID
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_persistent_resource_with_kms_key(
+ self,
+ create_preview_persistent_resource_mock,
+ get_persistent_resource_mock,
+ sync,
+ ):
+ my_test_resource = persistent_resource.PersistentResource.create(
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ resource_pools=[
+ test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL,
+ ],
+ kms_key_name=_TEST_KEY_NAME,
+ sync=sync,
+ )
+
+ if not sync:
+ my_test_resource.wait()
+
+ expected_persistent_resource_arg = _get_persistent_resource_proto(
+ name=_TEST_PERSISTENT_RESOURCE_ID,
+ )
+
+ expected_persistent_resource_arg.encryption_spec = (
+ encryption_spec_compat.EncryptionSpec(kms_key_name=_TEST_KEY_NAME)
+ )
+
+ create_preview_persistent_resource_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ persistent_resource=expected_persistent_resource_arg,
+ timeout=None,
+ )
+ get_persistent_resource_mock.assert_called_once()
+ _, mock_kwargs = get_persistent_resource_mock.call_args
+ assert mock_kwargs["name"] == _get_resource_name(
+ name=_TEST_PERSISTENT_RESOURCE_ID
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_persistent_resource_with_service_account(
+ self,
+ create_preview_persistent_resource_mock,
+ get_persistent_resource_mock,
+ sync,
+ ):
+ my_test_resource = persistent_resource.PersistentResource.create(
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ resource_pools=[
+ test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL,
+ ],
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ )
+
+ if not sync:
+ my_test_resource.wait()
+
+ expected_persistent_resource_arg = _get_persistent_resource_proto(
+ name=_TEST_PERSISTENT_RESOURCE_ID,
+ )
+
+ service_account_spec = persistent_resource_compat.ServiceAccountSpec(
+ enable_custom_service_account=True, service_account=_TEST_SERVICE_ACCOUNT
+ )
+ expected_persistent_resource_arg.resource_runtime_spec = (
+ persistent_resource_compat.ResourceRuntimeSpec(
+ service_account_spec=service_account_spec
+ )
+ )
+
+ create_preview_persistent_resource_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ persistent_resource=expected_persistent_resource_arg,
+ timeout=None,
+ )
+ get_persistent_resource_mock.assert_called_once()
+ _, mock_kwargs = get_persistent_resource_mock.call_args
+ assert mock_kwargs["name"] == _get_resource_name(
+ name=_TEST_PERSISTENT_RESOURCE_ID
+ )
+
+ def test_list_persistent_resources(self, list_persistent_resources_mock):
+ resource_list = persistent_resource.PersistentResource.list()
+
+ list_persistent_resources_mock.assert_called_once()
+ assert len(resource_list) == len(_TEST_PERSISTENT_RESOURCE_LIST)
+
+ for i in range(len(resource_list)):
+ actual_resource = resource_list[i]
+ expected_resource = _TEST_PERSISTENT_RESOURCE_LIST[i]
+
+ assert actual_resource.name == expected_resource.name
+ assert actual_resource.state == expected_resource.state
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_delete_persistent_resource(
+ self,
+ get_persistent_resource_mock,
+ delete_persistent_resource_mock,
+ sync,
+ ):
+ test_resource = persistent_resource.PersistentResource(
+ _TEST_PERSISTENT_RESOURCE_ID
+ )
+ test_resource.delete(sync=sync)
+
+ if not sync:
+ test_resource.wait()
+
+ get_persistent_resource_mock.assert_called_once()
+ delete_persistent_resource_mock.assert_called_once_with(
+ name=_TEST_PERSISTENT_RESOURCE_ID,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_pipeline_based_service.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_pipeline_based_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..08a9583333d3b23295f322e467ecd4ad2aac1f5c
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_pipeline_based_service.py
@@ -0,0 +1,657 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import datetime
+import json
+import pytest
+from unittest import mock
+
+from google.auth import credentials as auth_credentials
+from google.protobuf import json_format
+from google.cloud import storage
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform.metadata import constants
+from google.cloud.aiplatform.utils import gcs_utils
+import constants as test_constants
+
+
+from google.cloud.aiplatform_v1.services.pipeline_service import (
+ client as pipeline_service_client_v1,
+)
+from google.cloud.aiplatform_v1.types import (
+ pipeline_job as gca_pipeline_job_v1,
+)
+from google.cloud.aiplatform_v1.types import (
+ pipeline_state as gca_pipeline_state_v1,
+)
+
+from google.cloud.aiplatform._pipeline_based_service import (
+ pipeline_based_service,
+)
+
+from google.cloud.aiplatform_v1 import Execution as GapicExecution
+from google.cloud.aiplatform_v1 import MetadataServiceClient
+
+
+# pipeline job
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_PIPELINE_JOB_DISPLAY_NAME = "sample-pipeline-job-display-name"
+_TEST_PIPELINE_JOB_ID = "sample-test-pipeline-202111111"
+_TEST_GCS_BUCKET_NAME = "my-bucket"
+_TEST_CREDENTIALS = auth_credentials.AnonymousCredentials()
+_TEST_SERVICE_ACCOUNT = "abcde@my-project.iam.gserviceaccount.com"
+_TEST_COMPONENT_IDENTIFIER = "fake-pipeline-based-service"
+_TEST_PIPELINE_NAME_IDENTIFIER = "my-pipeline"
+_TEST_INVALID_PIPELINE_NAME_IDENTIFIER = "not-a-valid-pipeline-name"
+_TEST_PIPELINE_CREATE_TIME = datetime.datetime.now()
+
+
+_TEST_TEMPLATE_PATH = f"gs://{_TEST_GCS_BUCKET_NAME}/job_spec.json"
+_TEST_TEMPLATE_REF = {"test_pipeline_type": _TEST_TEMPLATE_PATH}
+_TEST_PIPELINE_ROOT = f"gs://{_TEST_GCS_BUCKET_NAME}/pipeline_root"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_NETWORK = f"projects/{_TEST_PROJECT}/global/networks/{_TEST_PIPELINE_JOB_ID}"
+
+_TEST_PIPELINE_JOB_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/pipelineJobs/{_TEST_PIPELINE_JOB_ID}"
+_TEST_INVALID_PIPELINE_JOB_NAME = (
+ f"prj/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/{_TEST_PIPELINE_JOB_ID}"
+)
+
+# executions: this is used in test_list_pipeline_based_service
+_TEST_EXECUTION_PARENT = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+
+_TEST_RUN = "run-1"
+_TEST_OTHER_RUN = "run-2"
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_RUN}"
+_TEST_EXECUTION_NAME = f"{_TEST_EXECUTION_PARENT}/executions/{_TEST_EXECUTION_ID}"
+
+
+_TEST_OTHER_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_OTHER_RUN}"
+_TEST_OTHER_EXECUTION_NAME = (
+ f"{_TEST_EXECUTION_PARENT}/executions/{_TEST_OTHER_EXECUTION_ID}"
+)
+
+# execution metadata parameters: used in test_list_pipeline_based_service
+_TEST_PARAM_KEY_1 = "learning_rate"
+_TEST_PARAM_KEY_2 = "dropout"
+_TEST_PIPELINE_PARAM_KEY = "pipeline_job_resource_name"
+_TEST_PARAMS = {
+ _TEST_PARAM_KEY_1: 0.01,
+ _TEST_PARAM_KEY_2: 0.2,
+ _TEST_PIPELINE_PARAM_KEY: _TEST_PIPELINE_JOB_NAME,
+}
+_TEST_OTHER_PARAMS = {_TEST_PARAM_KEY_1: 0.02, _TEST_PARAM_KEY_2: 0.3}
+
+
+# pipeline based service template json
+_TEST_PIPELINE_PARAMETER_VALUES = {
+ "string_param": "hello world",
+ "bool_param": True,
+ "double_param": 12.34,
+ "int_param": 5678,
+ "list_int_param": [123, 456, 789],
+ "list_string_param": ["lorem", "ipsum"],
+ "struct_param": {"key1": 12345, "key2": 67890},
+}
+
+_TEST_PIPELINE_SPEC_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "string_param": {"parameterType": "STRING"},
+ "bool_param": {"parameterType": "BOOLEAN"},
+ "double_param": {"parameterType": "NUMBER_DOUBLE"},
+ "int_param": {"parameterType": "NUMBER_INTEGER"},
+ "list_int_param": {"parameterType": "LIST"},
+ "list_string_param": {"parameterType": "LIST"},
+ "struct_param": {"parameterType": "STRUCT"},
+ }
+ },
+ },
+ "schemaVersion": "2.1.0",
+ "components": {},
+ }
+)
+
+_TEST_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {"parameterValues": {}},
+ "pipelineSpec": json.loads(_TEST_PIPELINE_SPEC_JSON),
+ }
+)
+
+
+def make_pipeline_job(state):
+ return gca_pipeline_job_v1.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=state,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ pipeline_spec=json.loads(_TEST_PIPELINE_SPEC_JSON),
+ job_detail=gca_pipeline_job_v1.PipelineJobDetail(
+ task_details=[
+ gca_pipeline_job_v1.PipelineTaskDetail(
+ task_id=123,
+ execution=GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata={"component_type": _TEST_COMPONENT_IDENTIFIER},
+ ),
+ ),
+ ],
+ ),
+ )
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_create_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_job_get():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_RUNNING
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_RUNNING
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_RUNNING
+ ),
+ make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_FAILED
+ ),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_load_yaml_and_json(job_spec_json):
+ with mock.patch.object(
+ storage.Blob, "download_as_bytes"
+ ) as mock_load_yaml_and_json:
+ mock_load_yaml_and_json.return_value = job_spec_json.encode()
+ yield mock_load_yaml_and_json
+
+
+@pytest.fixture
+def mock_pipeline_based_service_get():
+ with mock.patch.object(
+ pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_based_service:
+ mock_get_pipeline_based_service.return_value = make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_get_pipeline_based_service
+
+
+@pytest.fixture
+def get_execution_mock():
+ with mock.patch.object(
+ MetadataServiceClient, "get_execution"
+ ) as get_execution_mock:
+ get_execution_mock.return_value = GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata={"component_type": _TEST_COMPONENT_IDENTIFIER},
+ )
+ yield get_execution_mock
+
+
+@pytest.fixture
+def list_executions_mock():
+ with mock.patch.object(
+ MetadataServiceClient, "list_executions"
+ ) as list_executions_mock:
+ list_executions_mock.return_value = [
+ GapicExecution(
+ name=_TEST_EXECUTION_NAME,
+ display_name=_TEST_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata=_TEST_PARAMS,
+ ),
+ GapicExecution(
+ name=_TEST_OTHER_EXECUTION_NAME,
+ display_name=_TEST_OTHER_RUN,
+ schema_title=constants.SYSTEM_RUN,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
+ metadata=_TEST_OTHER_PARAMS,
+ ),
+ ]
+ yield list_executions_mock
+
+
+@pytest.fixture
+def mock_pipeline_bucket_exists():
+ def mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist(
+ output_artifacts_gcs_dir=None,
+ service_account=None,
+ project=None,
+ location=None,
+ credentials=None,
+ ):
+ output_artifacts_gcs_dir = (
+ output_artifacts_gcs_dir
+ or gcs_utils.generate_gcs_directory_for_pipeline_artifacts(
+ project=project,
+ location=location,
+ )
+ )
+ return output_artifacts_gcs_dir
+
+ with mock.patch(
+ "google.cloud.aiplatform.utils.gcs_utils.create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist",
+ wraps=mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist,
+ ) as mock_context:
+ yield mock_context
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestPipelineBasedService:
+ class FakePipelineBasedService(
+ pipeline_based_service._VertexAiPipelineBasedService
+ ):
+ _template_ref = _TEST_TEMPLATE_REF
+ _metadata_output_artifact = "TODO"
+ _creation_log_message = (
+ "Created PipelineJob for your fake PipelineBasedService."
+ )
+ _component_identifier = _TEST_COMPONENT_IDENTIFIER
+ _template_name_identifier = None
+
+ @classmethod
+ def submit(cls) -> pipeline_based_service._VertexAiPipelineBasedService:
+ return cls._create_and_submit_pipeline_job(
+ template_params={}, template_path=_TEST_TEMPLATE_PATH
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec_json",
+ [_TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
+ )
+ def test_init_pipeline_based_service(
+ self,
+ pipeline_name,
+ mock_pipeline_job_get,
+ mock_pipeline_based_service_get,
+ mock_load_yaml_and_json,
+ job_spec_json,
+ mock_pipeline_service_create,
+ get_execution_mock,
+ mock_pipeline_bucket_exists,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ )
+
+ pipeline_service = self.FakePipelineBasedService(
+ pipeline_job_name=pipeline_name
+ )
+
+ mock_pipeline_based_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert get_execution_mock.call_count == 1
+
+ # There are 2 get requests made for each item: 1 in the constructor and
+ # 1 in the validation method
+ assert mock_pipeline_based_service_get.call_count == 2
+
+ assert not mock_pipeline_service_create.called
+
+ assert pipeline_service.backing_pipeline_job._gca_resource == make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec_json",
+ [_TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
+ )
+ def test_init_pipeline_based_service_with_template_name_identifier(
+ self,
+ pipeline_name,
+ mock_pipeline_job_get,
+ mock_pipeline_based_service_get,
+ mock_load_yaml_and_json,
+ job_spec_json,
+ mock_pipeline_service_create,
+ get_execution_mock,
+ mock_pipeline_bucket_exists,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ )
+
+ self.FakePipelineBasedService._template_name_identifier = (
+ _TEST_PIPELINE_NAME_IDENTIFIER
+ )
+
+ self.FakePipelineBasedService(pipeline_job_name=_TEST_PIPELINE_JOB_ID)
+
+ mock_pipeline_based_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec_json",
+ [_TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
+ )
+ def test_init_pipeline_based_service_with_invalid_template_name_identifier_raises(
+ self,
+ pipeline_name,
+ mock_pipeline_job_get,
+ mock_pipeline_based_service_get,
+ mock_load_yaml_and_json,
+ job_spec_json,
+ mock_pipeline_service_create,
+ get_execution_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ self.FakePipelineBasedService._template_name_identifier = (
+ _TEST_INVALID_PIPELINE_NAME_IDENTIFIER
+ )
+
+ with pytest.raises(ValueError):
+ self.FakePipelineBasedService(pipeline_job_name=_TEST_PIPELINE_JOB_ID)
+
+ @pytest.mark.parametrize(
+ "job_spec_json",
+ [_TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
+ )
+ def test_init_pipeline_based_service_with_failed_pipeline_run(
+ self,
+ pipeline_name,
+ mock_pipeline_service_get_with_fail,
+ mock_load_yaml_and_json,
+ job_spec_json,
+ get_execution_mock,
+ mock_pipeline_bucket_exists,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ )
+
+ self.FakePipelineBasedService._template_name_identifier = None
+
+ self.FakePipelineBasedService(pipeline_job_name=_TEST_PIPELINE_JOB_ID)
+
+ mock_pipeline_service_get_with_fail.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert get_execution_mock.call_count == 1
+
+ @pytest.mark.parametrize(
+ "pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
+ )
+ def test_init_pipeline_based_service_without_template_ref_raises(
+ self,
+ pipeline_name,
+ mock_pipeline_job_get,
+ mock_pipeline_service_create,
+ ):
+ """Raises TypeError since abstract properties are not set.
+
+ _VertexAiPipelineBasedService class should only be instantiated
+ through a child class.
+ """
+
+ with pytest.raises(TypeError):
+ pipeline_based_service._VertexAiPipelineBasedService(
+ pipeline_job_id=pipeline_name,
+ )
+
+ def test_init_pipeline_based_service_with_invalid_pipeline_run_id_raises(
+ self,
+ mock_pipeline_job_get,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ with pytest.raises(ValueError):
+ self.FakePipelineBasedService(
+ pipeline_job_name=_TEST_INVALID_PIPELINE_JOB_NAME,
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec_json",
+ [_TEST_PIPELINE_JOB],
+ )
+ def test_create_and_submit_pipeline_job(
+ self,
+ mock_pipeline_job_get,
+ mock_pipeline_service_create,
+ mock_load_yaml_and_json,
+ job_spec_json,
+ mock_pipeline_bucket_exists,
+ ):
+
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ encryption_spec_key_name=test_constants.ProjectConstants._TEST_ENCRYPTION_KEY_NAME,
+ )
+
+ self.FakePipelineBasedService._template_name_identifier = None
+
+ test_pipeline_service = (
+ self.FakePipelineBasedService._create_and_submit_pipeline_job(
+ job_id=_TEST_PIPELINE_JOB_ID,
+ template_params=_TEST_PIPELINE_PARAMETER_VALUES,
+ template_path=_TEST_TEMPLATE_PATH,
+ pipeline_root=_TEST_PIPELINE_ROOT,
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_PIPELINE_ROOT,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec_json = yaml.safe_load(job_spec_json)
+
+ pipeline_spec = job_spec_json.get("pipelineSpec") or job_spec_json
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ encryption_spec=test_constants.ProjectConstants._TEST_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ assert mock_pipeline_service_create.call_count == 1
+
+ test_backing_pipeline_job = test_pipeline_service.backing_pipeline_job
+
+ assert mock_pipeline_job_get.call_count == 1
+
+ assert (
+ test_pipeline_service.gca_resource.name
+ == test_backing_pipeline_job.resource_name
+ )
+
+ def test_list_pipeline_based_service(
+ self,
+ mock_pipeline_based_service_get,
+ get_execution_mock,
+ list_executions_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ test_list_request = self.FakePipelineBasedService.list()
+
+ list_executions_mock.assert_called_once_with(
+ request={
+ "parent": _TEST_EXECUTION_PARENT,
+ "filter": f"metadata.component_type.string_value={self.FakePipelineBasedService._component_identifier}",
+ }
+ )
+
+ assert isinstance(
+ test_list_request[0], pipeline_based_service._VertexAiPipelineBasedService
+ )
+
+ assert (
+ test_list_request[0]._template_ref
+ == self.FakePipelineBasedService._template_ref
+ )
+
+ # only 1 of the 2 executions in list_executions_mock matches the
+ # properties of FakePipelineBasedService
+ assert len(test_list_request) == 1
+
+ def test_list_pipeline_based_service_with_template_name_identifier(
+ self,
+ mock_pipeline_based_service_get,
+ get_execution_mock,
+ list_executions_mock,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ self.FakePipelineBasedService._template_name_identifier = (
+ _TEST_INVALID_PIPELINE_NAME_IDENTIFIER
+ )
+
+ test_list_request = self.FakePipelineBasedService.list()
+
+ # None of the mock pipelines match the `_template_name_identifier`
+ # set above, so the returned list should be empty
+ assert len(test_list_request) == 0
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_pipeline_job_schedules.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_pipeline_job_schedules.py
new file mode 100644
index 0000000000000000000000000000000000000000..c27b7b608669464504e35d7272c127dba0a3a3cd
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_pipeline_job_schedules.py
@@ -0,0 +1,2270 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from datetime import datetime
+from importlib import reload
+import json
+from typing import Any, Dict
+from unittest import mock
+from unittest.mock import patch
+from urllib import request
+
+from google.auth import credentials as auth_credentials
+from google.cloud import storage
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import pipeline_jobs
+from google.cloud.aiplatform.constants import pipeline as pipeline_constants
+from google.cloud.aiplatform.compat.services import (
+ pipeline_service_client,
+ schedule_service_client,
+)
+from google.cloud.aiplatform.compat.types import (
+ context as gca_context,
+ encryption_spec as gca_encryption_spec_compat,
+ pipeline_job as gca_pipeline_job,
+ pipeline_state as gca_pipeline_state,
+ schedule as gca_schedule,
+)
+from google.cloud.aiplatform import (
+ pipeline_job_schedules,
+)
+from google.cloud.aiplatform.preview.pipelinejob import (
+ pipeline_jobs as preview_pipeline_jobs,
+)
+from google.cloud.aiplatform.preview.pipelinejobschedule import (
+ pipeline_job_schedules as preview_pipeline_job_schedules,
+)
+from google.cloud.aiplatform.utils import gcs_utils
+import pytest
+import yaml
+
+from google.protobuf import struct_pb2
+from google.protobuf import json_format
+from google.protobuf import field_mask_pb2 as field_mask
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_PIPELINE_JOB_DISPLAY_NAME = "sample-pipeline-job-display-name"
+_TEST_GCS_BUCKET_NAME = "my-bucket"
+_TEST_GCS_OUTPUT_DIRECTORY = f"gs://{_TEST_GCS_BUCKET_NAME}/output_artifacts/"
+_TEST_CREDENTIALS = auth_credentials.AnonymousCredentials()
+_TEST_SERVICE_ACCOUNT = "abcde@my-project.iam.gserviceaccount.com"
+
+_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME = "sample-pipeline-job-schedule-display-name"
+_TEST_PIPELINE_JOB_SCHEDULE_ID = "sample-test-schedule-20230417"
+_TEST_PIPELINE_JOB_SCHEDULE_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/schedules/{_TEST_PIPELINE_JOB_SCHEDULE_ID}"
+_TEST_PIPELINE_JOB_SCHEDULE_CRON = "* * * * *"
+_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT = 1
+_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT = 2
+
+_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_CRON = "1 1 1 1 1"
+_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT = 5
+
+_TEST_TEMPLATE_PATH = f"gs://{_TEST_GCS_BUCKET_NAME}/job_spec.json"
+_TEST_AR_TEMPLATE_PATH = "https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"
+_TEST_HTTPS_TEMPLATE_PATH = "https://raw.githubusercontent.com/repo/pipeline.json"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_NETWORK = (
+ f"projects/{_TEST_PROJECT}/global/networks/{_TEST_PIPELINE_JOB_SCHEDULE_ID}"
+)
+
+_TEST_PIPELINE_JOB_LIST_READ_MASK = field_mask.FieldMask(
+ paths=pipeline_constants._READ_MASK_FIELDS
+)
+_TEST_PIPELINE_PARAMETER_VALUES_LEGACY = {"string_param": "hello"}
+_TEST_PIPELINE_PARAMETER_VALUES = {
+ "string_param": "hello world",
+ "bool_param": True,
+ "double_param": 12.34,
+ "int_param": 5678,
+ "list_int_param": [123, 456, 789],
+ "list_string_param": ["lorem", "ipsum"],
+ "struct_param": {"key1": 12345, "key2": 67890},
+}
+
+_TEST_PIPELINE_INPUT_ARTIFACTS = {
+ "vertex_model": "456",
+}
+
+_TEST_PIPELINE_SPEC_LEGACY_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {"parameters": {"string_param": {"type": "STRING"}}},
+ },
+ "schemaVersion": "2.0.0",
+ "components": {},
+ }
+)
+_TEST_PIPELINE_SPEC_LEGACY_YAML = """\
+pipelineInfo:
+ name: my-pipeline
+root:
+ dag:
+ tasks: {}
+ inputDefinitions:
+ parameters:
+ string_param:
+ type: STRING
+schemaVersion: 2.0.0
+components: {}
+"""
+_TEST_PIPELINE_SPEC_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "string_param": {"parameterType": "STRING"},
+ "bool_param": {"parameterType": "BOOLEAN"},
+ "double_param": {"parameterType": "NUMBER_DOUBLE"},
+ "int_param": {"parameterType": "NUMBER_INTEGER"},
+ "list_int_param": {"parameterType": "LIST"},
+ "list_string_param": {"parameterType": "LIST"},
+ "struct_param": {"parameterType": "STRUCT"},
+ }
+ },
+ },
+ "schemaVersion": "2.1.0",
+ "components": {},
+ }
+)
+_TEST_PIPELINE_SPEC_YAML = """\
+pipelineInfo:
+ name: my-pipeline
+root:
+ dag:
+ tasks: {}
+ inputDefinitions:
+ parameters:
+ string_param:
+ parameterType: STRING
+ bool_param:
+ parameterType: BOOLEAN
+ double_param:
+ parameterType: NUMBER_DOUBLE
+ int_param:
+ parameterType: NUMBER_INTEGER
+ list_int_param:
+ parameterType: LIST
+ list_string_param:
+ parameterType: LIST
+ struct_param:
+ parameterType: STRUCT
+schemaVersion: 2.1.0
+components: {}
+"""
+_TEST_TFX_PIPELINE_SPEC_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {"parameters": {"string_param": {"type": "STRING"}}},
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "tfx-1.4.0",
+ "components": {},
+ }
+)
+_TEST_TFX_PIPELINE_SPEC_YAML = """\
+pipelineInfo:
+ name: my-pipeline
+root:
+ dag:
+ tasks: {}
+ inputDefinitions:
+ parameters:
+ string_param:
+ type: STRING
+schemaVersion: 2.0.0
+sdkVersion: tfx-1.4.0
+components: {}
+"""
+
+_TEST_PIPELINE_JOB_LEGACY = json.dumps(
+ {"runtimeConfig": {}, "pipelineSpec": json.loads(_TEST_PIPELINE_SPEC_LEGACY_JSON)}
+)
+_TEST_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {"parameter_values": _TEST_PIPELINE_PARAMETER_VALUES},
+ "pipelineSpec": json.loads(_TEST_PIPELINE_SPEC_JSON),
+ }
+)
+_TEST_PIPELINE_JOB_TFX = json.dumps(
+ {"runtimeConfig": {}, "pipelineSpec": json.loads(_TEST_TFX_PIPELINE_SPEC_JSON)}
+)
+
+_TEST_CREATE_PIPELINE_JOB_REQUEST = {
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": {"parameter_values": _TEST_PIPELINE_PARAMETER_VALUES},
+ "pipeline_spec": json.loads(_TEST_PIPELINE_SPEC_JSON),
+ },
+}
+
+
+_TEST_SCHEDULE_GET_METHOD_NAME = "get_fake_schedule"
+_TEST_SCHEDULE_LIST_METHOD_NAME = "list_fake_schedules"
+_TEST_SCHEDULE_CANCEL_METHOD_NAME = "cancel_fake_schedule"
+_TEST_SCHEDULE_DELETE_METHOD_NAME = "delete_fake_schedule"
+
+_TEST_PIPELINE_CREATE_TIME = datetime.now()
+
+
+@pytest.fixture
+def mock_schedule_service_create():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "create_schedule"
+ ) as mock_create_schedule:
+ mock_create_schedule.return_value = gca_schedule.Schedule(
+ name=_TEST_PIPELINE_JOB_SCHEDULE_NAME,
+ state=gca_schedule.Schedule.State.COMPLETED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request=_TEST_CREATE_PIPELINE_JOB_REQUEST,
+ )
+ yield mock_create_schedule
+
+
+@pytest.fixture
+def mock_schedule_bucket_exists():
+ def mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist(
+ output_artifacts_gcs_dir=None,
+ service_account=None,
+ project=None,
+ location=None,
+ credentials=None,
+ ):
+ output_artifacts_gcs_dir = (
+ output_artifacts_gcs_dir
+ or gcs_utils.generate_gcs_directory_for_pipeline_artifacts(
+ project=project,
+ location=location,
+ )
+ )
+ return output_artifacts_gcs_dir
+
+ with mock.patch(
+ "google.cloud.aiplatform.utils.gcs_utils.create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist",
+ wraps=mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist,
+ ) as mock_context:
+ yield mock_context
+
+
+def make_schedule(state):
+ return gca_schedule.Schedule(
+ name=_TEST_PIPELINE_JOB_SCHEDULE_NAME,
+ state=state,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request=_TEST_CREATE_PIPELINE_JOB_REQUEST,
+ )
+
+
+def make_pipeline_job(state):
+ test_pipeline_job_name = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/pipelineJobs/sample-test-pipeline-20230605"
+ return gca_pipeline_job.PipelineJob(
+ name=test_pipeline_job_name,
+ state=state,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ job_detail=gca_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=gca_context.Context(
+ name=test_pipeline_job_name,
+ )
+ ),
+ )
+
+
+@pytest.fixture
+def mock_schedule_service_get():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "get_schedule"
+ ) as mock_get_schedule:
+ mock_get_schedule.side_effect = [
+ make_schedule(gca_schedule.Schedule.State.ACTIVE),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ ]
+
+ yield mock_get_schedule
+
+
+@pytest.fixture
+def mock_schedule_service_get_with_fail():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "get_schedule"
+ ) as mock_get_schedule:
+ mock_get_schedule.side_effect = [
+ make_schedule(gca_schedule.Schedule.State.ACTIVE),
+ make_schedule(gca_schedule.Schedule.State.ACTIVE),
+ make_schedule(gca_schedule.Schedule.State.STATE_UNSPECIFIED),
+ ]
+
+ yield mock_get_schedule
+
+
+@pytest.fixture
+def mock_schedule_service_pause():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "pause_schedule"
+ ) as mock_pause_schedule:
+ yield mock_pause_schedule
+
+
+@pytest.fixture
+def mock_schedule_service_resume():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "resume_schedule"
+ ) as mock_resume_schedule:
+ yield mock_resume_schedule
+
+
+@pytest.fixture
+def mock_schedule_service_list():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "list_schedules"
+ ) as mock_list_schedules:
+ mock_list_schedules.return_value = [
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ make_schedule(gca_schedule.Schedule.State.COMPLETED),
+ ]
+ yield mock_list_schedules
+
+
+@pytest.fixture
+def mock_pipeline_service_list():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "list_pipeline_jobs"
+ ) as mock_list_pipeline_jobs:
+ mock_list_pipeline_jobs.return_value = [
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+ yield mock_list_pipeline_jobs
+
+
+@pytest.fixture
+def mock_schedule_service_update():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "update_schedule"
+ ) as mock_update_schedule:
+ mock_update_schedule.return_value = gca_schedule.Schedule(
+ name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ state=gca_schedule.Schedule.State.COMPLETED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ cron=_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request=_TEST_CREATE_PIPELINE_JOB_REQUEST,
+ )
+ yield mock_update_schedule
+
+
+@pytest.fixture
+def mock_load_yaml_and_json(job_spec):
+ with patch.object(storage.Blob, "download_as_bytes") as mock_load_yaml_and_json:
+ mock_load_yaml_and_json.return_value = job_spec.encode()
+ yield mock_load_yaml_and_json
+
+
+@pytest.fixture
+def mock_request_urlopen(job_spec):
+ with patch.object(request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = job_spec.encode()
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield mock_urlopen
+
+
+def dict_to_struct(d: Dict[str, Any]) -> struct_pb2.Struct:
+ s = struct_pb2.Struct()
+ s.update(d)
+ return s
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestPipelineJobSchedule:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_preview_schedule_service_create(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Creates PipelineJob with template stored in GCS bucket.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = preview_pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron_expression=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ assert pipeline_job_schedule._gca_resource == make_schedule(
+ gca_schedule.Schedule.State.COMPLETED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Creates PipelineJob with template stored in GCS bucket.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ assert pipeline_job_schedule._gca_resource == make_schedule(
+ gca_schedule.Schedule.State.COMPLETED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create_uses_pipeline_job_project_location(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Tests that the PipelineJobSchedule is created in the same project and location as the PipelineJob.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ project="managed-pipeline-test",
+ location="europe-west4",
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ assert pipeline_job_schedule.project == "managed-pipeline-test"
+ assert pipeline_job_schedule.location == "europe-west4"
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create_uses_pipeline_job_labels(
+ self,
+ mock_schedule_service_create,
+ mock_pipeline_service_list,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Tests that PipelineJobs created through PipelineJobSchedule inherit the labels of the init PipelineJob.
+ """
+ TEST_PIPELINE_JOB_LABELS = {"name": "test_xx"}
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ labels=TEST_PIPELINE_JOB_LABELS,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "labels": TEST_PIPELINE_JOB_LABELS,
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create_uses_pipeline_job_encryption_spec_key_name(
+ self,
+ mock_schedule_service_create,
+ mock_pipeline_service_list,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Tests that PipelineJobs created through PipelineJobSchedule inherit the encryption_spec_key_name of the init PipelineJob.
+ """
+ TEST_PIPELINE_JOB_ENCRYPTION_SPEC_KEY_NAME = "encryption_spec_key_name"
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ encryption_spec_key_name=TEST_PIPELINE_JOB_ENCRYPTION_SPEC_KEY_NAME,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "encryption_spec": gca_encryption_spec_compat.EncryptionSpec(
+ kms_key_name=TEST_PIPELINE_JOB_ENCRYPTION_SPEC_KEY_NAME
+ ),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create_uses_specified_project_location(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Tests that PipelineJobSchedule is created in the specified project and location over the PipelineJob's.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ project="managed-pipeline-test",
+ location="europe-west4",
+ )
+
+ assert job.project == _TEST_PROJECT
+ assert job.location == _TEST_LOCATION
+
+ assert pipeline_job_schedule.project == "managed-pipeline-test"
+ assert pipeline_job_schedule.location == "europe-west4"
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create_with_different_timezone(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Creates PipelineJobSchedule with cron expression in different timezone.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ test_pipeline_job_schedule_cron_tz_expression = "TZ=America/New_York * * * * *"
+ pipeline_job_schedule.create(
+ cron=test_pipeline_job_schedule_cron_tz_expression,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=test_pipeline_job_schedule_cron_tz_expression,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ assert pipeline_job_schedule._gca_resource == make_schedule(
+ gca_schedule.Schedule.State.COMPLETED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create_artifact_registry(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ mock_request_urlopen,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Creates PipelineJob with template stored in artifact registry.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_AR_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ "template_uri": _TEST_AR_TEMPLATE_PATH,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ assert pipeline_job_schedule._gca_resource == make_schedule(
+ gca_schedule.Schedule.State.COMPLETED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create_https(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ mock_request_urlopen,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Creates PipelineJob with template stored in https.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_HTTPS_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ "template_uri": _TEST_HTTPS_TEMPLATE_PATH,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ assert pipeline_job_schedule._gca_resource == make_schedule(
+ gca_schedule.Schedule.State.COMPLETED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create_with_timeout(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Sets timeout for PipelineJobSchedule creation.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=180.0,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=180.0,
+ )
+
+ assert pipeline_job_schedule._gca_resource == make_schedule(
+ gca_schedule.Schedule.State.COMPLETED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_create_with_timeout_not_explicitly_set(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule.
+
+ Does not set timeout for PipelineJobSchedule creation.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_preview_pipeline_job_create_schedule(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule via PipelineJob.create_schedule()."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = preview_pipeline_jobs._PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = job.create_schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron_expression=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ assert pipeline_job_schedule._gca_resource == make_schedule(
+ gca_schedule.Schedule.State.COMPLETED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_pipeline_job_create_schedule(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule via PipelineJob.create_schedule()."""
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = job.create_schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request={
+ "parent": _TEST_PARENT,
+ "pipeline_job": {
+ "runtime_config": runtime_config,
+ "pipeline_spec": dict_to_struct(pipeline_spec),
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ },
+ )
+
+ mock_schedule_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ schedule=expected_gapic_pipeline_job_schedule,
+ timeout=None,
+ )
+
+ assert pipeline_job_schedule._gca_resource == make_schedule(
+ gca_schedule.Schedule.State.COMPLETED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_pipeline_job_create_schedule_uses_pipeline_job_project_location(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Creates a PipelineJobSchedule via PipelineJob.create_schedule().
+
+ Tests that the PipelineJobSchedule is created in the same project and location as the PipelineJob.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ project="managed-pipeline-test",
+ location="europe-west4",
+ )
+
+ pipeline_job_schedule = job.create_schedule(
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ assert pipeline_job_schedule.project == "managed-pipeline-test"
+ assert pipeline_job_schedule.location == "europe-west4"
+
+ @pytest.mark.usefixtures("mock_schedule_service_get")
+ def test_get_schedule(self, mock_schedule_service_get):
+ aiplatform.init(project=_TEST_PROJECT)
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule.get(
+ schedule_id=_TEST_PIPELINE_JOB_SCHEDULE_ID
+ )
+
+ mock_schedule_service_get.assert_called_once_with(
+ name=_TEST_PIPELINE_JOB_SCHEDULE_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert isinstance(
+ pipeline_job_schedule, pipeline_job_schedules.PipelineJobSchedule
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_done_method_schedule_service(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ )
+ assert pipeline_job_schedule.done() is False
+
+ pipeline_job_schedule.wait()
+
+ assert pipeline_job_schedule.done() is True
+
+ @pytest.mark.usefixtures(
+ "mock_schedule_service_create",
+ "mock_schedule_service_get",
+ "mock_schedule_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_pause_resume_schedule_service(
+ self,
+ mock_schedule_service_pause,
+ mock_schedule_service_resume,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ )
+
+ pipeline_job_schedule.pause()
+
+ mock_schedule_service_pause.assert_called_once_with(
+ name=_TEST_PIPELINE_JOB_SCHEDULE_NAME
+ )
+
+ pipeline_job_schedule.resume()
+
+ mock_schedule_service_resume.assert_called_once_with(
+ name=_TEST_PIPELINE_JOB_SCHEDULE_NAME
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_schedule_service_create",
+ "mock_schedule_service_get",
+ "mock_schedule_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_list_schedules(self, mock_schedule_service_list, mock_load_yaml_and_json):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.list()
+
+ mock_schedule_service_list.assert_called_once_with(
+ request={"parent": _TEST_PARENT}
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_schedule_service_create",
+ "mock_schedule_service_get",
+ "mock_schedule_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_preview_list_schedule_jobs(
+ self,
+ mock_pipeline_service_list,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = preview_pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron_expression=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.list_jobs()
+
+ mock_pipeline_service_list.assert_called_once_with(
+ request={
+ "parent": _TEST_PARENT,
+ "filter": f"schedule_name={_TEST_PIPELINE_JOB_SCHEDULE_NAME}",
+ },
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_schedule_service_create",
+ "mock_schedule_service_get",
+ "mock_schedule_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_preview_list_schedule_jobs_with_read_mask(
+ self,
+ mock_pipeline_service_list,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = preview_pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron_expression=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.list_jobs(enable_simple_view=True)
+
+ mock_pipeline_service_list.assert_called_once_with(
+ request={
+ "parent": _TEST_PARENT,
+ "read_mask": _TEST_PIPELINE_JOB_LIST_READ_MASK,
+ "filter": f"schedule_name={_TEST_PIPELINE_JOB_SCHEDULE_NAME}",
+ },
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_schedule_service_create",
+ "mock_schedule_service_get",
+ "mock_schedule_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_list_schedule_jobs(
+ self,
+ mock_pipeline_service_list,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.list_jobs(enable_simple_view=False)
+
+ mock_pipeline_service_list.assert_called_once_with(
+ request={
+ "parent": _TEST_PARENT,
+ "filter": f"schedule_name={_TEST_PIPELINE_JOB_SCHEDULE_NAME}",
+ },
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_schedule_service_create",
+ "mock_schedule_service_get",
+ "mock_schedule_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_list_schedule_jobs_with_read_mask(
+ self,
+ mock_pipeline_service_list,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.list_jobs()
+
+ mock_pipeline_service_list.assert_called_once_with(
+ request={
+ "parent": _TEST_PARENT,
+ "read_mask": _TEST_PIPELINE_JOB_LIST_READ_MASK,
+ "filter": f"schedule_name={_TEST_PIPELINE_JOB_SCHEDULE_NAME}",
+ },
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_schedule_service_create",
+ "mock_schedule_service_get",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_pause_pipeline_job_schedule_without_created(
+ self,
+ mock_schedule_service_pause,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ pipeline_job_schedule.pause()
+
+ assert e.match(regexp=r"Schedule resource has not been created")
+
+ @pytest.mark.usefixtures(
+ "mock_schedule_service_create",
+ "mock_schedule_service_get",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_resume_pipeline_job_schedule_without_created(
+ self,
+ mock_schedule_service_resume,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ pipeline_job_schedule.resume()
+
+ assert e.match(regexp=r"Schedule resource has not been created")
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_update(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_update,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Updates a PipelineJobSchedule.
+
+ Updates cron and max_run_count.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.update(
+ cron=_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_CRON,
+ max_run_count=_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ )
+
+ expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(
+ name=_TEST_PIPELINE_JOB_SCHEDULE_NAME,
+ state=gca_schedule.Schedule.State.COMPLETED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ cron=_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ create_pipeline_job_request=_TEST_CREATE_PIPELINE_JOB_REQUEST,
+ )
+ assert (
+ pipeline_job_schedule._gca_resource == expected_gapic_pipeline_job_schedule
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_call_schedule_service_update_before_create(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_update,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Updates a PipelineJobSchedule.
+
+ Raises error because PipelineJobSchedule should be created before update.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ pipeline_job_schedule.update(
+ cron=_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_CRON,
+ max_run_count=_TEST_UPDATED_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ )
+
+ assert e.match(
+ regexp=r"Not updating PipelineJobSchedule: PipelineJobSchedule must be active or completed."
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_get_max_run_count_before_create(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Gets the PipelineJobSchedule max_run_count before creating.
+
+ Raises error because PipelineJobSchedule should be created first.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ pipeline_job_schedule.max_run_count
+
+ assert e.match(regexp=r"PipelineJobSchedule resource has not been created.")
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.max_run_count
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_get_cron_before_create(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Gets the PipelineJobSchedule cron before creating.
+
+ Raises error because PipelineJobSchedule should be created first.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ pipeline_job_schedule.cron
+
+ assert e.match(regexp=r"PipelineJobSchedule resource has not been created.")
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.cron
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_get_cron_expression_before_create(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Gets the PipelineJobSchedule cron expression before creating.
+
+ Raises error because PipelineJobSchedule should be created first.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = preview_pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ pipeline_job_schedule.cron_expression
+
+ assert e.match(regexp=r"PipelineJobSchedule resource has not been created.")
+
+ pipeline_job_schedule.create(
+ cron_expression=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.cron_expression
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_get_max_concurrent_run_count_before_create(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Gets the PipelineJobSchedule max_concurrent_run_count before creating.
+
+ Raises error because PipelineJobSchedule should be created first.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ pipeline_job_schedule.max_concurrent_run_count
+
+ assert e.match(regexp=r"PipelineJobSchedule resource has not been created.")
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.max_concurrent_run_count
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_get_allow_queueing_before_create(
+ self,
+ mock_schedule_service_create,
+ mock_schedule_service_get,
+ mock_schedule_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ """Gets the PipelineJobSchedule allow_queueing before creating.
+
+ Raises error because PipelineJobSchedule should be created first.
+ """
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(
+ pipeline_job=job,
+ display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ pipeline_job_schedule.allow_queueing
+
+ assert e.match(regexp=r"PipelineJobSchedule resource has not been created.")
+
+ pipeline_job_schedule.create(
+ cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,
+ max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,
+ max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ pipeline_job_schedule.allow_queueing
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_pipeline_jobs.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_pipeline_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..24d802bcfbc20c849e0bd5d347bff6feef48329a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_pipeline_jobs.py
@@ -0,0 +1,2504 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import json
+
+from unittest import mock
+from importlib import reload
+from unittest.mock import patch
+from urllib import request
+from datetime import datetime
+
+from google.api_core import operation as ga_operation
+from google.auth import credentials as auth_credentials
+from google.cloud import aiplatform
+from google.cloud import aiplatform_v1beta1
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.constants import pipeline as pipeline_constants
+from google.cloud.aiplatform_v1 import Context as GapicContext
+from google.cloud.aiplatform_v1 import MetadataStore as GapicMetadataStore
+from google.cloud.aiplatform.metadata import constants
+from google.cloud.aiplatform_v1 import MetadataServiceClient
+from google.cloud.aiplatform import pipeline_jobs
+from google.cloud.aiplatform.compat.types import pipeline_failure_policy
+from google.cloud.aiplatform.utils import gcs_utils
+from google.cloud import storage
+from google.protobuf import json_format
+from google.protobuf import field_mask_pb2 as field_mask
+
+from google.cloud.aiplatform.compat.services import (
+ pipeline_service_client,
+)
+from google.cloud.aiplatform_v1beta1.types import (
+ pipeline_service as PipelineServiceV1Beta1,
+)
+from google.cloud.aiplatform_v1.types import (
+ pipeline_service as PipelineServiceV1,
+)
+from google.cloud.aiplatform_v1beta1.services import (
+ pipeline_service as v1beta1_pipeline_service,
+)
+from google.cloud.aiplatform_v1beta1.types import (
+ pipeline_job as v1beta1_pipeline_job,
+ pipeline_state as v1beta1_pipeline_state,
+ context as v1beta1_context,
+)
+from google.cloud.aiplatform.preview.pipelinejob import (
+ pipeline_jobs as preview_pipeline_jobs,
+)
+from google.cloud.aiplatform.compat.types import (
+ pipeline_job as gca_pipeline_job,
+ pipeline_state as gca_pipeline_state,
+ context as gca_context,
+)
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_PIPELINE_JOB_DISPLAY_NAME = "sample-pipeline-job-display-name"
+_TEST_PIPELINE_JOB_DISPLAY_NAME_2 = "sample-pipeline-job-display-name-2"
+_TEST_PIPELINE_JOB_ID = "sample-test-pipeline-202111111"
+_TEST_PIPELINE_JOB_ID_2 = "sample-test-pipeline-202111112"
+_TEST_GCS_BUCKET_NAME = "my-bucket"
+_TEST_GCS_OUTPUT_DIRECTORY = f"gs://{_TEST_GCS_BUCKET_NAME}/output_artifacts/"
+_TEST_CREDENTIALS = auth_credentials.AnonymousCredentials()
+_TEST_SERVICE_ACCOUNT = "abcde@my-project.iam.gserviceaccount.com"
+_TEST_LABELS = {"vertex-ai-pipelines-run-billing-id": "100"}
+
+_TEST_TEMPLATE_PATH = f"gs://{_TEST_GCS_BUCKET_NAME}/job_spec.json"
+_TEST_AR_TEMPLATE_PATH = "https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"
+_TEST_HTTPS_TEMPLATE_PATH = "https://raw.githubusercontent.com/repo/pipeline.json"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_NETWORK = f"projects/{_TEST_PROJECT}/global/networks/{_TEST_PIPELINE_JOB_ID}"
+_TEST_RESERVED_IP_RANGES = ["vertex-ai-ip-range"]
+
+_TEST_PIPELINE_JOB_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/pipelineJobs/{_TEST_PIPELINE_JOB_ID}"
+_TEST_PIPELINE_JOB_NAME_2 = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/pipelineJobs/{_TEST_PIPELINE_JOB_ID_2}"
+_TEST_PIPELINE_JOB_LIST_READ_MASK = field_mask.FieldMask(
+ paths=pipeline_constants._READ_MASK_FIELDS
+)
+
+_TEST_PIPELINE_PARAMETER_VALUES_LEGACY = {"string_param": "hello"}
+_TEST_PIPELINE_PARAMETER_VALUES = {
+ "string_param": "hello world",
+ "bool_param": True,
+ "double_param": 12.34,
+ "int_param": 5678,
+ "list_int_param": [123, 456, 789],
+ "list_string_param": ["lorem", "ipsum"],
+ "struct_param": {"key1": 12345, "key2": 67890},
+}
+
+_TEST_PIPELINE_INPUT_ARTIFACTS = {
+ "vertex_model": "456",
+}
+
+_TEST_PIPELINE_SPEC_LEGACY_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {"parameters": {"string_param": {"type": "STRING"}}},
+ },
+ "schemaVersion": "2.0.0",
+ "components": {},
+ }
+)
+_TEST_PIPELINE_SPEC_LEGACY_YAML = """\
+pipelineInfo:
+ name: my-pipeline
+root:
+ dag:
+ tasks: {}
+ inputDefinitions:
+ parameters:
+ string_param:
+ type: STRING
+schemaVersion: 2.0.0
+components: {}
+"""
+_TEST_PIPELINE_SPEC_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {
+ "parameters": {
+ "string_param": {"parameterType": "STRING"},
+ "bool_param": {"parameterType": "BOOLEAN"},
+ "double_param": {"parameterType": "NUMBER_DOUBLE"},
+ "int_param": {"parameterType": "NUMBER_INTEGER"},
+ "list_int_param": {"parameterType": "LIST"},
+ "list_string_param": {"parameterType": "LIST"},
+ "struct_param": {"parameterType": "STRUCT"},
+ }
+ },
+ },
+ "schemaVersion": "2.1.0",
+ "components": {},
+ }
+)
+_TEST_PIPELINE_SPEC_YAML = """\
+pipelineInfo:
+ name: my-pipeline
+root:
+ dag:
+ tasks: {}
+ inputDefinitions:
+ parameters:
+ string_param:
+ parameterType: STRING
+ bool_param:
+ parameterType: BOOLEAN
+ double_param:
+ parameterType: NUMBER_DOUBLE
+ int_param:
+ parameterType: NUMBER_INTEGER
+ list_int_param:
+ parameterType: LIST
+ list_string_param:
+ parameterType: LIST
+ struct_param:
+ parameterType: STRUCT
+schemaVersion: 2.1.0
+components: {}
+"""
+_TEST_TFX_PIPELINE_SPEC_JSON = json.dumps(
+ {
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {"parameters": {"string_param": {"type": "STRING"}}},
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "tfx-1.4.0",
+ "components": {},
+ }
+)
+_TEST_TFX_PIPELINE_SPEC_YAML = """\
+pipelineInfo:
+ name: my-pipeline
+root:
+ dag:
+ tasks: {}
+ inputDefinitions:
+ parameters:
+ string_param:
+ type: STRING
+schemaVersion: 2.0.0
+sdkVersion: tfx-1.4.0
+components: {}
+"""
+
+_TEST_PIPELINE_JOB_LEGACY = json.dumps(
+ {"runtimeConfig": {}, "pipelineSpec": json.loads(_TEST_PIPELINE_SPEC_LEGACY_JSON)}
+)
+_TEST_PIPELINE_JOB = json.dumps(
+ {
+ "runtimeConfig": {"parameterValues": _TEST_PIPELINE_PARAMETER_VALUES},
+ "pipelineSpec": json.loads(_TEST_PIPELINE_SPEC_JSON),
+ }
+)
+_TEST_PIPELINE_JOB_TFX = json.dumps(
+ {"runtimeConfig": {}, "pipelineSpec": json.loads(_TEST_TFX_PIPELINE_SPEC_JSON)}
+)
+
+_TEST_PIPELINE_GET_METHOD_NAME = "get_fake_pipeline_job"
+_TEST_PIPELINE_LIST_METHOD_NAME = "list_fake_pipeline_jobs"
+_TEST_PIPELINE_CANCEL_METHOD_NAME = "cancel_fake_pipeline_job"
+_TEST_PIPELINE_DELETE_METHOD_NAME = "delete_fake_pipeline_job"
+_TEST_PIPELINE_RESOURCE_NAME = (
+ f"{_TEST_PARENT}/fakePipelineJobs/{_TEST_PIPELINE_JOB_ID}"
+)
+_TEST_PIPELINE_CREATE_TIME = datetime.now()
+
+# experiments
+_TEST_EXPERIMENT = "test-experiment"
+
+_TEST_METADATASTORE = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
+)
+_TEST_CONTEXT_ID = _TEST_EXPERIMENT
+_TEST_CONTEXT_NAME = f"{_TEST_METADATASTORE}/contexts/{_TEST_CONTEXT_ID}"
+
+_EXPERIMENT_MOCK = GapicContext(
+ name=_TEST_CONTEXT_NAME,
+ schema_title=constants.SYSTEM_EXPERIMENT,
+ schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT],
+ metadata={**constants.EXPERIMENT_METADATA},
+)
+
+
+@pytest.fixture
+def mock_pipeline_service_create_with_preflight_validations():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ )
+ yield mock_create_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ )
+ yield mock_create_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_v1beta1_service_create():
+ with mock.patch.object(
+ v1beta1_pipeline_service.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_create_pipeline_job:
+ mock_create_pipeline_job.return_value = v1beta1_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ )
+ yield mock_create_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_v1beta1_service_get():
+ with mock.patch.object(
+ v1beta1_pipeline_service.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_v1_service_batch_cancel():
+ with patch.object(
+ pipeline_service_client.PipelineServiceClient, "batch_cancel_pipeline_jobs"
+ ) as batch_cancel_pipeline_jobs_mock:
+ batch_cancel_pipeline_jobs_mock.return_value = mock.Mock(ga_operation.Operation)
+ yield batch_cancel_pipeline_jobs_mock
+
+
+@pytest.fixture
+def mock_pipeline_v1_service_batch_delete():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "batch_delete_pipeline_jobs"
+ ) as mock_batch_pipeline_jobs:
+ mock_batch_pipeline_jobs.return_value = (
+ make_v1_batch_delete_pipeline_jobs_response()
+ )
+ mock_lro = mock.Mock(ga_operation.Operation)
+ mock_lro.result.return_value = make_v1_batch_delete_pipeline_jobs_response()
+ mock_batch_pipeline_jobs.return_value = mock_lro
+ yield mock_batch_pipeline_jobs
+
+
+def make_v1_batch_delete_pipeline_jobs_response():
+ response = PipelineServiceV1.BatchDeletePipelineJobsResponse()
+ response.pipeline_jobs.append(
+ make_pipeline_job_with_name(
+ _TEST_PIPELINE_JOB_NAME,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ )
+ )
+ response.pipeline_jobs.append(
+ make_pipeline_job_with_name(
+ _TEST_PIPELINE_JOB_NAME_2,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ )
+ )
+ return response
+
+
+@pytest.fixture
+def mock_pipeline_v1beta1_service_batch_delete():
+ with mock.patch.object(
+ v1beta1_pipeline_service.PipelineServiceClient, "batch_delete_pipeline_jobs"
+ ) as mock_batch_pipeline_jobs:
+ mock_batch_pipeline_jobs.return_value = (
+ make_batch_delete_pipeline_jobs_response()
+ )
+ mock_lro = mock.Mock(ga_operation.Operation)
+ mock_lro.result.return_value = make_batch_delete_pipeline_jobs_response()
+ mock_batch_pipeline_jobs.return_value = mock_lro
+ yield mock_batch_pipeline_jobs
+
+
+def make_v1beta1_pipeline_job(name: str, state: v1beta1_pipeline_state.PipelineState):
+ return v1beta1_pipeline_job.PipelineJob(
+ name=name,
+ state=state,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ labels=_TEST_LABELS,
+ job_detail=v1beta1_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=v1beta1_context.Context(
+ name=name,
+ )
+ ),
+ )
+
+
+def make_batch_delete_pipeline_jobs_response():
+ response = PipelineServiceV1Beta1.BatchDeletePipelineJobsResponse()
+ response.pipeline_jobs.append(
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME,
+ v1beta1_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ )
+ )
+ response.pipeline_jobs.append(
+ make_v1beta1_pipeline_job(
+ _TEST_PIPELINE_JOB_NAME_2,
+ v1beta1_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ )
+ )
+ return response
+
+
+@pytest.fixture
+def mock_pipeline_bucket_exists():
+ def mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist(
+ output_artifacts_gcs_dir=None,
+ service_account=None,
+ project=None,
+ location=None,
+ credentials=None,
+ ):
+ output_artifacts_gcs_dir = (
+ output_artifacts_gcs_dir
+ or gcs_utils.generate_gcs_directory_for_pipeline_artifacts(
+ project=project,
+ location=location,
+ )
+ )
+ return output_artifacts_gcs_dir
+
+ with mock.patch(
+ "google.cloud.aiplatform.utils.gcs_utils.create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist",
+ wraps=mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist,
+ ) as mock_context:
+ yield mock_context
+
+
+def make_pipeline_job(state):
+ return gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=state,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ job_detail=gca_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=gca_context.Context(
+ name=_TEST_PIPELINE_JOB_NAME,
+ )
+ ),
+ )
+
+
+def make_pipeline_job_with_name(name, state):
+ return gca_pipeline_job.PipelineJob(
+ name=name,
+ state=state,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ job_detail=gca_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=gca_context.Context(
+ name=name,
+ )
+ ),
+ )
+
+
+@pytest.fixture
+def mock_pipeline_service_get():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_get_pipeline_job:
+ mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED),
+ ]
+
+ yield mock_get_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_cancel():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "cancel_pipeline_job"
+ ) as mock_cancel_pipeline_job:
+ yield mock_cancel_pipeline_job
+
+
+@pytest.fixture
+def mock_pipeline_service_list():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "list_pipeline_jobs"
+ ) as mock_list_pipeline_jobs:
+ mock_list_pipeline_jobs.return_value = [
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+ yield mock_list_pipeline_jobs
+
+
+@pytest.fixture
+def mock_load_yaml_and_json(job_spec):
+ with patch.object(storage.Blob, "download_as_bytes") as mock_load_yaml_and_json:
+ mock_load_yaml_and_json.return_value = job_spec.encode()
+ yield mock_load_yaml_and_json
+
+
+@pytest.fixture
+def mock_request_urlopen(job_spec):
+ with patch.object(request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = job_spec.encode()
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield mock_urlopen
+
+
+# experiment mocks
+@pytest.fixture
+def get_metadata_store_mock():
+ with patch.object(
+ MetadataServiceClient, "get_metadata_store"
+ ) as get_metadata_store_mock:
+ get_metadata_store_mock.return_value = GapicMetadataStore(
+ name=_TEST_METADATASTORE,
+ )
+ yield get_metadata_store_mock
+
+
+@pytest.fixture
+def get_experiment_mock():
+ with patch.object(MetadataServiceClient, "get_context") as get_context_mock:
+ get_context_mock.return_value = _EXPERIMENT_MOCK
+ yield get_context_mock
+
+
+@pytest.fixture
+def add_context_children_mock():
+ with patch.object(
+ MetadataServiceClient, "add_context_children"
+ ) as add_context_children_mock:
+ yield add_context_children_mock
+
+
+@pytest.fixture
+def list_contexts_mock():
+ with patch.object(MetadataServiceClient, "list_contexts") as list_contexts_mock:
+ list_contexts_mock.return_value = [_EXPERIMENT_MOCK]
+ yield list_contexts_mock
+
+
+@pytest.fixture
+def create_experiment_run_context_mock():
+ with patch.object(MetadataServiceClient, "create_context") as create_context_mock:
+ create_context_mock.side_effect = [_EXPERIMENT_MOCK]
+ yield create_context_mock
+
+
+def make_pipeline_job_with_experiment(state):
+ return gca_pipeline_job.PipelineJob(
+ name=_TEST_PIPELINE_JOB_NAME,
+ state=state,
+ create_time=_TEST_PIPELINE_CREATE_TIME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ job_detail=gca_pipeline_job.PipelineJobDetail(
+ pipeline_run_context=gca_context.Context(
+ name=_TEST_PIPELINE_JOB_NAME,
+ parent_contexts=[_TEST_CONTEXT_NAME],
+ ),
+ ),
+ )
+
+
+@pytest.fixture
+def mock_create_pipeline_job_with_experiment():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_pipeline_job"
+ ) as mock_pipeline_with_experiment:
+ mock_pipeline_with_experiment.return_value = make_pipeline_job_with_experiment(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ yield mock_pipeline_with_experiment
+
+
+@pytest.fixture
+def mock_get_pipeline_job_with_experiment():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
+ ) as mock_pipeline_with_experiment:
+ mock_pipeline_with_experiment.side_effect = [
+ make_pipeline_job_with_experiment(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING
+ ),
+ make_pipeline_job_with_experiment(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ ),
+ ]
+ yield mock_pipeline_with_experiment
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestPipelineJob:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ @mock.patch.object(pipeline_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(pipeline_jobs, "_LOG_WAIT_TIME", 1)
+ def test_run_call_pipeline_service_create(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ job.run(
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_run_with_reserved_ip_ranges(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ job.run(
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_artifact_registry(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ mock_request_urlopen,
+ job_spec,
+ mock_load_yaml_and_json,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_AR_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ template_uri=_TEST_AR_TEMPLATE_PATH,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_https(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ mock_request_urlopen,
+ job_spec,
+ mock_load_yaml_and_json,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_HTTPS_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ template_uri=_TEST_HTTPS_TEMPLATE_PATH,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ ],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_timeout(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=180.0,
+ )
+
+ # mock_pipeline_service_get.assert_called_with(
+ # name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ # )
+
+ # assert job._gca_resource == make_pipeline_job(
+ # gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ # )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ ],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_timeout_not_explicitly_set(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize(
+ "failure_policy",
+ [
+ (
+ "slow",
+ pipeline_failure_policy.PipelineFailurePolicy.PIPELINE_FAILURE_POLICY_FAIL_SLOW,
+ ),
+ (
+ "fast",
+ pipeline_failure_policy.PipelineFailurePolicy.PIPELINE_FAILURE_POLICY_FAIL_FAST,
+ ),
+ ],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ @mock.patch.object(pipeline_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(pipeline_jobs, "_LOG_WAIT_TIME", 1)
+ def test_run_call_pipeline_service_create_with_failure_policy(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ failure_policy,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ failure_policy=failure_policy[0],
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "failurePolicy": failure_policy[1],
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_legacy(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES_LEGACY,
+ enable_caching=True,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameters": {"string_param": {"stringValue": "hello"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.0.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_TFX_PIPELINE_SPEC_JSON,
+ _TEST_TFX_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB_TFX,
+ ],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_tfx(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES_LEGACY,
+ enable_caching=True,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameters": {"string_param": {"stringValue": "hello"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "tfx-1.4.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_submit_call_pipeline_service_pipeline_job_create(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ job.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ assert not mock_pipeline_service_get.called
+
+ job.wait()
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_submit_call_gcs_utils_get_or_create_with_correct_arguments(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ project=_TEST_PROJECT,
+ pipeline_root=_TEST_GCS_OUTPUT_DIRECTORY,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ mock_pipeline_bucket_exists.assert_called_once_with(
+ output_artifacts_gcs_dir=_TEST_GCS_OUTPUT_DIRECTORY,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_done_method_pipeline_service(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ job.submit(service_account=_TEST_SERVICE_ACCOUNT, network=_TEST_NETWORK)
+
+ assert job.done() is False
+
+ job.wait()
+
+ assert job.done() is True
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_submit_call_pipeline_service_pipeline_job_create_legacy(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES_LEGACY,
+ enable_caching=True,
+ )
+
+ job.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "parameters": {"string_param": {"stringValue": "hello"}},
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.0.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ assert not mock_pipeline_service_get.called
+
+ job.wait()
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ def test_get_pipeline_job(self, mock_pipeline_service_get):
+ aiplatform.init(project=_TEST_PROJECT)
+ job = pipeline_jobs.PipelineJob.get(resource_name=_TEST_PIPELINE_JOB_ID)
+
+ mock_pipeline_service_get.assert_called_once_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert isinstance(job, pipeline_jobs.PipelineJob)
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_pipeline_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_cancel_pipeline_job(
+ self, mock_pipeline_service_cancel, mock_load_yaml_and_json
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ )
+
+ job.run()
+ job.cancel()
+
+ mock_pipeline_service_cancel.assert_called_once_with(
+ name=_TEST_PIPELINE_JOB_NAME
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_pipeline_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_list_pipeline_job(
+ self, mock_pipeline_service_list, mock_load_yaml_and_json
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ )
+
+ job.run()
+ job.list()
+
+ mock_pipeline_service_list.assert_called_once_with(
+ request={"parent": _TEST_PARENT}
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_pipeline_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_list_pipeline_job_with_read_mask(
+ self, mock_pipeline_service_list, mock_load_yaml_and_json
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ )
+
+ job.run()
+ job.list(enable_simple_view=True)
+
+ mock_pipeline_service_list.assert_called_once_with(
+ request={
+ "parent": _TEST_PARENT,
+ "read_mask": _TEST_PIPELINE_JOB_LIST_READ_MASK,
+ },
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_cancel_pipeline_job_without_running(
+ self,
+ mock_pipeline_service_cancel,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.cancel()
+
+ assert e.match(regexp=r"PipelineJob resource has not been created")
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get_with_fail",
+ "mock_pipeline_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ @mock.patch.object(pipeline_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(pipeline_jobs, "_LOG_WAIT_TIME", 1)
+ def test_pipeline_failure_raises(self, mock_load_yaml_and_json, sync):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get_with_fail",
+ "mock_pipeline_bucket_exists",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_pipeline_job_has_failed_property(self, mock_load_yaml_and_json):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ job.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING
+ assert job.has_failed
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_pipeline_job_has_failed_property_with_no_submit(
+ self, mock_load_yaml_and_json
+ ):
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ with pytest.raises(
+ RuntimeError,
+ match=r"PipelineJob resource has not been created\.",
+ ):
+ assert job.has_failed
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_clone_pipeline_job(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ cloned = job.clone(job_id=f"cloned-{_TEST_PIPELINE_JOB_ID}")
+
+ cloned.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=f"cloned-{_TEST_PIPELINE_JOB_ID}",
+ timeout=None,
+ )
+
+ assert not mock_pipeline_service_get.called
+
+ cloned.wait()
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert cloned._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_clone_pipeline_job_with_all_args(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ cloned = job.clone(
+ display_name=f"cloned-{_TEST_PIPELINE_JOB_DISPLAY_NAME}",
+ job_id=f"cloned-{_TEST_PIPELINE_JOB_ID}",
+ pipeline_root=f"cloned-{_TEST_GCS_BUCKET_NAME}",
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ credentials=_TEST_CREDENTIALS,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ cloned.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": f"cloned-{_TEST_GCS_BUCKET_NAME}",
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=f"cloned-{_TEST_PIPELINE_JOB_DISPLAY_NAME}",
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=f"cloned-{_TEST_PIPELINE_JOB_ID}",
+ timeout=None,
+ )
+
+ assert not mock_pipeline_service_get.called
+
+ cloned.wait()
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert cloned._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_get_associated_experiment_from_pipeline_returns_none_without_experiment(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ job.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ )
+
+ job.wait()
+
+ test_experiment = job.get_associated_experiment()
+
+ assert test_experiment is None
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_get_associated_experiment_from_pipeline_returns_experiment(
+ self,
+ job_spec,
+ mock_load_yaml_and_json,
+ add_context_children_mock,
+ get_experiment_mock,
+ create_experiment_run_context_mock,
+ get_metadata_store_mock,
+ mock_create_pipeline_job_with_experiment,
+ mock_get_pipeline_job_with_experiment,
+ mock_pipeline_bucket_exists,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ test_experiment = aiplatform.Experiment(_TEST_EXPERIMENT)
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ enable_caching=True,
+ )
+
+ assert get_experiment_mock.call_count == 1
+
+ job.submit(
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ create_request_timeout=None,
+ experiment=test_experiment,
+ )
+
+ job.wait()
+
+ associated_experiment = job.get_associated_experiment()
+
+ assert associated_experiment.resource_name == _TEST_CONTEXT_NAME
+
+ assert add_context_children_mock.call_count == 1
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_get",
+ "mock_pipeline_v1beta1_service_batch_delete",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_create_two_and_batch_delete_pipeline_jobs_returns_response(
+ self,
+ mock_load_yaml_and_json,
+ mock_pipeline_v1beta1_service_batch_delete,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = preview_pipeline_jobs._PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ )
+
+ response = job.batch_delete(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ names=[_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_ID_2],
+ )
+
+ assert mock_pipeline_v1beta1_service_batch_delete.call_count == 1
+ assert len(response.pipeline_jobs) == 2
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_get",
+ "mock_pipeline_v1_service_batch_delete",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_create_two_and_batch_delete_v1_pipeline_jobs_returns_response(
+ self,
+ mock_load_yaml_and_json,
+ mock_pipeline_v1_service_batch_delete,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ )
+
+ response = job.batch_delete(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ names=[_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_ID_2],
+ )
+
+ assert mock_pipeline_v1_service_batch_delete.call_count == 1
+ assert len(response.pipeline_jobs) == 2
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_get",
+ "mock_pipeline_v1_service_batch_cancel",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [
+ _TEST_PIPELINE_SPEC_JSON,
+ _TEST_PIPELINE_SPEC_YAML,
+ _TEST_PIPELINE_JOB,
+ _TEST_PIPELINE_SPEC_LEGACY_JSON,
+ _TEST_PIPELINE_SPEC_LEGACY_YAML,
+ _TEST_PIPELINE_JOB_LEGACY,
+ ],
+ )
+ def test_create_two_and_batch_cancel_v1_pipeline_jobs_returns_response(
+ self,
+ mock_load_yaml_and_json,
+ mock_pipeline_v1_service_batch_cancel,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ )
+
+ job.batch_cancel(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ names=[_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_ID_2],
+ )
+
+ assert mock_pipeline_v1_service_batch_cancel.call_count == 1
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_v1beta1_service_create",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_submit_v1beta1_pipeline_job_returns_response(
+ self,
+ mock_load_yaml_and_json,
+ job_spec,
+ mock_pipeline_v1beta1_service_create,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = preview_pipeline_jobs._PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ enable_preflight_validations=True,
+ )
+
+ job.submit()
+
+ assert mock_pipeline_v1beta1_service_create.call_count == 1
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_v1beta1_service_create",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON],
+ )
+ @mock.patch.object(pipeline_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(pipeline_jobs, "_LOG_WAIT_TIME", 1)
+ def test_run_call_pipeline_service_create_with_default_runtime(
+ self,
+ mock_load_yaml_and_json,
+ job_spec,
+ mock_pipeline_v1beta1_service_create,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = preview_pipeline_jobs._PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ default_runtime={
+ "persistentResourceRuntimeDetail": {
+ "persistentResourceName": "testRrName",
+ "taskResourceUnavailableWaitTimeMs": 1244,
+ "taskResourceUnavailableTimeoutBehavior": "FAIL",
+ }
+ },
+ )
+
+ job.submit()
+
+ assert mock_pipeline_v1beta1_service_create.call_count == 1
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_v1beta1_service_create",
+ "mock_pipeline_v1beta1_service_get",
+ )
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ def test_rerun_v1beta1_pipeline_job_returns_response(
+ self,
+ mock_load_yaml_and_json,
+ job_spec,
+ mock_pipeline_v1beta1_service_create,
+ mock_pipeline_v1beta1_service_get,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = preview_pipeline_jobs._PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ )
+
+ job.submit()
+
+ job.rerun(
+ original_pipelinejob_name=_TEST_PIPELINE_JOB_NAME,
+ pipeline_task_rerun_configs=[
+ aiplatform_v1beta1.PipelineTaskRerunConfig(
+ task_name="task-name",
+ task_id=100,
+ )
+ ],
+ parameter_values={"param-1": "value-1"},
+ )
+
+ assert mock_pipeline_v1beta1_service_get.call_count == 1
+ assert mock_pipeline_v1beta1_service_create.call_count == 2
+
+ @pytest.mark.parametrize(
+ "job_spec",
+ [_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_SPEC_YAML, _TEST_PIPELINE_JOB],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_run_with_preflight_validations(
+ self,
+ mock_pipeline_service_create_with_preflight_validations,
+ mock_pipeline_service_get,
+ mock_pipeline_bucket_exists,
+ job_spec,
+ mock_load_yaml_and_json,
+ sync,
+ ):
+ import yaml
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,
+ input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,
+ enable_caching=True,
+ )
+
+ job.run(
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ sync=sync,
+ create_request_timeout=None,
+ enable_preflight_validations=True,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
+ "inputArtifacts": {"vertex_model": {"artifactId": "456"}},
+ }
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ job_spec = yaml.safe_load(job_spec)
+ pipeline_spec = job_spec.get("pipelineSpec") or job_spec
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.1.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
+ preflight_validations=True,
+ )
+
+ mock_pipeline_service_create_with_preflight_validations.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ timeout=None,
+ )
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_prediction.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_prediction.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cb6ca5875d6b9cd3b74ad7f92a5606d3f497ec3
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_prediction.py
@@ -0,0 +1,3343 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import asyncio
+import importlib
+import json
+import multiprocessing
+import os
+import pytest
+import requests
+import textwrap
+import time
+from unittest import mock
+
+from fastapi import HTTPException
+from fastapi import Request
+from fastapi import Response
+from starlette.datastructures import Headers
+from starlette.testclient import TestClient
+
+from google.auth.exceptions import GoogleAuthError
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import helpers
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import models
+
+from google.cloud.aiplatform.compat.types import (
+ model as gca_model_compat,
+ env_var as gca_env_var,
+)
+
+from google.cloud.aiplatform.constants import prediction
+from google.cloud.aiplatform.docker_utils import build
+from google.cloud.aiplatform.docker_utils import errors
+from google.cloud.aiplatform.docker_utils import local_util
+from google.cloud.aiplatform.docker_utils import run
+from google.cloud.aiplatform.docker_utils import utils
+from google.cloud.aiplatform.prediction import DEFAULT_HEALTH_ROUTE
+from google.cloud.aiplatform.prediction import DEFAULT_HTTP_PORT
+from google.cloud.aiplatform.prediction import DEFAULT_PREDICT_ROUTE
+from google.cloud.aiplatform.prediction import LocalModel
+from google.cloud.aiplatform.prediction import LocalEndpoint
+from google.cloud.aiplatform.prediction import handler_utils
+from google.cloud.aiplatform.prediction import local_endpoint
+from google.cloud.aiplatform.prediction import (
+ model_server as model_server_module,
+)
+from google.cloud.aiplatform.prediction.handler import Handler
+from google.cloud.aiplatform.prediction.handler import PredictionHandler
+from google.cloud.aiplatform.prediction.model_server import CprModelServer
+from google.cloud.aiplatform.prediction.local_model import (
+ _DEFAULT_HANDLER_CLASS,
+)
+from google.cloud.aiplatform.prediction.local_model import (
+ _DEFAULT_HANDLER_MODULE,
+)
+from google.cloud.aiplatform.prediction.local_model import (
+ _DEFAULT_PYTHON_MODULE,
+)
+from google.cloud.aiplatform.prediction.local_model import (
+ _DEFAULT_SDK_REQUIREMENTS,
+)
+from google.cloud.aiplatform.prediction.predictor import Predictor
+from google.cloud.aiplatform.prediction.serializer import DefaultSerializer
+from google.cloud.aiplatform.utils import prediction_utils
+
+from google.cloud.aiplatform_v1.services.model_service import (
+ client as model_service_client,
+)
+
+
+_TEST_INPUT = b'{"instances": [[1, 2, 3, 4]]}'
+_TEST_DESERIALIZED_INPUT = {"instances": [[1, 2, 3, 4]]}
+_TEST_PREDICTION_OUTPUT = {"predictions": [[1]]}
+_TEST_SERIALIZED_OUTPUT = b'{"predictions": [[1]]}'
+_APPLICATION_JSON = "application/json"
+_TEST_GCS_ARTIFACTS_URI = ""
+
+_TEST_AIP_HTTP_PORT = "8080"
+_TEST_AIP_HEALTH_ROUTE = "/health"
+_TEST_AIP_PREDICT_ROUTE = "/predict"
+_TEST_AIP_STORAGE_URI = "gs://fake/storage/uri"
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_MODEL_NAME = "test-model"
+_TEST_ARTIFACT_URI = "gs://test/artifact/uri"
+_TEST_SERVING_CONTAINER_IMAGE = "gcr.io/test-serving/container:image"
+_TEST_SERVING_CONTAINER_PREDICTION_ROUTE = "predict"
+_TEST_SERVING_CONTAINER_HEALTH_ROUTE = "health"
+_TEST_DESCRIPTION = "test description"
+_TEST_SERVING_CONTAINER_COMMAND = ["python3", "run_my_model.py"]
+_TEST_SERVING_CONTAINER_ARGS = ["--test", "arg"]
+_TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES = {
+ "learning_rate": 0.01,
+ "loss_fn": "mse",
+}
+_TEST_SERVING_CONTAINER_PORTS = [8888, 10000]
+_TEST_SERVING_CONTAINER_GRPC_PORTS = [7777, 7000]
+_TEST_ID = "1028944691210842416"
+_TEST_LABEL = {"team": "experimentation", "trial_id": "x435"}
+_TEST_APPENDED_USER_AGENT = ["fake_user_agent"]
+
+_TEST_INSTANCE_SCHEMA_URI = "gs://test/schema/instance.yaml"
+_TEST_PARAMETERS_SCHEMA_URI = "gs://test/schema/parameters.yaml"
+_TEST_PREDICTION_SCHEMA_URI = "gs://test/schema/predictions.yaml"
+
+_TEST_EXPLANATION_METADATA = aiplatform.explain.ExplanationMetadata(
+ inputs={
+ "features": {
+ "input_tensor_name": "dense_input",
+ "encoding": "BAG_OF_FEATURES",
+ "modality": "numeric",
+ "index_feature_mapping": ["abc", "def", "ghj"],
+ }
+ },
+ outputs={"medv": {"output_tensor_name": "dense_2"}},
+)
+_TEST_EXPLANATION_PARAMETERS = aiplatform.explain.ExplanationParameters(
+ {"sampled_shapley_attribution": {"path_count": 10}}
+)
+
+_TEST_MODEL_RESOURCE_NAME = model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_ID
+)
+
+_TEST_IMAGE_URI = "test_image:latest"
+
+_DEFAULT_BASE_IMAGE = "python:3.10"
+_MODEL_SERVER_FILE = "cpr_model_server.py"
+_TEST_SRC_DIR = "user_code"
+_TEST_PREDICTOR_FILE = "predictor.py"
+_TEST_PREDICTOR_FILE_STEM = "predictor"
+_TEST_PREDICTOR_CLASS = "MyPredictor"
+_TEST_HANDLER_FILE = "hanlder.py"
+_TEST_HANDLER_FILE_STEM = "hanlder"
+_TEST_HANDLER_CLASS = "MyHandler"
+_TEST_OUTPUT_IMAGE = "cpr_image:latest"
+
+_TEST_PREDICT_RESPONSE_CONTENT = b'{"x": [[1]]}'
+_TEST_HEALTH_CHECK_RESPONSE_CONTENT = b"{}"
+_TEST_HTTP_ERROR_MESSAGE = "HTTP Error Occurred."
+_TEST_CONTAINER_LOGS_LEN = 5
+_CONTAINER_RUNNING_STATUS = "running"
+_CONTAINER_EXITED_STATUS = "exited"
+
+_TEST_GPU_COUNT = 1
+_TEST_GPU_DEVICE_IDS = ["1"]
+_TEST_GPU_CAPABILITIES = [["gpu"]]
+_TEST_MULTIPROCESSING_CPU_COUNT = 16
+_DEFAULT_WORKERS_PER_CORE = 1
+
+
+@pytest.fixture
+def deserialize_mock():
+ with mock.patch.object(DefaultSerializer, "deserialize") as deserialize_mock:
+ deserialize_mock.return_value = _TEST_DESERIALIZED_INPUT
+ yield deserialize_mock
+
+
+@pytest.fixture
+def deserialize_exception_mock():
+ with mock.patch.object(
+ DefaultSerializer, "deserialize"
+ ) as deserialize_exception_mock:
+ deserialize_exception_mock.side_effect = HTTPException(
+ status_code=400,
+ )
+ yield deserialize_exception_mock
+
+
+@pytest.fixture
+def serialize_mock():
+ with mock.patch.object(DefaultSerializer, "serialize") as serialize_mock:
+ serialize_mock.return_value = _TEST_SERIALIZED_OUTPUT
+ yield serialize_mock
+
+
+@pytest.fixture
+def serialize_exception_mock():
+ with mock.patch.object(DefaultSerializer, "serialize") as serialize_exception_mock:
+ serialize_exception_mock.side_effect = HTTPException(
+ status_code=400,
+ )
+ yield serialize_exception_mock
+
+
+@pytest.fixture
+def predictor_mock():
+ with mock.patch(
+ "google.cloud.aiplatform.prediction.predictor.Predictor"
+ ) as MockPredictor:
+ instance = MockPredictor.return_value
+ instance().preprocess.return_value = _TEST_DESERIALIZED_INPUT
+ instance().predict.return_value = _TEST_PREDICTION_OUTPUT
+ instance().postprocess.return_value = _TEST_SERIALIZED_OUTPUT
+ yield instance
+
+
+@pytest.fixture
+def model_server_env_mock():
+ env_vars = {
+ "AIP_HTTP_PORT": _TEST_AIP_HTTP_PORT,
+ "AIP_HEALTH_ROUTE": _TEST_AIP_HEALTH_ROUTE,
+ "AIP_PREDICT_ROUTE": _TEST_AIP_PREDICT_ROUTE,
+ "AIP_STORAGE_URI": _TEST_AIP_STORAGE_URI,
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ "PREDICTOR_MODULE": f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}",
+ "PREDICTOR_CLASS": _TEST_PREDICTOR_CLASS,
+ }
+ with mock.patch.dict(os.environ, env_vars):
+ yield
+
+
+@pytest.fixture
+def cpu_count_mock():
+ with mock.patch.object(multiprocessing, "cpu_count") as cpu_count_mock:
+ cpu_count_mock.return_value = _TEST_MULTIPROCESSING_CPU_COUNT
+ yield cpu_count_mock
+
+
+def get_test_headers():
+ return Headers({"content-type": _APPLICATION_JSON, "accept": _APPLICATION_JSON})
+
+
+def get_test_request():
+ async def _create_request_receive():
+ return {
+ "type": "http.request",
+ "body": _TEST_INPUT,
+ "more_body": False,
+ }
+
+ return Request(
+ scope={"type": "http", "headers": get_test_headers().raw},
+ receive=_create_request_receive,
+ )
+
+
+@pytest.fixture
+def get_content_type_from_headers_mock():
+ with mock.patch.object(
+ handler_utils, "get_content_type_from_headers"
+ ) as get_content_type_from_headers_mock:
+ get_content_type_from_headers_mock.return_value = _APPLICATION_JSON
+ yield get_content_type_from_headers_mock
+
+
+@pytest.fixture
+def get_accept_from_headers_mock():
+ with mock.patch.object(
+ handler_utils, "get_accept_from_headers"
+ ) as get_accept_from_headers_mock:
+ get_accept_from_headers_mock.return_value = _APPLICATION_JSON
+ yield get_accept_from_headers_mock
+
+
+def get_test_predictor():
+ class _TestPredictor(Predictor):
+ def __init__(self):
+ pass
+
+ def load(self, artifacts_uri):
+ pass
+
+ def predict(self, instances):
+ pass
+
+ return _TestPredictor
+
+
+@pytest.fixture
+def populate_model_server_if_not_exists_mock():
+ with mock.patch.object(
+ prediction_utils, "populate_model_server_if_not_exists"
+ ) as populate_model_server_if_not_exists_mock:
+ yield populate_model_server_if_not_exists_mock
+
+
+@pytest.fixture
+def populate_entrypoint_if_not_exists_mock():
+ with mock.patch.object(
+ prediction_utils, "populate_entrypoint_if_not_exists"
+ ) as populate_entrypoint_if_not_exists_mock:
+ yield populate_entrypoint_if_not_exists_mock
+
+
+@pytest.fixture
+def inspect_source_from_class_mock_predictor_only():
+ with mock.patch.object(
+ prediction_utils, "inspect_source_from_class"
+ ) as inspect_source_from_class_mock_predictor_only:
+ inspect_source_from_class_mock_predictor_only.return_value = (
+ f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}",
+ _TEST_PREDICTOR_CLASS,
+ )
+ yield inspect_source_from_class_mock_predictor_only
+
+
+@pytest.fixture
+def inspect_source_from_class_mock_handler_only():
+ with mock.patch.object(
+ prediction_utils, "inspect_source_from_class"
+ ) as inspect_source_from_class_mock_handler_only:
+ inspect_source_from_class_mock_handler_only.return_value = (
+ f"{_TEST_SRC_DIR}.{_TEST_HANDLER_FILE_STEM}",
+ _TEST_HANDLER_CLASS,
+ )
+ yield inspect_source_from_class_mock_handler_only
+
+
+@pytest.fixture
+def inspect_source_from_class_mock_predictor_and_handler():
+ with mock.patch.object(
+ prediction_utils, "inspect_source_from_class"
+ ) as inspect_source_from_class_mock_predictor_and_handler:
+ inspect_source_from_class_mock_predictor_and_handler.side_effect = [
+ (f"{_TEST_SRC_DIR}.{_TEST_HANDLER_FILE_STEM}", _TEST_HANDLER_CLASS),
+ (f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}", _TEST_PREDICTOR_CLASS),
+ ]
+ yield inspect_source_from_class_mock_predictor_and_handler
+
+
+@pytest.fixture
+def is_prebuilt_prediction_container_uri_is_true_mock():
+ with mock.patch.object(
+ helpers, "is_prebuilt_prediction_container_uri"
+ ) as is_prebuilt_prediction_container_uri_is_true_mock:
+ is_prebuilt_prediction_container_uri_is_false_mock.return_value = True
+ yield is_prebuilt_prediction_container_uri_is_true_mock
+
+
+@pytest.fixture
+def is_prebuilt_prediction_container_uri_is_false_mock():
+ with mock.patch.object(
+ helpers, "is_prebuilt_prediction_container_uri"
+ ) as is_prebuilt_prediction_container_uri_is_false_mock:
+ is_prebuilt_prediction_container_uri_is_false_mock.return_value = False
+ yield is_prebuilt_prediction_container_uri_is_false_mock
+
+
+@pytest.fixture
+def build_image_mock():
+ with mock.patch.object(build, "build_image") as build_image_mock:
+ build_image_mock.return_value = None
+ yield build_image_mock
+
+
+@pytest.fixture
+def local_endpoint_logger_mock():
+ with mock.patch(
+ "google.cloud.aiplatform.prediction.local_endpoint._logger"
+ ) as local_endpoint_logger_mock:
+ yield local_endpoint_logger_mock
+
+
+@pytest.fixture
+def local_endpoint_init_mock():
+ with mock.patch.object(LocalEndpoint, "__init__") as local_endpoint_init_mock:
+ local_endpoint_init_mock.return_value = None
+ yield local_endpoint_init_mock
+
+
+@pytest.fixture
+def local_endpoint_enter_mock():
+ with mock.patch.object(LocalEndpoint, "__enter__") as local_endpoint_enter_mock:
+ yield local_endpoint_enter_mock
+
+
+@pytest.fixture
+def local_endpoint_exit_mock():
+ with mock.patch.object(LocalEndpoint, "__exit__") as local_endpoint_exit_mock:
+ yield local_endpoint_exit_mock
+
+
+@pytest.fixture
+def local_endpoint_del_mock():
+ with mock.patch.object(LocalEndpoint, "__del__") as local_endpoint_del_mock:
+ yield local_endpoint_del_mock
+
+
+@pytest.fixture
+def local_endpoint_run_health_check_mock():
+ with mock.patch.object(
+ LocalEndpoint, "run_health_check"
+ ) as local_endpoint_run_health_check_mock:
+ local_endpoint_run_health_check_mock.return_value.status_code = 200
+ yield local_endpoint_run_health_check_mock
+
+
+@pytest.fixture
+def local_endpoint_run_health_check_raise_exception_mock():
+ with mock.patch.object(
+ LocalEndpoint, "run_health_check"
+ ) as local_endpoint_run_health_check_raise_exception_mock:
+ local_endpoint_run_health_check_raise_exception_mock.side_effect = (
+ requests.exceptions.RequestException()
+ )
+ yield local_endpoint_run_health_check_raise_exception_mock
+
+
+@pytest.fixture
+def time_sleep_mock():
+ with mock.patch.object(time, "sleep") as time_sleep_mock:
+ yield time_sleep_mock
+
+
+@pytest.fixture
+def initializer_mock():
+ global_config = initializer.global_config
+ type(global_config).project = mock.PropertyMock(return_value=_TEST_PROJECT)
+
+
+@pytest.fixture
+def initializer_project_none_mock():
+ global_config = initializer.global_config
+ type(global_config).project = mock.PropertyMock(side_effect=GoogleAuthError)
+
+
+def get_docker_container_mock():
+ container = mock.MagicMock()
+ return container
+
+
+@pytest.fixture
+def run_prediction_container_mock():
+ with mock.patch.object(
+ run, "run_prediction_container"
+ ) as run_prediction_container_mock:
+ run_prediction_container_mock.return_value = get_docker_container_mock()
+ run_prediction_container_mock.return_value.status = run.CONTAINER_RUNNING_STATUS
+ yield run_prediction_container_mock
+
+
+@pytest.fixture
+def run_prediction_container_container_not_running_mock():
+ with mock.patch.object(
+ run, "run_prediction_container"
+ ) as run_prediction_container_container_not_running_mock:
+ run_prediction_container_container_not_running_mock.return_value = (
+ get_docker_container_mock()
+ )
+ run_prediction_container_container_not_running_mock.return_value.status = (
+ "NOT_RUNNING"
+ )
+ yield run_prediction_container_container_not_running_mock
+
+
+@pytest.fixture
+def run_print_container_logs_mock():
+ with mock.patch.object(
+ run, "print_container_logs"
+ ) as run_print_container_logs_mock:
+ run_print_container_logs_mock.return_value = _TEST_CONTAINER_LOGS_LEN
+ yield run_print_container_logs_mock
+
+
+@pytest.fixture
+def check_image_exists_locally_true_mock():
+ with mock.patch.object(
+ utils, "check_image_exists_locally"
+ ) as check_image_exists_locally_true_mock:
+ check_image_exists_locally_true_mock.return_value = True
+ yield check_image_exists_locally_true_mock
+
+
+@pytest.fixture
+def check_image_exists_locally_false_mock():
+ with mock.patch.object(
+ utils, "check_image_exists_locally"
+ ) as check_image_exists_locally_false_mock:
+ check_image_exists_locally_false_mock.return_value = False
+ yield check_image_exists_locally_false_mock
+
+
+@pytest.fixture
+def get_container_status_running_mock():
+ with mock.patch.object(
+ LocalEndpoint, "get_container_status"
+ ) as get_container_status_running_mock:
+ get_container_status_running_mock.return_value = _CONTAINER_RUNNING_STATUS
+ yield get_container_status_running_mock
+
+
+@pytest.fixture
+def get_container_status_second_fail_mock():
+ with mock.patch.object(
+ LocalEndpoint, "get_container_status"
+ ) as get_container_status_second_fail_mock:
+ get_container_status_second_fail_mock.side_effect = [
+ _CONTAINER_RUNNING_STATUS,
+ _CONTAINER_EXITED_STATUS,
+ ]
+ yield get_container_status_second_fail_mock
+
+
+@pytest.fixture
+def local_endpoint_print_container_logs_mock():
+ with mock.patch.object(
+ LocalEndpoint, "print_container_logs"
+ ) as local_endpoint_print_container_logs_mock:
+ yield local_endpoint_print_container_logs_mock
+
+
+@pytest.fixture
+def pull_image_if_not_exists_mock():
+ with mock.patch.object(
+ LocalModel, "pull_image_if_not_exists"
+ ) as pull_image_if_not_exists_mock:
+ yield pull_image_if_not_exists_mock
+
+
+def get_requests_post_response():
+ response = requests.models.Response()
+ response.status_code = 200
+ response._content = _TEST_PREDICT_RESPONSE_CONTENT
+ return response
+
+
+@pytest.fixture
+def requests_post_mock():
+ with mock.patch.object(requests, "post") as requests_post_mock:
+ requests_post_mock.return_value = get_requests_post_response()
+ yield requests_post_mock
+
+
+@pytest.fixture
+def requests_post_raises_exception_mock():
+ with mock.patch.object(requests, "post") as requests_post_raises_exception_mock:
+ requests_post_raises_exception_mock.side_effect = requests.exceptions.HTTPError(
+ _TEST_HTTP_ERROR_MESSAGE
+ )
+ yield requests_post_raises_exception_mock
+
+
+@pytest.fixture
+def open_file_mock():
+ with mock.patch("builtins.open") as open_file_mock:
+ yield open_file_mock().__enter__()
+
+
+def get_requests_get_response():
+ response = requests.models.Response()
+ response.status_code = 200
+ response._content = _TEST_HEALTH_CHECK_RESPONSE_CONTENT
+ return response
+
+
+@pytest.fixture
+def requests_get_mock():
+ with mock.patch.object(requests, "get") as requests_get_mock:
+ requests_get_mock.return_value = get_requests_get_response()
+ yield requests_get_mock
+
+
+@pytest.fixture
+def requests_get_second_raises_exception_mock():
+ with mock.patch.object(
+ requests, "get"
+ ) as requests_get_second_raises_exception_mock:
+ requests_get_second_raises_exception_mock.side_effect = [
+ get_requests_get_response(),
+ requests.exceptions.HTTPError(_TEST_HTTP_ERROR_MESSAGE),
+ ]
+ yield requests_get_second_raises_exception_mock
+
+
+@pytest.fixture
+def upload_model_mock():
+ with mock.patch.object(models.Model, "upload") as upload_model_mock:
+ yield upload_model_mock
+
+
+@pytest.fixture
+def execute_command_mock():
+ with mock.patch.object(local_util, "execute_command") as execute_command_mock:
+ execute_command_mock.return_value = 0
+ yield execute_command_mock
+
+
+@pytest.fixture
+def execute_command_return_code_1_mock():
+ with mock.patch.object(
+ local_util, "execute_command"
+ ) as execute_command_return_code_1_mock:
+ execute_command_return_code_1_mock.return_value = 1
+ yield execute_command_return_code_1_mock
+
+
+@pytest.fixture
+def raise_docker_error_with_command_mock():
+ with mock.patch.object(
+ errors, "raise_docker_error_with_command"
+ ) as raise_docker_error_with_command:
+ raise_docker_error_with_command.side_effect = errors.DockerError()
+
+
+@pytest.fixture
+def is_registry_uri_true_mock():
+ with mock.patch.object(
+ prediction_utils, "is_registry_uri"
+ ) as is_registry_uri_true_mock:
+ is_registry_uri_true_mock.return_value = True
+ yield is_registry_uri_true_mock
+
+
+@pytest.fixture
+def is_registry_uri_false_mock():
+ with mock.patch.object(
+ prediction_utils, "is_registry_uri"
+ ) as is_registry_uri_false_mock:
+ is_registry_uri_false_mock.return_value = False
+ yield is_registry_uri_false_mock
+
+
+@pytest.fixture
+def importlib_import_module_mock_once():
+ with mock.patch.object(
+ importlib, "import_module"
+ ) as importlib_import_module_mock_once:
+ yield importlib_import_module_mock_once
+
+
+@pytest.fixture
+def importlib_import_module_mock_twice():
+ with mock.patch.object(
+ importlib, "import_module"
+ ) as importlib_import_module_mock_twice:
+ return_values = {
+ _DEFAULT_HANDLER_MODULE: mock.Mock(),
+ f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}": mock.Mock(),
+ }
+ importlib_import_module_mock_twice.side_effect = return_values.get
+ yield importlib_import_module_mock_twice
+
+
+@pytest.fixture
+def fastapi_mock():
+ with mock.patch.object(model_server_module, "FastAPI") as fastapi_mock:
+ yield fastapi_mock
+
+
+class FakeHandler(Handler):
+ def __init__(self, artifacts_uri, predictor=None):
+ pass
+
+ def handle(self, request):
+ pass
+
+
+class TestPredictor:
+ def test_preprocess(self):
+ prediction_input = {"x": [1]}
+ predictor = get_test_predictor()
+
+ result = predictor().preprocess(prediction_input)
+
+ assert result == prediction_input
+
+ def test_postprocess(self):
+ prediction_results = {"x": [1]}
+ predictor = get_test_predictor()
+
+ result = predictor().postprocess(prediction_results)
+
+ assert result == prediction_results
+
+
+class TestDefaultSerializer:
+ def test_deserialize_application_json(self):
+ data = b'{"instances": [1, 2, 3]}'
+
+ deserialized_data = DefaultSerializer.deserialize(
+ data, content_type="application/json"
+ )
+
+ assert deserialized_data == {"instances": [1, 2, 3]}
+
+ def test_deserialize_unsupported_content_type_throws_exception(self):
+ content_type = "unsupported_type"
+ expected_message = (
+ f"Unsupported content type of the request: {content_type}.\n"
+ f'Currently supported content-type in DefaultSerializer: "application/json".'
+ )
+ data = b'{"instances": [1, 2, 3]}'
+
+ with pytest.raises(HTTPException) as exception:
+ DefaultSerializer.deserialize(data, content_type=content_type)
+
+ assert exception.value.status_code == 400
+ assert exception.value.detail == expected_message
+
+ def test_deserialize_invalid_json(self):
+ data = b"instances"
+ expected_message = "JSON deserialization failed for the request data"
+
+ with pytest.raises(HTTPException) as exception:
+ DefaultSerializer.deserialize(data, content_type="application/json")
+
+ assert exception.value.status_code == 400
+ assert expected_message in exception.value.detail
+
+ def test_serialize_application_json(self):
+ prediction = {}
+
+ serialized_prediction = DefaultSerializer.serialize(
+ prediction, accept="application/json"
+ )
+
+ assert serialized_prediction == "{}"
+
+ @pytest.mark.parametrize(
+ "accept",
+ [
+ ("application/json, text/html"),
+ ("application/json, text/html;q=0.9"),
+ ("text/html, application/json"),
+ ("text/html, application/json;q=0.9"),
+ (
+ "text/html, application/xhtml+xml, application/xml;q=0.9, application/json;q=0.8"
+ ),
+ ("*/*"),
+ ("text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8"),
+ ("application/json, */*"),
+ ("application/json, */*;q=0.9"),
+ ("text/html, application/json, */*;q=0.9"),
+ ("text/html, application/json;q=0.9, */*;q=0.8"),
+ ],
+ )
+ def test_serialize_application_json_multiple_accept(self, accept):
+ prediction = {}
+
+ serialized_prediction = DefaultSerializer.serialize(prediction, accept=accept)
+
+ assert serialized_prediction == "{}"
+
+ def test_serialize_unsupported_accept_throws_exception(self):
+ accept = "unsupported_type"
+ expected_message = (
+ f"Unsupported accept of the response: {accept}.\n"
+ f'Currently supported accept in DefaultSerializer: "application/json".'
+ )
+ prediction = {}
+
+ with pytest.raises(HTTPException) as exception:
+ DefaultSerializer.serialize(prediction, accept=accept)
+
+ assert exception.value.status_code == 400
+ assert exception.value.detail == expected_message
+
+ def test_serialize_invalid_json(self):
+ data = b"instances"
+ expected_message = "JSON serialization failed for the prediction result"
+
+ with pytest.raises(HTTPException) as exception:
+ DefaultSerializer.serialize(data, accept="application/json")
+
+ assert exception.value.status_code == 400
+ assert expected_message in exception.value.detail
+
+
+class TestPredictionHandler:
+ def test_init(self, predictor_mock):
+ handler = PredictionHandler(_TEST_GCS_ARTIFACTS_URI, predictor=predictor_mock)
+
+ assert handler._predictor == predictor_mock()
+ predictor_mock().load.assert_called_once_with(_TEST_GCS_ARTIFACTS_URI)
+
+ def test_init_no_predictor_raises_exception(self):
+ expected_message = (
+ "PredictionHandler must have a predictor class passed to the init function."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = PredictionHandler(_TEST_GCS_ARTIFACTS_URI)
+
+ assert str(exception.value) == expected_message
+
+ @pytest.mark.asyncio
+ async def test_handle(
+ self,
+ deserialize_mock,
+ get_content_type_from_headers_mock,
+ predictor_mock,
+ get_accept_from_headers_mock,
+ serialize_mock,
+ ):
+ handler = PredictionHandler(_TEST_GCS_ARTIFACTS_URI, predictor=predictor_mock)
+
+ response = await handler.handle(get_test_request())
+
+ assert response.status_code == 200
+ assert response.body == _TEST_SERIALIZED_OUTPUT
+
+ deserialize_mock.assert_called_once_with(_TEST_INPUT, _APPLICATION_JSON)
+ get_content_type_from_headers_mock.assert_called_once_with(get_test_headers())
+ predictor_mock().preprocess.assert_called_once_with(_TEST_DESERIALIZED_INPUT)
+ predictor_mock().predict.assert_called_once_with(_TEST_DESERIALIZED_INPUT)
+ predictor_mock().postprocess.assert_called_once_with(_TEST_PREDICTION_OUTPUT)
+ get_accept_from_headers_mock.assert_called_once_with(get_test_headers())
+ serialize_mock.assert_called_once_with(
+ _TEST_SERIALIZED_OUTPUT, _APPLICATION_JSON
+ )
+
+ @pytest.mark.asyncio
+ async def test_handle_deserialize_raises_exception(
+ self,
+ deserialize_exception_mock,
+ get_content_type_from_headers_mock,
+ predictor_mock,
+ get_accept_from_headers_mock,
+ serialize_mock,
+ ):
+ handler = PredictionHandler(_TEST_GCS_ARTIFACTS_URI, predictor=predictor_mock)
+
+ with pytest.raises(HTTPException):
+ await handler.handle(get_test_request())
+
+ get_content_type_from_headers_mock.assert_called_once_with(get_test_headers())
+ deserialize_exception_mock.assert_called_once_with(
+ _TEST_INPUT, _APPLICATION_JSON
+ )
+ assert not predictor_mock().preprocess.called
+ assert not predictor_mock().predict.called
+ assert not predictor_mock().postprocess.called
+ assert not get_accept_from_headers_mock.called
+ assert not serialize_mock.called
+
+ @pytest.mark.asyncio
+ async def test_handle_predictor_raises_exception(
+ self,
+ deserialize_mock,
+ get_content_type_from_headers_mock,
+ get_accept_from_headers_mock,
+ serialize_mock,
+ ):
+ preprocess_mock = mock.MagicMock(return_value=_TEST_DESERIALIZED_INPUT)
+ predict_mock = mock.MagicMock(side_effect=Exception())
+ postprocess_mock = mock.MagicMock(return_value=_TEST_SERIALIZED_OUTPUT)
+ handler = PredictionHandler(
+ _TEST_GCS_ARTIFACTS_URI, predictor=get_test_predictor()
+ )
+ expected_message = (
+ "The following exception has occurred: Exception. Arguments: ()."
+ )
+
+ with mock.patch.multiple(
+ handler._predictor,
+ preprocess=preprocess_mock,
+ predict=predict_mock,
+ postprocess=postprocess_mock,
+ ):
+ with pytest.raises(HTTPException) as exception:
+ await handler.handle(get_test_request())
+
+ assert exception.value.status_code == 500
+ assert exception.value.detail == expected_message
+ get_content_type_from_headers_mock.assert_called_once_with(get_test_headers())
+ deserialize_mock.assert_called_once_with(_TEST_INPUT, _APPLICATION_JSON)
+ preprocess_mock.assert_called_once_with(_TEST_DESERIALIZED_INPUT)
+ predict_mock.assert_called_once_with(_TEST_DESERIALIZED_INPUT)
+ assert not postprocess_mock.called
+ assert not get_accept_from_headers_mock.called
+ assert not serialize_mock.called
+
+ @pytest.mark.asyncio
+ async def test_handle_predictor_raises_http_exception(
+ self,
+ get_content_type_from_headers_mock,
+ deserialize_mock,
+ get_accept_from_headers_mock,
+ serialize_mock,
+ ):
+ status_code = 400
+ expected_message = "This is an user error."
+ preprocess_mock = mock.MagicMock(return_value=_TEST_DESERIALIZED_INPUT)
+ predict_mock = mock.MagicMock(
+ side_effect=HTTPException(status_code=status_code, detail=expected_message)
+ )
+ postprocess_mock = mock.MagicMock(return_value=_TEST_SERIALIZED_OUTPUT)
+ handler = PredictionHandler(
+ _TEST_GCS_ARTIFACTS_URI, predictor=get_test_predictor()
+ )
+
+ with mock.patch.multiple(
+ handler._predictor,
+ preprocess=preprocess_mock,
+ predict=predict_mock,
+ postprocess=postprocess_mock,
+ ):
+ with pytest.raises(HTTPException) as exception:
+ await handler.handle(get_test_request())
+
+ assert exception.value.status_code == status_code
+ assert exception.value.detail == expected_message
+ get_content_type_from_headers_mock.assert_called_once_with(get_test_headers())
+ deserialize_mock.assert_called_once_with(_TEST_INPUT, _APPLICATION_JSON)
+ preprocess_mock.assert_called_once_with(_TEST_DESERIALIZED_INPUT)
+ predict_mock.assert_called_once_with(_TEST_DESERIALIZED_INPUT)
+ assert not postprocess_mock.called
+ assert not get_accept_from_headers_mock.called
+ assert not serialize_mock.called
+
+ @pytest.mark.asyncio
+ async def test_handle_serialize_raises_exception(
+ self,
+ deserialize_mock,
+ get_content_type_from_headers_mock,
+ predictor_mock,
+ get_accept_from_headers_mock,
+ serialize_exception_mock,
+ ):
+ handler = PredictionHandler(_TEST_GCS_ARTIFACTS_URI, predictor=predictor_mock)
+
+ with pytest.raises(HTTPException):
+ await handler.handle(get_test_request())
+
+ deserialize_mock.assert_called_once_with(_TEST_INPUT, _APPLICATION_JSON)
+ get_content_type_from_headers_mock.assert_called_once_with(get_test_headers())
+ predictor_mock().preprocess.assert_called_once_with(_TEST_DESERIALIZED_INPUT)
+ predictor_mock().predict.assert_called_once_with(_TEST_DESERIALIZED_INPUT)
+ predictor_mock().postprocess.assert_called_once_with(_TEST_PREDICTION_OUTPUT)
+ get_accept_from_headers_mock.assert_called_once_with(get_test_headers())
+ serialize_exception_mock.assert_called_once_with(
+ _TEST_SERIALIZED_OUTPUT, _APPLICATION_JSON
+ )
+
+
+class TestHandlerUtils:
+ @pytest.mark.parametrize(
+ "header_key, content_type_value, expected_content_type",
+ [
+ ("Content-Type", "fake_content_type", "fake_content_type"),
+ ("content-Type", "fake_content_type", "fake_content_type"),
+ ("content-type", "fake_content_type", "fake_content_type"),
+ ("Content-type", "fake_content_type", "fake_content_type"),
+ ("ContentType", "fake_content_type", "fake_content_type"),
+ ("contentType", "fake_content_type", "fake_content_type"),
+ ("contenttype", "fake_content_type", "fake_content_type"),
+ ("Contenttype", "fake_content_type", "fake_content_type"),
+ ("Content-Type-", "fake_content_type", None),
+ ("cContent-Type", "fake_content_type", None),
+ ],
+ )
+ def test_get_content_type_from_headers(
+ self, header_key, content_type_value, expected_content_type
+ ):
+ headers = Headers({header_key: content_type_value})
+
+ content_type = handler_utils.get_content_type_from_headers(headers)
+
+ assert content_type == expected_content_type
+
+ @pytest.mark.parametrize(
+ "header_key, content_type_value, expected_content_type",
+ [
+ ("Content-Type", "fake_content_type; charset", "fake_content_type"),
+ ("content-Type", "fake_content_type; charset", "fake_content_type"),
+ ("content-type", "fake_content_type; charset", "fake_content_type"),
+ ("Content-type", "fake_content_type; charset", "fake_content_type"),
+ ("ContentType", "fake_content_type; charset", "fake_content_type"),
+ ("contentType", "fake_content_type; charset", "fake_content_type"),
+ ("contenttype", "fake_content_type; charset", "fake_content_type"),
+ ("Contenttype", "fake_content_type; charset", "fake_content_type"),
+ ("Content-Type-", "fake_content_type; charset", None),
+ ("cContent-Type", "fake_content_type; charset", None),
+ ],
+ )
+ def test_get_content_type_from_headers_with_parameter(
+ self, header_key, content_type_value, expected_content_type
+ ):
+ headers = Headers({header_key: content_type_value})
+
+ content_type = handler_utils.get_content_type_from_headers(headers)
+
+ assert content_type == expected_content_type
+
+ def test_get_content_type_from_headers_no_headers(self):
+ headers = Headers({})
+
+ content_type = handler_utils.get_content_type_from_headers(headers)
+
+ assert content_type is None
+
+ def test_get_content_type_from_headers_none(self):
+ content_type = handler_utils.get_content_type_from_headers(None)
+
+ assert content_type is None
+
+ @pytest.mark.parametrize(
+ "header_key, accept_value, expected_accept",
+ [
+ ("Accept", "fake_accept", "fake_accept"),
+ ("accept", "fake_accept", "fake_accept"),
+ ("Accept", prediction.ANY_ACCEPT_TYPE, prediction.ANY_ACCEPT_TYPE),
+ ("Accept", "fake_accept;q=0.9", "fake_accept;q=0.9"),
+ ("accept", "fake_accept;q=0.9", "fake_accept;q=0.9"),
+ ("aaccept", "fake_accept; charset", prediction.DEFAULT_ACCEPT_VALUE),
+ ("accept-", "fake_accept; charset", prediction.DEFAULT_ACCEPT_VALUE),
+ ],
+ )
+ def test_get_accept_from_headers(self, header_key, accept_value, expected_accept):
+ headers = Headers({header_key: accept_value})
+
+ accept = handler_utils.get_accept_from_headers(headers)
+
+ assert accept == expected_accept
+
+ def test_get_accept_from_headers_no_headers(self):
+ headers = Headers({})
+
+ accept = handler_utils.get_accept_from_headers(headers)
+
+ assert accept == prediction.DEFAULT_ACCEPT_VALUE
+
+ def test_get_accept_from_headers_none(self):
+ accept = handler_utils.get_accept_from_headers(None)
+
+ assert accept == prediction.DEFAULT_ACCEPT_VALUE
+
+ @pytest.mark.parametrize(
+ "accept, expected",
+ [
+ (
+ "application/json, text/html",
+ {"application/json": 1.0, "text/html": 1.0},
+ ),
+ (
+ "application/json, text/html;q=0.9",
+ {"application/json": 1.0, "text/html": 0.9},
+ ),
+ (
+ "text/html, application/json",
+ {"text/html": 1.0, "application/json": 1.0},
+ ),
+ (
+ "text/html, application/json;q=0.9",
+ {"text/html": 1.0, "application/json": 0.9},
+ ),
+ (
+ "text/html, application/xhtml+xml, application/xml;q=0.9, application/json;q=0.8",
+ {
+ "text/html": 1.0,
+ "application/xhtml+xml": 1.0,
+ "application/xml": 0.9,
+ "application/json": 0.8,
+ },
+ ),
+ ("*/*", {"*/*": 1.0}),
+ (
+ "text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8",
+ {
+ "text/html": 1.0,
+ "application/xhtml+xml": 1.0,
+ "application/xml": 0.9,
+ "*/*": 0.8,
+ },
+ ),
+ ("application/json, */*", {"application/json": 1.0, "*/*": 1.0}),
+ ("application/json, */*;q=0.9", {"application/json": 1.0, "*/*": 0.9}),
+ (
+ "text/html, application/json, */*;q=0.9",
+ {"text/html": 1.0, "application/json": 1.0, "*/*": 0.9},
+ ),
+ (
+ "text/html, application/json;q=0.9, */*;q=0.8",
+ {"text/html": 1.0, "application/json": 0.9, "*/*": 0.8},
+ ),
+ (None, {}),
+ ],
+ )
+ def test_parse_accept_header(self, accept, expected):
+ result = handler_utils.parse_accept_header(accept)
+
+ assert result == expected
+
+
+class TestLocalModel:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def _load_module(self, name, location):
+ spec = importlib.util.spec_from_file_location(name, location)
+ return importlib.util.module_from_spec(spec)
+
+ def test_init_with_serving_container_spec(self):
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+ ports = [
+ gca_model_compat.Port(container_port=port)
+ for port in _TEST_SERVING_CONTAINER_PORTS
+ ]
+ grpc_ports = [
+ gca_model_compat.Port(container_port=port)
+ for port in _TEST_SERVING_CONTAINER_GRPC_PORTS
+ ]
+ container_spec = gca_model_compat.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_SERVING_CONTAINER_COMMAND,
+ args=_TEST_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ grpc_ports=grpc_ports,
+ )
+
+ local_model = LocalModel(
+ serving_container_spec=container_spec,
+ )
+
+ assert local_model.serving_container_spec.image_uri == container_spec.image_uri
+ assert (
+ local_model.serving_container_spec.predict_route
+ == container_spec.predict_route
+ )
+ assert (
+ local_model.serving_container_spec.health_route
+ == container_spec.health_route
+ )
+ assert local_model.serving_container_spec.command == container_spec.command
+ assert local_model.serving_container_spec.args == container_spec.args
+ assert local_model.serving_container_spec.env == container_spec.env
+ assert local_model.serving_container_spec.ports == container_spec.ports
+ assert (
+ local_model.serving_container_spec.grpc_ports == container_spec.grpc_ports
+ )
+
+ def test_init_with_serving_container_spec_but_not_image_uri_throws_exception(self):
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+ ports = [
+ gca_model_compat.Port(container_port=port)
+ for port in _TEST_SERVING_CONTAINER_PORTS
+ ]
+ grpc_ports = [
+ gca_model_compat.Port(container_port=port)
+ for port in _TEST_SERVING_CONTAINER_GRPC_PORTS
+ ]
+ container_spec = gca_model_compat.ModelContainerSpec(
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_SERVING_CONTAINER_COMMAND,
+ args=_TEST_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ grpc_ports=grpc_ports,
+ )
+ expected_message = "Image uri is required for the serving container spec to initialize a LocalModel instance."
+
+ with pytest.raises(ValueError) as exception:
+ _ = LocalModel(
+ serving_container_spec=container_spec,
+ )
+
+ assert str(exception.value) == expected_message
+
+ def test_init_with_separate_args(self):
+ local_model = LocalModel(
+ serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ serving_container_command=_TEST_SERVING_CONTAINER_COMMAND,
+ serving_container_args=_TEST_SERVING_CONTAINER_ARGS,
+ serving_container_environment_variables=_TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ serving_container_ports=_TEST_SERVING_CONTAINER_PORTS,
+ serving_container_grpc_ports=_TEST_SERVING_CONTAINER_GRPC_PORTS,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model_compat.Port(container_port=port)
+ for port in _TEST_SERVING_CONTAINER_PORTS
+ ]
+
+ grpc_ports = [
+ gca_model_compat.Port(container_port=port)
+ for port in _TEST_SERVING_CONTAINER_GRPC_PORTS
+ ]
+
+ container_spec = gca_model_compat.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_SERVING_CONTAINER_COMMAND,
+ args=_TEST_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ grpc_ports=grpc_ports,
+ )
+
+ assert local_model.serving_container_spec.image_uri == container_spec.image_uri
+ assert (
+ local_model.serving_container_spec.predict_route
+ == container_spec.predict_route
+ )
+ assert (
+ local_model.serving_container_spec.health_route
+ == container_spec.health_route
+ )
+ assert local_model.serving_container_spec.command == container_spec.command
+ assert local_model.serving_container_spec.args == container_spec.args
+ assert local_model.serving_container_spec.env == container_spec.env
+ assert local_model.serving_container_spec.ports == container_spec.ports
+ assert (
+ local_model.serving_container_spec.grpc_ports == container_spec.grpc_ports
+ )
+
+ def test_init_with_separate_args_but_not_image_uri_throws_exception(self):
+ expected_message = "Serving container image uri is required to initialize a LocalModel instance."
+
+ with pytest.raises(ValueError) as exception:
+ _ = LocalModel(
+ serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ serving_container_command=_TEST_SERVING_CONTAINER_COMMAND,
+ serving_container_args=_TEST_SERVING_CONTAINER_ARGS,
+ serving_container_environment_variables=_TEST_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ serving_container_ports=_TEST_SERVING_CONTAINER_PORTS,
+ serving_container_grpc_ports=_TEST_SERVING_CONTAINER_GRPC_PORTS,
+ )
+
+ assert str(exception.value) == expected_message
+
+ def test_build_cpr_model_creates_and_get_localmodel(
+ self,
+ tmp_path,
+ inspect_source_from_class_mock_predictor_only,
+ is_prebuilt_prediction_container_uri_is_false_mock,
+ build_image_mock,
+ ):
+ src_dir = tmp_path / _TEST_SRC_DIR
+ src_dir.mkdir()
+ predictor = src_dir / _TEST_PREDICTOR_FILE
+ predictor.write_text(
+ textwrap.dedent(
+ """
+ class {predictor_class}:
+ pass
+ """
+ ).format(predictor_class=_TEST_PREDICTOR_CLASS)
+ )
+ my_predictor = self._load_module(_TEST_PREDICTOR_CLASS, str(predictor))
+
+ local_model = LocalModel.build_cpr_model(
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ predictor=my_predictor,
+ )
+
+ assert local_model.serving_container_spec.image_uri == _TEST_OUTPUT_IMAGE
+ assert local_model.serving_container_spec.predict_route == DEFAULT_PREDICT_ROUTE
+ assert local_model.serving_container_spec.health_route == DEFAULT_HEALTH_ROUTE
+ inspect_source_from_class_mock_predictor_only.assert_called_once_with(
+ my_predictor, str(src_dir)
+ )
+ is_prebuilt_prediction_container_uri_is_false_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE
+ )
+ build_image_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE,
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ python_module=_DEFAULT_PYTHON_MODULE,
+ requirements_path=None,
+ extra_requirements=_DEFAULT_SDK_REQUIREMENTS,
+ extra_packages=None,
+ exposed_ports=[DEFAULT_HTTP_PORT],
+ environment_variables={
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ "PREDICTOR_MODULE": f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}",
+ "PREDICTOR_CLASS": _TEST_PREDICTOR_CLASS,
+ },
+ pip_command="pip",
+ python_command="python",
+ no_cache=False,
+ )
+
+ def test_build_cpr_model_fails_handler_is_none(
+ self,
+ tmp_path,
+ build_image_mock,
+ ):
+ src_dir = tmp_path / _TEST_SRC_DIR
+ src_dir.mkdir()
+ predictor = src_dir / _TEST_PREDICTOR_FILE
+ predictor.write_text(
+ textwrap.dedent(
+ """
+ class {predictor_class}:
+ pass
+ """
+ ).format(predictor_class=_TEST_PREDICTOR_CLASS)
+ )
+ my_predictor = self._load_module(_TEST_PREDICTOR_CLASS, str(predictor))
+ expected_message = "A handler must be provided but handler is None."
+
+ with pytest.raises(ValueError) as exception:
+ _ = LocalModel.build_cpr_model(
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ predictor=my_predictor,
+ handler=None,
+ )
+
+ assert str(exception.value) == expected_message
+
+ def test_build_cpr_model_fails_prediction_handler_but_predictor_is_none(
+ self,
+ tmp_path,
+ build_image_mock,
+ ):
+ src_dir = tmp_path / _TEST_SRC_DIR
+ expected_message = (
+ "PredictionHandler must have a predictor class but predictor is None."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = LocalModel.build_cpr_model(
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ predictor=None,
+ )
+
+ assert str(exception.value) == expected_message
+
+ def test_build_cpr_model_with_custom_handler(
+ self,
+ tmp_path,
+ inspect_source_from_class_mock_predictor_and_handler,
+ is_prebuilt_prediction_container_uri_is_false_mock,
+ build_image_mock,
+ ):
+ src_dir = tmp_path / _TEST_SRC_DIR
+ src_dir.mkdir()
+ predictor = src_dir / _TEST_PREDICTOR_FILE
+ predictor.write_text(
+ textwrap.dedent(
+ """
+ class {predictor_class}:
+ pass
+ """
+ ).format(predictor_class=_TEST_PREDICTOR_CLASS)
+ )
+ my_predictor = self._load_module(_TEST_PREDICTOR_CLASS, str(predictor))
+ handler = src_dir / _TEST_HANDLER_FILE
+ handler.write_text(
+ textwrap.dedent(
+ """
+ class {handler_class}:
+ pass
+ """
+ ).format(handler_class=_TEST_HANDLER_CLASS)
+ )
+ my_handler = self._load_module(_TEST_HANDLER_CLASS, str(handler))
+
+ local_model = LocalModel.build_cpr_model(
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ predictor=my_predictor,
+ handler=my_handler,
+ )
+
+ assert local_model.serving_container_spec.image_uri == _TEST_OUTPUT_IMAGE
+ assert local_model.serving_container_spec.predict_route == DEFAULT_PREDICT_ROUTE
+ assert local_model.serving_container_spec.health_route == DEFAULT_HEALTH_ROUTE
+ inspect_source_from_class_mock_predictor_and_handler.assert_has_calls(
+ [mock.call(my_handler, str(src_dir)), mock.call(my_predictor, str(src_dir))]
+ )
+ is_prebuilt_prediction_container_uri_is_false_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE
+ )
+ build_image_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE,
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ python_module=_DEFAULT_PYTHON_MODULE,
+ requirements_path=None,
+ extra_requirements=_DEFAULT_SDK_REQUIREMENTS,
+ extra_packages=None,
+ exposed_ports=[DEFAULT_HTTP_PORT],
+ environment_variables={
+ "HANDLER_MODULE": f"{_TEST_SRC_DIR}.{_TEST_HANDLER_FILE_STEM}",
+ "HANDLER_CLASS": _TEST_HANDLER_CLASS,
+ "PREDICTOR_MODULE": f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}",
+ "PREDICTOR_CLASS": _TEST_PREDICTOR_CLASS,
+ },
+ pip_command="pip",
+ python_command="python",
+ no_cache=False,
+ )
+
+ def test_build_cpr_model_with_custom_handler_and_predictor_is_none(
+ self,
+ tmp_path,
+ inspect_source_from_class_mock_handler_only,
+ is_prebuilt_prediction_container_uri_is_false_mock,
+ build_image_mock,
+ ):
+ src_dir = tmp_path / _TEST_SRC_DIR
+ src_dir.mkdir()
+ handler = src_dir / _TEST_HANDLER_FILE
+ handler.write_text(
+ textwrap.dedent(
+ """
+ class {handler_class}:
+ pass
+ """
+ ).format(handler_class=_TEST_HANDLER_CLASS)
+ )
+ my_handler = self._load_module(_TEST_HANDLER_CLASS, str(handler))
+
+ local_model = LocalModel.build_cpr_model(
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ predictor=None,
+ handler=my_handler,
+ )
+
+ assert local_model.serving_container_spec.image_uri == _TEST_OUTPUT_IMAGE
+ assert local_model.serving_container_spec.predict_route == DEFAULT_PREDICT_ROUTE
+ assert local_model.serving_container_spec.health_route == DEFAULT_HEALTH_ROUTE
+ inspect_source_from_class_mock_handler_only.assert_called_once_with(
+ my_handler, str(src_dir)
+ )
+ is_prebuilt_prediction_container_uri_is_false_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE
+ )
+ build_image_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE,
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ python_module=_DEFAULT_PYTHON_MODULE,
+ requirements_path=None,
+ extra_requirements=_DEFAULT_SDK_REQUIREMENTS,
+ extra_packages=None,
+ exposed_ports=[DEFAULT_HTTP_PORT],
+ environment_variables={
+ "HANDLER_MODULE": f"{_TEST_SRC_DIR}.{_TEST_HANDLER_FILE_STEM}",
+ "HANDLER_CLASS": _TEST_HANDLER_CLASS,
+ },
+ pip_command="pip",
+ python_command="python",
+ no_cache=False,
+ )
+
+ def test_build_cpr_model_creates_and_get_localmodel_base_is_prebuilt(
+ self,
+ tmp_path,
+ inspect_source_from_class_mock_predictor_only,
+ is_prebuilt_prediction_container_uri_is_true_mock,
+ build_image_mock,
+ ):
+ src_dir = tmp_path / _TEST_SRC_DIR
+ src_dir.mkdir()
+ predictor = src_dir / _TEST_PREDICTOR_FILE
+ predictor.write_text(
+ textwrap.dedent(
+ """
+ class {predictor_class}:
+ pass
+ """
+ ).format(predictor_class=_TEST_PREDICTOR_CLASS)
+ )
+ my_predictor = self._load_module(_TEST_PREDICTOR_CLASS, str(predictor))
+
+ local_model = LocalModel.build_cpr_model(
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ predictor=my_predictor,
+ )
+
+ assert local_model.serving_container_spec.image_uri == _TEST_OUTPUT_IMAGE
+ assert local_model.serving_container_spec.predict_route == DEFAULT_PREDICT_ROUTE
+ assert local_model.serving_container_spec.health_route == DEFAULT_HEALTH_ROUTE
+ inspect_source_from_class_mock_predictor_only.assert_called_once_with(
+ my_predictor, str(src_dir)
+ )
+ is_prebuilt_prediction_container_uri_is_true_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE
+ )
+ build_image_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE,
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ python_module=_DEFAULT_PYTHON_MODULE,
+ requirements_path=None,
+ extra_requirements=_DEFAULT_SDK_REQUIREMENTS,
+ extra_packages=None,
+ exposed_ports=[DEFAULT_HTTP_PORT],
+ environment_variables={
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ "PREDICTOR_MODULE": f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}",
+ "PREDICTOR_CLASS": _TEST_PREDICTOR_CLASS,
+ },
+ pip_command="pip3",
+ python_command="python3",
+ no_cache=False,
+ )
+
+ def test_build_cpr_model_creates_and_get_localmodel_with_requirements_path(
+ self,
+ tmp_path,
+ inspect_source_from_class_mock_predictor_only,
+ is_prebuilt_prediction_container_uri_is_false_mock,
+ build_image_mock,
+ ):
+ src_dir = tmp_path / _TEST_SRC_DIR
+ src_dir.mkdir()
+ predictor = src_dir / _TEST_PREDICTOR_FILE
+ predictor.write_text(
+ textwrap.dedent(
+ """
+ class {predictor_class}:
+ pass
+ """
+ ).format(predictor_class=_TEST_PREDICTOR_CLASS)
+ )
+ my_predictor = self._load_module(_TEST_PREDICTOR_CLASS, str(predictor))
+ requirements_path = f"{_TEST_SRC_DIR}/requirements.txt"
+
+ local_model = LocalModel.build_cpr_model(
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ predictor=my_predictor,
+ requirements_path=requirements_path,
+ )
+
+ assert local_model.serving_container_spec.image_uri == _TEST_OUTPUT_IMAGE
+ assert local_model.serving_container_spec.predict_route == DEFAULT_PREDICT_ROUTE
+ assert local_model.serving_container_spec.health_route == DEFAULT_HEALTH_ROUTE
+ inspect_source_from_class_mock_predictor_only.assert_called_once_with(
+ my_predictor, str(src_dir)
+ )
+ is_prebuilt_prediction_container_uri_is_false_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE
+ )
+ build_image_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE,
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ python_module=_DEFAULT_PYTHON_MODULE,
+ requirements_path=requirements_path,
+ extra_requirements=_DEFAULT_SDK_REQUIREMENTS,
+ extra_packages=None,
+ exposed_ports=[DEFAULT_HTTP_PORT],
+ environment_variables={
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ "PREDICTOR_MODULE": f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}",
+ "PREDICTOR_CLASS": _TEST_PREDICTOR_CLASS,
+ },
+ pip_command="pip",
+ python_command="python",
+ no_cache=False,
+ )
+
+ def test_build_cpr_model_creates_and_get_localmodel_with_extra_packages(
+ self,
+ tmp_path,
+ inspect_source_from_class_mock_predictor_only,
+ is_prebuilt_prediction_container_uri_is_false_mock,
+ build_image_mock,
+ ):
+ src_dir = tmp_path / _TEST_SRC_DIR
+ src_dir.mkdir()
+ predictor = src_dir / _TEST_PREDICTOR_FILE
+ predictor.write_text(
+ textwrap.dedent(
+ """
+ class {predictor_class}:
+ pass
+ """
+ ).format(predictor_class=_TEST_PREDICTOR_CLASS)
+ )
+ my_predictor = self._load_module(_TEST_PREDICTOR_CLASS, str(predictor))
+ extra_packages = [f"{_TEST_SRC_DIR}/custom_package.tar.gz"]
+
+ local_model = LocalModel.build_cpr_model(
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ predictor=my_predictor,
+ extra_packages=extra_packages,
+ )
+
+ assert local_model.serving_container_spec.image_uri == _TEST_OUTPUT_IMAGE
+ assert local_model.serving_container_spec.predict_route == DEFAULT_PREDICT_ROUTE
+ assert local_model.serving_container_spec.health_route == DEFAULT_HEALTH_ROUTE
+ inspect_source_from_class_mock_predictor_only.assert_called_once_with(
+ my_predictor, str(src_dir)
+ )
+ is_prebuilt_prediction_container_uri_is_false_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE
+ )
+ build_image_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE,
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ python_module=_DEFAULT_PYTHON_MODULE,
+ requirements_path=None,
+ extra_requirements=_DEFAULT_SDK_REQUIREMENTS,
+ extra_packages=extra_packages,
+ exposed_ports=[DEFAULT_HTTP_PORT],
+ environment_variables={
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ "PREDICTOR_MODULE": f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}",
+ "PREDICTOR_CLASS": _TEST_PREDICTOR_CLASS,
+ },
+ pip_command="pip",
+ python_command="python",
+ no_cache=False,
+ )
+
+ def test_build_cpr_model_creates_and_get_localmodel_no_cache(
+ self,
+ tmp_path,
+ inspect_source_from_class_mock_predictor_only,
+ is_prebuilt_prediction_container_uri_is_false_mock,
+ build_image_mock,
+ ):
+ src_dir = tmp_path / _TEST_SRC_DIR
+ src_dir.mkdir()
+ predictor = src_dir / _TEST_PREDICTOR_FILE
+ predictor.write_text(
+ textwrap.dedent(
+ """
+ class {predictor_class}:
+ pass
+ """
+ ).format(predictor_class=_TEST_PREDICTOR_CLASS)
+ )
+ my_predictor = self._load_module(_TEST_PREDICTOR_CLASS, str(predictor))
+ no_cache = True
+
+ local_model = LocalModel.build_cpr_model(
+ str(src_dir), _TEST_OUTPUT_IMAGE, predictor=my_predictor, no_cache=no_cache
+ )
+
+ assert local_model.serving_container_spec.image_uri == _TEST_OUTPUT_IMAGE
+ assert local_model.serving_container_spec.predict_route == DEFAULT_PREDICT_ROUTE
+ assert local_model.serving_container_spec.health_route == DEFAULT_HEALTH_ROUTE
+ inspect_source_from_class_mock_predictor_only.assert_called_once_with(
+ my_predictor, str(src_dir)
+ )
+ is_prebuilt_prediction_container_uri_is_false_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE
+ )
+ build_image_mock.assert_called_once_with(
+ _DEFAULT_BASE_IMAGE,
+ str(src_dir),
+ _TEST_OUTPUT_IMAGE,
+ python_module=_DEFAULT_PYTHON_MODULE,
+ requirements_path=None,
+ extra_requirements=_DEFAULT_SDK_REQUIREMENTS,
+ extra_packages=None,
+ exposed_ports=[DEFAULT_HTTP_PORT],
+ environment_variables={
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ "PREDICTOR_MODULE": f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}",
+ "PREDICTOR_CLASS": _TEST_PREDICTOR_CLASS,
+ },
+ pip_command="pip",
+ python_command="python",
+ no_cache=no_cache,
+ )
+
+ def test_deploy_to_local_endpoint(
+ self,
+ local_endpoint_init_mock,
+ local_endpoint_enter_mock,
+ local_endpoint_exit_mock,
+ local_endpoint_del_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+
+ with local_model.deploy_to_local_endpoint():
+ pass
+
+ local_endpoint_init_mock.assert_called_once_with(
+ serving_container_image_uri=_TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route="",
+ serving_container_health_route="",
+ serving_container_command=[],
+ serving_container_args=[],
+ serving_container_environment_variables={},
+ serving_container_ports=[],
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ container_ready_timeout=None,
+ container_ready_check_interval=None,
+ )
+ assert local_endpoint_enter_mock.called
+ assert local_endpoint_exit_mock.called
+
+ def test_deploy_to_local_endpoint_with_all_parameters(
+ self,
+ local_endpoint_init_mock,
+ local_endpoint_enter_mock,
+ local_endpoint_exit_mock,
+ local_endpoint_del_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+ artifact_uri = "gs://myproject/mymodel"
+ credential_path = "key.json"
+ host_port = 6666
+ container_ready_timeout = 60
+ container_ready_check_interval = 5
+
+ with local_model.deploy_to_local_endpoint(
+ artifact_uri=artifact_uri,
+ credential_path=credential_path,
+ host_port=host_port,
+ container_ready_timeout=container_ready_timeout,
+ container_ready_check_interval=container_ready_check_interval,
+ ):
+ pass
+
+ local_endpoint_init_mock.assert_called_once_with(
+ serving_container_image_uri=_TEST_IMAGE_URI,
+ artifact_uri=artifact_uri,
+ serving_container_predict_route="",
+ serving_container_health_route="",
+ serving_container_command=[],
+ serving_container_args=[],
+ serving_container_environment_variables={},
+ serving_container_ports=[],
+ credential_path=credential_path,
+ host_port=host_port,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ container_ready_timeout=container_ready_timeout,
+ container_ready_check_interval=container_ready_check_interval,
+ )
+ assert local_endpoint_enter_mock.called
+ assert local_endpoint_exit_mock.called
+
+ def test_deploy_to_local_endpoint_with_gpu_count(
+ self,
+ local_endpoint_init_mock,
+ local_endpoint_enter_mock,
+ local_endpoint_exit_mock,
+ local_endpoint_del_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+
+ with local_model.deploy_to_local_endpoint(
+ gpu_count=_TEST_GPU_COUNT, gpu_capabilities=_TEST_GPU_CAPABILITIES
+ ):
+ pass
+
+ local_endpoint_init_mock.assert_called_once_with(
+ serving_container_image_uri=_TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route="",
+ serving_container_health_route="",
+ serving_container_command=[],
+ serving_container_args=[],
+ serving_container_environment_variables={},
+ serving_container_ports=[],
+ credential_path=None,
+ host_port=None,
+ gpu_count=_TEST_GPU_COUNT,
+ gpu_device_ids=None,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ container_ready_timeout=None,
+ container_ready_check_interval=None,
+ )
+ assert local_endpoint_enter_mock.called
+ assert local_endpoint_exit_mock.called
+
+ def test_deploy_to_local_endpoint_with_gpu_device_ids(
+ self,
+ local_endpoint_init_mock,
+ local_endpoint_enter_mock,
+ local_endpoint_exit_mock,
+ local_endpoint_del_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+
+ with local_model.deploy_to_local_endpoint(
+ gpu_device_ids=_TEST_GPU_DEVICE_IDS, gpu_capabilities=_TEST_GPU_CAPABILITIES
+ ):
+ pass
+
+ local_endpoint_init_mock.assert_called_once_with(
+ serving_container_image_uri=_TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route="",
+ serving_container_health_route="",
+ serving_container_command=[],
+ serving_container_args=[],
+ serving_container_environment_variables={},
+ serving_container_ports=[],
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=_TEST_GPU_DEVICE_IDS,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ container_ready_timeout=None,
+ container_ready_check_interval=None,
+ )
+ assert local_endpoint_enter_mock.called
+ assert local_endpoint_exit_mock.called
+
+ def test_copy_image(
+ self,
+ pull_image_if_not_exists_mock,
+ execute_command_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+ dst_image_uri = "new_image:latest"
+ expected_command = ["docker", "tag", f"{_TEST_IMAGE_URI}", f"{dst_image_uri}"]
+
+ new_local_model = local_model.copy_image(dst_image_uri)
+
+ pull_image_if_not_exists_mock.assert_called_once_with()
+ execute_command_mock.assert_called_once_with(expected_command)
+ assert new_local_model.serving_container_spec.image_uri == dst_image_uri
+
+ def test_copy_image_raises_exception(
+ self,
+ pull_image_if_not_exists_mock,
+ execute_command_return_code_1_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+ dst_image_uri = "new_image:latest"
+ expected_command = ["docker", "tag", f"{_TEST_IMAGE_URI}", f"{dst_image_uri}"]
+ expected_message = "Docker failed with error code"
+ expected_return_code = 1
+
+ with mock.patch.object(
+ errors, "raise_docker_error_with_command"
+ ) as raise_docker_error_with_command:
+ raise_docker_error_with_command.side_effect = errors.DockerError(
+ expected_message, expected_command, expected_return_code
+ )
+
+ with pytest.raises(errors.DockerError) as exception:
+ local_model.copy_image(dst_image_uri)
+
+ pull_image_if_not_exists_mock.assert_called_once_with()
+ execute_command_return_code_1_mock.assert_called_once_with(expected_command)
+ assert exception.value.message == expected_message
+ assert exception.value.cmd == expected_command
+ assert exception.value.exit_code == expected_return_code
+
+ def test_push_image(
+ self,
+ execute_command_mock,
+ is_registry_uri_true_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+ expected_command = ["docker", "push", f"{_TEST_IMAGE_URI}"]
+
+ local_model.push_image()
+
+ execute_command_mock.assert_called_once_with(expected_command)
+
+ def test_push_image_image_uri_is_not_registry_uri(
+ self,
+ execute_command_mock,
+ is_registry_uri_false_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+ expected_message = (
+ "The image uri must be a container registry or artifact registry uri "
+ f"but it is: {_TEST_IMAGE_URI}."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ local_model.push_image()
+
+ assert str(exception.value) == expected_message
+
+ def test_push_image_raises_exception(
+ self,
+ execute_command_return_code_1_mock,
+ is_registry_uri_true_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+ expected_command = ["docker", "push", f"{_TEST_IMAGE_URI}"]
+ expected_message = "Docker failed with error code"
+ expected_return_code = 1
+
+ with mock.patch.object(
+ errors, "raise_docker_error_with_command"
+ ) as raise_docker_error_with_command:
+ raise_docker_error_with_command.side_effect = errors.DockerError(
+ expected_message, expected_command, expected_return_code
+ )
+
+ with pytest.raises(errors.DockerError) as exception:
+ local_model.push_image()
+
+ execute_command_return_code_1_mock.assert_called_once_with(expected_command)
+ assert exception.value.message == expected_message
+ assert exception.value.cmd == expected_command
+ assert exception.value.exit_code == expected_return_code
+
+ def test_pull_image_if_not_exists_image_exists(
+ self,
+ check_image_exists_locally_true_mock,
+ execute_command_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+
+ local_model.pull_image_if_not_exists()
+
+ assert not execute_command_mock.called
+
+ def test_pull_image_if_not_exists_image_not_exists(
+ self,
+ check_image_exists_locally_false_mock,
+ execute_command_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+ expected_command = ["docker", "pull", f"{_TEST_IMAGE_URI}"]
+
+ local_model.pull_image_if_not_exists()
+
+ execute_command_mock.assert_called_once_with(expected_command)
+
+ def test_pull_image_if_not_exists_docker_command_fail(
+ self,
+ check_image_exists_locally_false_mock,
+ execute_command_return_code_1_mock,
+ ):
+ container_spec = gca_model_compat.ModelContainerSpec(image_uri=_TEST_IMAGE_URI)
+ local_model = LocalModel(container_spec)
+ expected_command = ["docker", "pull", f"{_TEST_IMAGE_URI}"]
+ return_code = 1
+ expected_message = textwrap.dedent(
+ """
+ Docker failed with error code {return_code}.
+ Command: {command}
+ """.format(
+ return_code=return_code, command=" ".join(expected_command)
+ )
+ )
+
+ with pytest.raises(errors.DockerError) as exception:
+ local_model.pull_image_if_not_exists()
+
+ execute_command_return_code_1_mock.assert_called_once_with(expected_command)
+ assert exception.value.message == expected_message
+ assert exception.value.cmd == expected_command
+ assert exception.value.exit_code == return_code
+
+
+class TestLocalEndpoint:
+ def test_init(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+
+ def test_init_with_all_parameters(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ artifact_uri = "gs://myproject/mymodel"
+ serving_container_predict_route = "/custom_predict"
+ serving_container_health_route = "/custom_health"
+ serving_container_command = ["echo", "hello"]
+ serving_container_args = [">", "tmp.log"]
+ serving_container_environment_variables = {"custom_key": "custom_value"}
+ serving_container_ports = [5555]
+ credential_path = "key.json"
+ host_port = 6666
+ container_ready_timeout = 60
+ container_ready_check_interval = 5
+
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ artifact_uri=artifact_uri,
+ serving_container_predict_route=serving_container_predict_route,
+ serving_container_health_route=serving_container_health_route,
+ serving_container_command=serving_container_command,
+ serving_container_args=serving_container_args,
+ serving_container_environment_variables=serving_container_environment_variables,
+ serving_container_ports=serving_container_ports,
+ credential_path=credential_path,
+ host_port=host_port,
+ container_ready_timeout=container_ready_timeout,
+ container_ready_check_interval=container_ready_check_interval,
+ ):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=artifact_uri,
+ serving_container_predict_route=serving_container_predict_route,
+ serving_container_health_route=serving_container_health_route,
+ serving_container_command=serving_container_command,
+ serving_container_args=serving_container_args,
+ serving_container_environment_variables=serving_container_environment_variables,
+ serving_container_ports=serving_container_ports,
+ credential_path=credential_path,
+ host_port=host_port,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+
+ def test_init_with_initializer_project(
+ self,
+ initializer_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={
+ local_endpoint._GCLOUD_PROJECT_ENV: _TEST_PROJECT
+ },
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+
+ def test_init_with_gpu_count(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ gpu_count=_TEST_GPU_COUNT,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ ):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=_TEST_GPU_COUNT,
+ gpu_device_ids=None,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+
+ def test_init_with_gpu_device_ids(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ gpu_device_ids=_TEST_GPU_DEVICE_IDS,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ ):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=_TEST_GPU_DEVICE_IDS,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+
+ def test_init_with_gpu_count_and_device_ids_throw_error(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ ):
+ expected_message = (
+ "At most one gpu_count or gpu_device_ids can be set but both are set."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ gpu_count=_TEST_GPU_COUNT,
+ gpu_device_ids=_TEST_GPU_DEVICE_IDS,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ ):
+ pass
+
+ assert str(exception.value) == expected_message
+
+ def test_init_with_gpu_count_but_capabilities_unset(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI, gpu_count=_TEST_GPU_COUNT):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=_TEST_GPU_COUNT,
+ gpu_device_ids=None,
+ gpu_capabilities=prediction.DEFAULT_LOCAL_RUN_GPU_CAPABILITIES,
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+
+ def test_init_with_gpu_device_ids_but_capabilities_unset(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI, gpu_device_ids=_TEST_GPU_DEVICE_IDS):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=_TEST_GPU_DEVICE_IDS,
+ gpu_capabilities=prediction.DEFAULT_LOCAL_RUN_GPU_CAPABILITIES,
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+
+ def test_init_with_gpu_capabilities_but_count_and_device_ids_unset(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI, gpu_capabilities=_TEST_GPU_CAPABILITIES):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=prediction.DEFAULT_LOCAL_RUN_GPU_COUNT,
+ gpu_device_ids=None,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+
+ def test_init_fail_with_container_not_running(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_container_not_running_mock,
+ time_sleep_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ expected_message = "The container never starts running."
+ expected_command = ""
+ expected_return_code = 1
+ with pytest.raises(errors.DockerError) as exception:
+ with LocalEndpoint(_TEST_IMAGE_URI):
+ pass
+
+ run_prediction_container_container_not_running_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+ assert (
+ run_prediction_container_container_not_running_mock.return_value.stop.called
+ )
+ assert exception.value.message == expected_message
+ assert exception.value.cmd == expected_command
+ assert exception.value.exit_code == expected_return_code
+
+ def test_init_fail_with_health_check_fail_container_not_running(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ time_sleep_mock,
+ local_endpoint_run_health_check_raise_exception_mock,
+ local_endpoint_print_container_logs_mock,
+ get_container_status_second_fail_mock,
+ ):
+ expected_command = ""
+ expected_message = "Container exited before the first health check succeeded."
+ expected_return_code = 1
+
+ with pytest.raises(errors.DockerError) as exception:
+ with LocalEndpoint(_TEST_IMAGE_URI):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+ local_endpoint_print_container_logs_mock.assert_called_once_with(
+ show_all=True,
+ message="Container already exited, all container logs:",
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+ assert exception.value.message == expected_message
+ assert exception.value.cmd == expected_command
+ assert exception.value.exit_code == expected_return_code
+
+ def test_init_fail_with_health_check_fail_timeout(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ time_sleep_mock,
+ local_endpoint_run_health_check_raise_exception_mock,
+ local_endpoint_print_container_logs_mock,
+ get_container_status_running_mock,
+ ):
+ expected_command = ""
+ expected_message = "The health check never succeeded."
+ expected_return_code = 1
+
+ with pytest.raises(errors.DockerError) as exception:
+ with LocalEndpoint(_TEST_IMAGE_URI):
+ pass
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+ local_endpoint_print_container_logs_mock.assert_called_once_with(
+ show_all=True,
+ message="Health check never succeeds, all container logs:",
+ )
+ assert run_prediction_container_mock.return_value.stop.called
+ assert exception.value.message == expected_message
+ assert exception.value.cmd == expected_command
+ assert exception.value.exit_code == expected_return_code
+
+ def test_serve(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ local_endpoint = LocalEndpoint(_TEST_IMAGE_URI)
+
+ local_endpoint.serve()
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+
+ def test_serve_with_all_parameters(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ artifact_uri = "gs://myproject/mymodel"
+ serving_container_predict_route = "/custom_predict"
+ serving_container_health_route = "/custom_health"
+ serving_container_command = ["echo", "hello"]
+ serving_container_args = [">", "tmp.log"]
+ serving_container_environment_variables = {"custom_key": "custom_value"}
+ serving_container_ports = [5555]
+ credential_path = "key.json"
+ host_port = 6666
+ container_ready_timeout = 60
+ container_ready_check_interval = 5
+ local_endpoint = LocalEndpoint(
+ _TEST_IMAGE_URI,
+ artifact_uri=artifact_uri,
+ serving_container_predict_route=serving_container_predict_route,
+ serving_container_health_route=serving_container_health_route,
+ serving_container_command=serving_container_command,
+ serving_container_args=serving_container_args,
+ serving_container_environment_variables=serving_container_environment_variables,
+ serving_container_ports=serving_container_ports,
+ credential_path=credential_path,
+ host_port=host_port,
+ container_ready_timeout=container_ready_timeout,
+ container_ready_check_interval=container_ready_check_interval,
+ )
+
+ local_endpoint.serve()
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=artifact_uri,
+ serving_container_predict_route=serving_container_predict_route,
+ serving_container_health_route=serving_container_health_route,
+ serving_container_command=serving_container_command,
+ serving_container_args=serving_container_args,
+ serving_container_environment_variables=serving_container_environment_variables,
+ serving_container_ports=serving_container_ports,
+ credential_path=credential_path,
+ host_port=host_port,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+
+ def test_serve_with_initializer_project(
+ self,
+ initializer_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ local_endpoint_object = LocalEndpoint(_TEST_IMAGE_URI)
+
+ local_endpoint_object.serve()
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={
+ local_endpoint._GCLOUD_PROJECT_ENV: _TEST_PROJECT
+ },
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+
+ def test_serve_with_gpu_count(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ local_endpoint = LocalEndpoint(
+ _TEST_IMAGE_URI,
+ gpu_count=_TEST_GPU_COUNT,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ )
+
+ local_endpoint.serve()
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=_TEST_GPU_COUNT,
+ gpu_device_ids=None,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ )
+
+ def test_serve_with_gpu_device_ids(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ local_endpoint = LocalEndpoint(
+ _TEST_IMAGE_URI,
+ gpu_device_ids=_TEST_GPU_DEVICE_IDS,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ )
+
+ local_endpoint.serve()
+
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=_TEST_GPU_DEVICE_IDS,
+ gpu_capabilities=_TEST_GPU_CAPABILITIES,
+ )
+
+ def test_serve_serve_twice(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ local_endpoint = LocalEndpoint(_TEST_IMAGE_URI)
+ local_endpoint.serve()
+
+ # Call serve again.
+ local_endpoint.serve()
+
+ # This is only called once.
+ run_prediction_container_mock.assert_called_once_with(
+ _TEST_IMAGE_URI,
+ artifact_uri=None,
+ serving_container_predict_route=prediction.DEFAULT_LOCAL_PREDICT_ROUTE,
+ serving_container_health_route=prediction.DEFAULT_LOCAL_HEALTH_ROUTE,
+ serving_container_command=None,
+ serving_container_args=None,
+ serving_container_environment_variables={},
+ serving_container_ports=None,
+ credential_path=None,
+ host_port=None,
+ gpu_count=None,
+ gpu_device_ids=None,
+ gpu_capabilities=None,
+ )
+
+ def test_stop(
+ self,
+ initializer_project_none_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ endpoint = LocalEndpoint(_TEST_IMAGE_URI)
+ endpoint.serve()
+
+ endpoint.stop()
+
+ assert run_prediction_container_mock.return_value.stop.called
+
+ def test_predict_request(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ requests_post_mock,
+ ):
+ serving_container_predict_route = "/custom_predict"
+ host_port = 8080
+ url = f"http://localhost:{host_port}{serving_container_predict_route}"
+ request = '{"instances": [{"x": [[1.1, 2.2, 3.3, 5.5]]}]}'
+
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_predict_route=serving_container_predict_route,
+ host_port=host_port,
+ ) as endpoint:
+ response = endpoint.predict(request=request)
+
+ requests_post_mock.assert_called_once_with(url, data=request, headers=None)
+ assert response.status_code == get_requests_post_response().status_code
+ assert response._content == get_requests_post_response()._content
+
+ def test_predict_request_with_headers(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ requests_post_mock,
+ ):
+ serving_container_predict_route = "/custom_predict"
+ host_port = 8080
+ url = f"http://localhost:{host_port}{serving_container_predict_route}"
+ request = '{"instances": [{"x": [[1.1, 2.2, 3.3, 5.5]]}]}'
+ headers = {"Custom-header": "Custom-value"}
+
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_predict_route=serving_container_predict_route,
+ host_port=host_port,
+ ) as endpoint:
+ response = endpoint.predict(request=request, headers=headers)
+
+ requests_post_mock.assert_called_once_with(url, data=request, headers=headers)
+ assert response.status_code == get_requests_post_response().status_code
+ assert response._content == get_requests_post_response()._content
+
+ def test_predict_request_file(
+ self,
+ tmp_path,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ requests_post_mock,
+ open_file_mock,
+ ):
+ serving_container_predict_route = "/custom_predict"
+ host_port = 8080
+ url = f"http://localhost:{host_port}{serving_container_predict_route}"
+ request = '{"instances": [{"x": [[1.1, 2.2, 3.3, 5.5]]}]}'
+ request_file = tmp_path / "input.json"
+ request_file.write_text(request)
+
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_predict_route=serving_container_predict_route,
+ host_port=host_port,
+ ) as endpoint:
+ response = endpoint.predict(request_file=request_file)
+
+ requests_post_mock.assert_called_once_with(
+ url, data=open_file_mock, headers=None
+ )
+ assert response.status_code == get_requests_post_response().status_code
+ assert response._content == get_requests_post_response()._content
+
+ def test_predict_request_file_with_headers(
+ self,
+ tmp_path,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ requests_post_mock,
+ open_file_mock,
+ ):
+ serving_container_predict_route = "/custom_predict"
+ host_port = 8080
+ url = f"http://localhost:{host_port}{serving_container_predict_route}"
+ request = '{"instances": [{"x": [[1.1, 2.2, 3.3, 5.5]]}]}'
+ request_file = tmp_path / "input.json"
+ request_file.write_text(request)
+ headers = {"Custom-header": "Custom-value"}
+
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_predict_route=serving_container_predict_route,
+ host_port=host_port,
+ ) as endpoint:
+ response = endpoint.predict(request_file=request_file, headers=headers)
+
+ requests_post_mock.assert_called_once_with(
+ url, data=open_file_mock, headers=headers
+ )
+ assert response.status_code == get_requests_post_response().status_code
+ assert response._content == get_requests_post_response()._content
+
+ def test_predict_container_exited_raises_exception(
+ self,
+ run_prediction_container_mock,
+ requests_post_mock,
+ ):
+ request = '{"instances": [{"x": [[1.1, 2.2, 3.3, 5.5]]}]}'
+ endpoint = LocalEndpoint(
+ _TEST_IMAGE_URI,
+ )
+ endpoint.container_exited = True
+ expected_message = (
+ "The local endpoint is not serving traffic. Please call `serve()`."
+ )
+
+ with pytest.raises(RuntimeError) as exception:
+ endpoint.predict(request=request)
+
+ assert str(exception.value) == expected_message
+
+ def test_predict_both_request_and_request_file_specified_raises_exception(
+ self,
+ tmp_path,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ serving_container_predict_route = "/custom_predict"
+ host_port = 8080
+ request = '{"instances": [{"x": [[1.1, 2.2, 3.3, 5.5]]}]}'
+ request_file = tmp_path / "input.json"
+ request_file.write_text(request)
+ expected_message = (
+ "request and request_file can not be specified at the same time."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_predict_route=serving_container_predict_route,
+ host_port=host_port,
+ ) as endpoint:
+ endpoint.predict(request=request, request_file=request_file)
+
+ assert str(exception.value) == expected_message
+
+ def test_predict_none_of_request_and_request_file_specified_raises_exception(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ serving_container_predict_route = "/custom_predict"
+ host_port = 8080
+ expected_message = "One of request and request_file needs to be specified."
+
+ with pytest.raises(ValueError) as exception:
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_predict_route=serving_container_predict_route,
+ host_port=host_port,
+ ) as endpoint:
+ endpoint.predict()
+
+ assert str(exception.value) == expected_message
+
+ def test_predict_request_file_not_exists_raises_exception(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ serving_container_predict_route = "/custom_predict"
+ host_port = 8080
+ request_file = "non_existing_input.json"
+ expected_message = f"request_file does not exist: {request_file}."
+
+ with pytest.raises(ValueError) as exception:
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_predict_route=serving_container_predict_route,
+ host_port=host_port,
+ ) as endpoint:
+ endpoint.predict(request_file=request_file)
+
+ assert str(exception.value) == expected_message
+
+ def test_predict_raises_exception(
+ self,
+ local_endpoint_logger_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ requests_post_raises_exception_mock,
+ ):
+ serving_container_predict_route = "/custom_predict"
+ host_port = 8080
+ url = f"http://localhost:{host_port}{serving_container_predict_route}"
+ request = '{"instances": [{"x": [[1.1, 2.2, 3.3, 5.5]]}]}'
+
+ with pytest.raises(requests.exceptions.RequestException) as exception:
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_predict_route=serving_container_predict_route,
+ host_port=host_port,
+ ) as endpoint:
+ endpoint.predict(request=request)
+
+ requests_post_raises_exception_mock.assert_called_once_with(
+ url, data=request, headers=None
+ )
+ assert local_endpoint_logger_mock.warning.called
+ assert str(exception.value) == _TEST_HTTP_ERROR_MESSAGE
+
+ def test_predict_raises_exception_not_verbose(
+ self,
+ local_endpoint_logger_mock,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ requests_post_raises_exception_mock,
+ ):
+ serving_container_predict_route = "/custom_predict"
+ host_port = 8080
+ url = f"http://localhost:{host_port}{serving_container_predict_route}"
+ request = '{"instances": [{"x": [[1.1, 2.2, 3.3, 5.5]]}]}'
+
+ with pytest.raises(requests.exceptions.RequestException) as exception:
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_predict_route=serving_container_predict_route,
+ host_port=host_port,
+ ) as endpoint:
+ endpoint.predict(request=request, verbose=False)
+
+ requests_post_raises_exception_mock.assert_called_once_with(
+ url, data=request, headers=None
+ )
+ assert not local_endpoint_logger_mock.warning.called
+ assert str(exception.value) == _TEST_HTTP_ERROR_MESSAGE
+
+ def test_run_health_check(
+ self,
+ run_prediction_container_mock,
+ requests_get_mock,
+ ):
+ serving_container_health_route = "/custom_health"
+ host_port = 8080
+ url = f"http://localhost:{host_port}{serving_container_health_route}"
+
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_health_route=serving_container_health_route,
+ host_port=host_port,
+ ) as endpoint:
+ response = endpoint.run_health_check()
+
+ requests_get_mock.assert_called_with(url)
+ assert response.status_code == get_requests_get_response().status_code
+ assert response._content == get_requests_get_response()._content
+
+ def test_run_health_check_container_exited_raises_exception(
+ self,
+ run_prediction_container_mock,
+ requests_get_mock,
+ ):
+ endpoint = LocalEndpoint(
+ _TEST_IMAGE_URI,
+ )
+ endpoint.container_exited = True
+ expected_message = (
+ "The local endpoint is not serving traffic. Please call `serve()`."
+ )
+
+ with pytest.raises(RuntimeError) as exception:
+ endpoint.run_health_check()
+
+ assert str(exception.value) == expected_message
+
+ def test_run_health_check_raises_exception(
+ self,
+ local_endpoint_logger_mock,
+ run_prediction_container_mock,
+ requests_get_second_raises_exception_mock,
+ ):
+ serving_container_health_route = "/custom_health"
+ host_port = 8080
+ url = f"http://localhost:{host_port}{serving_container_health_route}"
+
+ with pytest.raises(requests.exceptions.RequestException) as exception:
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_health_route=serving_container_health_route,
+ host_port=host_port,
+ ) as endpoint:
+ endpoint.run_health_check()
+
+ requests_get_second_raises_exception_mock.assert_called_with(url)
+ assert local_endpoint_logger_mock.warning.called
+ assert str(exception.value) == _TEST_HTTP_ERROR_MESSAGE
+
+ def test_run_health_check_raises_exception_not_verbose(
+ self,
+ local_endpoint_logger_mock,
+ run_prediction_container_mock,
+ requests_get_second_raises_exception_mock,
+ ):
+ serving_container_health_route = "/custom_health"
+ host_port = 8080
+ url = f"http://localhost:{host_port}{serving_container_health_route}"
+
+ with pytest.raises(requests.exceptions.RequestException) as exception:
+ with LocalEndpoint(
+ _TEST_IMAGE_URI,
+ serving_container_health_route=serving_container_health_route,
+ host_port=host_port,
+ ) as endpoint:
+ endpoint.run_health_check(verbose=False)
+
+ requests_get_second_raises_exception_mock.assert_called_with(url)
+ assert not local_endpoint_logger_mock.warning.called
+ assert str(exception.value) == _TEST_HTTP_ERROR_MESSAGE
+
+ def test_print_container_logs(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ run_print_container_logs_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI) as endpoint:
+ endpoint.print_container_logs()
+
+ run_print_container_logs_mock.assert_called_once_with(
+ run_prediction_container_mock(), start_index=0, message=None
+ )
+
+ def test_print_container_logs_show_all(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ run_print_container_logs_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI) as endpoint:
+ endpoint.print_container_logs(show_all=True)
+
+ run_print_container_logs_mock.assert_called_once_with(
+ run_prediction_container_mock(), start_index=None, message=None
+ )
+
+ def test_print_container_logs_if_container_is_not_running_container_running(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ get_container_status_running_mock,
+ local_endpoint_print_container_logs_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI) as endpoint:
+ endpoint.print_container_logs_if_container_is_not_running()
+
+ assert get_container_status_running_mock.called
+ assert not local_endpoint_print_container_logs_mock.called
+
+ def test_print_container_logs_if_container_is_not_running_container_exited(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ get_container_status_second_fail_mock,
+ local_endpoint_print_container_logs_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI) as endpoint:
+ endpoint.print_container_logs_if_container_is_not_running()
+
+ local_endpoint_print_container_logs_mock.assert_called_once_with(
+ show_all=False, message=None
+ )
+
+ def test_print_container_logs_if_container_is_not_running_container_exited_show_all(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ get_container_status_second_fail_mock,
+ local_endpoint_print_container_logs_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI) as endpoint:
+ endpoint.print_container_logs_if_container_is_not_running(show_all=True)
+
+ local_endpoint_print_container_logs_mock.assert_called_once_with(
+ show_all=True, message=None
+ )
+
+ def test_get_container_status(
+ self,
+ run_prediction_container_mock,
+ local_endpoint_run_health_check_mock,
+ ):
+ with LocalEndpoint(_TEST_IMAGE_URI) as endpoint:
+ status = endpoint.get_container_status()
+
+ assert run_prediction_container_mock().reload.called
+ assert status == _CONTAINER_RUNNING_STATUS
+
+
+class TestModelServer:
+ @mock.patch.dict(
+ os.environ,
+ {
+ "AIP_HTTP_PORT": _TEST_AIP_HTTP_PORT,
+ "AIP_HEALTH_ROUTE": _TEST_AIP_HEALTH_ROUTE,
+ "AIP_PREDICT_ROUTE": _TEST_AIP_PREDICT_ROUTE,
+ "AIP_STORAGE_URI": _TEST_AIP_STORAGE_URI,
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ },
+ clear=True,
+ )
+ def test_init(
+ self,
+ importlib_import_module_mock_once,
+ fastapi_mock,
+ ):
+ model_server = CprModelServer()
+
+ importlib_import_module_mock_once.assert_called_once_with(
+ _DEFAULT_HANDLER_MODULE
+ )
+ getattr(
+ importlib_import_module_mock_once.return_value, _DEFAULT_HANDLER_CLASS
+ ).assert_called_once_with(_TEST_AIP_STORAGE_URI, predictor=None)
+ assert (
+ model_server.handler
+ == getattr(
+ importlib_import_module_mock_once.return_value, _DEFAULT_HANDLER_CLASS
+ ).return_value
+ )
+ assert model_server.http_port == int(_TEST_AIP_HTTP_PORT)
+ assert model_server.health_route == _TEST_AIP_HEALTH_ROUTE
+ assert model_server.predict_route == _TEST_AIP_PREDICT_ROUTE
+ fastapi_mock.return_value.add_api_route.assert_has_calls(
+ [
+ mock.call(
+ path=_TEST_AIP_HEALTH_ROUTE,
+ endpoint=model_server.health,
+ methods=["GET"],
+ )
+ ],
+ [
+ mock.call(
+ path=_TEST_AIP_PREDICT_ROUTE,
+ endpoint=model_server.predict,
+ methods=["POST"],
+ )
+ ],
+ )
+
+ def test_init_with_predictor(
+ self,
+ model_server_env_mock,
+ importlib_import_module_mock_twice,
+ fastapi_mock,
+ ):
+ model_server = CprModelServer()
+
+ importlib_import_module_mock_twice.assert_has_calls(
+ [
+ mock.call(_DEFAULT_HANDLER_MODULE),
+ mock.call(f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}"),
+ ]
+ )
+ getattr(
+ importlib_import_module_mock_twice(_DEFAULT_HANDLER_MODULE),
+ _DEFAULT_HANDLER_CLASS,
+ ).assert_called_once_with(
+ _TEST_AIP_STORAGE_URI,
+ predictor=getattr(
+ importlib_import_module_mock_twice(
+ f"{_TEST_SRC_DIR}.{_TEST_PREDICTOR_FILE_STEM}"
+ ),
+ _TEST_PREDICTOR_CLASS,
+ ),
+ )
+ assert (
+ model_server.handler
+ == getattr(
+ importlib_import_module_mock_twice(_DEFAULT_HANDLER_MODULE),
+ _DEFAULT_HANDLER_CLASS,
+ ).return_value
+ )
+ assert model_server.http_port == int(_TEST_AIP_HTTP_PORT)
+ assert model_server.health_route == _TEST_AIP_HEALTH_ROUTE
+ assert model_server.predict_route == _TEST_AIP_PREDICT_ROUTE
+ fastapi_mock.return_value.add_api_route.assert_has_calls(
+ [
+ mock.call(
+ path=_TEST_AIP_HEALTH_ROUTE,
+ endpoint=model_server.health,
+ methods=["GET"],
+ )
+ ],
+ [
+ mock.call(
+ path=_TEST_AIP_PREDICT_ROUTE,
+ endpoint=model_server.predict,
+ methods=["POST"],
+ )
+ ],
+ )
+
+ @mock.patch.dict(
+ os.environ,
+ {
+ "AIP_HTTP_PORT": _TEST_AIP_HTTP_PORT,
+ "AIP_HEALTH_ROUTE": _TEST_AIP_HEALTH_ROUTE,
+ "AIP_PREDICT_ROUTE": _TEST_AIP_PREDICT_ROUTE,
+ "AIP_STORAGE_URI": _TEST_AIP_STORAGE_URI,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ },
+ clear=True,
+ )
+ def test_init_fails_no_handler_module(
+ self,
+ ):
+ expected_message = (
+ "Both of the environment variables, HANDLER_MODULE and HANDLER_CLASS "
+ "need to be specified."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = CprModelServer()
+
+ assert str(exception.value) == expected_message
+
+ @mock.patch.dict(
+ os.environ,
+ {
+ "AIP_HTTP_PORT": _TEST_AIP_HTTP_PORT,
+ "AIP_HEALTH_ROUTE": _TEST_AIP_HEALTH_ROUTE,
+ "AIP_PREDICT_ROUTE": _TEST_AIP_PREDICT_ROUTE,
+ "AIP_STORAGE_URI": _TEST_AIP_STORAGE_URI,
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ },
+ clear=True,
+ )
+ def test_init_fails_no_handler_class(
+ self,
+ ):
+ expected_message = (
+ "Both of the environment variables, HANDLER_MODULE and HANDLER_CLASS "
+ "need to be specified."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = CprModelServer()
+
+ assert str(exception.value) == expected_message
+
+ @mock.patch.dict(
+ os.environ,
+ {
+ "AIP_HEALTH_ROUTE": _TEST_AIP_HEALTH_ROUTE,
+ "AIP_PREDICT_ROUTE": _TEST_AIP_PREDICT_ROUTE,
+ "AIP_STORAGE_URI": _TEST_AIP_STORAGE_URI,
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ },
+ clear=True,
+ )
+ def test_init_no_aip_http_port(
+ self,
+ importlib_import_module_mock_once,
+ ):
+ expected_message = (
+ "The environment variable AIP_HTTP_PORT needs to be specified."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = CprModelServer()
+
+ assert str(exception.value) == expected_message
+
+ @mock.patch.dict(
+ os.environ,
+ {
+ "AIP_HTTP_PORT": _TEST_AIP_HTTP_PORT,
+ "AIP_PREDICT_ROUTE": _TEST_AIP_PREDICT_ROUTE,
+ "AIP_STORAGE_URI": _TEST_AIP_STORAGE_URI,
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ },
+ clear=True,
+ )
+ def test_init_no_aip_health_route(
+ self,
+ importlib_import_module_mock_once,
+ ):
+ expected_message = (
+ "Both of the environment variables AIP_HEALTH_ROUTE and "
+ "AIP_PREDICT_ROUTE need to be specified."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = CprModelServer()
+
+ assert str(exception.value) == expected_message
+
+ @mock.patch.dict(
+ os.environ,
+ {
+ "AIP_HTTP_PORT": _TEST_AIP_HTTP_PORT,
+ "AIP_HEALTH_ROUTE": _TEST_AIP_HEALTH_ROUTE,
+ "AIP_STORAGE_URI": _TEST_AIP_STORAGE_URI,
+ "HANDLER_MODULE": _DEFAULT_HANDLER_MODULE,
+ "HANDLER_CLASS": _DEFAULT_HANDLER_CLASS,
+ },
+ clear=True,
+ )
+ def test_init_no_aip_predict_route(
+ self,
+ importlib_import_module_mock_once,
+ ):
+ expected_message = (
+ "Both of the environment variables AIP_HEALTH_ROUTE and "
+ "AIP_PREDICT_ROUTE need to be specified."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = CprModelServer()
+
+ assert str(exception.value) == expected_message
+
+ def test_health(self, model_server_env_mock, importlib_import_module_mock_twice):
+ model_server = CprModelServer()
+ client = TestClient(model_server.app)
+
+ response = client.get(_TEST_AIP_HEALTH_ROUTE)
+
+ assert response.status_code == 200
+
+ def test_predict(self, model_server_env_mock, importlib_import_module_mock_twice):
+ model_server = CprModelServer()
+ client = TestClient(model_server.app)
+
+ with mock.patch.object(model_server.handler, "handle") as handle_mock:
+ future = asyncio.Future()
+ future.set_result(Response())
+
+ handle_mock.return_value = future
+
+ response = client.post(_TEST_AIP_PREDICT_ROUTE, json={"x": [1]})
+
+ assert response.status_code == 200
+
+ def test_predict_thorws_http_exception(
+ self, model_server_env_mock, importlib_import_module_mock_twice
+ ):
+ expected_message = "A fake HTTP exception."
+ model_server = CprModelServer()
+ client = TestClient(model_server.app)
+
+ with mock.patch.object(model_server.handler, "handle") as handle_mock:
+ handle_mock.side_effect = HTTPException(
+ status_code=400,
+ detail=expected_message,
+ )
+
+ response = client.post(_TEST_AIP_PREDICT_ROUTE, json={"x": [1]})
+
+ assert response.status_code == 400
+ assert json.loads(response.content)["detail"] == expected_message
+
+ def test_predict_thorws_exceptions_not_http_exception_default_handler(
+ self, model_server_env_mock, importlib_import_module_mock_twice
+ ):
+ expected_message = (
+ "An exception ValueError occurred. Arguments: ('Not a correct value.',)."
+ )
+ model_server = CprModelServer()
+ model_server.is_default_handler = True
+ client = TestClient(model_server.app)
+
+ with mock.patch.object(model_server.handler, "handle") as handle_mock:
+ handle_mock.side_effect = ValueError("Not a correct value.")
+
+ response = client.post(_TEST_AIP_PREDICT_ROUTE, json={"x": [1]})
+
+ assert (
+ prediction.CUSTOM_PREDICTION_ROUTINES_SERVER_ERROR_HEADER_KEY
+ in response.headers
+ )
+ assert response.status_code == 500
+ assert json.loads(response.content)["detail"] == expected_message
+
+ def test_predict_thorws_exceptions_not_http_exception_not_default_handler(
+ self, model_server_env_mock, importlib_import_module_mock_twice
+ ):
+ expected_message = (
+ "An exception ValueError occurred. Arguments: ('Not a correct value.',)."
+ )
+ model_server = CprModelServer()
+ client = TestClient(model_server.app)
+
+ with mock.patch.object(model_server.handler, "handle") as handle_mock:
+ handle_mock.side_effect = ValueError("Not a correct value.")
+
+ response = client.post(_TEST_AIP_PREDICT_ROUTE, json={"x": [1]})
+
+ assert (
+ prediction.CUSTOM_PREDICTION_ROUTINES_SERVER_ERROR_HEADER_KEY
+ not in response.headers
+ )
+ assert response.status_code == 500
+ assert json.loads(response.content)["detail"] == expected_message
+
+ @mock.patch.dict(
+ os.environ,
+ {
+ "VERTEX_CPR_WEB_CONCURRENCY": "8",
+ },
+ clear=True,
+ )
+ def test_set_number_of_workers_from_env_web_concurrency(self):
+ model_server_module.set_number_of_workers_from_env()
+
+ assert os.getenv("WEB_CONCURRENCY") == "8"
+
+ @mock.patch.dict(
+ os.environ,
+ {},
+ clear=True,
+ )
+ def test_set_number_of_workers_from_env_default_workers_per_core(
+ self, cpu_count_mock
+ ):
+ model_server_module.set_number_of_workers_from_env()
+
+ assert os.getenv("WEB_CONCURRENCY") == str(
+ cpu_count_mock.return_value * _DEFAULT_WORKERS_PER_CORE
+ )
+
+ @mock.patch.dict(
+ os.environ,
+ {"VERTEX_CPR_WORKERS_PER_CORE": "2"},
+ clear=True,
+ )
+ def test_set_number_of_workers_from_env_with_workers_per_core(self, cpu_count_mock):
+ model_server_module.set_number_of_workers_from_env()
+
+ assert os.getenv("WEB_CONCURRENCY") == str(cpu_count_mock.return_value * 2)
+
+ @mock.patch.dict(
+ os.environ,
+ {"VERTEX_CPR_MAX_WORKERS": "4"},
+ clear=True,
+ )
+ def test_set_number_of_workers_from_env_max_workers(self, cpu_count_mock):
+ number_of_workers = min(
+ 4, cpu_count_mock.return_value * _DEFAULT_WORKERS_PER_CORE
+ )
+
+ model_server_module.set_number_of_workers_from_env()
+
+ assert os.getenv("WEB_CONCURRENCY") == str(number_of_workers)
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_publisher_model.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_publisher_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d0ca131ac9b63c0ce4ff6bd24e3785cba8b400d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_publisher_model.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from unittest import mock
+from importlib import reload
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import _publisher_models
+
+from google.cloud.aiplatform.compat.services import (
+ model_garden_service_client_v1,
+)
+
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+
+_TEST_RESOURCE_NAME = "publishers/google/models/chat-bison@001"
+_TEST_MODEL_GARDEN_ID = "google/chat-bison@001"
+_TEST_INVALID_RESOURCE_NAME = "google.chat-bison@001"
+
+
+@pytest.fixture
+def mock_get_publisher_model():
+ with mock.patch.object(
+ model_garden_service_client_v1.ModelGardenServiceClient,
+ "get_publisher_model",
+ ) as mock_get_publisher_model:
+ yield mock_get_publisher_model
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestPublisherModel:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_publisher_model_with_resource_name(self, mock_get_publisher_model):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ _ = _publisher_models._PublisherModel(_TEST_RESOURCE_NAME)
+ mock_get_publisher_model.assert_called_once_with(
+ name=_TEST_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_publisher_model_with_model_garden_id(self, mock_get_publisher_model):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ _ = _publisher_models._PublisherModel(_TEST_MODEL_GARDEN_ID)
+ mock_get_publisher_model.assert_called_once_with(
+ name=_TEST_RESOURCE_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_publisher_model_with_invalid_resource_name(
+ self, mock_get_publisher_model
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ with pytest.raises(
+ ValueError,
+ match=(
+ f"`{_TEST_INVALID_RESOURCE_NAME}` is not a valid PublisherModel "
+ "resource name or model garden id."
+ ),
+ ):
+ _ = _publisher_models._PublisherModel(_TEST_INVALID_RESOURCE_NAME)
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_tensorboard.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_tensorboard.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae1863c37c25659a24d6d10bf3f3fa378e547bd9
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_tensorboard.py
@@ -0,0 +1,1148 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from unittest import mock
+from unittest.mock import patch
+from importlib import reload
+
+from google.api_core import operation
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import tensorboard
+from google.cloud.aiplatform import utils
+
+from google.cloud.aiplatform.compat.services import (
+ tensorboard_service_client,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ encryption_spec as gca_encryption_spec,
+ tensorboard as gca_tensorboard,
+ tensorboard_data as gca_tensorboard_data,
+ tensorboard_experiment as gca_tensorboard_experiment,
+ tensorboard_run as gca_tensorboard_run,
+ tensorboard_service as gca_tensorboard_service,
+ tensorboard_time_series as gca_tensorboard_time_series,
+)
+
+from google.protobuf import field_mask_pb2
+
+# project
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_ALT_PROJECT = "test-project_alt"
+
+_TEST_ALT_LOCATION = "europe-west4"
+_TEST_INVALID_LOCATION = "us-central2"
+
+# tensorboard
+_TEST_ID = "1028944691210842416"
+_TEST_DISPLAY_NAME = "my_tensorboard_1234"
+_TEST_DISPLAY_NAME_UPDATE = "my_tensorboard_1234_update"
+
+_TEST_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/tensorboards/{_TEST_ID}"
+)
+_TEST_ALT_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_ALT_LOCATION}/tensorboards/{_TEST_ID}"
+)
+_TEST_INVALID_NAME = f"prj/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/{_TEST_ID}"
+
+_TEST_TENSORBOARD_EXPERIMENT_ID = "test-experiment"
+_TEST_TENSORBOARD_EXPERIMENT_NAME = (
+ f"{_TEST_NAME}/experiments/{_TEST_TENSORBOARD_EXPERIMENT_ID}"
+)
+
+_TEST_TENSORBOARD_RUN_ID = "test-run"
+_TEST_TENSORBOARD_RUN_NAME = (
+ f"{_TEST_TENSORBOARD_EXPERIMENT_NAME}/runs/{_TEST_TENSORBOARD_RUN_ID}"
+)
+
+_TEST_TENSORBOARD_TIME_SERIES_ID = "test-time-series"
+_TEST_TENSORBOARD_TIME_SERIES_NAME = (
+ f"{_TEST_TENSORBOARD_RUN_NAME}/timeSeries/{_TEST_TENSORBOARD_TIME_SERIES_ID}"
+)
+
+# request_metadata
+_TEST_REQUEST_METADATA = ()
+
+# CMEK encryption
+_TEST_ENCRYPTION_KEY_NAME = "key_1234"
+_TEST_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_ENCRYPTION_KEY_NAME
+)
+
+_TEST_TIME_SERIES_DISPLAY_NAME = "accuracy"
+
+
+@pytest.fixture
+def get_tensorboard_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient, "get_tensorboard"
+ ) as get_tensorboard_mock:
+ get_tensorboard_mock.return_value = gca_tensorboard.Tensorboard(
+ name=_TEST_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ yield get_tensorboard_mock
+
+
+@pytest.fixture
+def create_tensorboard_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient, "create_tensorboard"
+ ) as create_tensorboard_mock:
+ create_tensorboard_lro_mock = mock.Mock(operation.Operation)
+ create_tensorboard_lro_mock.result.return_value = gca_tensorboard.Tensorboard(
+ name=_TEST_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ create_tensorboard_mock.return_value = create_tensorboard_lro_mock
+ yield create_tensorboard_mock
+
+
+@pytest.fixture
+def update_tensorboard_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient, "update_tensorboard"
+ ) as update_tensorboard_mock:
+ update_tensorboard_lro_mock = mock.Mock(operation.Operation)
+ update_tensorboard_lro_mock.result.return_value = gca_tensorboard.Tensorboard(
+ name=_TEST_NAME,
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ update_tensorboard_mock.return_value = update_tensorboard_lro_mock
+ yield update_tensorboard_mock
+
+
+@pytest.fixture
+def delete_tensorboard_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient, "delete_tensorboard"
+ ) as delete_tensorboard_mock:
+ delete_tensorboard_lro_mock = mock.Mock(operation.Operation)
+ delete_tensorboard_lro_mock.result.return_value = (
+ gca_tensorboard_service.DeleteTensorboardRequest(
+ name=_TEST_NAME,
+ )
+ )
+ delete_tensorboard_mock.return_value = delete_tensorboard_lro_mock
+ yield delete_tensorboard_mock
+
+
+_TEST_TENSORBOARD_EXPERIMENT = gca_tensorboard_experiment.TensorboardExperiment(
+ name=_TEST_TENSORBOARD_EXPERIMENT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+)
+
+
+@pytest.fixture
+def get_tensorboard_experiment_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "get_tensorboard_experiment",
+ ) as get_tensorboard_experiment__mock:
+ get_tensorboard_experiment__mock.return_value = _TEST_TENSORBOARD_EXPERIMENT
+ yield get_tensorboard_experiment__mock
+
+
+@pytest.fixture
+def create_tensorboard_experiment_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "create_tensorboard_experiment",
+ ) as create_tensorboard_experiment_mock:
+ create_tensorboard_experiment_mock.return_value = _TEST_TENSORBOARD_EXPERIMENT
+ yield create_tensorboard_experiment_mock
+
+
+@pytest.fixture
+def delete_tensorboard_experiment_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "delete_tensorboard_experiment",
+ ) as delete_tensorboard_experiment_mock:
+ delete_tensorboard_lro_experiment_mock = mock.Mock(operation.Operation)
+ delete_tensorboard_lro_experiment_mock.result.return_value = (
+ gca_tensorboard_service.DeleteTensorboardExperimentRequest(
+ name=_TEST_TENSORBOARD_EXPERIMENT_NAME,
+ )
+ )
+ delete_tensorboard_experiment_mock.return_value = (
+ delete_tensorboard_lro_experiment_mock
+ )
+ yield delete_tensorboard_experiment_mock
+
+
+@pytest.fixture
+def list_tensorboard_experiment_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "list_tensorboard_experiments",
+ ) as list_tensorboard_experiment_mock:
+ list_tensorboard_experiment_mock.return_value = [_TEST_TENSORBOARD_EXPERIMENT]
+ yield list_tensorboard_experiment_mock
+
+
+_TEST_TENSORBOARD_RUN = gca_tensorboard_run.TensorboardRun(
+ name=_TEST_TENSORBOARD_RUN_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+)
+
+
+@pytest.fixture
+def get_tensorboard_run_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "get_tensorboard_run",
+ ) as get_tensorboard_run_mock:
+ get_tensorboard_run_mock.return_value = _TEST_TENSORBOARD_RUN
+ yield get_tensorboard_run_mock
+
+
+@pytest.fixture
+def create_tensorboard_run_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "create_tensorboard_run",
+ ) as create_tensorboard_run_mock:
+ create_tensorboard_run_mock.return_value = _TEST_TENSORBOARD_RUN
+ yield create_tensorboard_run_mock
+
+
+@pytest.fixture
+def delete_tensorboard_run_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "delete_tensorboard_run",
+ ) as delete_tensorboard_run_mock:
+ delete_tensorboard_lro_run_mock = mock.Mock(operation.Operation)
+ delete_tensorboard_lro_run_mock.result.return_value = (
+ gca_tensorboard_service.DeleteTensorboardRunRequest(
+ name=_TEST_TENSORBOARD_RUN_NAME,
+ )
+ )
+ delete_tensorboard_run_mock.return_value = delete_tensorboard_lro_run_mock
+ yield delete_tensorboard_run_mock
+
+
+@pytest.fixture
+def list_tensorboard_run_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "list_tensorboard_runs",
+ ) as list_tensorboard_run_mock:
+ list_tensorboard_run_mock.return_value = [
+ gca_tensorboard_run.TensorboardRun(
+ name=_TEST_TENSORBOARD_RUN_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ ]
+ yield list_tensorboard_run_mock
+
+
+@pytest.fixture
+def write_tensorboard_run_data_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "write_tensorboard_run_data",
+ ) as write_tensorboard_run_data_mock:
+ yield write_tensorboard_run_data_mock
+
+
+_TEST_TENSORBOARD_TIME_SERIES = gca_tensorboard_time_series.TensorboardTimeSeries(
+ name=_TEST_TENSORBOARD_TIME_SERIES_NAME,
+ display_name=_TEST_TIME_SERIES_DISPLAY_NAME,
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+)
+
+
+@pytest.fixture
+def get_tensorboard_time_series_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "get_tensorboard_time_series",
+ ) as get_tensorboard_time_series_mock:
+ get_tensorboard_time_series_mock.return_value = _TEST_TENSORBOARD_TIME_SERIES
+ yield get_tensorboard_time_series_mock
+
+
+@pytest.fixture
+def create_tensorboard_time_series_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "create_tensorboard_time_series",
+ ) as create_tensorboard_time_series_mock:
+ create_tensorboard_time_series_mock.return_value = _TEST_TENSORBOARD_TIME_SERIES
+ yield create_tensorboard_time_series_mock
+
+
+@pytest.fixture
+def delete_tensorboard_time_series_mock():
+ with mock.patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "delete_tensorboard_time_series",
+ ) as delete_tensorboard_time_series_mock:
+ delete_tensorboard_lro_time_series_mock = mock.Mock(operation.Operation)
+ delete_tensorboard_lro_time_series_mock.result.return_value = (
+ gca_tensorboard_service.DeleteTensorboardTimeSeriesRequest(
+ name=_TEST_TENSORBOARD_TIME_SERIES_NAME,
+ )
+ )
+ delete_tensorboard_time_series_mock.return_value = (
+ delete_tensorboard_lro_time_series_mock
+ )
+ yield delete_tensorboard_time_series_mock
+
+
+@pytest.fixture
+def list_tensorboard_time_series_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "list_tensorboard_time_series",
+ ) as list_tensorboard_time_series_mock:
+ list_tensorboard_time_series_mock.return_value = [_TEST_TENSORBOARD_TIME_SERIES]
+ yield list_tensorboard_time_series_mock
+
+
+_TEST_TENSORBOARD_TIME_SERIES_DATA = gca_tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id=_TEST_TENSORBOARD_TIME_SERIES_ID,
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ gca_tensorboard_data.TimeSeriesDataPoint(
+ scalar=gca_tensorboard_data.Scalar(value=1.0),
+ step=1,
+ wall_time=utils.get_timestamp_proto(),
+ )
+ ],
+)
+
+
+@pytest.fixture
+def batch_read_tensorboard_time_series_mock():
+ with patch.object(
+ tensorboard_service_client.TensorboardServiceClient,
+ "batch_read_tensorboard_time_series_data",
+ ) as batch_read_tensorboard_time_series_data_mock:
+ batch_read_tensorboard_time_series_data_mock.return_value = (
+ gca_tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse(
+ time_series_data=[_TEST_TENSORBOARD_TIME_SERIES_DATA]
+ )
+ )
+ yield batch_read_tensorboard_time_series_data_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestTensorboard:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_tensorboard(self, get_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.Tensorboard(tensorboard_name=_TEST_NAME)
+ get_tensorboard_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_with_id_only_with_project_and_location(
+ self, get_tensorboard_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.Tensorboard(
+ tensorboard_name=_TEST_ID, project=_TEST_PROJECT, location=_TEST_LOCATION
+ )
+ get_tensorboard_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_with_project_and_location(self, get_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.Tensorboard(
+ tensorboard_name=_TEST_NAME, project=_TEST_PROJECT, location=_TEST_LOCATION
+ )
+ get_tensorboard_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_with_alt_project_and_location(self, get_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.Tensorboard(
+ tensorboard_name=_TEST_NAME,
+ project=_TEST_ALT_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ get_tensorboard_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_with_alt_location(self, get_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_ALT_LOCATION)
+ tensorboard.Tensorboard(
+ tensorboard_name=_TEST_NAME,
+ )
+ get_tensorboard_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_with_project_and_alt_location(self):
+ aiplatform.init(project=_TEST_PROJECT)
+ with pytest.raises(RuntimeError):
+ tensorboard.Tensorboard(
+ tensorboard_name=_TEST_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_ALT_LOCATION,
+ )
+
+ def test_init_tensorboard_with_location_override(self, get_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ tensorboard.Tensorboard(tensorboard_name=_TEST_ID, location=_TEST_ALT_LOCATION)
+ get_tensorboard_mock.assert_called_once_with(
+ name=_TEST_ALT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_init_tensorboard_with_invalid_name(self):
+ with pytest.raises(ValueError):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ tensorboard.Tensorboard(tensorboard_name=_TEST_INVALID_NAME)
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_create_tensorboard_with_default_encryption_key(
+ self, create_tensorboard_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+
+ tensorboard.Tensorboard.create(
+ display_name=_TEST_DISPLAY_NAME,
+ create_request_timeout=None,
+ )
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ create_tensorboard_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_create_tensorboard(self, create_tensorboard_mock):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.Tensorboard.create(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ create_request_timeout=None,
+ )
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ create_tensorboard_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_create_tensorboard_with_timeout(self, create_tensorboard_mock):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.Tensorboard.create(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ create_request_timeout=180.0,
+ )
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ create_tensorboard_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_create_tensorboard_with_timeout_not_explicitly_set(
+ self, create_tensorboard_mock
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.Tensorboard.create(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
+ )
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ display_name=_TEST_DISPLAY_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ create_tensorboard_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_create_tensorboard_is_default_true(self, create_tensorboard_mock):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.Tensorboard.create(
+ display_name=_TEST_DISPLAY_NAME,
+ is_default=True,
+ )
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ display_name=_TEST_DISPLAY_NAME, is_default=True
+ )
+
+ create_tensorboard_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_create_tensorboard_is_default_false(self, create_tensorboard_mock):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.Tensorboard.create(
+ display_name=_TEST_DISPLAY_NAME,
+ is_default=False,
+ )
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ display_name=_TEST_DISPLAY_NAME, is_default=False
+ )
+
+ create_tensorboard_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_delete_tensorboard(self, delete_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_tensorboard = tensorboard.Tensorboard(tensorboard_name=_TEST_NAME)
+
+ my_tensorboard.delete()
+
+ delete_tensorboard_mock.assert_called_once_with(
+ name=my_tensorboard.resource_name
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_update_tensorboard_display_name(self, update_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_tensorboard = tensorboard.Tensorboard(tensorboard_name=_TEST_NAME)
+ my_tensorboard.update(display_name=_TEST_DISPLAY_NAME_UPDATE)
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ name=_TEST_NAME,
+ display_name=_TEST_DISPLAY_NAME_UPDATE,
+ )
+ update_tensorboard_mock.assert_called_once_with(
+ update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_update_tensorboard_encryption_spec(self, update_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_tensorboard = tensorboard.Tensorboard(tensorboard_name=_TEST_NAME)
+ my_tensorboard.update(encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME)
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ name=_TEST_NAME,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+ update_tensorboard_mock.assert_called_once_with(
+ update_mask=field_mask_pb2.FieldMask(paths=["encryption_spec"]),
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_update_tensorboard_is_default_true(self, update_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_tensorboard = tensorboard.Tensorboard(tensorboard_name=_TEST_NAME)
+ my_tensorboard.update(is_default=True)
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ name=_TEST_NAME,
+ is_default=True,
+ )
+ update_tensorboard_mock.assert_called_once_with(
+ update_mask=field_mask_pb2.FieldMask(paths=["is_default"]),
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_mock")
+ def test_update_tensorboard_is_default_false(self, update_tensorboard_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_tensorboard = tensorboard.Tensorboard(tensorboard_name=_TEST_NAME)
+ my_tensorboard.update(is_default=False)
+
+ expected_tensorboard = gca_tensorboard.Tensorboard(
+ name=_TEST_NAME,
+ is_default=False,
+ )
+ update_tensorboard_mock.assert_called_once_with(
+ update_mask=field_mask_pb2.FieldMask(paths=["is_default"]),
+ tensorboard=expected_tensorboard,
+ metadata=_TEST_REQUEST_METADATA,
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestTensorboardExperiment:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_tensorboard_experiment(self, get_tensorboard_experiment_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.TensorboardExperiment(
+ tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_NAME
+ )
+ get_tensorboard_experiment_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_EXPERIMENT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_experiment_with_tensorboard(
+ self, get_tensorboard_experiment_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.TensorboardExperiment(
+ tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ tensorboard_id=_TEST_ID,
+ )
+ get_tensorboard_experiment_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_EXPERIMENT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_experiment_with_id_only_with_project_and_location(
+ self, get_tensorboard_experiment_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.TensorboardExperiment(
+ tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ tensorboard_id=_TEST_ID,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ get_tensorboard_experiment_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_EXPERIMENT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_create_tensorboard_experiment(
+ self, create_tensorboard_experiment_mock, get_tensorboard_experiment_mock
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.TensorboardExperiment.create(
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ tensorboard_name=_TEST_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ create_request_timeout=None,
+ )
+
+ expected_tensorboard_experiment = (
+ gca_tensorboard_experiment.TensorboardExperiment(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ )
+
+ create_tensorboard_experiment_mock.assert_called_once_with(
+ parent=_TEST_NAME,
+ tensorboard_experiment=expected_tensorboard_experiment,
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ get_tensorboard_experiment_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_EXPERIMENT_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_create_tensorboard_experiment_with_timeout(
+ self, create_tensorboard_experiment_mock, get_tensorboard_experiment_mock
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.TensorboardExperiment.create(
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ tensorboard_name=_TEST_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ create_request_timeout=180.0,
+ )
+
+ expected_tensorboard_experiment = (
+ gca_tensorboard_experiment.TensorboardExperiment(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ )
+
+ create_tensorboard_experiment_mock.assert_called_once_with(
+ parent=_TEST_NAME,
+ tensorboard_experiment=expected_tensorboard_experiment,
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=180.0,
+ )
+
+ def test_create_tensorboard_experiment_with_timeout_not_explicitly_set(
+ self, create_tensorboard_experiment_mock, get_tensorboard_experiment_mock
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.TensorboardExperiment.create(
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ tensorboard_name=_TEST_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ )
+
+ expected_tensorboard_experiment = (
+ gca_tensorboard_experiment.TensorboardExperiment(
+ display_name=_TEST_DISPLAY_NAME,
+ )
+ )
+
+ create_tensorboard_experiment_mock.assert_called_once_with(
+ parent=_TEST_NAME,
+ tensorboard_experiment=expected_tensorboard_experiment,
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_experiment_mock")
+ def test_delete_tensorboard_experiement(self, delete_tensorboard_experiment_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_tensorboard_experiment = tensorboard.TensorboardExperiment(
+ tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_NAME
+ )
+
+ my_tensorboard_experiment.delete()
+
+ delete_tensorboard_experiment_mock.assert_called_once_with(
+ name=my_tensorboard_experiment.resource_name
+ )
+
+ def test_list_tensorboard_experiments(self, list_tensorboard_experiment_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ tensorboard.TensorboardExperiment.list(tensorboard_name=_TEST_NAME)
+
+ list_tensorboard_experiment_mock.assert_called_once_with(
+ request={"parent": _TEST_NAME}
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestTensorboardRun:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures("list_tensorboard_time_series_mock")
+ def test_init_tensorboard_run(self, get_tensorboard_run_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.TensorboardRun(tensorboard_run_name=_TEST_TENSORBOARD_RUN_NAME)
+ get_tensorboard_run_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_RUN_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("list_tensorboard_time_series_mock")
+ def test_init_tensorboard_run_with_tensorboard_and_experiment(
+ self, get_tensorboard_run_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.TensorboardRun(
+ tensorboard_run_name=_TEST_TENSORBOARD_RUN_ID,
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ tensorboard_id=_TEST_ID,
+ )
+ get_tensorboard_run_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_RUN_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_run_with_id_only_with_project_and_location(
+ self, get_tensorboard_run_mock, list_tensorboard_time_series_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.TensorboardRun(
+ tensorboard_run_name=_TEST_TENSORBOARD_RUN_ID,
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ tensorboard_id=_TEST_ID,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ get_tensorboard_run_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_RUN_NAME, retry=base._DEFAULT_RETRY
+ )
+ list_tensorboard_time_series_mock.assert_called_once_with(
+ request={"parent": _TEST_TENSORBOARD_RUN_NAME}
+ )
+
+ @pytest.mark.usefixtures("list_tensorboard_time_series_mock")
+ def test_create_tensorboard_run(
+ self,
+ create_tensorboard_run_mock,
+ get_tensorboard_run_mock,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.TensorboardRun.create(
+ tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID,
+ tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_NAME,
+ create_request_timeout=None,
+ )
+
+ expected_tensorboard_run = gca_tensorboard_run.TensorboardRun(
+ display_name=_TEST_TENSORBOARD_RUN_ID,
+ )
+
+ create_tensorboard_run_mock.assert_called_once_with(
+ parent=_TEST_TENSORBOARD_EXPERIMENT_NAME,
+ tensorboard_run=expected_tensorboard_run,
+ tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ get_tensorboard_run_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_RUN_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ @pytest.mark.usefixtures("list_tensorboard_time_series_mock")
+ def test_create_tensorboard_run_with_timeout(
+ self, create_tensorboard_run_mock, get_tensorboard_run_mock
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.TensorboardRun.create(
+ tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID,
+ tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_NAME,
+ create_request_timeout=180.0,
+ )
+
+ expected_tensorboard_run = gca_tensorboard_run.TensorboardRun(
+ display_name=_TEST_TENSORBOARD_RUN_ID,
+ )
+
+ create_tensorboard_run_mock.assert_called_once_with(
+ parent=_TEST_TENSORBOARD_EXPERIMENT_NAME,
+ tensorboard_run=expected_tensorboard_run,
+ tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=180.0,
+ )
+
+ @pytest.mark.usefixtures("list_tensorboard_time_series_mock")
+ def test_create_tensorboard_run_with_timeout_not_explicitly_set(
+ self, create_tensorboard_run_mock, get_tensorboard_run_mock
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.TensorboardRun.create(
+ tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID,
+ tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_NAME,
+ )
+
+ expected_tensorboard_run = gca_tensorboard_run.TensorboardRun(
+ display_name=_TEST_TENSORBOARD_RUN_ID,
+ )
+
+ create_tensorboard_run_mock.assert_called_once_with(
+ parent=_TEST_TENSORBOARD_EXPERIMENT_NAME,
+ tensorboard_run=expected_tensorboard_run,
+ tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_tensorboard_run_mock", "list_tensorboard_time_series_mock"
+ )
+ def test_delete_tensorboard_run(self, delete_tensorboard_run_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_tensorboard_run = tensorboard.TensorboardRun(
+ tensorboard_run_name=_TEST_TENSORBOARD_RUN_NAME
+ )
+
+ my_tensorboard_run.delete()
+
+ delete_tensorboard_run_mock.assert_called_once_with(
+ name=my_tensorboard_run.resource_name
+ )
+
+ def test_list_tensorboard_runs(
+ self, list_tensorboard_run_mock, list_tensorboard_time_series_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ tensorboard.TensorboardRun.list(
+ tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_NAME
+ )
+
+ list_tensorboard_run_mock.assert_called_once_with(
+ request={"parent": _TEST_TENSORBOARD_EXPERIMENT_NAME}
+ )
+
+ list_tensorboard_time_series_mock.assert_called_once_with(
+ request={"parent": _TEST_TENSORBOARD_RUN_NAME}
+ )
+
+ @pytest.mark.usefixtures(
+ "get_tensorboard_run_mock", "list_tensorboard_time_series_mock"
+ )
+ def test_write_tensorboard_run_data(self, write_tensorboard_run_data_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ tb_run = tensorboard.TensorboardRun(
+ tensorboard_run_name=_TEST_TENSORBOARD_RUN_NAME
+ )
+
+ timestamp = utils.get_timestamp_proto()
+ tb_run.write_tensorboard_scalar_data(
+ time_series_data={"accuracy": 0.9}, step=1, wall_time=timestamp
+ )
+
+ expected_time_series_data = [
+ gca_tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id=_TEST_TENSORBOARD_TIME_SERIES_ID,
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ gca_tensorboard_data.TimeSeriesDataPoint(
+ scalar=gca_tensorboard_data.Scalar(value=0.9),
+ wall_time=timestamp,
+ step=1,
+ )
+ ],
+ ),
+ ]
+
+ write_tensorboard_run_data_mock.assert_called_once_with(
+ tensorboard_run=_TEST_TENSORBOARD_RUN_NAME,
+ time_series_data=expected_time_series_data,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_tensorboard_run_mock", "list_tensorboard_time_series_mock"
+ )
+ def test_read_tensorboard_time_series(
+ self, batch_read_tensorboard_time_series_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ tb_run = tensorboard.TensorboardRun(
+ tensorboard_run_name=_TEST_TENSORBOARD_RUN_NAME
+ )
+
+ ts_data = tb_run.read_time_series_data()
+
+ true_ts_data = {
+ _TEST_TIME_SERIES_DISPLAY_NAME: _TEST_TENSORBOARD_TIME_SERIES_DATA
+ }
+
+ batch_read_tensorboard_time_series_mock.assert_called_once_with(
+ request=gca_tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(
+ tensorboard=_TEST_NAME,
+ time_series=[_TEST_TENSORBOARD_TIME_SERIES_NAME],
+ )
+ )
+
+ assert ts_data == true_ts_data
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestTensorboardTimeSeries:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_init_tensorboard_time_series(self, get_tensorboard_time_series_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.TensorboardTimeSeries(
+ tensorboard_time_series_name=_TEST_TENSORBOARD_TIME_SERIES_NAME
+ )
+ get_tensorboard_time_series_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_TIME_SERIES_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_time_series_with_tensorboard_and_experiment_and_run(
+ self, get_tensorboard_time_series_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.TensorboardTimeSeries(
+ tensorboard_time_series_name=_TEST_TENSORBOARD_TIME_SERIES_ID,
+ tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID,
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ tensorboard_id=_TEST_ID,
+ )
+ get_tensorboard_time_series_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_TIME_SERIES_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_init_tensorboard_time_series_with_id_only_with_project_and_location(
+ self, get_tensorboard_time_series_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ tensorboard.TensorboardTimeSeries(
+ tensorboard_time_series_name=_TEST_TENSORBOARD_TIME_SERIES_ID,
+ tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID,
+ tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID,
+ tensorboard_id=_TEST_ID,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ get_tensorboard_time_series_mock.assert_called_once_with(
+ name=_TEST_TENSORBOARD_TIME_SERIES_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_create_tensorboard_time_series(
+ self,
+ create_tensorboard_time_series_mock,
+ ):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ )
+
+ tensorboard.TensorboardTimeSeries.create(
+ display_name=_TEST_TIME_SERIES_DISPLAY_NAME,
+ tensorboard_run_name=_TEST_TENSORBOARD_RUN_NAME,
+ )
+
+ expected_tensorboard_time_series = gca_tensorboard_time_series.TensorboardTimeSeries(
+ display_name=_TEST_TIME_SERIES_DISPLAY_NAME,
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ plugin_name="scalars",
+ )
+
+ create_tensorboard_time_series_mock.assert_called_once_with(
+ parent=_TEST_TENSORBOARD_RUN_NAME,
+ tensorboard_time_series=expected_tensorboard_time_series,
+ )
+
+ @pytest.mark.usefixtures("get_tensorboard_time_series_mock")
+ def test_delete_tensorboard_time_series(self, delete_tensorboard_time_series_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ my_tensorboard_time_series = tensorboard.TensorboardTimeSeries(
+ tensorboard_time_series_name=_TEST_TENSORBOARD_TIME_SERIES_NAME
+ )
+
+ my_tensorboard_time_series.delete()
+
+ delete_tensorboard_time_series_mock.assert_called_once_with(
+ name=my_tensorboard_time_series.resource_name
+ )
+
+ def test_list_tensorboard_time_series(self, list_tensorboard_time_series_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ tensorboard.TensorboardTimeSeries.list(
+ tensorboard_run_name=_TEST_TENSORBOARD_RUN_NAME
+ )
+
+ list_tensorboard_time_series_mock.assert_called_once_with(
+ request={"parent": _TEST_TENSORBOARD_RUN_NAME}
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_training_jobs.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_training_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..08c258ac378c07a7f7027ea40cc1c4b4d0619cdd
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_training_jobs.py
@@ -0,0 +1,8580 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from distutils import core
+import copy
+import os
+import functools
+import importlib
+import logging
+import pathlib
+import pytest
+import subprocess
+import shutil
+import sys
+import tarfile
+import tempfile
+import uuid
+from unittest import mock
+from unittest.mock import patch
+
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import datasets
+from google.cloud.aiplatform import explain
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import schema
+from google.cloud.aiplatform import training_jobs
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform.utils import source_utils
+from google.cloud.aiplatform.utils import worker_spec_utils
+
+from google.cloud.aiplatform.compat.services import (
+ job_service_client,
+ model_service_client,
+ pipeline_service_client,
+)
+
+from google.cloud.aiplatform.compat.types import (
+ custom_job as gca_custom_job,
+ dataset as gca_dataset,
+ encryption_spec as gca_encryption_spec,
+ env_var as gca_env_var,
+ io as gca_io,
+ job_state as gca_job_state,
+ model as gca_model,
+ pipeline_state as gca_pipeline_state,
+ training_pipeline as gca_training_pipeline,
+)
+
+from google.cloud import storage
+from google.protobuf import json_format
+from google.protobuf import struct_pb2
+from google.protobuf import duration_pb2 # type: ignore
+import constants as test_constants
+
+_TEST_BUCKET_NAME = "test-bucket"
+_TEST_GCS_PATH_WITHOUT_BUCKET = "path/to/folder"
+_TEST_GCS_PATH = f"{_TEST_BUCKET_NAME}/{_TEST_GCS_PATH_WITHOUT_BUCKET}"
+_TEST_GCS_PATH_WITH_TRAILING_SLASH = f"{_TEST_GCS_PATH}/"
+_TEST_LOCAL_SCRIPT_FILE_NAME = (
+ test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME
+)
+_TEST_TEMPDIR = tempfile.mkdtemp()
+_TEST_LOCAL_SCRIPT_FILE_PATH = os.path.join(_TEST_TEMPDIR, _TEST_LOCAL_SCRIPT_FILE_NAME)
+_TEST_PYTHON_SOURCE = """
+print('hello world')
+"""
+_TEST_REQUIREMENTS = test_constants.TrainingJobConstants._TEST_REQUIREMENTS
+
+_TEST_DATASET_DISPLAY_NAME = "test-dataset-display-name"
+_TEST_DATASET_NAME = "test-dataset-name"
+_TEST_DISPLAY_NAME = "test-display-name"
+_TEST_METADATA_SCHEMA_URI_TABULAR = schema.dataset.metadata.tabular
+_TEST_TRAINING_CONTAINER_IMAGE = (
+ test_constants.TrainingJobConstants._TEST_TRAINING_CONTAINER_IMAGE
+)
+_TEST_TRAINING_CONTAINER_CMD = ["python3", "task.py"]
+_TEST_SERVING_CONTAINER_IMAGE = (
+ test_constants.TrainingJobConstants._TEST_TRAINING_CONTAINER_IMAGE
+)
+_TEST_SERVING_CONTAINER_PREDICTION_ROUTE = (
+ test_constants.TrainingJobConstants._TEST_SERVING_CONTAINER_PREDICTION_ROUTE
+)
+_TEST_SERVING_CONTAINER_HEALTH_ROUTE = (
+ test_constants.TrainingJobConstants._TEST_SERVING_CONTAINER_HEALTH_ROUTE
+)
+_TEST_MODULE_NAME = test_constants.TrainingJobConstants._TEST_MODULE_NAME
+
+_TEST_METADATA_SCHEMA_URI_NONTABULAR = schema.dataset.metadata.image
+_TEST_ANNOTATION_SCHEMA_URI = schema.dataset.annotation.image.classification
+
+_TEST_BASE_OUTPUT_DIR = "gs://test-base-output-dir"
+_TEST_SERVICE_ACCOUNT = test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT
+_TEST_BIGQUERY_DESTINATION = "bq://my-project"
+_TEST_RUN_ARGS = test_constants.TrainingJobConstants._TEST_RUN_ARGS
+_TEST_REPLICA_COUNT = test_constants.TrainingJobConstants._TEST_REPLICA_COUNT
+_TEST_MACHINE_TYPE = test_constants.TrainingJobConstants._TEST_MACHINE_TYPE
+_TEST_MACHINE_TYPE_TPU = test_constants.TrainingJobConstants._TEST_MACHINE_TYPE_TPU
+_TEST_MACHINE_TYPE_TPU_V5E = (
+ test_constants.TrainingJobConstants._TEST_MACHINE_TYPE_TPU_V5E
+)
+_TEST_REDUCTION_SERVER_REPLICA_COUNT = (
+ test_constants.TrainingJobConstants._TEST_REDUCTION_SERVER_REPLICA_COUNT
+)
+_TEST_REDUCTION_SERVER_MACHINE_TYPE = (
+ test_constants.TrainingJobConstants._TEST_REDUCTION_SERVER_MACHINE_TYPE
+)
+_TEST_REDUCTION_SERVER_CONTAINER_URI = (
+ test_constants.TrainingJobConstants._TEST_REDUCTION_SERVER_CONTAINER_URI
+)
+_TEST_ACCELERATOR_TPU_TYPE = (
+ test_constants.TrainingJobConstants._TEST_ACCELERATOR_TPU_TYPE
+)
+_TEST_ACCELERATOR_TYPE = test_constants.TrainingJobConstants._TEST_ACCELERATOR_TYPE
+_TEST_INVALID_ACCELERATOR_TYPE = "NVIDIA_DOES_NOT_EXIST"
+_TEST_ACCELERATOR_COUNT = test_constants.TrainingJobConstants._TEST_ACCELERATOR_COUNT
+_TEST_BOOT_DISK_TYPE_DEFAULT = (
+ test_constants.TrainingJobConstants._TEST_BOOT_DISK_TYPE_DEFAULT
+)
+_TEST_BOOT_DISK_SIZE_GB_DEFAULT = (
+ test_constants.TrainingJobConstants._TEST_BOOT_DISK_SIZE_GB_DEFAULT
+)
+_TEST_BOOT_DISK_TYPE = test_constants.TrainingJobConstants._TEST_BOOT_DISK_TYPE
+_TEST_BOOT_DISK_SIZE_GB = test_constants.TrainingJobConstants._TEST_BOOT_DISK_SIZE_GB
+_TEST_MODEL_DISPLAY_NAME = test_constants.TrainingJobConstants._TEST_MODEL_DISPLAY_NAME
+_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS
+_TEST_MODEL_LABELS = test_constants.TrainingJobConstants._TEST_MODEL_LABELS
+
+_TEST_TRAINING_FRACTION_SPLIT = (
+ test_constants.TrainingJobConstants._TEST_TRAINING_FRACTION_SPLIT
+)
+_TEST_VALIDATION_FRACTION_SPLIT = (
+ test_constants.TrainingJobConstants._TEST_VALIDATION_FRACTION_SPLIT
+)
+_TEST_TEST_FRACTION_SPLIT = (
+ test_constants.TrainingJobConstants._TEST_TEST_FRACTION_SPLIT
+)
+_TEST_TRAINING_FILTER_SPLIT = "train"
+_TEST_VALIDATION_FILTER_SPLIT = "validate"
+_TEST_TEST_FILTER_SPLIT = "test"
+_TEST_PREDEFINED_SPLIT_COLUMN_NAME = "split"
+_TEST_TIMESTAMP_SPLIT_COLUMN_NAME = "timestamp"
+
+_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT
+_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION
+_TEST_ID = test_constants.TrainingJobConstants._TEST_ID
+_TEST_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipelines/{_TEST_ID}"
+)
+_TEST_TENSORBOARD_RESOURCE_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/tensorboards/{_TEST_ID}"
+)
+_TEST_CUSTOM_JOB_RESOURCE_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/customJobs/{_TEST_ID}"
+)
+_TEST_MODEL_VERSION_DESCRIPTION = "My version description"
+_TEST_MODEL_VERSION_ID = "2"
+_TEST_ALT_PROJECT = "test-project-alt"
+_TEST_ALT_LOCATION = "europe-west4"
+_TEST_NETWORK = test_constants.TrainingJobConstants._TEST_NETWORK
+
+_TEST_MODEL_INSTANCE_SCHEMA_URI = "instance_schema_uri.yaml"
+_TEST_MODEL_PARAMETERS_SCHEMA_URI = "parameters_schema_uri.yaml"
+_TEST_MODEL_PREDICTION_SCHEMA_URI = "prediction_schema_uri.yaml"
+_TEST_MODEL_SERVING_CONTAINER_COMMAND = ["test_command"]
+_TEST_MODEL_SERVING_CONTAINER_ARGS = ["test_args"]
+_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES = {
+ "learning_rate": 0.01,
+ "loss_fn": "mse",
+}
+_TEST_ENVIRONMENT_VARIABLES = (
+ test_constants.TrainingJobConstants._TEST_ENVIRONMENT_VARIABLES
+)
+_TEST_MODEL_SERVING_CONTAINER_PORTS = [8888, 10000]
+_TEST_MODEL_DESCRIPTION = "test description"
+
+_TEST_OUTPUT_PYTHON_PACKAGE_PATH = (
+ test_constants.TrainingJobConstants._TEST_OUTPUT_PYTHON_PACKAGE_PATH
+)
+_TEST_PACKAGE_GCS_URIS = [_TEST_OUTPUT_PYTHON_PACKAGE_PATH] * 2
+_TEST_PYTHON_MODULE_NAME = "aiplatform.task"
+
+_TEST_MODEL_NAME = f"projects/{_TEST_PROJECT}/locations/us-central1/models/{_TEST_ID}"
+
+_TEST_PIPELINE_RESOURCE_NAME = (
+ f"projects/{_TEST_PROJECT}/locations/us-central1/trainingPipelines/{_TEST_ID}"
+)
+_TEST_CREDENTIALS = test_constants.TrainingJobConstants._TEST_CREDENTIALS
+
+
+# Explanation Spec
+_TEST_EXPLANATION_METADATA = explain.ExplanationMetadata(
+ inputs={
+ "features": {
+ "input_tensor_name": "dense_input",
+ "encoding": "BAG_OF_FEATURES",
+ "modality": "numeric",
+ "index_feature_mapping": ["abc", "def", "ghj"],
+ }
+ },
+ outputs={"medv": {"output_tensor_name": "dense_2"}},
+)
+_TEST_EXPLANATION_PARAMETERS = explain.ExplanationParameters(
+ {"sampled_shapley_attribution": {"path_count": 10}}
+)
+
+# CMEK encryption
+_TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_default"
+_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
+)
+
+_TEST_PIPELINE_ENCRYPTION_KEY_NAME = "key_pipeline"
+_TEST_PIPELINE_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME
+)
+
+_TEST_MODEL_ENCRYPTION_KEY_NAME = "key_model"
+_TEST_MODEL_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
+ kms_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME
+)
+
+_TEST_TIMEOUT = test_constants.TrainingJobConstants._TEST_TIMEOUT
+_TEST_RESTART_JOB_ON_WORKER_RESTART = (
+ test_constants.TrainingJobConstants._TEST_RESTART_JOB_ON_WORKER_RESTART
+)
+
+_TEST_DISABLE_RETRIES = test_constants.TrainingJobConstants._TEST_DISABLE_RETRIES
+_TEST_MAX_WAIT_DURATION = test_constants.TrainingJobConstants._TEST_MAX_WAIT_DURATION
+_TEST_ENABLE_WEB_ACCESS = test_constants.TrainingJobConstants._TEST_ENABLE_WEB_ACCESS
+_TEST_ENABLE_DASHBOARD_ACCESS = True
+_TEST_WEB_ACCESS_URIS = test_constants.TrainingJobConstants._TEST_WEB_ACCESS_URIS
+_TEST_DASHBOARD_ACCESS_URIS = {"workerpool0-0:8888": "uri"}
+_TEST_PERSISTENT_RESOURCE_ID = (
+ test_constants.PersistentResourceConstants._TEST_PERSISTENT_RESOURCE_ID
+)
+_TEST_SPOT_STRATEGY = test_constants.TrainingJobConstants._TEST_SPOT_STRATEGY
+
+_TEST_BASE_CUSTOM_JOB_PROTO = gca_custom_job.CustomJob(
+ job_spec=gca_custom_job.CustomJobSpec(),
+)
+
+
+def _get_custom_job_proto_with_enable_web_access(state=None, name=None, version="v1"):
+ custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO)
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+
+ custom_job_proto.job_spec.enable_web_access = _TEST_ENABLE_WEB_ACCESS
+ if state == gca_job_state.JobState.JOB_STATE_RUNNING:
+ custom_job_proto.web_access_uris = _TEST_WEB_ACCESS_URIS
+ return custom_job_proto
+
+
+def _get_custom_job_proto_with_enable_dashboard_access(
+ state=None, name=None, version="v1"
+):
+ custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO)
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+
+ custom_job_proto.job_spec.enable_dashboard_access = _TEST_ENABLE_DASHBOARD_ACCESS
+ if state == gca_job_state.JobState.JOB_STATE_RUNNING:
+ custom_job_proto.web_access_uris = _TEST_DASHBOARD_ACCESS_URIS
+ return custom_job_proto
+
+
+def _get_custom_job_proto_with_persistent_resource_id(
+ state=None, name=None, version="v1"
+):
+ custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO)
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+ custom_job_proto.job_spec.persistent_resource_id = _TEST_PERSISTENT_RESOURCE_ID
+
+ return custom_job_proto
+
+
+def _get_custom_job_proto_with_scheduling(state=None, name=None, version="v1"):
+ custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO)
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+
+ custom_job_proto.job_spec.scheduling.timeout = duration_pb2.Duration(
+ seconds=_TEST_TIMEOUT
+ )
+ custom_job_proto.job_spec.scheduling.restart_job_on_worker_restart = (
+ _TEST_RESTART_JOB_ON_WORKER_RESTART
+ )
+ custom_job_proto.job_spec.scheduling.disable_retries = _TEST_DISABLE_RETRIES
+ custom_job_proto.job_spec.scheduling.max_wait_duration = duration_pb2.Duration(
+ seconds=_TEST_MAX_WAIT_DURATION
+ )
+
+ return custom_job_proto
+
+
+def _get_custom_job_proto_with_spot_strategy(state=None, name=None, version="v1"):
+ custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO)
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+
+ custom_job_proto.job_spec.scheduling.strategy = _TEST_SPOT_STRATEGY
+ return custom_job_proto
+
+
+def local_copy_method(path):
+ shutil.copy(path, ".")
+ return pathlib.Path(path).name
+
+
+@pytest.fixture
+def get_training_job_custom_mock():
+ with patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as get_training_job_custom_mock:
+ get_training_job_custom_mock.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ training_task_definition=schema.training_job.definition.custom_task,
+ )
+ )
+
+ yield get_training_job_custom_mock
+
+
+@pytest.fixture
+def get_training_job_custom_mock_no_model_to_upload():
+ with patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as get_training_job_custom_mock:
+ get_training_job_custom_mock.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=None,
+ training_task_definition=schema.training_job.definition.custom_task,
+ )
+ )
+
+ yield get_training_job_custom_mock
+
+
+@pytest.fixture
+def get_training_job_tabular_mock():
+ with patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as get_training_job_tabular_mock:
+ get_training_job_tabular_mock.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ training_task_definition=schema.training_job.definition.automl_tabular,
+ )
+ )
+
+ yield get_training_job_tabular_mock
+
+
+@pytest.fixture
+def mock_client_bucket():
+ with patch.object(storage.Client, "bucket") as mock_client_bucket:
+
+ def blob_side_effect(name, mock_blob, bucket):
+ mock_blob.name = name
+ mock_blob.bucket = bucket
+ return mock_blob
+
+ MockBucket = mock.Mock(autospec=storage.Bucket)
+ MockBucket.name = _TEST_BUCKET_NAME
+ MockBlob = mock.Mock(autospec=storage.Blob)
+ MockBucket.blob.side_effect = functools.partial(
+ blob_side_effect, mock_blob=MockBlob, bucket=MockBucket
+ )
+ mock_client_bucket.return_value = MockBucket
+
+ yield mock_client_bucket, MockBlob
+
+
+@pytest.fixture
+def mock_get_backing_custom_job_with_enable_web_access():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_PENDING,
+ ),
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ _get_custom_job_proto_with_enable_web_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def mock_get_backing_custom_job_with_enable_dashboard_access():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto_with_enable_dashboard_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_PENDING,
+ ),
+ _get_custom_job_proto_with_enable_dashboard_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_enable_dashboard_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_enable_dashboard_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_enable_dashboard_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ _get_custom_job_proto_with_enable_dashboard_access(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def mock_get_backing_custom_job_with_persistent_resource_id():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto_with_persistent_resource_id(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_PENDING,
+ ),
+ _get_custom_job_proto_with_persistent_resource_id(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_persistent_resource_id(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_persistent_resource_id(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_RUNNING,
+ ),
+ _get_custom_job_proto_with_persistent_resource_id(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ _get_custom_job_proto_with_persistent_resource_id(
+ name=_TEST_CUSTOM_JOB_RESOURCE_NAME,
+ state=gca_job_state.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.mark.skipif(
+ sys.executable is None, reason="requires python path to invoke subprocess"
+)
+@pytest.mark.usefixtures("google_auth_mock")
+class TestTrainingScriptPythonPackagerHelpers:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ def test_timestamp_copy_to_gcs_calls_gcs_client_with_bucket(
+ self, mock_client_bucket
+ ):
+
+ mock_client_bucket, mock_blob = mock_client_bucket
+
+ gcs_path = utils._timestamped_copy_to_gcs(
+ local_file_path=_TEST_LOCAL_SCRIPT_FILE_PATH,
+ gcs_dir=_TEST_BUCKET_NAME,
+ project=_TEST_PROJECT,
+ )
+
+ local_script_file_name = pathlib.Path(_TEST_LOCAL_SCRIPT_FILE_PATH).name
+
+ mock_client_bucket.assert_called_once_with(_TEST_BUCKET_NAME)
+ mock_client_bucket.return_value.blob.assert_called_once()
+
+ blob_arg = mock_client_bucket.return_value.blob.call_args[0][0]
+ assert blob_arg.startswith("aiplatform-")
+ assert blob_arg.endswith(_TEST_LOCAL_SCRIPT_FILE_NAME)
+
+ mock_blob.upload_from_filename.assert_called_once_with(
+ _TEST_LOCAL_SCRIPT_FILE_PATH
+ )
+ assert gcs_path.endswith(local_script_file_name)
+ assert gcs_path.startswith(f"gs://{_TEST_BUCKET_NAME}/aiplatform-")
+
+ def test_timestamp_copy_to_gcs_calls_gcs_client_with_gcs_path(
+ self, mock_client_bucket
+ ):
+
+ mock_client_bucket, mock_blob = mock_client_bucket
+
+ gcs_path = utils._timestamped_copy_to_gcs(
+ local_file_path=_TEST_LOCAL_SCRIPT_FILE_PATH,
+ gcs_dir=_TEST_GCS_PATH_WITH_TRAILING_SLASH,
+ project=_TEST_PROJECT,
+ )
+
+ local_script_file_name = pathlib.Path(_TEST_LOCAL_SCRIPT_FILE_PATH).name
+
+ mock_client_bucket.assert_called_once_with(_TEST_BUCKET_NAME)
+ mock_client_bucket.return_value.blob.assert_called_once()
+
+ blob_arg = mock_client_bucket.return_value.blob.call_args[0][0]
+ assert blob_arg.startswith(f"{_TEST_GCS_PATH_WITHOUT_BUCKET}/aiplatform-")
+ assert blob_arg.endswith(f"{_TEST_LOCAL_SCRIPT_FILE_NAME}")
+
+ mock_blob.upload_from_filename.assert_called_once_with(
+ _TEST_LOCAL_SCRIPT_FILE_PATH
+ )
+
+ assert gcs_path.startswith(f"gs://{_TEST_GCS_PATH}/aiplatform-")
+ assert gcs_path.endswith(local_script_file_name)
+
+ def test_timestamp_copy_to_gcs_calls_gcs_client_with_trailing_slash(
+ self, mock_client_bucket
+ ):
+
+ mock_client_bucket, mock_blob = mock_client_bucket
+
+ gcs_path = utils._timestamped_copy_to_gcs(
+ local_file_path=_TEST_LOCAL_SCRIPT_FILE_PATH,
+ gcs_dir=_TEST_GCS_PATH,
+ project=_TEST_PROJECT,
+ )
+
+ local_script_file_name = pathlib.Path(_TEST_LOCAL_SCRIPT_FILE_PATH).name
+
+ mock_client_bucket.assert_called_once_with(_TEST_BUCKET_NAME)
+ mock_client_bucket.return_value.blob.assert_called_once()
+
+ blob_arg = mock_client_bucket.return_value.blob.call_args[0][0]
+ assert blob_arg.startswith(f"{_TEST_GCS_PATH_WITHOUT_BUCKET}/aiplatform-")
+ assert blob_arg.endswith(_TEST_LOCAL_SCRIPT_FILE_NAME)
+
+ mock_blob.upload_from_filename.assert_called_once_with(
+ _TEST_LOCAL_SCRIPT_FILE_PATH
+ )
+
+ assert gcs_path.startswith(f"gs://{_TEST_GCS_PATH}/aiplatform-")
+ assert gcs_path.endswith(local_script_file_name)
+
+ def test_timestamp_copy_to_gcs_calls_gcs_client(self, mock_client_bucket):
+
+ mock_client_bucket, mock_blob = mock_client_bucket
+
+ gcs_path = utils._timestamped_copy_to_gcs(
+ local_file_path=_TEST_LOCAL_SCRIPT_FILE_PATH,
+ gcs_dir=_TEST_BUCKET_NAME,
+ project=_TEST_PROJECT,
+ )
+
+ mock_client_bucket.assert_called_once_with(_TEST_BUCKET_NAME)
+ mock_client_bucket.return_value.blob.assert_called_once()
+ mock_blob.upload_from_filename.assert_called_once_with(
+ _TEST_LOCAL_SCRIPT_FILE_PATH
+ )
+ assert gcs_path.endswith(pathlib.Path(_TEST_LOCAL_SCRIPT_FILE_PATH).name)
+ assert gcs_path.startswith(f"gs://{_TEST_BUCKET_NAME}")
+
+ def test_get_python_executable_raises_if_None(self):
+ with patch.object(sys, "executable", new=None):
+ with pytest.raises(EnvironmentError):
+ source_utils._get_python_executable()
+
+ def test_get_python_executable_returns_python_executable(self):
+ assert "python" in source_utils._get_python_executable().lower()
+
+
+@pytest.mark.skipif(
+ sys.executable is None, reason="requires python path to invoke subprocess"
+)
+@pytest.mark.usefixtures("google_auth_mock")
+class TestTrainingScriptPythonPackager:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+ with open(_TEST_LOCAL_SCRIPT_FILE_PATH, "w") as fp:
+ fp.write(_TEST_PYTHON_SOURCE)
+
+ def teardown_method(self):
+ pathlib.Path(_TEST_LOCAL_SCRIPT_FILE_PATH).unlink()
+ python_package_file = f"{source_utils._TrainingScriptPythonPackager._ROOT_MODULE}-{source_utils._TrainingScriptPythonPackager._SETUP_PY_VERSION}.tar.gz"
+ if pathlib.Path(python_package_file).is_file():
+ pathlib.Path(python_package_file).unlink()
+ subprocess.check_output(
+ [
+ "pip3",
+ "uninstall",
+ "-y",
+ source_utils._TrainingScriptPythonPackager._ROOT_MODULE,
+ ]
+ )
+
+ def test_packager_creates_and_copies_python_package(self):
+ tsp = source_utils._TrainingScriptPythonPackager(_TEST_LOCAL_SCRIPT_FILE_PATH)
+ tsp.package_and_copy(copy_method=local_copy_method)
+ assert pathlib.Path(
+ f"{tsp._ROOT_MODULE}-{tsp._SETUP_PY_VERSION}.tar.gz"
+ ).is_file()
+
+ def test_created_package_module_is_installable_and_can_be_run(self):
+ tsp = source_utils._TrainingScriptPythonPackager(_TEST_LOCAL_SCRIPT_FILE_PATH)
+ source_dist_path = tsp.package_and_copy(copy_method=local_copy_method)
+ subprocess.check_output(["pip3", "install", source_dist_path])
+ module_output = subprocess.check_output(
+ [source_utils._get_python_executable(), "-m", tsp.module_name]
+ )
+ assert "hello world" in module_output.decode()
+
+ def test_requirements_are_in_package(self):
+ tsp = source_utils._TrainingScriptPythonPackager(
+ _TEST_LOCAL_SCRIPT_FILE_PATH, requirements=_TEST_REQUIREMENTS
+ )
+ source_dist_path = tsp.package_and_copy(copy_method=local_copy_method)
+ with tarfile.open(source_dist_path) as tf:
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ setup_py_path = f"{source_utils._TrainingScriptPythonPackager._ROOT_MODULE}-{source_utils._TrainingScriptPythonPackager._SETUP_PY_VERSION}/setup.py"
+ tf.extract(setup_py_path, path=tmpdirname)
+ setup_py = core.run_setup(
+ pathlib.Path(tmpdirname, setup_py_path), stop_after="init"
+ )
+ assert _TEST_REQUIREMENTS == setup_py.install_requires
+
+ def test_packaging_fails_whith_RuntimeError(self):
+ with patch("subprocess.Popen") as mock_popen:
+ mock_subprocess = mock.Mock()
+ mock_subprocess.communicate.return_value = (b"", b"")
+ mock_subprocess.returncode = 1
+ mock_popen.return_value = mock_subprocess
+ tsp = source_utils._TrainingScriptPythonPackager(
+ _TEST_LOCAL_SCRIPT_FILE_PATH
+ )
+ with pytest.raises(RuntimeError):
+ tsp.package_and_copy(copy_method=local_copy_method)
+
+ def test_package_and_copy_to_gcs_copies_to_gcs(self, mock_client_bucket):
+ mock_client_bucket, mock_blob = mock_client_bucket
+
+ tsp = source_utils._TrainingScriptPythonPackager(_TEST_LOCAL_SCRIPT_FILE_PATH)
+
+ gcs_path = tsp.package_and_copy_to_gcs(
+ gcs_staging_dir=_TEST_BUCKET_NAME, project=_TEST_PROJECT
+ )
+
+ mock_client_bucket.assert_called_once_with(_TEST_BUCKET_NAME)
+ mock_client_bucket.return_value.blob.assert_called_once()
+
+ mock_blob.upload_from_filename.call_args[0][0].endswith(
+ "/trainer/dist/aiplatform_custom_trainer_script-0.1.tar.gz"
+ )
+
+ assert gcs_path.endswith("-aiplatform_custom_trainer_script-0.1.tar.gz")
+ assert gcs_path.startswith(f"gs://{_TEST_BUCKET_NAME}")
+
+
+@pytest.fixture
+def mock_pipeline_service_create():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_with_version():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ model_to_upload=gca_model.Model(
+ name=_TEST_MODEL_NAME, version_id=_TEST_MODEL_VERSION_ID
+ ),
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+def make_training_pipeline(state, add_training_task_metadata=True):
+ return gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
+ training_task_inputs={"tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME},
+ training_task_metadata=(
+ {"backingCustomJob": _TEST_CUSTOM_JOB_RESOURCE_NAME}
+ if add_training_task_metadata
+ else None
+ ),
+ )
+
+
+def make_training_pipeline_with_version(state, add_training_task_metadata=True):
+ return gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ model_to_upload=gca_model.Model(
+ name=_TEST_MODEL_NAME, version_id=_TEST_MODEL_VERSION_ID
+ ),
+ training_task_inputs={"tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME},
+ training_task_metadata=(
+ {"backingCustomJob": _TEST_CUSTOM_JOB_RESOURCE_NAME}
+ if add_training_task_metadata
+ else None
+ ),
+ )
+
+
+def make_training_pipeline_with_no_model_upload(state):
+ return gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ )
+
+
+def make_training_pipeline_with_enable_web_access(state):
+ training_pipeline = gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ training_task_inputs={"enable_web_access": _TEST_ENABLE_WEB_ACCESS},
+ )
+ if state == gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING:
+ training_pipeline.training_task_metadata = {
+ "backingCustomJob": _TEST_CUSTOM_JOB_RESOURCE_NAME
+ }
+ return training_pipeline
+
+
+def make_training_pipeline_with_enable_dashboard_access(state):
+ training_pipeline = gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ training_task_inputs={"enable_dashboard_access": _TEST_ENABLE_DASHBOARD_ACCESS},
+ )
+ if state == gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING:
+ training_pipeline.training_task_metadata = {
+ "backingCustomJob": _TEST_CUSTOM_JOB_RESOURCE_NAME
+ }
+ return training_pipeline
+
+
+def make_training_pipeline_with_persistent_resource_id(state):
+ training_pipeline = gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ training_task_inputs={"persistent_resource_id": _TEST_PERSISTENT_RESOURCE_ID},
+ )
+ if state == gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING:
+ training_pipeline.training_task_metadata = {
+ "backingCustomJob": _TEST_CUSTOM_JOB_RESOURCE_NAME
+ }
+ return training_pipeline
+
+
+def make_training_pipeline_with_scheduling(state):
+ training_pipeline = gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ training_task_inputs={
+ "timeout": f"{_TEST_TIMEOUT}s",
+ "restart_job_on_worker_restart": _TEST_RESTART_JOB_ON_WORKER_RESTART,
+ "disable_retries": _TEST_DISABLE_RETRIES,
+ "max_wait_duration": f"{_TEST_MAX_WAIT_DURATION}s",
+ },
+ )
+ if state == gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING:
+ training_pipeline.training_task_metadata = {
+ "backingCustomJob": _TEST_CUSTOM_JOB_RESOURCE_NAME
+ }
+ return training_pipeline
+
+
+def make_training_pipeline_with_spot_strategy(state):
+ training_pipeline = gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ training_task_inputs={
+ "scheduling_strategy": _TEST_SPOT_STRATEGY,
+ },
+ )
+ if state == gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING:
+ training_pipeline.training_task_metadata = {
+ "backingCustomJob": _TEST_CUSTOM_JOB_RESOURCE_NAME
+ }
+ return training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get(make_call=make_training_pipeline):
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.side_effect = [
+ make_call(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ add_training_task_metadata=False,
+ ),
+ make_call(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_call(gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ make_call(gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED),
+ ]
+
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get_with_enable_web_access():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.side_effect = [
+ make_training_pipeline_with_enable_web_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ ),
+ make_training_pipeline_with_enable_web_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_enable_web_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_enable_web_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_enable_web_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_training_pipeline_with_enable_web_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ ]
+
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get_with_enable_dashboard_access():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.side_effect = [
+ make_training_pipeline_with_enable_dashboard_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ ),
+ make_training_pipeline_with_enable_dashboard_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_enable_dashboard_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_enable_dashboard_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_enable_dashboard_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_training_pipeline_with_enable_dashboard_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ ]
+
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get_with_persistent_resource_id():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.side_effect = [
+ make_training_pipeline_with_persistent_resource_id(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ ),
+ make_training_pipeline_with_persistent_resource_id(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_persistent_resource_id(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_persistent_resource_id(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_persistent_resource_id(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_training_pipeline_with_persistent_resource_id(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ ]
+
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get_with_scheduling():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.side_effect = [
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ ]
+
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get_with_spot_strategy():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.side_effect = [
+ make_training_pipeline_with_spot_strategy(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ ),
+ make_training_pipeline_with_spot_strategy(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_spot_strategy(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_spot_strategy(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_spot_strategy(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_training_pipeline_with_spot_strategy(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ ]
+
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_cancel():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "cancel_training_pipeline"
+ ) as mock_cancel_training_pipeline:
+ yield mock_cancel_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_with_no_model_to_upload():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_with_enable_web_access():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ make_training_pipeline_with_enable_web_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_with_enable_dashboard_access():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ make_training_pipeline_with_enable_dashboard_access(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_with_persistent_resource_id():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ make_training_pipeline_with_persistent_resource_id(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_with_scheduling():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_with_spot_strategy():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ make_training_pipeline_with_spot_strategy(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ )
+ )
+ yield mock_create_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_get_with_no_model_to_upload():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ )
+ )
+ yield mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_pipeline_service_create_and_get_with_fail():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ )
+ )
+
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.return_value = (
+ gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ )
+ )
+
+ yield mock_create_training_pipeline, mock_get_training_pipeline
+
+
+@pytest.fixture
+def mock_model_service_get():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as mock_get_model:
+ mock_get_model.return_value = gca_model.Model(name=_TEST_MODEL_NAME)
+ mock_get_model.return_value.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ mock_get_model.return_value.version_id = "1"
+ yield mock_get_model
+
+
+@pytest.fixture
+def mock_model_service_get_with_version():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as mock_get_model:
+ mock_get_model.return_value = gca_model.Model(
+ name=_TEST_MODEL_NAME, version_id=_TEST_MODEL_VERSION_ID
+ )
+ mock_get_model.return_value.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ yield mock_get_model
+
+
+@pytest.fixture
+def mock_python_package_to_gcs():
+ with mock.patch.object(
+ source_utils._TrainingScriptPythonPackager, "package_and_copy_to_gcs"
+ ) as mock_package_to_copy_gcs:
+ mock_package_to_copy_gcs.return_value = _TEST_OUTPUT_PYTHON_PACKAGE_PATH
+ yield mock_package_to_copy_gcs
+
+
+@pytest.fixture
+def mock_tabular_dataset():
+ ds = mock.MagicMock(datasets.TabularDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds.metadata_schema_uri = _TEST_METADATA_SCHEMA_URI_TABULAR
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ return ds
+
+
+@pytest.fixture
+def mock_nontabular_dataset():
+ ds = mock.MagicMock(datasets.ImageDataset)
+ ds.name = _TEST_DATASET_NAME
+ ds.metadata_schema_uri = _TEST_METADATA_SCHEMA_URI_NONTABULAR
+ ds._latest_future = None
+ ds._exception = None
+ ds._gca_resource = gca_dataset.Dataset(
+ display_name=_TEST_DATASET_DISPLAY_NAME,
+ metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR,
+ labels={},
+ name=_TEST_DATASET_NAME,
+ metadata={},
+ )
+ return ds
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestCustomTrainingJob:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+ self._local_script_file_name = os.path.join(
+ _TEST_TEMPDIR, f"{uuid.uuid4()}-{_TEST_LOCAL_SCRIPT_FILE_NAME}"
+ )
+ with open(self._local_script_file_name, "w") as fp:
+ fp.write(_TEST_PYTHON_SOURCE)
+
+ def teardown_method(self):
+ pathlib.Path(self._local_script_file_name).unlink()
+ initializer.global_pool.shutdown(wait=True)
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tabular_dataset(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ mock_python_package_to_gcs.assert_called_once_with(
+ gcs_staging_dir=_TEST_BUCKET_NAME,
+ project=_TEST_PROJECT,
+ credentials=initializer.global_config.credentials,
+ )
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ true_timestamp_split = gca_training_pipeline.TimestampSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ key=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ explanation_spec=gca_model.explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ timestamp_split=true_timestamp_split,
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ "tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ assert job._has_logged_custom_job
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ def test_custom_training_job_run_raises_with_impartial_explanation_spec(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ # Missing the required explanations_parameters field
+ )
+
+ with pytest.raises(ValueError) as e:
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ sync=False,
+ create_request_timeout=None,
+ )
+
+ assert e.match(
+ regexp=r"To get model explanation, `explanation_parameters` "
+ "must be specified."
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ def test_custom_training_tabular_done(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ sync=False,
+ create_request_timeout=None,
+ )
+
+ assert job.done() is False
+
+ job.wait()
+
+ assert job.done() is True
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tabular_dataset_and_timeout(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ true_timestamp_split = gca_training_pipeline.TimestampSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ key=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ timestamp_split=true_timestamp_split,
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ "tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=180.0,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tabular_dataset_and_timeout_not_explicitly_set(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ sync=sync,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ true_timestamp_split = gca_training_pipeline.TimestampSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ key=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ timestamp_split=true_timestamp_split,
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ "tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_bigquery_destination(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
+ model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ bigquery_destination=_TEST_BIGQUERY_DESTINATION,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ predefined_split=gca_training_pipeline.PredefinedSplit(
+ key=_TEST_PREDEFINED_SPLIT_COLUMN_NAME
+ ),
+ dataset_id=mock_tabular_dataset.name,
+ bigquery_destination=gca_io.BigQueryDestination(
+ output_uri=_TEST_BIGQUERY_DESTINATION
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_called_twice_raises(
+ self,
+ mock_tabular_dataset,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_invalid_accelerator_type_raises(
+ self,
+ mock_pipeline_service_create,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ with pytest.raises(ValueError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_two_splits_raises(
+ self,
+ mock_pipeline_service_create,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ with pytest.raises(ValueError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_incomplete_model_info_raises_with_model_to_upload(
+ self,
+ mock_pipeline_service_create,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_no_dataset(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ model_from_job = job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ training_filter_split=_TEST_TRAINING_FILTER_SPLIT,
+ validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT,
+ test_filter_split=_TEST_TEST_FILTER_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ mock_python_package_to_gcs.assert_called_once_with(
+ gcs_staging_dir=_TEST_BUCKET_NAME,
+ project=_TEST_PROJECT,
+ credentials=initializer.global_config.credentials,
+ )
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ container_spec=true_container_spec,
+ version_aliases=["default"],
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_enable_web_access",
+ "mock_pipeline_service_get_with_enable_web_access",
+ "mock_get_backing_custom_job_with_enable_web_access",
+ "mock_python_package_to_gcs",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_enable_web_access(
+ self, sync, caplog
+ ):
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ enable_web_access=_TEST_ENABLE_WEB_ACCESS,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ print(caplog.text)
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert "workerpool0-0" in caplog.text
+ assert job._gca_resource == make_training_pipeline_with_enable_web_access(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ # TODO: Update test to address Mutant issue b/270708320
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_enable_dashboard_access",
+ "mock_pipeline_service_get_with_enable_dashboard_access",
+ "mock_get_backing_custom_job_with_enable_dashboard_access",
+ "mock_python_package_to_gcs",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_enable_dashboard_access(
+ self, sync, caplog
+ ):
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ enable_dashboard_access=_TEST_ENABLE_DASHBOARD_ACCESS,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ print(caplog.text)
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert "workerpool0-0:8888" in caplog.text
+ assert job._gca_resource == make_training_pipeline_with_enable_dashboard_access(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_scheduling",
+ "mock_pipeline_service_get_with_scheduling",
+ "mock_python_package_to_gcs",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ if not sync:
+ job.wait()
+
+ assert job._gca_resource == make_training_pipeline_with_scheduling(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert (
+ job._gca_resource.state
+ == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ assert job._gca_resource.training_task_inputs["timeout"] == f"{_TEST_TIMEOUT}s"
+ assert (
+ job._gca_resource.training_task_inputs["restart_job_on_worker_restart"]
+ == _TEST_RESTART_JOB_ON_WORKER_RESTART
+ )
+ assert (
+ job._gca_resource.training_task_inputs["disable_retries"]
+ == _TEST_DISABLE_RETRIES
+ )
+ assert (
+ job._gca_resource.training_task_inputs["max_wait_duration"]
+ == f"{_TEST_MAX_WAIT_DURATION}s"
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_spot_strategy",
+ "mock_pipeline_service_get_with_spot_strategy",
+ "mock_python_package_to_gcs",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_spot_strategy(self, sync):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ )
+
+ if not sync:
+ job.wait()
+
+ assert job._gca_resource == make_training_pipeline_with_spot_strategy(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert (
+ job._gca_resource.state
+ == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert (
+ job._gca_resource.training_task_inputs["scheduling_strategy"]
+ == _TEST_SPOT_STRATEGY
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_no_model_to_upload",
+ "mock_pipeline_service_get_with_no_model_to_upload",
+ "mock_python_package_to_gcs",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_returns_none_if_no_model_to_upload(
+ self,
+ mock_tabular_dataset,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ model = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ assert model is None
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_no_model_to_upload",
+ "mock_pipeline_service_get_with_no_model_to_upload",
+ "mock_python_package_to_gcs",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_get_model_raises_if_no_model_to_upload(
+ self,
+ mock_tabular_dataset,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_raises_if_pipeline_fails(
+ self,
+ mock_pipeline_service_create_and_get_with_fail,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ sync,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ def test_raises_before_run_is_called(
+ self, mock_pipeline_service_create, mock_python_package_to_gcs
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ with pytest.raises(RuntimeError):
+ job.has_failed
+
+ with pytest.raises(RuntimeError):
+ job.state
+
+ def test_run_raises_if_no_staging_bucket(self):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ with pytest.raises(RuntimeError):
+ training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_distributed_training(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ replica_count=10,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ mock_python_package_to_gcs.assert_called_once_with(
+ gcs_staging_dir=_TEST_BUCKET_NAME,
+ project=_TEST_PROJECT,
+ credentials=initializer.global_config.credentials,
+ )
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = [
+ {
+ "replica_count": 1,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ },
+ {
+ "replica_count": 9,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ },
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": true_worker_pool_spec,
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_distributed_training_with_reduction_server(
+ self,
+ mock_pipeline_service_create_with_no_model_to_upload,
+ mock_pipeline_service_get_with_no_model_to_upload,
+ mock_python_package_to_gcs,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ replica_count=10,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ reduction_server_replica_count=_TEST_REDUCTION_SERVER_REPLICA_COUNT,
+ reduction_server_machine_type=_TEST_REDUCTION_SERVER_MACHINE_TYPE,
+ reduction_server_container_uri=_TEST_REDUCTION_SERVER_CONTAINER_URI,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ mock_python_package_to_gcs.assert_called_once_with(
+ gcs_staging_dir=_TEST_BUCKET_NAME,
+ project=_TEST_PROJECT,
+ credentials=initializer.global_config.credentials,
+ )
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = [
+ {
+ "replica_count": 1,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ },
+ {
+ "replica_count": 9,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ },
+ {
+ "replica_count": _TEST_REDUCTION_SERVER_REPLICA_COUNT,
+ "machine_spec": {"machine_type": _TEST_REDUCTION_SERVER_MACHINE_TYPE},
+ "container_spec": {"image_uri": _TEST_REDUCTION_SERVER_CONTAINER_URI},
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ ]
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": true_worker_pool_spec,
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ )
+
+ mock_pipeline_service_create_with_no_model_to_upload.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline_with_no_model_upload(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @pytest.mark.usefixtures("get_training_job_custom_mock")
+ def test_get_training_job(self, get_training_job_custom_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ job = training_jobs.CustomTrainingJob.get(resource_name=_TEST_NAME)
+
+ get_training_job_custom_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+ assert isinstance(job, training_jobs.CustomTrainingJob)
+
+ @pytest.mark.usefixtures("get_training_job_custom_mock")
+ def test_get_training_job_wrong_job_type(self, get_training_job_custom_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ # The returned job is for a custom training task,
+ # but the calling type if of AutoMLImageTrainingJob.
+ # Hence, it should throw an error.
+ with pytest.raises(ValueError):
+ training_jobs.AutoMLImageTrainingJob.get(resource_name=_TEST_NAME)
+
+ @pytest.mark.usefixtures("get_training_job_custom_mock_no_model_to_upload")
+ def test_get_training_job_no_model_to_upload(
+ self, get_training_job_custom_mock_no_model_to_upload
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ job = training_jobs.CustomTrainingJob.get(resource_name=_TEST_NAME)
+
+ with pytest.raises(RuntimeError):
+ job.get_model(sync=False)
+
+ @pytest.mark.usefixtures("get_training_job_tabular_mock")
+ def test_get_training_job_tabular(self, get_training_job_tabular_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ with pytest.raises(ValueError):
+ training_jobs.CustomTrainingJob.get(resource_name=_TEST_NAME)
+
+ @pytest.mark.usefixtures("get_training_job_custom_mock")
+ def test_get_training_job_with_id_only(self, get_training_job_custom_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ training_jobs.CustomTrainingJob.get(resource_name=_TEST_ID)
+ get_training_job_custom_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_get_training_job_with_id_only_with_project_and_location(
+ self, get_training_job_custom_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ training_jobs.CustomTrainingJob.get(
+ resource_name=_TEST_ID, project=_TEST_PROJECT, location=_TEST_LOCATION
+ )
+ get_training_job_custom_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_get_training_job_with_project_and_location(
+ self, get_training_job_custom_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ training_jobs.CustomTrainingJob.get(
+ resource_name=_TEST_NAME, project=_TEST_PROJECT, location=_TEST_LOCATION
+ )
+ get_training_job_custom_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_get_training_job_with_alt_project_and_location(
+ self, get_training_job_custom_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ training_jobs.CustomTrainingJob.get(
+ resource_name=_TEST_NAME, project=_TEST_ALT_PROJECT, location=_TEST_LOCATION
+ )
+ get_training_job_custom_mock.assert_called_once_with(
+ name=_TEST_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ def test_get_training_job_with_project_and_alt_location(self):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ with pytest.raises(RuntimeError):
+ training_jobs.CustomTrainingJob.get(
+ resource_name=_TEST_NAME,
+ project=_TEST_PROJECT,
+ location=_TEST_ALT_LOCATION,
+ )
+
+ def test_unique_supported_training_schemas(self):
+ """Ensure that the `_supported_training_schemas` across AutoML training
+ classes and CustomTrainingJob contain unique values."""
+
+ schemas = [
+ schema
+ for c in aiplatform.training_jobs._TrainingJob.__subclasses__()
+ for schema in c._supported_training_schemas
+ if c.__name__.startswith("AutoML")
+ ]
+
+ schemas.extend(
+ aiplatform.training_jobs.CustomTrainingJob._supported_training_schemas
+ )
+
+ # Ensure all schemas across classes are unique
+ assert len(set(schemas)) == len(schemas)
+
+ @pytest.mark.usefixtures("get_training_job_tabular_mock")
+ def test_get_and_return_subclass_automl(self):
+ subcls = aiplatform.training_jobs._TrainingJob._get_and_return_subclass(
+ resource_name=_TEST_PIPELINE_RESOURCE_NAME
+ )
+
+ assert isinstance(subcls, aiplatform.training_jobs.AutoMLTabularTrainingJob)
+
+ @pytest.mark.usefixtures("get_training_job_custom_mock")
+ def test_get_and_return_subclass_custom(self):
+ subcls = aiplatform.training_jobs._TrainingJob._get_and_return_subclass(
+ resource_name=_TEST_PIPELINE_RESOURCE_NAME
+ )
+
+ assert isinstance(subcls, aiplatform.training_jobs.CustomTrainingJob)
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_nontabular_dataset_without_model_display_name_nor_model_labels(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_nontabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_nontabular_dataset,
+ annotation_schema_uri=_TEST_ANNOTATION_SCHEMA_URI,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_filter_split=_TEST_TRAINING_FILTER_SPLIT,
+ validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT,
+ test_filter_split=_TEST_TEST_FILTER_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ mock_python_package_to_gcs.assert_called_once_with(
+ gcs_staging_dir=_TEST_BUCKET_NAME,
+ project=_TEST_PROJECT,
+ credentials=initializer.global_config.credentials,
+ )
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ },
+ }
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_TRAINING_FILTER_SPLIT,
+ validation_filter=_TEST_VALIDATION_FILTER_SPLIT,
+ test_filter=_TEST_TEST_FILTER_SPLIT,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ labels=_TEST_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_nontabular_dataset.name,
+ annotation_schema_uri=_TEST_ANNOTATION_SCHEMA_URI,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ def test_run_call_pipeline_service_create_with_nontabular_dataset_raises_if_annotation_schema_uri(
+ self,
+ mock_nontabular_dataset,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ with pytest.raises(Exception):
+ job.run(
+ dataset=mock_nontabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ create_request_timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_cancel_training_job(self, mock_pipeline_service_cancel):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run()
+ job.cancel()
+
+ mock_pipeline_service_cancel.assert_called_once_with(
+ name=_TEST_PIPELINE_RESOURCE_NAME
+ )
+
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_cancel_training_job_without_running(self, mock_pipeline_service_cancel):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ with pytest.raises(RuntimeError) as e:
+ job.cancel()
+
+ assert e.match(regexp=r"TrainingJob has not been launched")
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_persistent_resource_id",
+ "mock_pipeline_service_get_with_persistent_resource_id",
+ "mock_get_backing_custom_job_with_persistent_resource_id",
+ "mock_python_package_to_gcs",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_persistent_resource_id(
+ self, sync, caplog
+ ):
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ )
+
+ if not sync:
+ job.wait()
+
+ assert job._gca_resource == make_training_pipeline_with_persistent_resource_id(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_training_job_tpu_v5e(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ job.run(
+ machine_type=_TEST_MACHINE_TYPE_TPU_V5E,
+ tpu_topology="2x2",
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ labels=_TEST_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE_TPU_V5E,
+ "tpu_topology": "2x2",
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ },
+ }
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_training_job_tpu_v3_pod(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ job.run(
+ machine_type=_TEST_MACHINE_TYPE_TPU,
+ accelerator_type=_TEST_ACCELERATOR_TPU_TYPE,
+ accelerator_count=32,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ labels=_TEST_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE_TPU,
+ "accelerator_type": _TEST_ACCELERATOR_TPU_TYPE,
+ "accelerator_count": 32,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ },
+ }
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_training_job_reservation_affinity(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ job.run(
+ machine_type=_TEST_MACHINE_TYPE,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ reservation_affinity_type="ANY_RESERVATION",
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ labels=_TEST_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "reservation_affinity": {
+ "reservation_affinity_type": "ANY_RESERVATION"
+ },
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ },
+ }
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestCustomContainerTrainingJob:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ def test_custom_container_training_tabular_done(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ sync=False,
+ create_request_timeout=None,
+ )
+
+ assert job.done() is False
+
+ job.wait()
+
+ assert job.done() is True
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tabular_dataset(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ explanation_spec=gca_model.explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ predefined_split=gca_training_pipeline.PredefinedSplit(
+ key=_TEST_PREDEFINED_SPLIT_COLUMN_NAME
+ ),
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ assert job._has_logged_custom_job
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ def test_custom_container_training_job_run_raises_with_impartial_explanation_spec(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ # Missing the required explanations_parameters field
+ )
+
+ with pytest.raises(ValueError) as e:
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ create_request_timeout=None,
+ )
+ assert e.match(
+ regexp=r"To get model explanation, `explanation_parameters` "
+ "must be specified."
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tabular_dataset_and_timeout(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ predefined_split=gca_training_pipeline.PredefinedSplit(
+ key=_TEST_PREDEFINED_SPLIT_COLUMN_NAME
+ ),
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=180.0,
+ )
+
+ # assert job._gca_resource == make_training_pipeline(
+ # gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ # )
+
+ # mock_model_service_get.assert_called_once_with(
+ # name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ # )
+
+ # assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ # assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ # assert not job.has_failed
+
+ # assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ # assert job._has_logged_custom_job
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tabular_dataset_and_timeout_not_explicitly_set(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ sync=sync,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ predefined_split=gca_training_pipeline.PredefinedSplit(
+ key=_TEST_PREDEFINED_SPLIT_COLUMN_NAME
+ ),
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_bigquery_destination(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
+ model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ bigquery_destination=_TEST_BIGQUERY_DESTINATION,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ },
+ }
+
+ true_timestamp_split = gca_training_pipeline.TimestampSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ key=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ timestamp_split=true_timestamp_split,
+ dataset_id=mock_tabular_dataset.name,
+ bigquery_destination=gca_io.BigQueryDestination(
+ output_uri=_TEST_BIGQUERY_DESTINATION
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_called_twice_raises(
+ self,
+ mock_tabular_dataset,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_invalid_accelerator_type_raises(
+ self,
+ mock_pipeline_service_create,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ with pytest.raises(ValueError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_two_split_raises(
+ self,
+ mock_pipeline_service_create,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ with pytest.raises(ValueError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_incomplete_model_info_raises_with_model_to_upload(
+ self,
+ mock_pipeline_service_create,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_no_dataset(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ model_from_job = job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ },
+ }
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ container_spec=true_container_spec,
+ version_aliases=["default"],
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_enable_web_access",
+ "mock_pipeline_service_get_with_enable_web_access",
+ "mock_get_backing_custom_job_with_enable_web_access",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_enable_web_access(
+ self, sync, caplog
+ ):
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ enable_web_access=_TEST_ENABLE_WEB_ACCESS,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ print(caplog.text)
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert "workerpool0-0" in caplog.text
+ assert job._gca_resource == make_training_pipeline_with_enable_web_access(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ # TODO: Update test to address Mutant issue b/270708320
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_enable_dashboard_access",
+ "mock_pipeline_service_get_with_enable_dashboard_access",
+ "mock_get_backing_custom_job_with_enable_dashboard_access",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_enable_dashboard_access(
+ self, sync, caplog
+ ):
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ enable_dashboard_access=_TEST_ENABLE_DASHBOARD_ACCESS,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ print(caplog.text)
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert "workerpool0-0:8888" in caplog.text
+ assert job._gca_resource == make_training_pipeline_with_enable_dashboard_access(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_scheduling",
+ "mock_pipeline_service_get_with_scheduling",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ if not sync:
+ job.wait()
+
+ assert job._gca_resource == make_training_pipeline_with_scheduling(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert (
+ job._gca_resource.state
+ == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ assert job._gca_resource.training_task_inputs["timeout"] == f"{_TEST_TIMEOUT}s"
+ assert (
+ job._gca_resource.training_task_inputs["restart_job_on_worker_restart"]
+ == _TEST_RESTART_JOB_ON_WORKER_RESTART
+ )
+ assert (
+ job._gca_resource.training_task_inputs["disable_retries"]
+ == _TEST_DISABLE_RETRIES
+ )
+ assert (
+ job._gca_resource.training_task_inputs["max_wait_duration"]
+ == f"{_TEST_MAX_WAIT_DURATION}s"
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_returns_none_if_no_model_to_upload(
+ self,
+ mock_pipeline_service_create_with_no_model_to_upload,
+ mock_pipeline_service_get_with_no_model_to_upload,
+ mock_tabular_dataset,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ model = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ assert model is None
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_no_model_to_upload",
+ "mock_pipeline_service_get_with_no_model_to_upload",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_get_model_raises_if_no_model_to_upload(
+ self,
+ mock_tabular_dataset,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_raises_if_pipeline_fails(
+ self,
+ mock_pipeline_service_create_and_get_with_fail,
+ mock_tabular_dataset,
+ sync,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ def test_raises_before_run_is_called(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ with pytest.raises(RuntimeError):
+ job.has_failed
+
+ with pytest.raises(RuntimeError):
+ job.state
+
+ def test_run_raises_if_no_staging_bucket(self):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ with pytest.raises(RuntimeError):
+ training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_distributed_training(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=10,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = [
+ {
+ "replica_count": 1,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ },
+ },
+ {
+ "replica_count": 9,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ },
+ },
+ ]
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": true_worker_pool_spec,
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_distributed_training_with_reduction_server(
+ self,
+ mock_pipeline_service_create_with_no_model_to_upload,
+ mock_pipeline_service_get_with_no_model_to_upload,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=10,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ reduction_server_replica_count=_TEST_REDUCTION_SERVER_REPLICA_COUNT,
+ reduction_server_machine_type=_TEST_REDUCTION_SERVER_MACHINE_TYPE,
+ reduction_server_container_uri=_TEST_REDUCTION_SERVER_CONTAINER_URI,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = [
+ {
+ "replica_count": 1,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ },
+ },
+ {
+ "replica_count": 9,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ },
+ },
+ {
+ "replica_count": _TEST_REDUCTION_SERVER_REPLICA_COUNT,
+ "machine_spec": {"machine_type": _TEST_REDUCTION_SERVER_MACHINE_TYPE},
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "container_spec": {"image_uri": _TEST_REDUCTION_SERVER_CONTAINER_URI},
+ },
+ ]
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": true_worker_pool_spec,
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ )
+
+ mock_pipeline_service_create_with_no_model_to_upload.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline_with_no_model_upload(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_nontabular_dataset(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_nontabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_nontabular_dataset,
+ annotation_schema_uri=_TEST_ANNOTATION_SCHEMA_URI,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ training_filter_split=_TEST_TRAINING_FILTER_SPLIT,
+ validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT,
+ test_filter_split=_TEST_TEST_FILTER_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ "args": true_args,
+ },
+ }
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_TRAINING_FILTER_SPLIT,
+ validation_filter=_TEST_VALIDATION_FILTER_SPLIT,
+ test_filter=_TEST_TEST_FILTER_SPLIT,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_nontabular_dataset.name,
+ annotation_schema_uri=_TEST_ANNOTATION_SCHEMA_URI,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ labels=_TEST_LABELS,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ def test_run_call_pipeline_service_create_with_nontabular_dataset_raises_if_annotation_schema_uri(
+ self,
+ mock_nontabular_dataset,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ with pytest.raises(Exception):
+ job.run(
+ dataset=mock_nontabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ create_request_timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_persistent_resource_id",
+ "mock_pipeline_service_get_with_persistent_resource_id",
+ "mock_get_backing_custom_job_with_persistent_resource_id",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_persistent_resource_id(
+ self, sync, caplog
+ ):
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ )
+
+ if not sync:
+ job.wait()
+
+ print(caplog.text)
+ assert job._gca_resource == make_training_pipeline_with_persistent_resource_id(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_training_job_tpu_v5e(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ job.run(
+ machine_type=_TEST_MACHINE_TYPE_TPU_V5E,
+ tpu_topology="2x2",
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE_TPU_V5E,
+ "tpu_topology": "2x2",
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ },
+ }
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_training_job_tpu_v3_pod(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ job.run(
+ machine_type=_TEST_MACHINE_TYPE_TPU,
+ accelerator_type=_TEST_ACCELERATOR_TPU_TYPE,
+ accelerator_count=32,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE_TPU,
+ "accelerator_type": _TEST_ACCELERATOR_TPU_TYPE,
+ "accelerator_count": 32,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ },
+ }
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_training_job_reservation_affinity(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ )
+
+ job.run(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=32,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ reservation_affinity_type="ANY_RESERVATION",
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": 32,
+ "reservation_affinity": {
+ "reservation_affinity_type": "ANY_RESERVATION"
+ },
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "containerSpec": {
+ "imageUri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "command": _TEST_TRAINING_CONTAINER_CMD,
+ },
+ }
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+
+class Test_WorkerPoolSpec:
+ def test_machine_spec_return_spec_dict(self):
+ test_spec = worker_spec_utils._WorkerPoolSpec(
+ replica_count=_TEST_REPLICA_COUNT,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ reservation_affinity_type="SPECIFIC_RESERVATION",
+ reservation_affinity_key="compute.googleapis.com/reservation-name",
+ reservation_affinity_values="projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}",
+ )
+
+ true_spec_dict = {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ "reservation_affinity": {
+ "reservation_affinity_type": "SPECIFIC_RESERVATION",
+ "key": "compute.googleapis.com/reservation-name",
+ "values": "projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}",
+ },
+ },
+ "replica_count": _TEST_REPLICA_COUNT,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ }
+
+ assert test_spec.spec_dict == true_spec_dict
+
+ def test_machine_spec_return_spec_dict_with_boot_disk(self):
+ test_spec = worker_spec_utils._WorkerPoolSpec(
+ replica_count=_TEST_REPLICA_COUNT,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ boot_disk_type=_TEST_BOOT_DISK_TYPE,
+ boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB,
+ )
+
+ true_spec_dict = {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "replica_count": _TEST_REPLICA_COUNT,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB,
+ },
+ }
+
+ assert test_spec.spec_dict == true_spec_dict
+
+ def test_machine_spec_return_spec_dict_with_no_accelerator(self):
+ test_spec = worker_spec_utils._WorkerPoolSpec(
+ replica_count=_TEST_REPLICA_COUNT,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=0,
+ accelerator_type="ACCELERATOR_TYPE_UNSPECIFIED",
+ )
+
+ true_spec_dict = {
+ "machine_spec": {"machine_type": _TEST_MACHINE_TYPE},
+ "replica_count": _TEST_REPLICA_COUNT,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ }
+
+ assert test_spec.spec_dict == true_spec_dict
+
+ def test_machine_spec_spec_dict_raises_invalid_accelerator(self):
+ test_spec = worker_spec_utils._WorkerPoolSpec(
+ replica_count=_TEST_REPLICA_COUNT,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE,
+ )
+
+ with pytest.raises(ValueError):
+ test_spec.spec_dict
+
+ def test_machine_spec_spec_dict_is_empty(self):
+ test_spec = worker_spec_utils._WorkerPoolSpec(
+ replica_count=0,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE,
+ )
+
+ assert test_spec.is_empty
+
+ def test_machine_spec_spec_dict_is_not_empty(self):
+ test_spec = worker_spec_utils._WorkerPoolSpec(
+ replica_count=_TEST_REPLICA_COUNT,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE,
+ )
+
+ assert not test_spec.is_empty
+
+
+class Test_DistributedTrainingSpec:
+ def test_machine_spec_returns_pool_spec(self):
+
+ spec = worker_spec_utils._DistributedTrainingSpec(
+ chief_spec=worker_spec_utils._WorkerPoolSpec(
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ reservation_affinity_type="ANY_RESERVATION",
+ ),
+ worker_spec=worker_spec_utils._WorkerPoolSpec(
+ replica_count=10,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ reservation_affinity_type="SPECIFIC_RESERVATION",
+ reservation_affinity_key="compute.googleapis.com/reservation-name",
+ reservation_affinity_values="projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}",
+ ),
+ server_spec=worker_spec_utils._WorkerPoolSpec(
+ replica_count=3,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ reservation_affinity_type="NO_RESERVATION",
+ ),
+ evaluator_spec=worker_spec_utils._WorkerPoolSpec(
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ ),
+ )
+
+ true_pool_spec = [
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ "reservation_affinity": {
+ "reservation_affinity_type": "ANY_RESERVATION",
+ },
+ },
+ "replica_count": 1,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ "reservation_affinity": {
+ "reservation_affinity_type": "SPECIFIC_RESERVATION",
+ "key": "compute.googleapis.com/reservation-name",
+ "values": "projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}",
+ },
+ },
+ "replica_count": 10,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ "reservation_affinity": {
+ "reservation_affinity_type": "NO_RESERVATION",
+ },
+ },
+ "replica_count": 3,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "replica_count": 1,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ ]
+
+ assert spec.pool_specs == true_pool_spec
+
+ def test_chief_worker_pool_returns_spec(self):
+
+ chief_worker_spec = worker_spec_utils._DistributedTrainingSpec.chief_worker_pool(
+ replica_count=10,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ reservation_affinity_type="SPECIFIC_RESERVATION",
+ reservation_affinity_key="compute.googleapis.com/reservation-name",
+ reservation_affinity_values="projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}",
+ )
+
+ true_pool_spec = [
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ "reservation_affinity": {
+ "reservation_affinity_type": "SPECIFIC_RESERVATION",
+ "key": "compute.googleapis.com/reservation-name",
+ "values": "projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}",
+ },
+ },
+ "replica_count": 1,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ "reservation_affinity": {
+ "reservation_affinity_type": "SPECIFIC_RESERVATION",
+ "key": "compute.googleapis.com/reservation-name",
+ "values": "projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}",
+ },
+ },
+ "replica_count": 9,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ ]
+
+ assert chief_worker_spec.pool_specs == true_pool_spec
+
+ def test_chief_worker_pool_returns_just_chief(self):
+
+ chief_worker_spec = worker_spec_utils._DistributedTrainingSpec.chief_worker_pool(
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ reservation_affinity_type="SPECIFIC_RESERVATION",
+ reservation_affinity_key="compute.googleapis.com/reservation-name",
+ reservation_affinity_values="projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}",
+ )
+
+ true_pool_spec = [
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ "reservation_affinity": {
+ "reservation_affinity_type": "SPECIFIC_RESERVATION",
+ "key": "compute.googleapis.com/reservation-name",
+ "values": "projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}",
+ },
+ },
+ "replica_count": 1,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ }
+ ]
+
+ assert chief_worker_spec.pool_specs == true_pool_spec
+
+ def test_machine_spec_raise_with_more_than_one_chief_replica(self):
+
+ spec = worker_spec_utils._DistributedTrainingSpec(
+ chief_spec=worker_spec_utils._WorkerPoolSpec(
+ replica_count=2,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ ),
+ )
+
+ with pytest.raises(ValueError):
+ spec.pool_specs
+
+ def test_machine_spec_handles_missing_pools(self):
+
+ spec = worker_spec_utils._DistributedTrainingSpec(
+ chief_spec=worker_spec_utils._WorkerPoolSpec(
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ ),
+ worker_spec=worker_spec_utils._WorkerPoolSpec(replica_count=0),
+ server_spec=worker_spec_utils._WorkerPoolSpec(
+ replica_count=3,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ ),
+ evaluator_spec=worker_spec_utils._WorkerPoolSpec(replica_count=0),
+ )
+
+ true_pool_spec = [
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "replica_count": 1,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ {},
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "replica_count": 3,
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ ]
+
+ assert spec.pool_specs == true_pool_spec
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestCustomPythonPackageTrainingJob:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ @pytest.mark.parametrize(
+ "python_package_gcs_uri",
+ [_TEST_OUTPUT_PYTHON_PACKAGE_PATH, _TEST_PACKAGE_GCS_URIS],
+ )
+ def test_run_call_pipeline_service_create_with_tabular_dataset(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ python_package_gcs_uri,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ python_package_gcs_uri=python_package_gcs_uri,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ if isinstance(python_package_gcs_uri, str):
+ package_uris = [python_package_gcs_uri]
+ else:
+ package_uris = python_package_gcs_uri
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": package_uris,
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ explanation_spec=gca_model.explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ def test_custom_python_package_training_job_run_raises_with_wrong_package_uris(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ wrong_package_gcs_uri = {"package": _TEST_OUTPUT_PYTHON_PACKAGE_PATH}
+
+ with pytest.raises(ValueError) as e:
+ training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ python_package_gcs_uri=wrong_package_gcs_uri,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
+ )
+
+ assert e.match("'python_package_gcs_uri' must be a string or list.")
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ def test_custom_python_package_training_job_run_raises_with_impartial_explanation_spec(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ explanation_metadata=_TEST_EXPLANATION_METADATA,
+ # Missing the required explanations_parameters field
+ )
+
+ with pytest.raises(ValueError) as e:
+ job.run(
+ dataset=mock_tabular_dataset,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ create_request_timeout=None,
+ )
+ assert e.match(
+ regexp=r"To get model explanation, `explanation_parameters` "
+ "must be specified."
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tabular_dataset_with_timeout(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=180.0,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=180.0,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tabular_dataset_with_timeout_not_explicitly_set(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ model_labels=_TEST_MODEL_LABELS,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ args=_TEST_RUN_ARGS,
+ environment_variables=_TEST_ENVIRONMENT_VARIABLES,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+ true_env = [
+ {"name": key, "value": value}
+ for key, value in _TEST_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ "env": true_env,
+ },
+ }
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ labels=_TEST_MODEL_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "network": _TEST_NETWORK,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_tabular_dataset_without_model_display_name_nor_model_labels(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ # model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ },
+ }
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ labels=_TEST_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ predefined_split=gca_training_pipeline.PredefinedSplit(
+ key=_TEST_PREDEFINED_SPLIT_COLUMN_NAME
+ ),
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_bigquery_destination(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
+ model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ bigquery_destination=_TEST_BIGQUERY_DESTINATION,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ },
+ }
+
+ true_timestamp_split = gca_training_pipeline.TimestampSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ key=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ timestamp_split=true_timestamp_split,
+ dataset_id=mock_tabular_dataset.name,
+ bigquery_destination=gca_io.BigQueryDestination(
+ output_uri=_TEST_BIGQUERY_DESTINATION
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_called_twice_raises(
+ self,
+ mock_tabular_dataset,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_invalid_accelerator_type_raises(
+ self,
+ mock_pipeline_service_create,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ with pytest.raises(ValueError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_two_split_raises(
+ self,
+ mock_pipeline_service_create,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ with pytest.raises(ValueError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_with_incomplete_model_info_raises_with_model_to_upload(
+ self,
+ mock_pipeline_service_create,
+ mock_python_package_to_gcs,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_no_dataset(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ model_from_job = job.run(
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ },
+ }
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ container_spec=true_container_spec,
+ version_aliases=["default"],
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_enable_web_access",
+ "mock_pipeline_service_get_with_enable_web_access",
+ "mock_get_backing_custom_job_with_enable_web_access",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_enable_web_access(
+ self, sync, caplog
+ ):
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ enable_web_access=_TEST_ENABLE_WEB_ACCESS,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ print(caplog.text)
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert "workerpool0-0" in caplog.text
+ assert job._gca_resource == make_training_pipeline_with_enable_web_access(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ # TODO: Update test to address Mutant issue b/270708320
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_enable_dashboard_access",
+ "mock_pipeline_service_get_with_enable_dashboard_access",
+ "mock_get_backing_custom_job_with_enable_dashboard_access",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_enable_dashboard_access(
+ self, sync, caplog
+ ):
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ enable_dashboard_access=_TEST_ENABLE_DASHBOARD_ACCESS,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+ print(caplog.text)
+ # TODO: b/383923584: Re-enable this test once the parent issue is fixed
+ # assert "workerpool0-0:8888" in caplog.text
+ assert job._gca_resource == make_training_pipeline_with_enable_dashboard_access(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_scheduling",
+ "mock_pipeline_service_get_with_scheduling",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ create_request_timeout=None,
+ disable_retries=_TEST_DISABLE_RETRIES,
+ max_wait_duration=_TEST_MAX_WAIT_DURATION,
+ )
+
+ if not sync:
+ job.wait()
+
+ assert job._gca_resource == make_training_pipeline_with_scheduling(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert (
+ job._gca_resource.state
+ == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ assert job._gca_resource.training_task_inputs["timeout"] == f"{_TEST_TIMEOUT}s"
+ assert (
+ job._gca_resource.training_task_inputs["restart_job_on_worker_restart"]
+ == _TEST_RESTART_JOB_ON_WORKER_RESTART
+ )
+ assert (
+ job._gca_resource.training_task_inputs["disable_retries"]
+ == _TEST_DISABLE_RETRIES
+ )
+ assert (
+ job._gca_resource.training_task_inputs["max_wait_duration"]
+ == f"{_TEST_MAX_WAIT_DURATION}s"
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_no_model_to_upload",
+ "mock_pipeline_service_get_with_no_model_to_upload",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_returns_none_if_no_model_to_upload(
+ self,
+ mock_tabular_dataset,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ model = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ assert model is None
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_no_model_to_upload",
+ "mock_pipeline_service_get_with_no_model_to_upload",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_get_model_raises_if_no_model_to_upload(
+ self,
+ mock_tabular_dataset,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_raises_if_pipeline_fails(
+ self,
+ mock_pipeline_service_create_and_get_with_fail,
+ mock_tabular_dataset,
+ sync,
+ ):
+
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ def test_raises_before_run_is_called(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ with pytest.raises(RuntimeError):
+ job.get_model()
+
+ with pytest.raises(RuntimeError):
+ job.has_failed
+
+ with pytest.raises(RuntimeError):
+ job.state
+
+ def test_run_raises_if_no_staging_bucket(self):
+
+ aiplatform.init(project=_TEST_PROJECT)
+
+ with pytest.raises(RuntimeError):
+ training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_distributed_training(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_tabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_tabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=10,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction_split=_TEST_TEST_FRACTION_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = [
+ {
+ "replica_count": 1,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ },
+ },
+ {
+ "replica_count": 9,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ },
+ },
+ ]
+
+ true_fraction_split = gca_training_pipeline.FractionSplit(
+ training_fraction=_TEST_TRAINING_FRACTION_SPLIT,
+ validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT,
+ test_fraction=_TEST_TEST_FRACTION_SPLIT,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_MODEL_DISPLAY_NAME,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ fraction_split=true_fraction_split,
+ dataset_id=mock_tabular_dataset.name,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": true_worker_pool_spec,
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_distributed_training_with_reduction_server(
+ self,
+ mock_pipeline_service_create_with_no_model_to_upload,
+ mock_pipeline_service_get_with_no_model_to_upload,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=10,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ reduction_server_replica_count=_TEST_REDUCTION_SERVER_REPLICA_COUNT,
+ reduction_server_machine_type=_TEST_REDUCTION_SERVER_MACHINE_TYPE,
+ reduction_server_container_uri=_TEST_REDUCTION_SERVER_CONTAINER_URI,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = [
+ {
+ "replica_count": 1,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ },
+ },
+ {
+ "replica_count": 9,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ },
+ },
+ {
+ "replica_count": _TEST_REDUCTION_SERVER_REPLICA_COUNT,
+ "machine_spec": {"machine_type": _TEST_REDUCTION_SERVER_MACHINE_TYPE},
+ "container_spec": {"image_uri": _TEST_REDUCTION_SERVER_CONTAINER_URI},
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ },
+ ]
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": true_worker_pool_spec,
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ )
+
+ mock_pipeline_service_create_with_no_model_to_upload.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline_with_no_model_upload(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_nontabular_dataset_without_model_display_name_nor_model_labels(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ mock_python_package_to_gcs,
+ mock_nontabular_dataset,
+ mock_model_service_get,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ model_from_job = job.run(
+ dataset=mock_nontabular_dataset,
+ annotation_schema_uri=_TEST_ANNOTATION_SCHEMA_URI,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME,
+ training_filter_split=_TEST_TRAINING_FILTER_SPLIT,
+ validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT,
+ test_filter_split=_TEST_TEST_FILTER_SPLIT,
+ sync=sync,
+ create_request_timeout=None,
+ )
+
+ if not sync:
+ model_from_job.wait()
+
+ true_args = _TEST_RUN_ARGS
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ "args": true_args,
+ },
+ }
+
+ true_filter_split = gca_training_pipeline.FilterSplit(
+ training_filter=_TEST_TRAINING_FILTER_SPLIT,
+ validation_filter=_TEST_VALIDATION_FILTER_SPLIT,
+ test_filter=_TEST_TEST_FILTER_SPLIT,
+ )
+
+ env = [
+ gca_env_var.EnvVar(name=str(key), value=str(value))
+ for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items()
+ ]
+
+ ports = [
+ gca_model.Port(container_port=port)
+ for port in _TEST_MODEL_SERVING_CONTAINER_PORTS
+ ]
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ env=env,
+ ports=ports,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ labels=_TEST_LABELS,
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_input_data_config = gca_training_pipeline.InputDataConfig(
+ filter_split=true_filter_split,
+ dataset_id=mock_nontabular_dataset.name,
+ annotation_schema_uri=_TEST_ANNOTATION_SCHEMA_URI,
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ )
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ labels=_TEST_LABELS,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ "service_account": _TEST_SERVICE_ACCOUNT,
+ "tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME,
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ input_data_config=true_input_data_config,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ assert job._gca_resource == make_training_pipeline(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ mock_model_service_get.assert_called_once_with(
+ name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert model_from_job._gca_resource is mock_model_service_get.return_value
+
+ assert job.get_model()._gca_resource is mock_model_service_get.return_value
+
+ assert not job.has_failed
+
+ assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+
+ assert job._has_logged_custom_job
+
+ def test_run_call_pipeline_service_create_with_nontabular_dataset_raises_if_annotation_schema_uri(
+ self,
+ mock_nontabular_dataset,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS,
+ model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ with pytest.raises(Exception):
+ job.run(
+ dataset=mock_nontabular_dataset,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ replica_count=1,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ model_display_name=_TEST_MODEL_DISPLAY_NAME,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_persistent_resource_id",
+ "mock_pipeline_service_get_with_persistent_resource_id",
+ "mock_get_backing_custom_job_with_persistent_resource_id",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_persistent_resource_id(
+ self, sync, caplog
+ ):
+
+ caplog.set_level(logging.INFO)
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ sync=sync,
+ create_request_timeout=None,
+ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID,
+ )
+
+ if not sync:
+ job.wait()
+
+ print(caplog.text)
+ assert job._gca_resource == make_training_pipeline_with_persistent_resource_id(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_training_job_tpu_v5e(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ job.run(
+ machine_type=_TEST_MACHINE_TYPE_TPU_V5E,
+ tpu_topology="2x2",
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE_TPU_V5E,
+ "tpu_topology": "2x2",
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ },
+ }
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_training_job_tpu_v3_pod(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ job.run(
+ machine_type=_TEST_MACHINE_TYPE_TPU,
+ accelerator_type=_TEST_ACCELERATOR_TPU_TYPE,
+ accelerator_count=32,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE_TPU,
+ "accelerator_type": _TEST_ACCELERATOR_TPU_TYPE,
+ "accelerator_count": 32,
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ },
+ }
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create",
+ "mock_pipeline_service_get",
+ "mock_python_package_to_gcs",
+ "mock_model_service_get",
+ )
+ def test_training_job_reservation_affinity(self, mock_pipeline_service_create):
+ aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ model_description=_TEST_MODEL_DESCRIPTION,
+ model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ )
+
+ job.run(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=32,
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ reservation_affinity_type="ANY_RESERVATION",
+ )
+
+ true_container_spec = gca_model.ModelContainerSpec(
+ image_uri=_TEST_SERVING_CONTAINER_IMAGE,
+ predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ command=_TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ )
+
+ true_managed_model = gca_model.Model(
+ display_name=_TEST_DISPLAY_NAME + "-model",
+ description=_TEST_MODEL_DESCRIPTION,
+ container_spec=true_container_spec,
+ predict_schemata=gca_model.PredictSchemata(
+ instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI,
+ parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI,
+ ),
+ version_aliases=["default"],
+ )
+
+ true_worker_pool_spec = {
+ "replica_count": _TEST_REPLICA_COUNT,
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": 32,
+ "reservation_affinity": {
+ "reservation_affinity_type": "ANY_RESERVATION"
+ },
+ },
+ "disk_spec": {
+ "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT,
+ "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT,
+ },
+ "python_package_spec": {
+ "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "python_module": _TEST_PYTHON_MODULE_NAME,
+ "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH],
+ },
+ }
+
+ true_training_pipeline = gca_training_pipeline.TrainingPipeline(
+ display_name=_TEST_DISPLAY_NAME,
+ training_task_definition=schema.training_job.definition.custom_task,
+ training_task_inputs=json_format.ParseDict(
+ {
+ "worker_pool_specs": [true_worker_pool_spec],
+ "base_output_directory": {
+ "output_uri_prefix": _TEST_BASE_OUTPUT_DIR
+ },
+ },
+ struct_pb2.Value(),
+ ),
+ model_to_upload=true_managed_model,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=initializer.global_config.common_location_path(),
+ training_pipeline=true_training_pipeline,
+ timeout=None,
+ )
+
+
+class TestVersionedTrainingJobs:
+ @pytest.mark.usefixtures("mock_pipeline_service_get")
+ @pytest.mark.parametrize(
+ "mock_pipeline_service_get",
+ ["make_training_pipeline_with_version"],
+ indirect=True,
+ )
+ @pytest.mark.parametrize(
+ "parent,location,project,model_id",
+ [
+ (_TEST_ID, _TEST_LOCATION, _TEST_PROJECT, None),
+ (_TEST_MODEL_NAME, None, None, None),
+ (None, None, None, _TEST_ID),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "aliases,default,goal",
+ [
+ (["alias1", "alias2"], True, ["alias1", "alias2", "default"]),
+ (None, True, ["default"]),
+ (["alias1", "alias2", "default"], True, ["alias1", "alias2", "default"]),
+ (["alias1", "alias2", "default"], False, ["alias1", "alias2", "default"]),
+ (["alias1", "alias2"], False, ["alias1", "alias2"]),
+ (None, False, []),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "callable",
+ [
+ training_jobs.CustomTrainingJob,
+ training_jobs.CustomContainerTrainingJob,
+ training_jobs.CustomPythonPackageTrainingJob,
+ ],
+ )
+ @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1)
+ @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1)
+ def test_run_pipeline_for_versioned_model(
+ self,
+ mock_pipeline_service_create_with_version,
+ mock_python_package_to_gcs,
+ mock_nontabular_dataset,
+ mock_model_service_get_with_version,
+ parent,
+ location,
+ project,
+ model_id,
+ aliases,
+ default,
+ goal,
+ callable,
+ ):
+ aiplatform.init(
+ project=project,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ location=location,
+ )
+ job_args = {
+ "display_name": _TEST_DISPLAY_NAME,
+ "model_serving_container_image_uri": _TEST_SERVING_CONTAINER_IMAGE,
+ "model_serving_container_predict_route": _TEST_SERVING_CONTAINER_PREDICTION_ROUTE,
+ "model_serving_container_health_route": _TEST_SERVING_CONTAINER_HEALTH_ROUTE,
+ "model_instance_schema_uri": _TEST_MODEL_INSTANCE_SCHEMA_URI,
+ "model_parameters_schema_uri": _TEST_MODEL_PARAMETERS_SCHEMA_URI,
+ "model_prediction_schema_uri": _TEST_MODEL_PREDICTION_SCHEMA_URI,
+ "model_serving_container_command": _TEST_MODEL_SERVING_CONTAINER_COMMAND,
+ "model_serving_container_args": _TEST_MODEL_SERVING_CONTAINER_ARGS,
+ "model_serving_container_environment_variables": _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES,
+ "model_serving_container_ports": _TEST_MODEL_SERVING_CONTAINER_PORTS,
+ "model_description": _TEST_MODEL_DESCRIPTION,
+ "labels": _TEST_LABELS,
+ }
+
+ run_args = {
+ "dataset": mock_nontabular_dataset,
+ "annotation_schema_uri": _TEST_ANNOTATION_SCHEMA_URI,
+ "base_output_dir": _TEST_BASE_OUTPUT_DIR,
+ "args": _TEST_RUN_ARGS,
+ "machine_type": _TEST_MACHINE_TYPE,
+ "accelerator_type": _TEST_ACCELERATOR_TYPE,
+ "accelerator_count": _TEST_ACCELERATOR_COUNT,
+ "training_filter_split": _TEST_TRAINING_FILTER_SPLIT,
+ "validation_filter_split": _TEST_VALIDATION_FILTER_SPLIT,
+ "test_filter_split": _TEST_TEST_FILTER_SPLIT,
+ "create_request_timeout": None,
+ "model_id": model_id,
+ "parent_model": parent,
+ "is_default_version": default,
+ "model_version_aliases": aliases,
+ "model_version_description": _TEST_MODEL_VERSION_DESCRIPTION,
+ }
+
+ if issubclass(callable, (training_jobs.CustomContainerTrainingJob)):
+ job_args = {
+ "container_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ **job_args,
+ }
+ elif issubclass(callable, (training_jobs.CustomTrainingJob)):
+ job_args = {
+ "container_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ "script_path": _TEST_LOCAL_SCRIPT_FILE_NAME,
+ **job_args,
+ }
+ elif issubclass(callable, training_jobs.CustomPythonPackageTrainingJob):
+ job_args = {
+ "python_package_gcs_uri": _TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ "python_module_name": _TEST_PYTHON_MODULE_NAME,
+ "container_uri": _TEST_TRAINING_CONTAINER_IMAGE,
+ **job_args,
+ }
+
+ job = callable(**job_args)
+
+ model_from_job = job.run(**run_args)
+
+ mock_pipeline_service_create_with_version.assert_called_once()
+ _, tp_kwargs = mock_pipeline_service_create_with_version.call_args_list[0]
+ training_pipeline = tp_kwargs["training_pipeline"]
+
+ assert training_pipeline.model_id == (model_id if model_id else "")
+ assert training_pipeline.parent_model == (_TEST_MODEL_NAME if parent else "")
+ assert training_pipeline.model_to_upload.version_aliases == goal
+ assert (
+ training_pipeline.model_to_upload.version_description
+ == _TEST_MODEL_VERSION_DESCRIPTION
+ )
+
+ assert model_from_job.version_id == _TEST_MODEL_VERSION_ID
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_training_utils.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_training_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4629d23e332d9411b871c2d80281156d80347567
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_training_utils.py
@@ -0,0 +1,289 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from importlib import reload
+import filecmp
+import json
+import os
+import pytest
+import sys
+import tempfile
+
+from google.cloud.aiplatform.training_utils import environment_variables
+from google.cloud.aiplatform.utils import source_utils
+from unittest import mock
+
+_TEST_TRAINING_DATA_URI = "gs://training-data-uri"
+_TEST_VALIDATION_DATA_URI = "gs://test-validation-data-uri"
+_TEST_TEST_DATA_URI = "gs://test-data-uri"
+_TEST_MODEL_DIR = "gs://test-model-dir"
+_TEST_CHECKPOINT_DIR = "gs://test-checkpoint-dir"
+_TEST_TENSORBOARD_LOG_DIR = "gs://test-tensorboard-log-dir"
+_TEST_CLUSTER_SPEC = """{
+ "cluster": {
+ "worker_pools":[
+ {
+ "index":0,
+ "replicas":[
+ "training-workerpool0-ab-0:2222"
+ ]
+ },
+ {
+ "index":1,
+ "replicas":[
+ "training-workerpool1-ab-0:2222",
+ "training-workerpool1-ab-1:2222"
+ ]
+ }
+ ]
+ },
+ "environment": "cloud",
+ "task": {
+ "worker_pool_index":0,
+ "replica_index":0,
+ "trial":"TRIAL_ID"
+ }
+}"""
+_TEST_AIP_TF_PROFILER_PORT = "1234"
+_TEST_TENSORBOARD_API_URI = "http://testuri.com"
+_TEST_TENSORBOARD_RESOURCE_NAME = (
+ "projects/myproj/locations/us-central1/tensorboards/1234"
+)
+_TEST_CLOUD_ML_JOB_ID = "myjob"
+_TEST_AIP_HTTP_HANDLER_PORT = "5678"
+
+
+class TestTrainingUtils:
+ @pytest.fixture
+ def mock_environment(self):
+ env_vars = {
+ "AIP_TRAINING_DATA_URI": _TEST_TRAINING_DATA_URI,
+ "AIP_VALIDATION_DATA_URI": _TEST_VALIDATION_DATA_URI,
+ "AIP_TEST_DATA_URI": _TEST_TEST_DATA_URI,
+ "AIP_MODEL_DIR": _TEST_MODEL_DIR,
+ "AIP_CHECKPOINT_DIR": _TEST_CHECKPOINT_DIR,
+ "AIP_TENSORBOARD_LOG_DIR": _TEST_TENSORBOARD_LOG_DIR,
+ "AIP_TF_PROFILER_PORT": _TEST_AIP_TF_PROFILER_PORT,
+ "AIP_HTTP_HANDLER_PORT": _TEST_AIP_HTTP_HANDLER_PORT,
+ "AIP_TENSORBOARD_API_URI": _TEST_TENSORBOARD_API_URI,
+ "AIP_TENSORBOARD_RESOURCE_NAME": _TEST_TENSORBOARD_RESOURCE_NAME,
+ "CLOUD_ML_JOB_ID": _TEST_CLOUD_ML_JOB_ID,
+ "CLUSTER_SPEC": _TEST_CLUSTER_SPEC,
+ "TF_CONFIG": _TEST_CLUSTER_SPEC,
+ }
+ with mock.patch.dict(os.environ, env_vars, clear=True):
+ yield
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_training_data_uri(self):
+ reload(environment_variables)
+ assert environment_variables.training_data_uri == _TEST_TRAINING_DATA_URI
+
+ def test_training_data_uri_none(self):
+ reload(environment_variables)
+ assert environment_variables.training_data_uri is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_validation_data_uri(self):
+ reload(environment_variables)
+ assert environment_variables.validation_data_uri == _TEST_VALIDATION_DATA_URI
+
+ def test_validation_data_uri_none(self):
+ reload(environment_variables)
+ assert environment_variables.validation_data_uri is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_test_data_uri(self):
+ reload(environment_variables)
+ assert environment_variables.test_data_uri == _TEST_TEST_DATA_URI
+
+ def test_test_data_uri_none(self):
+ reload(environment_variables)
+ assert environment_variables.test_data_uri is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_model_dir(self):
+ reload(environment_variables)
+ assert environment_variables.model_dir == _TEST_MODEL_DIR
+
+ def test_model_dir_none(self):
+ reload(environment_variables)
+ assert environment_variables.model_dir is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_checkpoint_dir(self):
+ reload(environment_variables)
+ assert environment_variables.checkpoint_dir == _TEST_CHECKPOINT_DIR
+
+ def test_checkpoint_dir_none(self):
+ reload(environment_variables)
+ assert environment_variables.checkpoint_dir is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_tensorboard_log_dir(self):
+ reload(environment_variables)
+ assert environment_variables.tensorboard_log_dir == _TEST_TENSORBOARD_LOG_DIR
+
+ def test_tensorboard_log_dir_none(self):
+ reload(environment_variables)
+ assert environment_variables.tensorboard_log_dir is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_cluster_spec(self):
+ reload(environment_variables)
+ assert environment_variables.cluster_spec == json.loads(_TEST_CLUSTER_SPEC)
+
+ def test_cluster_spec_none(self):
+ reload(environment_variables)
+ assert environment_variables.cluster_spec is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_tf_config(self):
+ reload(environment_variables)
+ assert environment_variables.tf_config == json.loads(_TEST_CLUSTER_SPEC)
+
+ def test_tf_config_none(self):
+ reload(environment_variables)
+ assert environment_variables.tf_config is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_tf_profiler_port(self):
+ reload(environment_variables)
+ assert environment_variables.tf_profiler_port == _TEST_AIP_TF_PROFILER_PORT
+
+ def test_tf_profiler_port_none(self):
+ reload(environment_variables)
+ assert environment_variables.tf_profiler_port is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_tensorboard_api_uri(self):
+ reload(environment_variables)
+ assert environment_variables.tensorboard_api_uri == _TEST_TENSORBOARD_API_URI
+
+ def test_tensorboard_api_uri_none(self):
+ reload(environment_variables)
+ assert environment_variables.tensorboard_api_uri is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_tensorboard_resource_name(self):
+ reload(environment_variables)
+ assert (
+ environment_variables.tensorboard_resource_name
+ == _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+
+ def test_tensorboard_resource_name_none(self):
+ reload(environment_variables)
+ assert environment_variables.tensorboard_resource_name is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_cloud_ml_job_id(self):
+ reload(environment_variables)
+ assert environment_variables.cloud_ml_job_id == _TEST_CLOUD_ML_JOB_ID
+
+ def test_cloud_ml_job_id_none(self):
+ reload(environment_variables)
+ assert environment_variables.cloud_ml_job_id is None
+
+ @pytest.mark.usefixtures("mock_environment")
+ def test_http_handler_port(self):
+ reload(environment_variables)
+ assert environment_variables.http_handler_port == _TEST_AIP_HTTP_HANDLER_PORT
+
+ def test_http_handler_port_none(self):
+ reload(environment_variables)
+ assert environment_variables.http_handler_port is None
+
+ @pytest.fixture()
+ def mock_temp_file_name(self):
+ # Create random files
+ # tmpdirname = tempfile.TemporaryDirectory()
+ file = tempfile.NamedTemporaryFile()
+
+ with open(file.name, "w") as handle:
+ handle.write("test")
+
+ yield file.name
+
+ file.close()
+
+ @pytest.mark.skipif(
+ sys.executable is None, reason="requires python path to invoke subprocess"
+ )
+ def test_package_file(self, mock_temp_file_name):
+ # Test that the packager properly copies the source file to the destination file
+
+ packager = source_utils._TrainingScriptPythonPackager(
+ script_path=mock_temp_file_name
+ )
+
+ with tempfile.TemporaryDirectory() as destination_directory_name:
+ _ = packager.make_package(package_directory=destination_directory_name)
+
+ # Check that contents of source_distribution_path is the same as destination_directory_name
+ destination_inner_path = f"{destination_directory_name}/{packager._TRAINER_FOLDER}/{packager._ROOT_MODULE}/{packager.task_module_name}.py"
+
+ assert filecmp.cmp(
+ mock_temp_file_name, destination_inner_path, shallow=False
+ )
+
+ @pytest.fixture()
+ def mock_temp_folder_name(self):
+ # Create random folder
+ folder = tempfile.TemporaryDirectory()
+
+ file = tempfile.NamedTemporaryFile(dir=folder.name)
+
+ # Create random file in the folder
+ with open(file.name, "w") as handle:
+ handle.write("test")
+
+ yield folder.name
+
+ file.close()
+
+ folder.cleanup()
+
+ @pytest.mark.skipif(
+ sys.executable is None, reason="requires python path to invoke subprocess"
+ )
+ def test_package_folder(self, mock_temp_folder_name):
+ # Test that the packager properly copies the source folder to the destination folder
+
+ packager = source_utils._TrainingScriptPythonPackager(
+ script_path=mock_temp_folder_name
+ )
+
+ with tempfile.TemporaryDirectory() as destination_directory_name:
+ # Add an existing file into the destination directory to check if it gets deleted
+ existing_file = tempfile.NamedTemporaryFile(dir=destination_directory_name)
+
+ with open(existing_file.name, "w") as handle:
+ handle.write("existing")
+
+ _ = packager.make_package(package_directory=destination_directory_name)
+
+ # Check that contents of source_distribution_path is the same as destination_directory_name
+ destination_inner_path = f"{destination_directory_name}/{packager._TRAINER_FOLDER}/{packager._ROOT_MODULE}"
+
+ dcmp = filecmp.dircmp(mock_temp_folder_name, destination_inner_path)
+
+ assert len(dcmp.diff_files) == 0
+ assert len(dcmp.left_only) == 0
+ assert len(dcmp.right_only) == 0
+
+ existing_file.close()
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_upload_tracker.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_upload_tracker.py
new file mode 100644
index 0000000000000000000000000000000000000000..132ae58435e19c304c2d18140182c686568c02f9
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_upload_tracker.py
@@ -0,0 +1,389 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019-2024 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Tests for tensorboard.uploader.upload_tracker."""
+
+
+import sys
+
+from unittest import mock
+
+from absl.testing import absltest
+from google.cloud.aiplatform.tensorboard import upload_tracker
+
+
+class ReadableBytesStringTest(absltest.TestCase):
+ def testZero(self):
+ self.assertEqual(upload_tracker.readable_bytes_string(0), "0 B")
+
+ def testLessThan1K(self):
+ self.assertEqual(upload_tracker.readable_bytes_string(42), "42 B")
+
+ def testBetween1KAnd1M(self):
+ self.assertEqual(upload_tracker.readable_bytes_string(1337), "1.3 kB")
+
+ def testMoreThan1M(self):
+ self.assertEqual(upload_tracker.readable_bytes_string(299792458), "285.9 MB")
+
+
+class UploadStatsTest(absltest.TestCase):
+ """Unit tests for the UploadStats class."""
+
+ def testAddScalar(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_scalars(1234)
+ self.assertEqual(stats.num_scalars, 1234)
+ stats.add_scalars(4321)
+ self.assertEqual(stats.num_scalars, 5555)
+
+ def testAddTensor(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_tensors(
+ num_tensors=10,
+ num_tensors_skipped=0,
+ tensor_bytes=1000,
+ tensor_bytes_skipped=0,
+ )
+ self.assertEqual(stats.num_tensors, 10)
+ self.assertEqual(stats.num_tensors_skipped, 0)
+ self.assertEqual(stats.tensor_bytes, 1000)
+ self.assertEqual(stats.tensor_bytes_skipped, 0)
+ stats.add_tensors(
+ num_tensors=20,
+ num_tensors_skipped=5,
+ tensor_bytes=2000,
+ tensor_bytes_skipped=500,
+ )
+ self.assertEqual(stats.num_tensors, 30)
+ self.assertEqual(stats.num_tensors_skipped, 5)
+ self.assertEqual(stats.tensor_bytes, 3000)
+ self.assertEqual(stats.tensor_bytes_skipped, 500)
+
+ def testAddTensorsNumTensorsSkippedGreaterThanNumTenosrsErrors(self):
+ stats = upload_tracker.UploadStats()
+ with self.assertRaises(AssertionError):
+ stats.add_tensors(
+ num_tensors=10,
+ num_tensors_skipped=12,
+ tensor_bytes=1000,
+ tensor_bytes_skipped=0,
+ )
+
+ def testAddBlob(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_blob(blob_bytes=1000, is_skipped=False)
+ self.assertEqual(stats.blob_bytes, 1000)
+ self.assertEqual(stats.blob_bytes_skipped, 0)
+ stats.add_blob(blob_bytes=2000, is_skipped=True)
+ self.assertEqual(stats.blob_bytes, 3000)
+ self.assertEqual(stats.blob_bytes_skipped, 2000)
+
+ def testAddPlugin(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_plugin("scalars")
+ self.assertEqual(stats.plugin_names, set(["scalars"]))
+ stats.add_plugin("scalars")
+ self.assertEqual(stats.plugin_names, set(["scalars"]))
+ stats.add_plugin("histograms")
+ self.assertEqual(stats.plugin_names, set(["histograms", "scalars"]))
+
+ def testHasNewDataSinceLastSummarizeReturnsFalseInitially(self):
+ stats = upload_tracker.UploadStats()
+ self.assertEqual(stats.has_new_data_since_last_summarize(), False)
+
+ def testUploadedSummaryWithTensorsAndBlobs(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_scalars(1234)
+ stats.add_tensors(
+ num_tensors=50,
+ num_tensors_skipped=10,
+ tensor_bytes=2000,
+ tensor_bytes_skipped=1800,
+ )
+ stats.add_blob(blob_bytes=1000, is_skipped=False)
+ stats.add_blob(blob_bytes=2000, is_skipped=True)
+ self.assertEqual(stats.has_new_data_since_last_summarize(), True)
+ uploaded_summary, skipped_summary = stats.summarize()
+ self.assertEqual(
+ uploaded_summary,
+ "1234 scalars, 40 tensors (200 B), 1 binary objects (1000 B)",
+ )
+ self.assertEqual(
+ skipped_summary,
+ "10 tensors (1.8 kB), 1 binary objects (2.0 kB)",
+ )
+ self.assertEqual(stats.has_new_data_since_last_summarize(), False)
+
+ def testSummarizeeWithoutTensorsOrBlobs(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_scalars(1234)
+ self.assertEqual(stats.has_new_data_since_last_summarize(), True)
+ (uploaded_summary, skipped_summary) = stats.summarize()
+ self.assertEqual(
+ uploaded_summary,
+ "1234 scalars, 0 tensors, 0 binary objects",
+ )
+ self.assertIsNone(skipped_summary)
+ self.assertEqual(stats.has_new_data_since_last_summarize(), False)
+
+ def testHasNewDataSinceLastSummarizeReturnsTrueAfterNewScalars(self):
+ stats = upload_tracker.UploadStats()
+ self.assertEqual(stats.has_new_data_since_last_summarize(), False)
+ stats.add_scalars(1234)
+ self.assertEqual(stats.has_new_data_since_last_summarize(), True)
+ stats.summarize()
+ self.assertEqual(stats.has_new_data_since_last_summarize(), False)
+ stats.add_scalars(4321)
+ self.assertEqual(stats.has_new_data_since_last_summarize(), True)
+
+ def testHasNewDataSinceLastSummarizeReturnsTrueAfterNewTensors(self):
+ stats = upload_tracker.UploadStats()
+ self.assertEqual(stats.has_new_data_since_last_summarize(), False)
+ stats.add_scalars(1234)
+ self.assertEqual(stats.has_new_data_since_last_summarize(), True)
+ stats.summarize()
+ self.assertEqual(stats.has_new_data_since_last_summarize(), False)
+ stats.add_tensors(
+ num_tensors=10,
+ num_tensors_skipped=10,
+ tensor_bytes=1000,
+ tensor_bytes_skipped=1000,
+ )
+ self.assertEqual(stats.has_new_data_since_last_summarize(), True)
+
+ def testHasNewDataSinceLastSummarizeReturnsTrueAfterNewBlob(self):
+ stats = upload_tracker.UploadStats()
+ self.assertEqual(stats.has_new_data_since_last_summarize(), False)
+ stats.add_scalars(1234)
+ self.assertEqual(stats.has_new_data_since_last_summarize(), True)
+ stats.summarize()
+ self.assertEqual(stats.has_new_data_since_last_summarize(), False)
+ stats.add_blob(blob_bytes=2000, is_skipped=True)
+ self.assertEqual(stats.has_new_data_since_last_summarize(), True)
+
+ def testHasDataInitiallyReturnsFalse(self):
+ stats = upload_tracker.UploadStats()
+ self.assertEqual(stats.has_data(), False)
+
+ def testHasDataReturnsTrueWithScalars(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_scalars(1)
+ self.assertEqual(stats.has_data(), True)
+
+ def testHasDataReturnsTrueWithUnskippedTensors(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_tensors(
+ num_tensors=10,
+ num_tensors_skipped=0,
+ tensor_bytes=1000,
+ tensor_bytes_skipped=0,
+ )
+ self.assertEqual(stats.has_data(), True)
+
+ def testHasDataReturnsTrueWithSkippedTensors(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_tensors(
+ num_tensors=10,
+ num_tensors_skipped=10,
+ tensor_bytes=1000,
+ tensor_bytes_skipped=1000,
+ )
+ self.assertEqual(stats.has_data(), True)
+
+ def testHasDataReturnsTrueWithUnskippedBlob(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_blob(blob_bytes=1000, is_skipped=False)
+ self.assertEqual(stats.has_data(), True)
+
+ def testHasDataReturnsTrueWithSkippedBlob(self):
+ stats = upload_tracker.UploadStats()
+ stats.add_blob(blob_bytes=1000, is_skipped=True)
+ self.assertEqual(stats.has_data(), True)
+
+
+class UploadTrackerTest(absltest.TestCase):
+ """Test for the UploadTracker class."""
+
+ def setUp(self):
+ super().setUp()
+ self.cumulative_bar = mock.MagicMock()
+ self.skipped_bar = mock.MagicMock()
+ self.uploading_bar = mock.MagicMock()
+ self.mock_write = mock.MagicMock()
+ self.mock_stdout_write = mock.patch.object(sys.stdout, "write", self.mock_write)
+ self.mock_stdout_write.start()
+ self.mock_flush = mock.MagicMock()
+ self.mock_stdout_flush = mock.patch.object(sys.stdout, "flush", self.mock_flush)
+ self.mock_stdout_flush.start()
+
+ def tearDown(self):
+ self.mock_stdout_write.stop()
+ self.mock_stdout_flush.stop()
+ super().tearDown()
+
+ def testSendTracker(self):
+ tracker = upload_tracker.UploadTracker(verbosity=1)
+ with tracker.send_tracker():
+ self.assertEqual(self.mock_write.call_count, 2)
+ self.assertEqual(self.mock_flush.call_count, 2)
+ self.assertIn(
+ "Data upload starting...",
+ self.mock_write.call_args[0][0],
+ )
+ self.assertEqual(self.mock_write.call_count, 3)
+ self.assertEqual(self.mock_flush.call_count, 3)
+ self.assertIn(
+ "Listening for new data in logdir...",
+ self.mock_write.call_args[0][0],
+ )
+ self.assertEqual(tracker.has_data(), False)
+
+ def testSendTrackerWithVerbosity0(self):
+ tracker = upload_tracker.UploadTracker(verbosity=0)
+ with tracker.send_tracker():
+ self.assertEqual(self.mock_write.call_count, 0)
+ self.assertEqual(self.mock_flush.call_count, 0)
+ self.assertEqual(self.mock_write.call_count, 0)
+ self.assertEqual(self.mock_flush.call_count, 0)
+
+ def testScalarsTracker(self):
+ tracker = upload_tracker.UploadTracker(verbosity=1)
+ with tracker.scalars_tracker(123):
+ self.assertEqual(self.mock_write.call_count, 1)
+ self.assertEqual(self.mock_flush.call_count, 1)
+ self.assertIn(
+ "Uploading 123 scalars...",
+ self.mock_write.call_args[0][0],
+ )
+ self.assertEqual(self.mock_write.call_count, 1)
+ self.assertEqual(self.mock_flush.call_count, 1)
+ self.assertEqual(tracker.has_data(), True)
+
+ def testScalarsTrackerWithVerbosity0(self):
+ tracker = upload_tracker.UploadTracker(verbosity=0)
+ with tracker.scalars_tracker(123):
+ self.assertEqual(self.mock_write.call_count, 0)
+ self.assertEqual(self.mock_flush.call_count, 0)
+ self.assertEqual(self.mock_write.call_count, 0)
+ self.assertEqual(self.mock_flush.call_count, 0)
+
+ def testTensorsTrackerWithSkippedTensors(self):
+ tracker = upload_tracker.UploadTracker(verbosity=1)
+ with tracker.tensors_tracker(
+ num_tensors=200,
+ num_tensors_skipped=50,
+ tensor_bytes=6000,
+ tensor_bytes_skipped=4000,
+ ):
+ self.assertEqual(self.mock_write.call_count, 1)
+ self.assertEqual(self.mock_flush.call_count, 1)
+ self.assertIn(
+ "Uploading 150 tensors (2.0 kB) (Skipping 50 tensors, 3.9 kB)",
+ self.mock_write.call_args[0][0],
+ )
+ self.assertEqual(tracker.has_data(), True)
+
+ def testTensorsTrackerWithVerbosity0(self):
+ tracker = upload_tracker.UploadTracker(verbosity=0)
+ with tracker.tensors_tracker(
+ num_tensors=200,
+ num_tensors_skipped=50,
+ tensor_bytes=6000,
+ tensor_bytes_skipped=4000,
+ ):
+ self.assertEqual(self.mock_write.call_count, 0)
+ self.assertEqual(self.mock_flush.call_count, 0)
+ self.assertEqual(self.mock_write.call_count, 0)
+ self.assertEqual(self.mock_flush.call_count, 0)
+
+ def testTensorsTrackerWithoutSkippedTensors(self):
+ tracker = upload_tracker.UploadTracker(verbosity=1)
+ with tracker.tensors_tracker(
+ num_tensors=200,
+ num_tensors_skipped=0,
+ tensor_bytes=6000,
+ tensor_bytes_skipped=0,
+ ):
+ self.assertEqual(self.mock_write.call_count, 1)
+ self.assertEqual(self.mock_flush.call_count, 1)
+ self.assertIn(
+ "Uploading 200 tensors (5.9 kB)",
+ self.mock_write.call_args[0][0],
+ )
+ self.assertEqual(tracker.has_data(), True)
+
+ def testBlobTrackerUploaded(self):
+ tracker = upload_tracker.UploadTracker(verbosity=1)
+ with tracker.blob_tracker(blob_bytes=2048):
+ self.assertEqual(self.mock_write.call_count, 1)
+ self.assertEqual(self.mock_flush.call_count, 1)
+ self.assertIn(
+ "Uploading binary object (2.0 kB)",
+ self.mock_write.call_args[0][0],
+ )
+
+ def testBlobTrackerWithVerbosity0(self):
+ tracker = upload_tracker.UploadTracker(verbosity=0)
+ with tracker.blob_tracker(blob_bytes=2048):
+ self.assertEqual(self.mock_write.call_count, 0)
+ self.assertEqual(self.mock_flush.call_count, 0)
+ self.assertEqual(self.mock_write.call_count, 0)
+ self.assertEqual(self.mock_flush.call_count, 0)
+
+ def testBlobTrackerNotUploaded(self):
+ tracker = upload_tracker.UploadTracker(verbosity=1)
+ with tracker.send_tracker():
+ self.assertEqual(self.mock_write.call_count, 2)
+ self.assertEqual(self.mock_flush.call_count, 2)
+ self.assertIn(
+ "Started scanning",
+ self.mock_write.call_args_list[0][0][0],
+ )
+ with tracker.blob_tracker(blob_bytes=2048 * 1024 * 1024) as blob_tracker:
+ self.assertEqual(self.mock_write.call_count, 3)
+ self.assertEqual(self.mock_flush.call_count, 3)
+ self.assertIn(
+ "Uploading binary object (2048.0 MB)",
+ self.mock_write.call_args[0][0],
+ )
+ blob_tracker.mark_uploaded(is_uploaded=False)
+ self.assertEqual(self.mock_write.call_count, 6)
+ self.assertEqual(self.mock_flush.call_count, 5)
+ self.assertIn(
+ "Total uploaded: 0 scalars, 0 tensors, 0 binary objects\n",
+ self.mock_write.call_args_list[3][0][0],
+ )
+ self.assertIn(
+ "Total skipped: 1 binary objects (2048.0 MB)\n",
+ self.mock_write.call_args_list[4][0][0],
+ )
+ self.assertEqual(tracker.has_data(), True)
+
+ def testInvalidVerbosityRaisesError(self):
+ with self.assertRaises(ValueError):
+ upload_tracker.UploadTracker(verbosity="1")
+ with self.assertRaises(ValueError):
+ upload_tracker.UploadTracker(verbosity=-1)
+ with self.assertRaises(ValueError):
+ upload_tracker.UploadTracker(verbosity=0.5)
+ with self.assertRaises(ValueError):
+ upload_tracker.UploadTracker(verbosity=100)
+ with self.assertRaises(ValueError):
+ upload_tracker.UploadTracker(verbosity=None)
+
+
+if __name__ == "__main__":
+ absltest.main()
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_uploader.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_uploader.py
new file mode 100644
index 0000000000000000000000000000000000000000..c925384fcd51f8ea5a8182c05eddfceda4668d88
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_uploader.py
@@ -0,0 +1,2655 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Tests for uploader.py."""
+
+import datetime
+import functools
+import logging
+import os
+import re
+import tempfile
+import threading
+import time
+from unittest import mock
+from unittest.mock import patch
+
+from absl.testing import parameterized
+
+from google.api_core import datetime_helpers
+from google.cloud import storage
+from google.cloud.aiplatform.compat.services import tensorboard_service_client
+from google.cloud.aiplatform.compat.types import tensorboard_data
+from google.cloud.aiplatform.compat.types import (
+ tensorboard_experiment as tensorboard_experiment_type,
+)
+from google.cloud.aiplatform.compat.types import tensorboard_run as tensorboard_run_type
+from google.cloud.aiplatform.compat.types import tensorboard_service
+from google.cloud.aiplatform.compat.types import (
+ tensorboard_time_series as tensorboard_time_series_type,
+)
+from google.cloud.aiplatform.metadata import experiment_resources
+from google.cloud.aiplatform.metadata import metadata
+from google.cloud.aiplatform.tensorboard import logdir_loader
+from google.cloud.aiplatform.tensorboard import tensorboard_resource
+from google.cloud.aiplatform.tensorboard import upload_tracker
+from google.cloud.aiplatform.tensorboard import uploader as uploader_lib
+from google.cloud.aiplatform.tensorboard import uploader_constants
+from google.cloud.aiplatform.tensorboard import uploader_utils
+from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import profile_uploader
+from google.cloud.aiplatform_v1.services.tensorboard_service.transports import (
+ grpc as transports_grpc,
+)
+import grpc
+import grpc_testing
+import pytest
+import tensorflow as tf
+
+from google.protobuf import timestamp_pb2
+from google.protobuf import message
+from tensorboard.compat.proto import event_pb2
+from tensorboard.compat.proto import graph_pb2
+from tensorboard.compat.proto import meta_graph_pb2
+from tensorboard.compat.proto import summary_pb2
+from tensorboard.compat.proto import tensor_pb2
+from tensorboard.compat.proto import types_pb2
+from tensorboard.plugins.graph import metadata as graphs_metadata
+from tensorboard.plugins.scalar import metadata as scalars_metadata
+from tensorboard.summary import v1 as summary_v1
+
+data_compat = uploader_lib.event_file_loader.data_compat
+dataclass_compat = uploader_lib.event_file_loader.dataclass_compat
+scalar_v2_pb = summary_v1._scalar_summary.scalar_pb
+image_pb = summary_v1._image_summary.pb
+
+_SCALARS_HISTOGRAMS_AND_GRAPHS = frozenset(
+ (
+ scalars_metadata.PLUGIN_NAME,
+ graphs_metadata.PLUGIN_NAME,
+ )
+)
+
+_SCALARS_HISTOGRAMS_AND_PROFILE = frozenset(
+ (
+ scalars_metadata.PLUGIN_NAME,
+ "profile",
+ )
+)
+
+
+# Sentinel for `_create_*` helpers, for arguments for which we want to
+# supply a default other than the `None` used by the code under test.
+_USE_DEFAULT = object()
+
+_TEST_EXPERIMENT_NAME = "test-experiment"
+_TEST_PROJECT_NAME = "test_project"
+_TEST_LOCATION_NAME = "us-east1"
+_TEST_TENSORBOARD_RESOURCE_NAME = (
+ "projects/{}/locations/{}/tensorboards/test_tensorboard".format(
+ _TEST_PROJECT_NAME, _TEST_LOCATION_NAME
+ )
+)
+_TEST_LOG_DIR_NAME = "/logs/foo"
+_TEST_RUN_NAME = "test-run"
+_TEST_ONE_PLATFORM_EXPERIMENT_NAME = "{}/experiments/{}".format(
+ _TEST_TENSORBOARD_RESOURCE_NAME, _TEST_EXPERIMENT_NAME
+)
+_TEST_ONE_PLATFORM_RUN_NAME = "{}/runs/{}".format(
+ _TEST_ONE_PLATFORM_EXPERIMENT_NAME, _TEST_RUN_NAME
+)
+_TEST_TIME_SERIES_NAME = "test-time-series"
+_TEST_ONE_PLATFORM_TIME_SERIES_NAME = "{}/timeSeries/{}".format(
+ _TEST_ONE_PLATFORM_RUN_NAME, _TEST_TIME_SERIES_NAME
+)
+_TEST_BLOB_STORAGE_FOLDER = "test_folder"
+_DEFAULT_RUN_NAME = "default"
+
+
+def _create_example_graph_bytes(large_attr_size):
+ graph_def = graph_pb2.GraphDef()
+ graph_def.node.add(name="alice", op="Person")
+ graph_def.node.add(name="bob", op="Person")
+
+ graph_def.node[1].attr["small"].s = b"small_attr_value"
+ graph_def.node[1].attr["large"].s = b"l" * large_attr_size
+ graph_def.node.add(name="friendship", op="Friendship", input=["alice", "bob"])
+ return graph_def.SerializeToString()
+
+
+class AbortUploadError(Exception):
+ """Exception used in testing to abort the upload process."""
+
+
+def _create_mock_client():
+ # Create a stub instance (using a test channel) in order to derive a mock
+ # from it with autospec enabled. Mocking TensorBoardWriterServiceStub itself
+ # doesn't work with autospec because grpc constructs stubs via metaclassing.
+
+ def create_experiment_response(
+ tensorboard_experiment_id=None,
+ tensorboard_experiment=None, # pylint: disable=unused-argument
+ parent=None,
+ ): # pylint: disable=unused-argument
+ tensorboard_experiment_id = (
+ "{}/experiments/{}".format(parent, tensorboard_experiment_id)
+ if parent
+ else tensorboard_experiment_id
+ )
+ return tensorboard_experiment_type.TensorboardExperiment(
+ name=tensorboard_experiment_id
+ )
+
+ def create_run_response(
+ tensorboard_run=None, # pylint: disable=unused-argument
+ tensorboard_run_id=None,
+ parent=None,
+ ): # pylint: disable=unused-argument
+ tensorboard_run_id = (
+ "{}/runs/{}".format(parent, tensorboard_run_id)
+ if parent
+ else tensorboard_run_id
+ )
+ return tensorboard_run_type.TensorboardRun(name=tensorboard_run_id)
+
+ def create_tensorboard_time_series(
+ tensorboard_time_series=None, parent=None
+ ): # pylint: disable=unused-argument
+ name = (
+ "{}/timeSeries/{}".format(parent, tensorboard_time_series.display_name)
+ if parent
+ else tensorboard_time_series.display_name
+ )
+ return tensorboard_time_series_type.TensorboardTimeSeries(
+ name=name,
+ display_name=tensorboard_time_series.display_name,
+ )
+
+ def parse_tensorboard_path_response(path):
+ """Parses a tensorboard path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ test_channel = grpc_testing.channel(
+ service_descriptors=[], time=grpc_testing.strict_real_time()
+ )
+ mock_client = mock.Mock(
+ spec=tensorboard_service_client.TensorboardServiceClient(
+ transport=transports_grpc.TensorboardServiceGrpcTransport(
+ channel=test_channel
+ )
+ )
+ )
+ mock_client.create_tensorboard_experiment.side_effect = create_experiment_response
+ mock_client.create_tensorboard_run.side_effect = create_run_response
+ mock_client.create_tensorboard_time_series.side_effect = (
+ create_tensorboard_time_series
+ )
+ mock_client.parse_tensorboard_path.side_effect = parse_tensorboard_path_response
+ return mock_client
+
+
+def _create_mock_blob_storage():
+ mock_blob_storage = mock.Mock()
+ mock_blob_storage.mock_add_spec(storage.Bucket)
+
+ return mock_blob_storage
+
+
+def _create_uploader(
+ writer_client=_USE_DEFAULT,
+ logdir=None,
+ max_scalar_request_size=_USE_DEFAULT,
+ max_tensor_request_size=_USE_DEFAULT,
+ max_tensor_point_size=_USE_DEFAULT,
+ max_blob_request_size=_USE_DEFAULT,
+ max_blob_size=_USE_DEFAULT,
+ logdir_poll_rate_limiter=_USE_DEFAULT,
+ rpc_rate_limiter=_USE_DEFAULT,
+ experiment_name=_TEST_EXPERIMENT_NAME,
+ tensorboard_resource_name=_TEST_TENSORBOARD_RESOURCE_NAME,
+ blob_storage_bucket=None,
+ blob_storage_folder=_TEST_BLOB_STORAGE_FOLDER,
+ description=None,
+ verbosity=0, # Use 0 to minimize littering the test output.
+ one_shot=None,
+ allowed_plugins=_SCALARS_HISTOGRAMS_AND_GRAPHS,
+ run_name_prefix=None,
+):
+ if writer_client is _USE_DEFAULT:
+ writer_client = _create_mock_client()
+ if max_scalar_request_size is _USE_DEFAULT:
+ max_scalar_request_size = 128000
+ if max_tensor_request_size is _USE_DEFAULT:
+ max_tensor_request_size = 512000
+ if max_blob_request_size is _USE_DEFAULT:
+ max_blob_request_size = 128000
+ if max_blob_size is _USE_DEFAULT:
+ max_blob_size = 12345
+ if max_tensor_point_size is _USE_DEFAULT:
+ max_tensor_point_size = 16000
+ if logdir_poll_rate_limiter is _USE_DEFAULT:
+ logdir_poll_rate_limiter = uploader_utils.RateLimiter(0)
+ if rpc_rate_limiter is _USE_DEFAULT:
+ rpc_rate_limiter = uploader_utils.RateLimiter(0)
+
+ upload_limits = uploader_constants.UploadLimits(
+ max_scalar_request_size=max_scalar_request_size,
+ max_tensor_request_size=max_tensor_request_size,
+ max_tensor_point_size=max_tensor_point_size,
+ max_blob_request_size=max_blob_request_size,
+ max_blob_size=max_blob_size,
+ )
+
+ plugins = (
+ uploader_constants.ALLOWED_PLUGINS.union(allowed_plugins)
+ if allowed_plugins
+ else uploader_constants.ALLOWED_PLUGINS
+ )
+
+ return uploader_lib.TensorBoardUploader(
+ experiment_name=experiment_name,
+ tensorboard_resource_name=tensorboard_resource_name,
+ writer_client=writer_client,
+ logdir=logdir,
+ allowed_plugins=plugins,
+ upload_limits=upload_limits,
+ blob_storage_bucket=blob_storage_bucket,
+ blob_storage_folder=blob_storage_folder,
+ logdir_poll_rate_limiter=logdir_poll_rate_limiter,
+ rpc_rate_limiter=rpc_rate_limiter,
+ description=description,
+ verbosity=verbosity,
+ one_shot=one_shot,
+ run_name_prefix=run_name_prefix,
+ )
+
+
+def _create_dispatcher(
+ experiment_resource_name,
+ api=None,
+ allowed_plugins=_USE_DEFAULT,
+ logdir=None,
+ run_name=_TEST_RUN_NAME,
+):
+ if api is _USE_DEFAULT:
+ api = _create_mock_client()
+ if allowed_plugins is _USE_DEFAULT:
+ allowed_plugins = _SCALARS_HISTOGRAMS_AND_GRAPHS
+
+ upload_limits = uploader_constants.UploadLimits(
+ max_scalar_request_size=128000,
+ max_tensor_request_size=128000,
+ max_tensor_point_size=52000,
+ max_blob_request_size=128000,
+ )
+
+ rpc_rate_limiter = uploader_utils.RateLimiter(0)
+ tensor_rpc_rate_limiter = uploader_utils.RateLimiter(0)
+ blob_rpc_rate_limiter = uploader_utils.RateLimiter(0)
+
+ one_platform_resource_manager = uploader_utils.OnePlatformResourceManager(
+ experiment_resource_name, api
+ )
+ one_platform_resource_manager.get_run_resource_name = mock.Mock()
+ one_platform_resource_manager.get_run_resource_name.return_value = (
+ "{}/runs/{}".format(experiment_resource_name, run_name)
+ )
+
+ request_sender = uploader_lib._BatchedRequestSender(
+ experiment_resource_name=experiment_resource_name,
+ api=api,
+ allowed_plugins=allowed_plugins,
+ upload_limits=upload_limits,
+ rpc_rate_limiter=rpc_rate_limiter,
+ tensor_rpc_rate_limiter=tensor_rpc_rate_limiter,
+ blob_rpc_rate_limiter=blob_rpc_rate_limiter,
+ blob_storage_bucket=None,
+ blob_storage_folder=None,
+ one_platform_resource_manager=one_platform_resource_manager,
+ tracker=upload_tracker.UploadTracker(verbosity=0),
+ )
+
+ additional_senders = {}
+ if "profile" in allowed_plugins:
+ additional_senders["profile"] = profile_uploader.ProfileRequestSender(
+ experiment_resource_name=experiment_resource_name,
+ api=api,
+ upload_limits=upload_limits,
+ blob_rpc_rate_limiter=uploader_utils.RateLimiter(0),
+ blob_storage_bucket=_create_mock_blob_storage(),
+ source_bucket=_create_mock_blob_storage(),
+ blob_storage_folder=None,
+ tracker=upload_tracker.UploadTracker(verbosity=0),
+ logdir=logdir,
+ )
+
+ return uploader_lib._Dispatcher(
+ request_sender=request_sender,
+ additional_senders=additional_senders,
+ )
+
+
+def _create_scalar_request_sender(
+ experiment_resource_id, api=_USE_DEFAULT, max_request_size=_USE_DEFAULT
+):
+ if api is _USE_DEFAULT:
+ api = _create_mock_client()
+ if max_request_size is _USE_DEFAULT:
+ max_request_size = 128000
+ return uploader_lib._ScalarBatchedRequestSender(
+ experiment_resource_id=experiment_resource_id,
+ one_platform_resource_manager=uploader_utils.OnePlatformResourceManager(
+ experiment_resource_id, api
+ ),
+ api=api,
+ rpc_rate_limiter=uploader_utils.RateLimiter(0),
+ max_request_size=max_request_size,
+ tracker=upload_tracker.UploadTracker(verbosity=0),
+ )
+
+
+def _create_file_request_sender(
+ run_resource_id,
+ api=_USE_DEFAULT,
+ max_blob_request_size=_USE_DEFAULT,
+ max_blob_size=_USE_DEFAULT,
+ blob_storage_folder=None,
+ blob_storage_bucket=_USE_DEFAULT,
+ source_bucket=_USE_DEFAULT,
+):
+ if api is _USE_DEFAULT:
+ api = _create_mock_client()
+ if max_blob_request_size is _USE_DEFAULT:
+ max_blob_request_size = 128000
+ if blob_storage_bucket is _USE_DEFAULT:
+ blob_storage_bucket = _create_mock_blob_storage()
+ if source_bucket is _USE_DEFAULT:
+ source_bucket = _create_mock_blob_storage()
+ if max_blob_size is _USE_DEFAULT:
+ max_blob_size = 128000
+ return profile_uploader._FileRequestSender(
+ run_resource_id=run_resource_id,
+ api=api,
+ rpc_rate_limiter=uploader_utils.RateLimiter(0),
+ max_blob_request_size=max_blob_request_size,
+ max_blob_size=max_blob_size,
+ blob_storage_bucket=blob_storage_bucket,
+ blob_storage_folder=blob_storage_folder,
+ tracker=upload_tracker.UploadTracker(verbosity=0),
+ source_bucket=source_bucket,
+ )
+
+
+def _scalar_event(tag, value):
+ return event_pb2.Event(summary=scalar_v2_pb(tag, value))
+
+
+def _grpc_error(code, details):
+ # Monkey patch insertion for the methods a real grpc.RpcError would have.
+ error = grpc.RpcError("RPC error %r: %s" % (code, details))
+ error.code = lambda: code
+ error.details = lambda: details
+ return error
+
+
+def _timestamp_pb(nanos):
+ result = timestamp_pb2.Timestamp()
+ result.FromNanoseconds(nanos)
+ return result
+
+
+class FileWriter(tf.compat.v1.summary.FileWriter):
+ """FileWriter for test.
+
+ TensorFlow FileWriter uses TensorFlow's Protobuf Python binding
+ which is largely discouraged in TensorBoard. We do not want a
+ TB.Writer but require one for testing in integrational style
+ (writing out event files and use the real event readers).
+ """
+
+ def __init__(self, *args, **kwargs):
+ # Briefly enter graph mode context so this testing FileWriter can be
+ # created from an eager mode context without triggering a usage error.
+ with tf.compat.v1.Graph().as_default():
+ super(FileWriter, self).__init__(*args, **kwargs)
+
+ def add_test_summary(self, tag, simple_value=1.0, step=None):
+ """Convenience for writing a simple summary for a given tag."""
+ value = summary_pb2.Summary.Value(tag=tag, simple_value=simple_value)
+ summary = summary_pb2.Summary(value=[value])
+ self.add_summary(summary, global_step=step)
+
+ def add_test_tensor_summary(self, tag, tensor, step=None, value_metadata=None):
+ """Convenience for writing a simple summary for a given tag."""
+ value = summary_pb2.Summary.Value(
+ tag=tag, tensor=tensor, metadata=value_metadata
+ )
+ summary = summary_pb2.Summary(value=[value])
+ self.add_summary(summary, global_step=step)
+
+ def add_event(self, event):
+ if isinstance(event, event_pb2.Event):
+ tf_event = tf.compat.v1.Event.FromString(event.SerializeToString())
+ else:
+ tf_event = event
+ if not isinstance(event, bytes):
+ logging.error(
+ "Added TensorFlow event proto. "
+ "Please prefer TensorBoard copy of the proto"
+ )
+ super(FileWriter, self).add_event(tf_event)
+
+ def add_summary(self, summary, global_step=None):
+ if isinstance(summary, summary_pb2.Summary):
+ tf_summary = tf.compat.v1.Summary.FromString(summary.SerializeToString())
+ else:
+ tf_summary = summary
+ if not isinstance(summary, bytes):
+ logging.error(
+ "Added TensorFlow summary proto. "
+ "Please prefer TensorBoard copy of the proto"
+ )
+ super(FileWriter, self).add_summary(tf_summary, global_step)
+
+ def add_session_log(self, session_log, global_step=None):
+ if isinstance(session_log, event_pb2.SessionLog):
+ tf_session_log = tf.compat.v1.SessionLog.FromString(
+ session_log.SerializeToString()
+ )
+ else:
+ tf_session_log = session_log
+ if not isinstance(session_log, bytes):
+ logging.error(
+ "Added TensorFlow session_log proto. "
+ "Please prefer TensorBoard copy of the proto"
+ )
+ super(FileWriter, self).add_session_log(tf_session_log, global_step)
+
+ def add_graph(self, graph, global_step=None, graph_def=None):
+ if isinstance(graph_def, graph_pb2.GraphDef):
+ tf_graph_def = tf.compat.v1.GraphDef.FromString(
+ graph_def.SerializeToString()
+ )
+ else:
+ tf_graph_def = graph_def
+
+ super(FileWriter, self).add_graph(
+ graph, global_step=global_step, graph_def=tf_graph_def
+ )
+
+ def add_meta_graph(self, meta_graph_def, global_step=None):
+ if isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
+ tf_meta_graph_def = tf.compat.v1.MetaGraphDef.FromString(
+ meta_graph_def.SerializeToString()
+ )
+ else:
+ tf_meta_graph_def = meta_graph_def
+
+ super(FileWriter, self).add_meta_graph(
+ meta_graph_def=tf_meta_graph_def, global_step=global_step
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TensorboardUploaderTest(tf.test.TestCase, parameterized.TestCase):
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_create_experiment(
+ self, experiment_resources_mock, experiment_tracker_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = _TEST_LOG_DIR_NAME
+ uploader = _create_uploader(_create_mock_client(), logdir)
+ uploader.create_experiment()
+ self.assertEqual(
+ uploader._tensorboard_experiment_resource_name,
+ _TEST_ONE_PLATFORM_EXPERIMENT_NAME,
+ )
+
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_create_experiment_with_name(
+ self, experiment_resources_mock, experiment_tracker_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = _TEST_LOG_DIR_NAME
+ mock_client = _create_mock_client()
+ new_name = "This is the new name"
+ uploader = _create_uploader(mock_client, logdir, experiment_name=new_name)
+ uploader.create_experiment()
+
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_create_experiment_with_description(
+ self, experiment_resources_mock, experiment_tracker_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = _TEST_LOG_DIR_NAME
+ mock_client = _create_mock_client()
+ new_description = """
+ **description**"
+ may have "strange" unicode chars 🌴 \\/<>
+ """
+ uploader = _create_uploader(mock_client, logdir, description=new_description)
+ uploader.create_experiment()
+ self.assertEqual(uploader._experiment_name, _TEST_EXPERIMENT_NAME)
+
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_create_experiment_with_all_metadata(
+ self, experiment_resources_mock, experiment_tracker_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = _TEST_LOG_DIR_NAME
+ mock_client = _create_mock_client()
+ new_description = """
+ **description**"
+ may have "strange" unicode chars 🌴 \\/<>
+ """
+ new_name = "This is a cool name."
+ uploader = _create_uploader(
+ mock_client, logdir, experiment_name=new_name, description=new_description
+ )
+ uploader.create_experiment()
+ self.assertEqual(uploader._experiment_name, new_name)
+
+ def test_start_uploading_without_create_experiment_fails(self):
+ mock_client = _create_mock_client()
+ uploader = _create_uploader(mock_client, _TEST_LOG_DIR_NAME)
+ with self.assertRaisesRegex(RuntimeError, "call create_experiment()"):
+ uploader.start_uploading()
+
+ @parameterized.parameters(
+ {"nested_run_dir": ""},
+ {"nested_run_dir": "nested-dir/"},
+ {"nested_run_dir": "double/nested-dir/"},
+ )
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_start_uploading_scalars(
+ self,
+ experiment_resources_mock,
+ experiment_tracker_mock,
+ run_resource_mock,
+ nested_run_dir,
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ mock_client = _create_mock_client()
+ mock_rate_limiter = mock.create_autospec(uploader_utils.RateLimiter)
+ mock_tensor_rate_limiter = mock.create_autospec(uploader_utils.RateLimiter)
+ mock_blob_rate_limiter = mock.create_autospec(uploader_utils.RateLimiter)
+ mock_tracker = mock.MagicMock()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ with mock.patch.object(
+ upload_tracker, "UploadTracker", return_value=mock_tracker
+ ):
+ uploader = _create_uploader(
+ writer_client=mock_client,
+ logdir=_TEST_LOG_DIR_NAME,
+ # Send each Event below in a separate WriteScalarRequest
+ max_scalar_request_size=180,
+ rpc_rate_limiter=mock_rate_limiter,
+ verbosity=1, # In order to test the upload tracker.
+ )
+ uploader.create_experiment()
+
+ mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)
+ mock_logdir_loader.get_run_events.side_effect = [
+ {
+ f"{nested_run_dir}run 1": _apply_compat(
+ [_scalar_event("1.1", 5.0), _scalar_event("1.2", 5.0)]
+ ),
+ f"{nested_run_dir}run 2": _apply_compat(
+ [_scalar_event("2.1", 5.0), _scalar_event("2.2", 5.0)]
+ ),
+ },
+ {
+ f"{nested_run_dir}run 3": _apply_compat(
+ [_scalar_event("3.1", 5.0), _scalar_event("3.2", 5.0)]
+ ),
+ f"{nested_run_dir}run 4": _apply_compat(
+ [_scalar_event("4.1", 5.0), _scalar_event("4.2", 5.0)]
+ ),
+ f"{nested_run_dir}run 5": _apply_compat(
+ [_scalar_event("5.1", 5.0), _scalar_event("5.2", 5.0)]
+ ),
+ },
+ AbortUploadError,
+ ]
+
+ with mock.patch.object(
+ uploader, "_logdir_loader", mock_logdir_loader
+ ), self.assertRaises(AbortUploadError):
+ uploader.start_uploading()
+ self.assertEqual(5, mock_client.write_tensorboard_experiment_data.call_count)
+ self.assertEqual(5, mock_rate_limiter.tick.call_count)
+ self.assertEqual(0, mock_tensor_rate_limiter.tick.call_count)
+ self.assertEqual(0, mock_blob_rate_limiter.tick.call_count)
+
+ # Check upload tracker calls.
+ self.assertEqual(mock_tracker.send_tracker.call_count, 2)
+ self.assertEqual(mock_tracker.scalars_tracker.call_count, 5)
+ self.assertLen(mock_tracker.scalars_tracker.call_args[0], 1)
+ self.assertEqual(mock_tracker.tensors_tracker.call_count, 0)
+ self.assertEqual(mock_tracker.blob_tracker.call_count, 0)
+
+ @parameterized.parameters(
+ {"run_name": "test-run-1", "one_platform_run_name": "test-run-1"},
+ {"run_name": "test/run/2", "one_platform_run_name": "test-run-2"},
+ {"run_name": "test.run.3", "one_platform_run_name": "test-run-3"},
+ {"run_name": "test_run_4", "one_platform_run_name": "test-run-4"},
+ {"run_name": "test/.run_5", "one_platform_run_name": "test--run-5"},
+ )
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_start_uploading_scalars_run_names_updated_to_one_platform_names(
+ self,
+ experiment_resources_mock,
+ experiment_tracker_mock,
+ run_resource_mock,
+ run_name,
+ one_platform_run_name,
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ uploader = _create_uploader(
+ logdir=_TEST_LOG_DIR_NAME,
+ )
+ uploader.create_experiment()
+
+ mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)
+ mock_logdir_loader.get_run_events.side_effect = [
+ {
+ run_name: _apply_compat(
+ [_scalar_event("1.1", 5.0), _scalar_event("1.2", 5.0)]
+ ),
+ },
+ AbortUploadError,
+ ]
+
+ with mock.patch.object(
+ uploader, "_logdir_loader", mock_logdir_loader
+ ), self.assertRaises(AbortUploadError):
+ uploader.start_uploading()
+ experiment_runs = uploader._experiment_runs
+ self.assertIn(one_platform_run_name, experiment_runs)
+
+ @parameterized.parameters(
+ {
+ "existing_experiment": None,
+ "one_platform_run_name": None,
+ "nested_run_dir": "",
+ },
+ {
+ "existing_experiment": None,
+ "one_platform_run_name": ".",
+ "nested_run_dir": "nested-dir/",
+ },
+ {
+ "existing_experiment": _TEST_EXPERIMENT_NAME,
+ "one_platform_run_name": _TEST_ONE_PLATFORM_RUN_NAME,
+ "nested_run_dir": "double/nested-dir/",
+ },
+ )
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "_create_or_get_run_resource",
+ autospec=True,
+ )
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_start_uploading_scalars_one_shot(
+ self,
+ experiment_resources_mock,
+ experiment_run_resource_mock,
+ experiment_tracker_mock,
+ run_resource_mock,
+ existing_experiment,
+ one_platform_run_name,
+ nested_run_dir,
+ ):
+ """Check that one-shot uploading stops without AbortUploadError."""
+
+ def batch_create_runs(parent, requests):
+ # pylint: disable=unused-argument
+ tb_runs = []
+ for request in requests:
+ tb_run = tensorboard_run_type.TensorboardRun(request.tensorboard_run)
+ tb_run.name = "{}/runs/{}".format(
+ request.parent, request.tensorboard_run_id
+ )
+ tb_runs.append(tb_run)
+ return tensorboard_service.BatchCreateTensorboardRunsResponse(
+ tensorboard_runs=tb_runs
+ )
+
+ def batch_create_time_series(parent, requests):
+ # pylint: disable=unused-argument
+ tb_time_series = []
+ for request in requests:
+ ts = tensorboard_time_series_type.TensorboardTimeSeries(
+ request.tensorboard_time_series
+ )
+ ts.name = "{}/timeSeries/{}".format(
+ request.parent, request.tensorboard_time_series.display_name
+ )
+ tb_time_series.append(ts)
+ return tensorboard_service.BatchCreateTensorboardTimeSeriesResponse(
+ tensorboard_time_series=tb_time_series
+ )
+
+ tensorboard_run_mock = mock.create_autospec(tensorboard_resource.TensorboardRun)
+ experiment_resources_mock.get.return_value = existing_experiment
+ tensorboard_run_mock.resource_name = _TEST_TENSORBOARD_RESOURCE_NAME
+ tensorboard_run_mock.display_name = _TEST_RUN_NAME
+ experiment_run_resource_mock.return_value = tensorboard_run_mock
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ mock_client = _create_mock_client()
+ mock_client.batch_create_tensorboard_runs.side_effect = batch_create_runs
+ mock_client.batch_create_tensorboard_time_series.side_effect = (
+ batch_create_time_series
+ )
+
+ mock_rate_limiter = mock.create_autospec(uploader_utils.RateLimiter)
+ mock_tracker = mock.MagicMock()
+ run_resource_mock.return_value = one_platform_run_name
+ with mock.patch.object(
+ upload_tracker, "UploadTracker", return_value=mock_tracker
+ ):
+ uploader = _create_uploader(
+ writer_client=mock_client,
+ logdir=_TEST_LOG_DIR_NAME,
+ # Send each Event below in a separate WriteScalarRequest
+ max_scalar_request_size=200,
+ rpc_rate_limiter=mock_rate_limiter,
+ verbosity=1, # In order to test the upload tracker.
+ one_shot=True,
+ description="Test Description",
+ )
+ uploader.create_experiment()
+
+ mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)
+ mock_logdir_loader.get_run_events.side_effect = [
+ {
+ f"{nested_run_dir}run 1": _apply_compat(
+ [_scalar_event("tag_1.1", 5.0), _scalar_event("tag_1.2", 5.0)]
+ ),
+ f"{nested_run_dir}run 2": _apply_compat(
+ [_scalar_event("tag_2.1", 5.0), _scalar_event("tag_2.2", 5.0)]
+ ),
+ },
+ # Note the lack of AbortUploadError here.
+ ]
+ mock_logdir_loader_pre_create = mock.create_autospec(logdir_loader.LogdirLoader)
+ mock_logdir_loader_pre_create.get_run_events.side_effect = [
+ {
+ f"{nested_run_dir}run 1": _apply_compat(
+ [_scalar_event("tag_1.1", 5.0), _scalar_event("tag_1.2", 5.0)]
+ ),
+ f"{nested_run_dir}run 2": _apply_compat(
+ [_scalar_event("tag_2.1", 5.0), _scalar_event("tag_2.2", 5.0)]
+ ),
+ },
+ # Note the lack of AbortUploadError here.
+ ]
+
+ with mock.patch.object(uploader, "_logdir_loader", mock_logdir_loader):
+ with mock.patch.object(
+ uploader, "_logdir_loader_pre_create", mock_logdir_loader_pre_create
+ ):
+ with mock.patch.object(
+ uploader, "_end_experiment_runs", return_value=None
+ ):
+ uploader.start_uploading()
+ uploader._end_experiment_runs.assert_called_once()
+
+ self.assertEqual(existing_experiment is None, uploader._is_brand_new_experiment)
+ self.assertEqual(2, mock_client.write_tensorboard_experiment_data.call_count)
+ self.assertEqual(2, mock_rate_limiter.tick.call_count)
+
+ # Check upload tracker calls.
+ self.assertEqual(mock_tracker.send_tracker.call_count, 1)
+ self.assertEqual(mock_tracker.scalars_tracker.call_count, 2)
+ self.assertLen(mock_tracker.scalars_tracker.call_args[0], 1)
+ self.assertEqual(mock_tracker.tensors_tracker.call_count, 0)
+ self.assertEqual(mock_tracker.blob_tracker.call_count, 0)
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ @parameterized.parameters(
+ {"nested_run_dir": ""},
+ {"nested_run_dir": "nested-dir/"},
+ {"nested_run_dir": "double/nested-dir/"},
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_upload_nested_scalars_one_shot(
+ self,
+ experiment_resources_mock,
+ experiment_tracker_mock,
+ nested_run_dir,
+ ):
+ """Check that one-shot uploading stops without AbortUploadError."""
+
+ logdir = self.get_temp_dir()
+ uploader = _create_uploader(
+ logdir=logdir,
+ )
+ uploader.create_experiment()
+
+ run_1 = f"{nested_run_dir}run 1"
+ run_2 = f"{nested_run_dir}run 2"
+
+ mock_dispatcher = mock.create_autospec(uploader_lib._Dispatcher)
+ uploader._dispatcher = mock_dispatcher
+ mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)
+ mock_logdir_loader.get_run_events.side_effect = [
+ {
+ run_1: _apply_compat(
+ [_scalar_event("tag_1.1", 5.0), _scalar_event("tag_1.2", 5.0)]
+ ),
+ run_2: _apply_compat(
+ [_scalar_event("tag_2.1", 5.0), _scalar_event("tag_2.2", 5.0)]
+ ),
+ },
+ ]
+ with mock.patch.object(uploader, "_logdir_loader", mock_logdir_loader):
+ uploader._upload_once()
+
+ self.assertEqual(1, mock_logdir_loader.get_run_events.call_count)
+ self.assertEqual(1, mock_dispatcher.dispatch_requests.call_count)
+ run_to_events = mock_dispatcher.dispatch_requests.call_args[0][0]
+ self.assertIn(run_1, run_to_events)
+ self.assertIn(run_2, run_to_events)
+
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_upload_empty_logdir(
+ self, experiment_resources_mock, experiment_tracker_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = self.get_temp_dir()
+ mock_client = _create_mock_client()
+ uploader = _create_uploader(mock_client, logdir)
+ uploader.create_experiment()
+ uploader._upload_once()
+ mock_client.write_tensorboard_experiment_data.assert_not_called()
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ @parameterized.parameters(
+ {"run_name_prefix": None},
+ {"run_name_prefix": "run-prefix-"},
+ )
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_default_run_name(
+ self,
+ experiment_resources_mock,
+ experiment_tracker_mock,
+ run_resource_mock,
+ run_name_prefix,
+ ):
+ run_resource_mock.return_value = "."
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = self.get_temp_dir()
+ with FileWriter(logdir) as writer:
+ writer.add_test_summary("foo")
+
+ uploader = _create_uploader(
+ logdir=logdir,
+ run_name_prefix=run_name_prefix,
+ )
+ uploader.create_experiment()
+ mock_dispatcher = mock.create_autospec(uploader_lib._Dispatcher)
+ uploader._dispatcher = mock_dispatcher
+ mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)
+ mock.patch.object(uploader, "_logdir_loader", mock_logdir_loader)
+ expected_run_name = _DEFAULT_RUN_NAME
+ if run_name_prefix:
+ expected_run_name = run_name_prefix + _DEFAULT_RUN_NAME
+
+ uploader._upload_once()
+
+ run_to_events = mock_dispatcher.dispatch_requests.call_args[0][0]
+ self.assertIn(expected_run_name, run_to_events)
+
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_upload_polls_slowly_once_done(
+ self, experiment_resources_mock, experiment_tracker_mock
+ ):
+ class SuccessError(Exception):
+ pass
+
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ mock_rate_limiter = mock.create_autospec(uploader_utils.RateLimiter)
+ upload_call_count_box = [0]
+
+ def mock_upload_once():
+ upload_call_count_box[0] += 1
+ tick_count = mock_rate_limiter.tick.call_count
+ self.assertEqual(tick_count, upload_call_count_box[0])
+ if tick_count >= 3:
+ raise SuccessError()
+
+ uploader = _create_uploader(
+ logdir=self.get_temp_dir(),
+ logdir_poll_rate_limiter=mock_rate_limiter,
+ )
+ uploader._upload_once = mock_upload_once
+
+ uploader.create_experiment()
+ with self.assertRaises(SuccessError):
+ uploader.start_uploading()
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_upload_swallows_rpc_failure(
+ self, experiment_resources_mock, experiment_tracker_mock, run_resource_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = self.get_temp_dir()
+ with FileWriter(logdir) as writer:
+ writer.add_test_summary("foo")
+ mock_client = _create_mock_client()
+ uploader = _create_uploader(mock_client, logdir)
+ uploader.create_experiment()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ error = _grpc_error(grpc.StatusCode.INTERNAL, "Failure")
+ mock_client.write_tensorboard_experiment_data.side_effect = error
+ uploader._upload_once()
+ mock_client.write_tensorboard_experiment_data.assert_called_once()
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_upload_full_logdir(
+ self, experiment_resources_mock, experiment_tracker_mock, run_resource_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = self.get_temp_dir()
+ mock_client = _create_mock_client()
+ uploader = _create_uploader(mock_client, logdir)
+ uploader.create_experiment()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ # Convenience helpers for constructing expected requests.
+ data = tensorboard_data.TimeSeriesData
+ point = tensorboard_data.TimeSeriesDataPoint
+ scalar = tensorboard_data.Scalar
+
+ # First round
+ writer = FileWriter(logdir)
+ metadata = summary_pb2.SummaryMetadata(
+ plugin_data=summary_pb2.SummaryMetadata.PluginData(
+ plugin_name="scalars", content=b"12345"
+ ),
+ data_class=summary_pb2.DATA_CLASS_SCALAR,
+ )
+ writer.add_test_summary("foo", simple_value=5.0, step=1)
+ writer.add_test_summary("foo", simple_value=6.0, step=2)
+ writer.add_test_summary("foo", simple_value=7.0, step=3)
+ writer.add_test_tensor_summary(
+ "bar",
+ tensor=tensor_pb2.TensorProto(dtype=types_pb2.DT_FLOAT, float_val=[8.0]),
+ step=3,
+ value_metadata=metadata,
+ )
+ writer.flush()
+ writer_a = FileWriter(os.path.join(logdir, "a"))
+ writer_a.add_test_summary("qux", simple_value=9.0, step=2)
+ writer_a.flush()
+ uploader._upload_once()
+ self.assertEqual(3, mock_client.create_tensorboard_time_series.call_count)
+ call_args_list = mock_client.create_tensorboard_time_series.call_args_list
+ request = call_args_list[1][1]["tensorboard_time_series"]
+ self.assertEqual("scalars", request.plugin_name)
+ self.assertEqual(b"12345", request.plugin_data)
+
+ self.assertEqual(1, mock_client.write_tensorboard_experiment_data.call_count)
+ call_args_list = mock_client.write_tensorboard_experiment_data.call_args_list
+ request1, request2 = (
+ call_args_list[0][1]["write_run_data_requests"][0].time_series_data,
+ call_args_list[0][1]["write_run_data_requests"][1].time_series_data,
+ )
+ _clear_wall_times(request1)
+ _clear_wall_times(request2)
+
+ expected_request1 = [
+ data(
+ tensorboard_time_series_id="foo",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ point(step=1, scalar=scalar(value=5.0)),
+ point(step=2, scalar=scalar(value=6.0)),
+ point(step=3, scalar=scalar(value=7.0)),
+ ],
+ ),
+ data(
+ tensorboard_time_series_id="bar",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=3, scalar=scalar(value=8.0))],
+ ),
+ ]
+ expected_request2 = [
+ data(
+ tensorboard_time_series_id="qux",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=2, scalar=scalar(value=9.0))],
+ )
+ ]
+ self.assertProtoEquals(expected_request1[0], request1[0])
+ self.assertProtoEquals(expected_request1[1], request1[1])
+ self.assertProtoEquals(expected_request2[0], request2[0])
+
+ mock_client.write_tensorboard_experiment_data.reset_mock()
+
+ # Second round
+ writer.add_test_summary("foo", simple_value=10.0, step=5)
+ writer.add_test_summary("baz", simple_value=11.0, step=1)
+ writer.flush()
+ writer_b = FileWriter(os.path.join(logdir, "b"))
+ writer_b.add_test_summary("xyz", simple_value=12.0, step=1)
+ writer_b.flush()
+ uploader._upload_once()
+ self.assertEqual(1, mock_client.write_tensorboard_experiment_data.call_count)
+ call_args_list = mock_client.write_tensorboard_experiment_data.call_args_list
+ request3, request4 = (
+ call_args_list[0][1]["write_run_data_requests"][0].time_series_data,
+ call_args_list[0][1]["write_run_data_requests"][1].time_series_data,
+ )
+ _clear_wall_times(request3)
+ _clear_wall_times(request4)
+ expected_request3 = [
+ data(
+ tensorboard_time_series_id="foo",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=5, scalar=scalar(value=10.0))],
+ ),
+ data(
+ tensorboard_time_series_id="baz",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=1, scalar=scalar(value=11.0))],
+ ),
+ ]
+ expected_request4 = [
+ data(
+ tensorboard_time_series_id="xyz",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=1, scalar=scalar(value=12.0))],
+ )
+ ]
+ self.assertProtoEquals(expected_request3[0], request3[0])
+ self.assertProtoEquals(expected_request3[1], request3[1])
+ self.assertProtoEquals(expected_request4[0], request4[0])
+ mock_client.write_tensorboard_experiment_data.reset_mock()
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ # Empty third round
+ uploader._upload_once()
+ mock_client.write_tensorboard_experiment_data.assert_not_called()
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_verbosity_zero_creates_upload_tracker_with_verbosity_zero(
+ self, experiment_resources_mock, experiment_tracker_mock, run_resource_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ mock_client = _create_mock_client()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ mock_tracker = mock.MagicMock()
+ with mock.patch.object(
+ upload_tracker, "UploadTracker", return_value=mock_tracker
+ ) as mock_constructor:
+ uploader = _create_uploader(
+ mock_client,
+ _TEST_LOG_DIR_NAME,
+ verbosity=0, # Explicitly set verbosity to 0.
+ )
+ uploader.create_experiment()
+
+ mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)
+ mock_logdir_loader.get_run_events.side_effect = [
+ {
+ "run 1": _apply_compat(
+ [_scalar_event("1.1", 5.0), _scalar_event("1.2", 5.0)]
+ ),
+ },
+ AbortUploadError,
+ ]
+
+ with mock.patch.object(
+ uploader, "_logdir_loader", mock_logdir_loader
+ ), self.assertRaises(AbortUploadError):
+ uploader.start_uploading()
+
+ self.assertEqual(mock_constructor.call_count, 1)
+ self.assertEqual(mock_constructor.call_args[1], {"verbosity": 0})
+ self.assertEqual(mock_tracker.scalars_tracker.call_count, 1)
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_start_uploading_graphs(
+ self, experiment_resources_mock, experiment_tracker_mock, run_resource_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ mock_client = _create_mock_client()
+ mock_rate_limiter = mock.create_autospec(uploader_utils.RateLimiter)
+ mock_bucket = mock.create_autospec(storage.Bucket)
+ mock_blob = mock.create_autospec(storage.Blob)
+ mock_bucket.blob.return_value = mock_blob
+ mock_tracker = mock.MagicMock()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ def create_time_series(tensorboard_time_series, parent=None):
+ return tensorboard_time_series_type.TensorboardTimeSeries(
+ name=_TEST_ONE_PLATFORM_TIME_SERIES_NAME,
+ display_name=tensorboard_time_series.display_name,
+ )
+
+ mock_client.create_tensorboard_time_series.side_effect = create_time_series
+ with mock.patch.object(
+ upload_tracker, "UploadTracker", return_value=mock_tracker
+ ):
+ uploader = _create_uploader(
+ writer_client=mock_client,
+ logdir=_TEST_LOG_DIR_NAME,
+ max_blob_request_size=1000,
+ rpc_rate_limiter=mock_rate_limiter,
+ blob_storage_bucket=mock_bucket,
+ verbosity=1, # In order to test tracker.
+ )
+ uploader.create_experiment()
+
+ # Of course a real Event stream will never produce the same Event twice,
+ # but is this test context it's fine to reuse this one.
+ graph_event = event_pb2.Event(graph_def=_create_example_graph_bytes(950))
+ expected_graph_def = graph_pb2.GraphDef.FromString(graph_event.graph_def)
+ mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)
+ mock_logdir_loader.get_run_events.side_effect = [
+ {
+ "run 1": _apply_compat([graph_event, graph_event]),
+ "run 2": _apply_compat([graph_event, graph_event]),
+ },
+ {
+ "run 3": _apply_compat([graph_event, graph_event]),
+ "run 4": _apply_compat([graph_event, graph_event]),
+ "run 5": _apply_compat([graph_event, graph_event]),
+ },
+ AbortUploadError,
+ ]
+
+ with mock.patch.object(
+ uploader, "_logdir_loader", mock_logdir_loader
+ ), self.assertRaises(AbortUploadError):
+ uploader.start_uploading()
+
+ self.assertEqual(10, mock_bucket.blob.call_count)
+
+ blob_ids = set()
+ for call in mock_bucket.blob.call_args_list:
+ request = call[0][0]
+ m = re.match(
+ "test_folder/tensorboard-.*/test-experiment/.*/{}/(.*)".format(
+ _TEST_TIME_SERIES_NAME
+ ),
+ request,
+ )
+ self.assertIsNotNone(m)
+ blob_ids.add(m[1])
+
+ for call in mock_blob.upload_from_string.call_args_list:
+ request = call[0][0]
+ actual_graph_def = graph_pb2.GraphDef.FromString(request)
+ self.assertProtoEquals(expected_graph_def, actual_graph_def)
+
+ for call in mock_client.write_tensorboard_experiment_data.call_args_list:
+ kargs = call[1]
+ time_series_data = kargs["write_run_data_requests"][0].time_series_data
+ self.assertEqual(len(time_series_data), 1)
+ self.assertEqual(
+ time_series_data[0].tensorboard_time_series_id, _TEST_TIME_SERIES_NAME
+ )
+ self.assertEqual(len(time_series_data[0].values), 2)
+ blobs = time_series_data[0].values[0].blobs.values
+ self.assertEqual(len(blobs), 1)
+ self.assertIn(blobs[0].id, blob_ids)
+
+ # Check upload tracker calls.
+ self.assertEqual(mock_tracker.send_tracker.call_count, 2)
+ self.assertEqual(mock_tracker.scalars_tracker.call_count, 0)
+ self.assertEqual(mock_tracker.tensors_tracker.call_count, 0)
+ self.assertEqual(mock_tracker.blob_tracker.call_count, 12)
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_filter_graphs(
+ self, experiment_resources_mock, experiment_tracker_mock, run_resource_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ # Three graphs: one short, one long, one corrupt.
+ bytes_0 = _create_example_graph_bytes(123)
+ bytes_1 = _create_example_graph_bytes(9999)
+ # invalid (truncated) proto: length-delimited field 1 (0x0a) of
+ # length 0x7f specified, but only len("bogus") = 5 bytes given
+ #
+ bytes_2 = b"\x0a\x7fbogus"
+
+ logdir = self.get_temp_dir()
+ for (i, b) in enumerate([bytes_0, bytes_1, bytes_2]):
+ run_dir = os.path.join(logdir, "run_%04d" % i)
+ event = event_pb2.Event(step=0, wall_time=123 * i, graph_def=b)
+ with FileWriter(run_dir) as writer:
+ writer.add_event(event)
+
+ limiter = mock.create_autospec(uploader_utils.RateLimiter)
+ limiter.tick.side_effect = [None, AbortUploadError]
+ mock_bucket = mock.create_autospec(storage.Bucket)
+ mock_blob = mock.create_autospec(storage.Blob)
+ mock_bucket.blob.return_value = mock_blob
+ mock_client = _create_mock_client()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ def create_time_series(tensorboard_time_series, parent=None):
+ return tensorboard_time_series_type.TensorboardTimeSeries(
+ name=_TEST_ONE_PLATFORM_TIME_SERIES_NAME,
+ display_name=tensorboard_time_series.display_name,
+ )
+
+ mock_client.create_tensorboard_time_series.side_effect = create_time_series
+ uploader = _create_uploader(
+ mock_client,
+ logdir,
+ logdir_poll_rate_limiter=limiter,
+ blob_storage_bucket=mock_bucket,
+ )
+ uploader.create_experiment()
+
+ with self.assertRaises(AbortUploadError):
+ uploader.start_uploading()
+
+ actual_blobs = []
+ for call in mock_blob.upload_from_string.call_args_list:
+ requests = call[0][0]
+ actual_blobs.append(requests)
+
+ actual_graph_defs = []
+ for blob in actual_blobs:
+ try:
+ actual_graph_defs.append(graph_pb2.GraphDef.FromString(blob))
+ except message.DecodeError:
+ actual_graph_defs.append(None)
+
+ with self.subTest("graphs with small attr values should be unchanged"):
+ expected_graph_def_0 = graph_pb2.GraphDef.FromString(bytes_0)
+ self.assertEqual(actual_graph_defs[0], expected_graph_def_0)
+
+ with self.subTest("large attr values should be filtered out"):
+ expected_graph_def_1 = graph_pb2.GraphDef.FromString(bytes_1)
+ del expected_graph_def_1.node[1].attr["large"]
+ expected_graph_def_1.node[1].attr["_too_large_attrs"].list.s.append(
+ b"large"
+ )
+ self.assertEqual(actual_graph_defs[1], expected_graph_def_1)
+
+ with self.subTest("corrupt graphs should be skipped"):
+ self.assertLen(actual_blobs, 2)
+
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_profile_plugin_included_by_default(
+ self, experiment_resources_mock, experiment_tracker_mock, run_resource_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ run_name = "profile_test_run"
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ with tempfile.TemporaryDirectory() as logdir:
+ prof_path = os.path.join(
+ logdir, run_name, profile_uploader.ProfileRequestSender.PROFILE_PATH
+ )
+ os.makedirs(prof_path)
+
+ uploader = _create_uploader(
+ _create_mock_client(),
+ logdir,
+ one_shot=True,
+ run_name_prefix=run_name,
+ )
+
+ uploader.create_experiment()
+ uploader._upload_once()
+ senders = uploader._dispatcher._additional_senders
+ self.assertIn("profile", senders.keys())
+
+ profile_sender = senders["profile"]
+ self.assertIn(run_name, profile_sender._run_to_profile_loaders)
+ self.assertIn(run_name, profile_sender._run_to_file_request_sender)
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_nested_profile_files_are_uploaded(
+ self, experiment_resources_mock, experiment_tracker_mock, run_resource_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ run_name = "profile_test_run"
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ with tempfile.TemporaryDirectory() as logdir:
+ prof_path = os.path.join(
+ logdir, run_name, profile_uploader.ProfileRequestSender.PROFILE_PATH
+ )
+ os.makedirs(prof_path)
+
+ mock_client = _create_mock_client()
+ uploader = _create_uploader(
+ mock_client,
+ logdir,
+ one_shot=True,
+ )
+
+ uploader.create_experiment()
+ uploader._upload_once()
+ senders = uploader._dispatcher._additional_senders
+ self.assertIn("profile", senders.keys())
+
+ profile_sender = senders["profile"]
+ self.assertIn(run_name, profile_sender._run_to_profile_loaders)
+ self.assertIn(run_name, profile_sender._run_to_file_request_sender)
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_active_experiment_set_experiment_not_called(
+ self, experiment_resources_mock, experiment_tracker_mock
+ ):
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.experiment_name = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = self.get_temp_dir()
+ mock_client = _create_mock_client()
+
+ uploader = _create_uploader(mock_client, logdir)
+ uploader.create_experiment()
+ uploader._upload_once()
+
+ experiment_tracker_mock.set_experiment.assert_not_called()
+
+
+# TODO(b/276368161)
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class _TensorBoardTrackerTest(tf.test.TestCase):
+ @patch.object(
+ uploader_utils.OnePlatformResourceManager,
+ "get_run_resource_name",
+ autospec=True,
+ )
+ @patch.object(metadata, "_experiment_tracker", autospec=True)
+ @patch.object(experiment_resources, "Experiment", autospec=True)
+ def test_thread_continuously_uploads(
+ self, experiment_resources_mock, experiment_tracker_mock, run_resource_mock
+ ):
+ """Test Tensorboard Tracker by mimicking its implementation: Call start_upload through a thread and subsequently end the thread by calling _end_uploading()."""
+
+ experiment_resources_mock.get.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_experiment.return_value = _TEST_EXPERIMENT_NAME
+ experiment_tracker_mock.set_tensorboard.return_value = (
+ _TEST_TENSORBOARD_RESOURCE_NAME
+ )
+ logdir = self.get_temp_dir()
+ mock_client = _create_mock_client()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ builder = _create_dispatcher(
+ experiment_resource_name=_TEST_ONE_PLATFORM_EXPERIMENT_NAME,
+ api=mock_client,
+ allowed_plugins=_SCALARS_HISTOGRAMS_AND_PROFILE,
+ logdir=logdir,
+ )
+ mock_rate_limiter = mock.create_autospec(uploader_utils.RateLimiter)
+ mock_bucket = _create_mock_blob_storage()
+
+ uploader = _create_uploader(
+ mock_client,
+ logdir,
+ allowed_plugins=_SCALARS_HISTOGRAMS_AND_PROFILE,
+ rpc_rate_limiter=mock_rate_limiter,
+ blob_storage_bucket=mock_bucket,
+ )
+ uploader._dispatcher = builder
+ uploader.create_experiment()
+
+ # Convenience helpers for constructing expected requests.
+ data = tensorboard_data.TimeSeriesData
+ point = tensorboard_data.TimeSeriesDataPoint
+ scalar = tensorboard_data.Scalar
+
+ # Directory with scalar data
+ writer = FileWriter(os.path.join(logdir, "a"))
+ metadata = summary_pb2.SummaryMetadata(
+ plugin_data=summary_pb2.SummaryMetadata.PluginData(
+ plugin_name="scalars", content=b"12345"
+ ),
+ data_class=summary_pb2.DATA_CLASS_SCALAR,
+ )
+ writer.add_test_summary("foo", simple_value=5.0, step=1)
+ writer.add_test_summary("foo", simple_value=6.0, step=2)
+ writer.add_test_summary("foo", simple_value=7.0, step=3)
+ writer.add_test_tensor_summary(
+ "bar",
+ tensor=tensor_pb2.TensorProto(dtype=types_pb2.DT_FLOAT, float_val=[8.0]),
+ step=3,
+ value_metadata=metadata,
+ )
+ writer.flush()
+ writer_a = FileWriter(os.path.join(logdir, "b"))
+ writer_a.add_test_summary("qux", simple_value=9.0, step=2)
+ writer_a.flush()
+
+ # Directory with profile data
+ prof_run_name = "2024_04_04_04_24_24"
+ prof_path = os.path.join(
+ logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH
+ )
+ os.makedirs(prof_path)
+ run_path = os.path.join(prof_path, prof_run_name)
+ os.makedirs(run_path)
+ tempfile.NamedTemporaryFile(
+ prefix="c", suffix=".xplane.pb", dir=run_path, delete=False
+ )
+ self.assertNotEmpty(os.listdir(run_path))
+
+ uploader_thread = threading.Thread(target=uploader.start_uploading)
+ uploader_thread.start()
+ time.sleep(5)
+
+ # Check create_time_series calls
+ self.assertEqual(4, mock_client.create_tensorboard_time_series.call_count)
+ call_args_list = mock_client.create_tensorboard_time_series.call_args_list
+ request1, request2, request3, request4 = (
+ call_args_list[0][1]["tensorboard_time_series"],
+ call_args_list[1][1]["tensorboard_time_series"],
+ call_args_list[2][1]["tensorboard_time_series"],
+ call_args_list[3][1]["tensorboard_time_series"],
+ )
+ self.assertEqual("scalars", request1.plugin_name)
+ self.assertEqual("scalars", request2.plugin_name)
+ self.assertEqual(b"12345", request2.plugin_data)
+ self.assertEqual("scalars", request3.plugin_name)
+ self.assertEqual("profile", request4.plugin_name)
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+ # Check write_tensorboard_experiment_data calls
+ self.assertEqual(1, mock_client.write_tensorboard_experiment_data.call_count)
+ call_args_list = mock_client.write_tensorboard_experiment_data.call_args_list
+ request1, request2 = (
+ call_args_list[0][1]["write_run_data_requests"][0].time_series_data,
+ call_args_list[0][1]["write_run_data_requests"][1].time_series_data,
+ )
+ _clear_wall_times(request1)
+ _clear_wall_times(request2)
+
+ expected_request1 = [
+ data(
+ tensorboard_time_series_id="foo",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ point(step=1, scalar=scalar(value=5.0)),
+ point(step=2, scalar=scalar(value=6.0)),
+ point(step=3, scalar=scalar(value=7.0)),
+ ],
+ ),
+ data(
+ tensorboard_time_series_id="bar",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=3, scalar=scalar(value=8.0))],
+ ),
+ ]
+ expected_request2 = [
+ data(
+ tensorboard_time_series_id="qux",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=2, scalar=scalar(value=9.0))],
+ )
+ ]
+ self.assertProtoEquals(expected_request1[0], request1[0])
+ self.assertProtoEquals(expected_request1[1], request1[1])
+ self.assertProtoEquals(expected_request2[0], request2[0])
+
+ with mock.patch.object(uploader, "_end_experiment_runs", return_value=None):
+ uploader._end_uploading()
+ uploader._end_experiment_runs.assert_called_once()
+ time.sleep(1)
+ self.assertFalse(uploader_thread.is_alive())
+ mock_client.write_tensorboard_experiment_data.reset_mock()
+
+ # Empty directory
+ uploader._upload_once()
+ mock_client.write_tensorboard_experiment_data.assert_not_called()
+ with mock.patch.object(uploader, "_end_experiment_runs", return_value=None):
+ uploader._end_uploading()
+ uploader._end_experiment_runs.assert_called_once()
+ time.sleep(1)
+ self.assertFalse(uploader_thread.is_alive())
+ experiment_tracker_mock.set_experiment.assert_called_once()
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class BatchedRequestSenderTest(tf.test.TestCase):
+ def _populate_run_from_events(
+ self, n_scalar_events, events, allowed_plugins=_USE_DEFAULT
+ ):
+ mock_client = _create_mock_client()
+ builder = _create_dispatcher(
+ experiment_resource_name="123",
+ api=mock_client,
+ allowed_plugins=allowed_plugins,
+ )
+ builder.dispatch_requests({"": _apply_compat(events)})
+ scalar_requests = mock_client.write_tensorboard_experiment_data.call_args_list
+ if scalar_requests:
+ self.assertLen(scalar_requests, 1)
+ self.assertLen(
+ scalar_requests[0][1]["write_run_data_requests"][0].time_series_data,
+ n_scalar_events,
+ )
+ return scalar_requests
+
+ def test_empty_events(self):
+ call_args_list = self._populate_run_from_events(0, [])
+ self.assertProtoEquals(call_args_list, [])
+
+ def test_scalar_events(self):
+ events = [
+ event_pb2.Event(summary=scalar_v2_pb("scalar1", 5.0)),
+ event_pb2.Event(summary=scalar_v2_pb("scalar2", 5.0)),
+ ]
+ call_args_lists = self._populate_run_from_events(2, events)
+ scalar_tag_counts = _extract_tag_counts(call_args_lists)
+ self.assertEqual(scalar_tag_counts, {"scalar1": 1, "scalar2": 1})
+
+ def test_skips_non_scalar_events(self):
+ events = [
+ event_pb2.Event(summary=scalar_v2_pb("scalar1", 5.0)),
+ event_pb2.Event(file_version="brain.Event:2"),
+ ]
+ call_args_list = self._populate_run_from_events(1, events)
+ scalar_tag_counts = _extract_tag_counts(call_args_list)
+ self.assertEqual(scalar_tag_counts, {"scalar1": 1})
+
+ def test_skips_non_scalar_events_in_scalar_time_series(self):
+ events = [
+ event_pb2.Event(file_version="brain.Event:2"),
+ event_pb2.Event(summary=scalar_v2_pb("scalar1", 5.0)),
+ event_pb2.Event(summary=scalar_v2_pb("scalar2", 5.0)),
+ ]
+ call_args_list = self._populate_run_from_events(2, events)
+ scalar_tag_counts = _extract_tag_counts(call_args_list)
+ self.assertEqual(scalar_tag_counts, {"scalar1": 1, "scalar2": 1})
+
+ def test_skips_events_from_disallowed_plugins(self):
+ event = event_pb2.Event(
+ step=1, wall_time=123.456, summary=scalar_v2_pb("foo", 5.0)
+ )
+ call_args_lists = self._populate_run_from_events(
+ 0,
+ [event],
+ allowed_plugins=frozenset("not-scalars"),
+ )
+ self.assertEqual(call_args_lists, [])
+
+ def test_remembers_first_metadata_in_time_series(self):
+ scalar_1 = event_pb2.Event(summary=scalar_v2_pb("loss", 4.0))
+ scalar_2 = event_pb2.Event(summary=scalar_v2_pb("loss", 3.0))
+ scalar_2.summary.value[0].ClearField("metadata")
+ events = [
+ event_pb2.Event(file_version="brain.Event:2"),
+ scalar_1,
+ scalar_2,
+ ]
+ call_args_list = self._populate_run_from_events(1, events)
+ scalar_tag_counts = _extract_tag_counts(call_args_list)
+ self.assertEqual(scalar_tag_counts, {"loss": 2})
+
+ def test_expands_multiple_values_in_event(self):
+ event = event_pb2.Event(step=1, wall_time=123.456)
+ event.summary.value.add(tag="foo", simple_value=1.0)
+ event.summary.value.add(tag="foo", simple_value=2.0)
+ event.summary.value.add(tag="foo", simple_value=3.0)
+ call_args_list = self._populate_run_from_events(1, [event])
+
+ time_series_data = tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id="foo",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ tensorboard_data.TimeSeriesDataPoint(
+ step=1,
+ wall_time=_timestamp_pb(123456000000),
+ scalar=tensorboard_data.Scalar(value=1.0),
+ ),
+ tensorboard_data.TimeSeriesDataPoint(
+ step=1,
+ wall_time=_timestamp_pb(123456000000),
+ scalar=tensorboard_data.Scalar(value=2.0),
+ ),
+ tensorboard_data.TimeSeriesDataPoint(
+ step=1,
+ wall_time=_timestamp_pb(123456000000),
+ scalar=tensorboard_data.Scalar(value=3.0),
+ ),
+ ],
+ )
+
+ self.assertProtoEquals(
+ time_series_data,
+ call_args_list[0][1]["write_run_data_requests"][0].time_series_data[0],
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class ProfileRequestSenderTest(tf.test.TestCase):
+ def _create_builder(self, mock_client, logdir):
+ return _create_dispatcher(
+ experiment_resource_name=_TEST_ONE_PLATFORM_EXPERIMENT_NAME,
+ api=mock_client,
+ logdir=logdir,
+ allowed_plugins=frozenset({"profile"}),
+ )
+
+ def _populate_run_from_events(
+ self,
+ events,
+ logdir,
+ mock_client=None,
+ builder=None,
+ ):
+ if not mock_client:
+ mock_client = _create_mock_client()
+
+ if not builder:
+ builder = self._create_builder(mock_client, logdir)
+
+ builder.dispatch_requests({"": _apply_compat(events)})
+ profile_requests = mock_client.write_tensorboard_run_data.call_args_list
+
+ return profile_requests
+
+ def test_profile_event_missing_prof_run_dirs(self):
+ events = [
+ event_pb2.Event(file_version="brain.Event:2"),
+ ]
+ with tempfile.TemporaryDirectory() as logdir:
+ call_args_list = self._populate_run_from_events(events, logdir)
+
+ self.assertProtoEquals(call_args_list, [])
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_profile_event_bad_prof_path(self, run_resource_mock):
+ events = [
+ event_pb2.Event(file_version="brain.Event:2"),
+ ]
+ prof_run_name = "bad_run_name"
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ with tempfile.TemporaryDirectory() as logdir:
+ prof_path = os.path.join(
+ logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH
+ )
+ os.makedirs(prof_path)
+
+ run_path = os.path.join(prof_path, prof_run_name)
+ os.makedirs(run_path)
+
+ call_args_list = self._populate_run_from_events(events, logdir)
+
+ self.assertProtoEquals(call_args_list, [])
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_profile_event_single_prof_run(self, run_resource_mock):
+ events = [
+ event_pb2.Event(file_version="brain.Event:2"),
+ ]
+ prof_run_name = "2021_01_01_01_10_10"
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ with tempfile.TemporaryDirectory() as logdir:
+ prof_path = os.path.join(
+ logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH
+ )
+ os.makedirs(prof_path)
+
+ run_path = os.path.join(prof_path, prof_run_name)
+ os.makedirs(run_path)
+
+ with tempfile.NamedTemporaryFile(suffix=".xplane.pb", dir=run_path):
+ call_args_list = self._populate_run_from_events(events, logdir)
+
+ profile_tag_counts = _extract_tag_counts_time_series(call_args_list)
+ self.assertEqual(profile_tag_counts, {prof_run_name: 1})
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_profile_event_single_prof_run_new_files(self, run_resource_mock):
+ # Check that files are not uploaded twice for the same profiling run
+ events = [
+ event_pb2.Event(file_version="brain.Event:2"),
+ ]
+ prof_run_name = "2021_01_01_01_10_10"
+ mock_client = _create_mock_client()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ with tempfile.TemporaryDirectory() as logdir:
+ builder = self._create_builder(mock_client=mock_client, logdir=logdir)
+ prof_path = os.path.join(
+ logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH
+ )
+ os.makedirs(prof_path)
+
+ run_path = os.path.join(prof_path, prof_run_name)
+ os.makedirs(run_path)
+
+ with tempfile.NamedTemporaryFile(
+ prefix="a", suffix=".xplane.pb", dir=run_path
+ ):
+ call_args_list = self._populate_run_from_events(
+ events, logdir, builder=builder, mock_client=mock_client
+ )
+ with tempfile.NamedTemporaryFile(
+ prefix="b", suffix=".xplane.pb", dir=run_path
+ ):
+ call_args_list = self._populate_run_from_events(
+ events, logdir, builder=builder, mock_client=mock_client
+ )
+
+ profile_tag_counts = _extract_tag_counts_time_series(call_args_list)
+ self.assertEqual(profile_tag_counts, {prof_run_name: 1})
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_profile_event_multi_prof_run(self, run_resource_mock):
+ events = [
+ event_pb2.Event(file_version="brain.Event:2"),
+ ]
+ prof_run_names = [
+ "2021_01_01_01_10_10",
+ "2021_02_02_02_20_20",
+ ]
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ with tempfile.TemporaryDirectory() as logdir:
+ prof_path = os.path.join(
+ logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH
+ )
+ os.makedirs(prof_path)
+
+ run_paths = [
+ os.path.join(prof_path, prof_run_names[0]),
+ os.path.join(prof_path, prof_run_names[1]),
+ ]
+ [os.makedirs(run_path) for run_path in run_paths]
+
+ named_temp = functools.partial(
+ tempfile.NamedTemporaryFile, suffix=".xplane.pb"
+ )
+
+ with named_temp(dir=run_paths[0]), named_temp(dir=run_paths[1]):
+ call_args_list = self._populate_run_from_events(events, logdir)
+
+ self.assertLen(call_args_list, 2)
+ profile_tag_counts = _extract_tag_counts_time_series(call_args_list)
+ self.assertEqual(profile_tag_counts, dict.fromkeys(prof_run_names, 1))
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_profile_event_add_consecutive_prof_runs(self, run_resource_mock):
+ # Multiple profiling events happen one after another, should only update
+ # new profiling runs
+ events = [
+ event_pb2.Event(file_version="brain.Event:2"),
+ ]
+
+ prof_run_name = "2021_01_01_01_10_10"
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ mock_client = _create_mock_client()
+
+ with tempfile.TemporaryDirectory() as logdir:
+ builder = self._create_builder(mock_client=mock_client, logdir=logdir)
+
+ prof_path = os.path.join(
+ logdir, profile_uploader.ProfileRequestSender.PROFILE_PATH
+ )
+ os.makedirs(prof_path)
+
+ run_path = os.path.join(prof_path, prof_run_name)
+ os.makedirs(run_path)
+
+ named_temp = functools.partial(
+ tempfile.NamedTemporaryFile, suffix=".xplane.pb"
+ )
+
+ with named_temp(dir=run_path):
+ call_args_list = self._populate_run_from_events(
+ events,
+ logdir,
+ mock_client=mock_client,
+ builder=builder,
+ )
+
+ self.assertLen(call_args_list, 1)
+ self.assertEqual(
+ call_args_list[0][1]["time_series_data"][0].tensorboard_time_series_id,
+ prof_run_name,
+ )
+
+ prof_run_name_2 = "2021_02_02_02_20_20"
+
+ run_path = os.path.join(prof_path, prof_run_name_2)
+ os.makedirs(run_path)
+ mock_client.write_tensorboard_run_data.reset_mock()
+
+ with named_temp(dir=run_path):
+ call_args_list = self._populate_run_from_events(
+ events,
+ logdir,
+ mock_client=mock_client,
+ builder=builder,
+ )
+
+ self.assertLen(call_args_list, 1)
+ self.assertEqual(
+ call_args_list[0][1]["time_series_data"][0].tensorboard_time_series_id,
+ prof_run_name_2,
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class ScalarBatchedRequestSenderTest(tf.test.TestCase):
+ def _add_events(self, sender, events):
+ for event in events:
+ for value in event.summary.value:
+ sender.add_event(_TEST_RUN_NAME, event, value, value.metadata)
+
+ def _add_events_and_flush(self, events, expected_n_time_series):
+ mock_client = _create_mock_client()
+ sender = _create_scalar_request_sender(
+ experiment_resource_id=_TEST_EXPERIMENT_NAME,
+ api=mock_client,
+ )
+ self._add_events(sender, events)
+ sender.flush()
+
+ requests = mock_client.write_tensorboard_experiment_data.call_args_list
+ self.assertLen(requests, 1)
+ call_args = requests[0]
+ self.assertLen(
+ call_args[1]["write_run_data_requests"][0].time_series_data,
+ expected_n_time_series,
+ )
+ return call_args
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_aggregation_by_tag(self, run_resource_mock):
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+
+ def make_event(step, wall_time, tag, value):
+ return event_pb2.Event(
+ step=step,
+ wall_time=wall_time,
+ summary=scalar_v2_pb(tag, value),
+ )
+
+ events = [
+ make_event(1, 1.0, "one", 11.0),
+ make_event(1, 2.0, "two", 22.0),
+ make_event(2, 3.0, "one", 33.0),
+ make_event(2, 4.0, "two", 44.0),
+ make_event(1, 5.0, "one", 55.0), # Should preserve duplicate step=1.
+ make_event(1, 6.0, "three", 66.0),
+ ]
+ call_args = self._add_events_and_flush(events, 3)
+ ts_data = call_args[1]["write_run_data_requests"][0].time_series_data
+ tag_data = {
+ ts.tensorboard_time_series_id: [
+ (
+ value.step,
+ value.wall_time.timestamp_pb().ToSeconds(),
+ value.scalar.value,
+ )
+ for value in ts.values
+ ]
+ for ts in ts_data
+ }
+ self.assertEqual(
+ tag_data,
+ {
+ "one": [(1, 1.0, 11.0), (2, 3.0, 33.0), (1, 5.0, 55.0)],
+ "two": [(1, 2.0, 22.0), (2, 4.0, 44.0)],
+ "three": [(1, 6.0, 66.0)],
+ },
+ )
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_v1_summary(self, run_resource_mock):
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ event = event_pb2.Event(step=1, wall_time=123.456)
+ event.summary.value.add(tag="foo", simple_value=5.0)
+ call_args = self._add_events_and_flush(_apply_compat([event]), 1)
+
+ self.assertEqual(_TEST_EXPERIMENT_NAME, call_args[1]["tensorboard_experiment"])
+ self.assertEqual(
+ [
+ tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id="foo",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ tensorboard_data.TimeSeriesDataPoint(
+ step=1,
+ wall_time=_timestamp_pb(123456000000),
+ scalar=tensorboard_data.Scalar(value=5.0),
+ )
+ ],
+ )
+ ],
+ call_args[1]["write_run_data_requests"][0].time_series_data,
+ )
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_v1_summary_tb_summary(self, run_resource_mock):
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ tf_summary = summary_v1.scalar_pb("foo", 5.0)
+ tb_summary = summary_pb2.Summary.FromString(tf_summary.SerializeToString())
+ event = event_pb2.Event(step=1, wall_time=123.456, summary=tb_summary)
+ call_args = self._add_events_and_flush(_apply_compat([event]), 1)
+
+ self.assertEqual(_TEST_EXPERIMENT_NAME, call_args[1]["tensorboard_experiment"])
+ self.assertEqual(
+ [
+ tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id="scalar_summary",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ tensorboard_data.TimeSeriesDataPoint(
+ step=1,
+ wall_time=_timestamp_pb(123456000000),
+ scalar=tensorboard_data.Scalar(value=5.0),
+ )
+ ],
+ )
+ ],
+ call_args[1]["write_run_data_requests"][0].time_series_data,
+ )
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_v2_summary(self, run_resource_mock):
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ event = event_pb2.Event(
+ step=1, wall_time=123.456, summary=scalar_v2_pb("foo", 5.0)
+ )
+ call_args = self._add_events_and_flush(_apply_compat([event]), 1)
+
+ self.assertEqual(_TEST_EXPERIMENT_NAME, call_args[1]["tensorboard_experiment"])
+ self.assertEqual(
+ [
+ tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id="foo",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ tensorboard_data.TimeSeriesDataPoint(
+ step=1,
+ wall_time=_timestamp_pb(123456000000),
+ scalar=tensorboard_data.Scalar(value=5.0),
+ )
+ ],
+ )
+ ],
+ call_args[1]["write_run_data_requests"][0].time_series_data,
+ )
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_propagates_experiment_deletion(self, run_resource_mock):
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ event = event_pb2.Event(step=1)
+ event.summary.value.add(tag="foo", simple_value=1.0)
+
+ mock_client = _create_mock_client()
+ sender = _create_scalar_request_sender("123", mock_client)
+ self._add_events(sender, _apply_compat([event]))
+
+ error = _grpc_error(grpc.StatusCode.NOT_FOUND, "nope")
+ mock_client.write_tensorboard_experiment_data.side_effect = error
+ with self.assertRaises(uploader_lib.ExperimentNotFoundError):
+ sender.flush()
+
+ def test_no_budget_for_base_request(self):
+ mock_client = _create_mock_client()
+ long_experiment_id = "A" * 12
+ with self.assertRaises(uploader_lib._OutOfSpaceError) as cm:
+ _create_scalar_request_sender(
+ experiment_resource_id=long_experiment_id,
+ api=mock_client,
+ max_request_size=12,
+ )
+ self.assertEqual(str(cm.exception), "Byte budget too small for base request")
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_no_room_for_single_point(self, run_resource_mock):
+ mock_client = _create_mock_client()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ event = event_pb2.Event(step=1, wall_time=123.456)
+ event.summary.value.add(tag="foo", simple_value=1.0)
+ sender = _create_scalar_request_sender("123", mock_client, max_request_size=12)
+ with self.assertRaises(RuntimeError) as cm:
+ self._add_events(sender, [event])
+ self.assertEqual(str(cm.exception), "add_event failed despite flush")
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_break_at_run_boundary(self, run_resource_mock):
+ mock_client = _create_mock_client()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ # Choose run name sizes such that one run fits in a 1024 byte request,
+ # but not two.
+ long_run_1 = "A" * 768
+ long_run_2 = "B" * 768
+ event_1 = event_pb2.Event(step=1)
+ event_1.summary.value.add(tag="foo", simple_value=1.0)
+ event_2 = event_pb2.Event(step=2)
+ event_2.summary.value.add(tag="bar", simple_value=-2.0)
+
+ sender_1 = _create_scalar_request_sender(
+ long_run_1,
+ mock_client,
+ # Set a limit to request size
+ max_request_size=1024,
+ )
+
+ sender_2 = _create_scalar_request_sender(
+ long_run_2,
+ mock_client,
+ # Set a limit to request size
+ max_request_size=1024,
+ )
+ self._add_events(sender_1, _apply_compat([event_1]))
+ self._add_events(sender_2, _apply_compat([event_2]))
+ sender_1.flush()
+ sender_2.flush()
+ call_args_list = mock_client.write_tensorboard_experiment_data.call_args_list
+
+ for call_args in call_args_list:
+ _clear_wall_times(
+ call_args[1]["write_run_data_requests"][0].time_series_data
+ )
+
+ # Expect two calls despite a single explicit call to flush().
+
+ expected = [
+ [
+ tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id="foo",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ tensorboard_data.TimeSeriesDataPoint(
+ step=1, scalar=tensorboard_data.Scalar(value=1.0)
+ )
+ ],
+ )
+ ],
+ [
+ tensorboard_data.TimeSeriesData(
+ tensorboard_time_series_id="bar",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[
+ tensorboard_data.TimeSeriesDataPoint(
+ step=2, scalar=tensorboard_data.Scalar(value=-2.0)
+ )
+ ],
+ )
+ ],
+ ]
+
+ self.assertEqual(
+ expected[0],
+ call_args_list[0][1]["write_run_data_requests"][0].time_series_data,
+ )
+ self.assertEqual(
+ expected[1],
+ call_args_list[1][1]["write_run_data_requests"][0].time_series_data,
+ )
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_break_at_tag_boundary(self, run_resource_mock):
+ mock_client = _create_mock_client()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ # Choose tag name sizes such that one tag fits in a 1024 byte request,
+ # but not two. Note that tag names appear in both `Tag.name` and the
+ # summary metadata.
+ long_tag_1 = "a" * 384
+ long_tag_2 = "b" * 384
+ event = event_pb2.Event(step=1)
+ event.summary.value.add(tag=long_tag_1, simple_value=1.0)
+ event.summary.value.add(tag=long_tag_2, simple_value=2.0)
+
+ sender = _create_scalar_request_sender(
+ "train",
+ mock_client,
+ # Set a limit to request size
+ max_request_size=1024,
+ )
+ self._add_events(sender, _apply_compat([event]))
+ sender.flush()
+ call_args_list = mock_client.write_tensorboard_experiment_data.call_args_list
+
+ request1 = call_args_list[0][1]["write_run_data_requests"][0].time_series_data
+ _clear_wall_times(request1)
+
+ # Convenience helpers for constructing expected requests.
+ data = tensorboard_data.TimeSeriesData
+ point = tensorboard_data.TimeSeriesDataPoint
+ scalar = tensorboard_data.Scalar
+
+ expected_request1 = [
+ data(
+ tensorboard_time_series_id=long_tag_1,
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=1, scalar=scalar(value=1.0))],
+ ),
+ data(
+ tensorboard_time_series_id=long_tag_2,
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=1, scalar=scalar(value=2.0))],
+ ),
+ ]
+ self.assertProtoEquals(expected_request1[0], request1[0])
+ self.assertProtoEquals(expected_request1[1], request1[1])
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_break_at_scalar_point_boundary(self, run_resource_mock):
+ mock_client = _create_mock_client()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ point_count = 2000 # comfortably saturates a single 1024-byte request
+ events = []
+ for step in range(point_count):
+ summary = scalar_v2_pb("loss", -2.0 * step)
+ if step > 0:
+ summary.value[0].ClearField("metadata")
+ events.append(event_pb2.Event(summary=summary, step=step))
+
+ sender = _create_scalar_request_sender(
+ "train",
+ mock_client,
+ # Set a limit to request size
+ max_request_size=1024,
+ )
+ self._add_events(sender, _apply_compat(events))
+ sender.flush()
+ call_args_list = mock_client.write_tensorboard_experiment_data.call_args_list
+
+ for call_args in call_args_list:
+ _clear_wall_times(
+ call_args[1]["write_run_data_requests"][0].time_series_data
+ )
+
+ self.assertGreater(len(call_args_list), 1)
+ self.assertLess(len(call_args_list), point_count)
+ # This is the observed number of requests when running the test. There
+ # is no reasonable way to derive this value from just reading the code.
+ # The number of requests does not have to be 37 to be correct but if it
+ # changes it probably warrants some investigation or thought.
+ self.assertEqual(37, len(call_args_list))
+
+ total_points_in_result = 0
+ for call_args in call_args_list:
+ self.assertLen(
+ call_args[1]["write_run_data_requests"][0].time_series_data, 1
+ )
+ time_series_data = call_args[1]["write_run_data_requests"][
+ 0
+ ].time_series_data[0]
+ self.assertEqual(time_series_data.tensorboard_time_series_id, "loss")
+ for point in time_series_data.values:
+ self.assertEqual(point.step, total_points_in_result)
+ self.assertEqual(point.scalar.value, -2.0 * point.step)
+ total_points_in_result += 1
+ self.assertEqual(total_points_in_result, point_count)
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_prunes_tags_and_runs(self, run_resource_mock):
+ mock_client = _create_mock_client()
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ event_1 = event_pb2.Event(step=1)
+ event_1.summary.value.add(tag="foo", simple_value=1.0)
+ event_2 = event_pb2.Event(step=2)
+ event_2.summary.value.add(tag="bar", simple_value=-2.0)
+
+ add_point_call_count_box = [0]
+
+ def mock_add_point(byte_budget_manager_self, point):
+ # Simulate out-of-space error the first time that we try to store
+ # the second point.
+ add_point_call_count_box[0] += 1
+ if add_point_call_count_box[0] == 2:
+ raise uploader_lib._OutOfSpaceError()
+
+ with mock.patch.object(
+ uploader_lib._ByteBudgetManager,
+ "add_point",
+ mock_add_point,
+ ):
+ sender = _create_scalar_request_sender("123", mock_client)
+ self._add_events(sender, _apply_compat([event_1]))
+ self._add_events(sender, _apply_compat([event_2]))
+ sender.flush()
+
+ call_args_list = mock_client.write_tensorboard_experiment_data.call_args_list
+ request1, request2 = (
+ call_args_list[0][1]["write_run_data_requests"][0].time_series_data,
+ call_args_list[1][1]["write_run_data_requests"][0].time_series_data,
+ )
+ _clear_wall_times(request1)
+ _clear_wall_times(request2)
+
+ # Convenience helpers for constructing expected requests.
+ data = tensorboard_data.TimeSeriesData
+ point = tensorboard_data.TimeSeriesDataPoint
+ scalar = tensorboard_data.Scalar
+
+ expected_request1 = [
+ data(
+ tensorboard_time_series_id="foo",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=1, scalar=scalar(value=1.0))],
+ )
+ ]
+
+ expected_request2 = [
+ data(
+ tensorboard_time_series_id="bar",
+ value_type=tensorboard_time_series_type.TensorboardTimeSeries.ValueType.SCALAR,
+ values=[point(step=2, scalar=scalar(value=-2.0))],
+ )
+ ]
+ self.assertProtoEquals(expected_request1[0], request1[0])
+ self.assertProtoEquals(expected_request2[0], request2[0])
+
+ @patch.object(uploader_utils.OnePlatformResourceManager, "get_run_resource_name")
+ def test_wall_time_precision(self, run_resource_mock):
+ run_resource_mock.return_value = _TEST_ONE_PLATFORM_RUN_NAME
+ # Test a wall time that is exactly representable in float64 but has enough
+ # digits to incur error if converted to nanoseconds the naive way (* 1e9).
+ event1 = event_pb2.Event(step=1, wall_time=1567808404.765432119)
+ event1.summary.value.add(tag="foo", simple_value=1.0)
+ # Test a wall time where as a float64, the fractional part on its own will
+ # introduce error if truncated to 9 decimal places instead of rounded.
+ event2 = event_pb2.Event(step=2, wall_time=1.000000002)
+ event2.summary.value.add(tag="foo", simple_value=2.0)
+ call_args = self._add_events_and_flush(_apply_compat([event1, event2]), 1)
+ self.assertEqual(
+ datetime_helpers.DatetimeWithNanoseconds.from_timestamp_pb(
+ _timestamp_pb(1567808404765432119)
+ ),
+ call_args[1]["write_run_data_requests"][0]
+ .time_series_data[0]
+ .values[0]
+ .wall_time,
+ )
+ self.assertEqual(
+ datetime_helpers.DatetimeWithNanoseconds.from_timestamp_pb(
+ _timestamp_pb(1000000002)
+ ),
+ call_args[1]["write_run_data_requests"][0]
+ .time_series_data[0]
+ .values[1]
+ .wall_time,
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class FileRequestSenderTest(tf.test.TestCase):
+ def test_empty_files_no_messages(self):
+ mock_client = _create_mock_client()
+ sender = _create_file_request_sender(
+ api=mock_client,
+ run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME,
+ )
+
+ sender.add_files(
+ files=[], tag="my_tag", plugin="test_plugin", event_timestamp=""
+ )
+
+ self.assertEmpty(mock_client.write_tensorboard_run_data.call_args_list)
+
+ def test_fake_files_no_sent_messages(self):
+ mock_client = _create_mock_client()
+ sender = _create_file_request_sender(
+ api=mock_client,
+ run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME,
+ )
+
+ with mock.patch("os.path.isfile", return_value=False):
+ sender.add_files(
+ files=["fakefile1", "fakefile2"],
+ tag="my_tag",
+ plugin="test_plugin",
+ event_timestamp="",
+ )
+
+ self.assertEmpty(mock_client.write_tensorboard_run_data.call_args_list)
+
+ def test_files_too_large(self):
+ mock_client = _create_mock_client()
+ sender = _create_file_request_sender(
+ api=mock_client,
+ run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME,
+ max_blob_size=10,
+ )
+
+ with tempfile.NamedTemporaryFile() as f1:
+ f1.write(b"A" * 12)
+ f1.flush()
+ sender.add_files(
+ files=[f1.name],
+ tag="my_tag",
+ plugin="test_plugin",
+ event_timestamp=timestamp_pb2.Timestamp().FromDatetime(
+ datetime.datetime.strptime("2020-01-01", "%Y-%m-%d")
+ ),
+ )
+
+ self.assertEmpty(mock_client.write_tensorboard_run_data.call_args_list)
+
+ def test_single_file_upload(self):
+ mock_client = _create_mock_client()
+ sender = _create_file_request_sender(
+ api=mock_client,
+ run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME,
+ )
+
+ with tempfile.NamedTemporaryFile() as f1:
+ fn = os.path.basename(f1.name)
+ sender.add_files(
+ files=[f1.name],
+ tag="my_tag",
+ plugin="test_plugin",
+ event_timestamp=timestamp_pb2.Timestamp().FromDatetime(
+ datetime.datetime.strptime("2020-01-01", "%Y-%m-%d")
+ ),
+ )
+
+ call_args_list = mock_client.write_tensorboard_run_data.call_args_list[0][1]
+ self.assertEqual(
+ fn, call_args_list["time_series_data"][0].values[0].blobs.values[0].id
+ )
+
+ def test_multi_file_upload(self):
+ mock_client = _create_mock_client()
+ sender = _create_file_request_sender(
+ api=mock_client,
+ run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME,
+ )
+
+ files = None
+ with tempfile.NamedTemporaryFile() as f1, tempfile.NamedTemporaryFile() as f2:
+ files = [os.path.basename(f1.name), os.path.basename(f2.name)]
+ sender.add_files(
+ files=[f1.name, f2.name],
+ tag="my_tag",
+ plugin="test_plugin",
+ event_timestamp=timestamp_pb2.Timestamp().FromDatetime(
+ datetime.datetime.strptime("2020-01-01", "%Y-%m-%d")
+ ),
+ )
+
+ call_args_list = mock_client.write_tensorboard_run_data.call_args_list[0][1]
+
+ self.assertEqual(
+ files,
+ [
+ x.id
+ for x in call_args_list["time_series_data"][0].values[0].blobs.values
+ ],
+ )
+
+ def test_add_files_no_experiment(self):
+ mock_client = _create_mock_client()
+ mock_client.write_tensorboard_run_data.side_effect = grpc.RpcError
+
+ sender = _create_file_request_sender(
+ api=mock_client,
+ run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME,
+ )
+
+ with tempfile.NamedTemporaryFile() as f1:
+ sender.add_files(
+ files=[f1.name],
+ tag="my_tag",
+ plugin="test_plugin",
+ event_timestamp=timestamp_pb2.Timestamp().FromDatetime(
+ datetime.datetime.strptime("2020-01-01", "%Y-%m-%d")
+ ),
+ )
+
+ mock_client.write_tensorboard_run_data.assert_called_once()
+
+ def test_add_files_from_local(self):
+ mock_client = _create_mock_client()
+ bucket = _create_mock_blob_storage()
+
+ sender = _create_file_request_sender(
+ api=mock_client,
+ run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME,
+ blob_storage_bucket=bucket,
+ source_bucket=None,
+ )
+
+ with tempfile.NamedTemporaryFile() as f1:
+ sender.add_files(
+ files=[f1.name],
+ tag="my_tag",
+ plugin="test_plugin",
+ event_timestamp=timestamp_pb2.Timestamp().FromDatetime(
+ datetime.datetime.strptime("2020-01-01", "%Y-%m-%d")
+ ),
+ )
+
+ bucket.blob.assert_called_once()
+
+ def test_copy_blobs(self):
+ mock_client = _create_mock_client()
+ sender = _create_file_request_sender(
+ api=mock_client,
+ run_resource_id=_TEST_ONE_PLATFORM_RUN_NAME,
+ )
+
+ sender._copy_between_buckets("gs://path/to/my/file", None)
+ self.assertLen(sender._source_bucket.copy_blob.call_args_list, 1)
+
+
+class VarintCostTest(tf.test.TestCase):
+ def test_varint_cost(self):
+ self.assertEqual(uploader_lib._varint_cost(0), 1)
+ self.assertEqual(uploader_lib._varint_cost(7), 1)
+ self.assertEqual(uploader_lib._varint_cost(127), 1)
+ self.assertEqual(uploader_lib._varint_cost(128), 2)
+ self.assertEqual(uploader_lib._varint_cost(128 * 128 - 1), 2)
+ self.assertEqual(uploader_lib._varint_cost(128 * 128), 3)
+
+
+def _clear_wall_times(repeated_time_series_data):
+ """Clears the wall_time fields in a TimeSeriesData to be deterministic.
+
+ Args:
+ repeated_time_series_data: Iterable of tensorboard_data.TimeSeriesData.
+ """
+
+ for time_series_data in repeated_time_series_data:
+ for value in time_series_data.values:
+ value.wall_time = None
+
+
+def _apply_compat(events):
+ initial_metadata = {}
+ for event in events:
+ event = data_compat.migrate_event(event)
+ events = dataclass_compat.migrate_event(
+ event, initial_metadata=initial_metadata
+ )
+ for migrated_event in events:
+ yield migrated_event
+
+
+def _extract_tag_counts(call_args_list):
+ return {
+ ts_data.tensorboard_time_series_id: len(ts_data.values)
+ for call_args in call_args_list
+ for ts_data in call_args[1]["write_run_data_requests"][0].time_series_data
+ }
+
+
+def _extract_tag_counts_time_series(call_args_list):
+ return {
+ ts_data.tensorboard_time_series_id: len(ts_data.values)
+ for call_args in call_args_list
+ for ts_data in call_args[1]["time_series_data"]
+ }
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_uploader_main.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_uploader_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a7e17ea555d601b5bd1da2728eaf954237abb2
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_uploader_main.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from importlib import reload
+from unittest.mock import patch
+
+from google.api_core import exceptions
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.tensorboard import uploader_main
+from google.cloud.aiplatform.compat.types import (
+ job_state as gca_job_state_compat,
+)
+from google.cloud.aiplatform.compat.types import (
+ custom_job as gca_custom_job_compat,
+)
+from google.cloud.aiplatform.compat.services import (
+ job_service_client,
+)
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_CUSTOM_JOB_ID = "445768"
+_TEST_CUSTOM_JOB_NAME = f"{_TEST_PARENT}/customJobs/{_TEST_CUSTOM_JOB_ID}"
+_TEST_CUSTOM_JOBS_DISPLAY_NAME = "a custom job display name"
+_TEST_PASSED_IN_EXPERIMENT_DISPLAY_NAME = "someDisplayName"
+
+
+def _get_custom_job_proto(state=None, name=None):
+ custom_job_proto = gca_custom_job_compat.CustomJob()
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+ custom_job_proto.display_name = _TEST_CUSTOM_JOBS_DISPLAY_NAME
+ return custom_job_proto
+
+
+@pytest.fixture
+def get_custom_job_mock_not_found():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = exceptions.NotFound("not found")
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def get_custom_job_mock():
+ with patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ get_custom_job_mock.side_effect = [
+ _get_custom_job_proto(
+ name=_TEST_CUSTOM_JOB_NAME,
+ state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED,
+ ),
+ ]
+ yield get_custom_job_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestUploaderMain:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_get_default_custom_job_display_name(self, get_custom_job_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ assert (
+ uploader_main.get_experiment_display_name_with_override(
+ _TEST_CUSTOM_JOB_ID, None, _TEST_PROJECT, _TEST_LOCATION
+ )
+ == _TEST_CUSTOM_JOBS_DISPLAY_NAME
+ )
+
+ def test_non_decimal_experiment_name(self, get_custom_job_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ assert (
+ uploader_main.get_experiment_display_name_with_override(
+ "someExperimentName",
+ _TEST_PASSED_IN_EXPERIMENT_DISPLAY_NAME,
+ _TEST_PROJECT,
+ _TEST_LOCATION,
+ )
+ == _TEST_PASSED_IN_EXPERIMENT_DISPLAY_NAME
+ )
+ get_custom_job_mock.assert_not_called()
+
+ def test_display_name_already_specified(self, get_custom_job_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ assert (
+ uploader_main.get_experiment_display_name_with_override(
+ _TEST_CUSTOM_JOB_ID,
+ _TEST_PASSED_IN_EXPERIMENT_DISPLAY_NAME,
+ _TEST_PROJECT,
+ _TEST_LOCATION,
+ )
+ == _TEST_PASSED_IN_EXPERIMENT_DISPLAY_NAME
+ )
+ get_custom_job_mock.assert_not_called()
+
+ def test_custom_job_not_found(self, get_custom_job_mock_not_found):
+ aiplatform.init(project=_TEST_PROJECT)
+ assert (
+ uploader_main.get_experiment_display_name_with_override(
+ _TEST_CUSTOM_JOB_ID,
+ _TEST_PASSED_IN_EXPERIMENT_DISPLAY_NAME,
+ _TEST_PROJECT,
+ _TEST_LOCATION,
+ )
+ == _TEST_PASSED_IN_EXPERIMENT_DISPLAY_NAME
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_utils.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..80f0c00144b306277e041bd9738dd665036dbbd0
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_utils.py
@@ -0,0 +1,1062 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import datetime
+import importlib
+import json
+import os
+import re
+import tempfile
+import textwrap
+from typing import Callable, Dict, Optional, Tuple
+from unittest import mock
+from unittest.mock import patch
+from urllib import request as urllib_request
+
+import pytest
+import yaml
+from google.api_core import client_options, gapic_v1
+from google.auth import credentials
+from google.cloud import aiplatform
+from google.cloud import storage
+from google.cloud.aiplatform import compat, utils
+from google.cloud.aiplatform.compat.types import pipeline_failure_policy
+from google.cloud.aiplatform import datasets
+from google.cloud.aiplatform.utils import (
+ column_transformations_utils,
+ gcs_utils,
+ pipeline_utils,
+ prediction_utils,
+ tensorboard_utils,
+ yaml_utils,
+)
+from google.cloud.aiplatform_v1.services.model_service import (
+ client as model_service_client_v1,
+)
+from google.cloud.aiplatform_v1beta1.services.model_service import (
+ client as model_service_client_v1beta1,
+)
+from google.protobuf import timestamp_pb2
+
+model_service_client_default = model_service_client_v1
+
+
+GCS_BUCKET = "fake-bucket"
+GCS_PREFIX = "fake/prefix"
+FAKE_FILENAME = "fake-filename"
+EXPECTED_TIME = datetime.datetime(2023, 1, 6, 8, 54, 41, 734495)
+
+
+@pytest.fixture
+def mock_storage_client():
+ class Blob:
+ def __init__(self, name):
+ self.name = name
+
+ blob1 = mock.MagicMock()
+ type(blob1).name = mock.PropertyMock(return_value=f"{GCS_PREFIX}/{FAKE_FILENAME}")
+ blob2 = mock.MagicMock()
+ type(blob2).name = mock.PropertyMock(return_value=f"{GCS_PREFIX}/")
+
+ def get_blobs(bucket_name, prefix=""):
+ return [blob1, blob2]
+
+ with patch.object(storage, "Client") as mock_storage_client:
+ mock_storage_client.return_value.list_blobs.side_effect = get_blobs
+ yield mock_storage_client
+
+
+@pytest.fixture()
+def mock_datetime():
+ with patch.object(datetime, "datetime", autospec=True) as mock_datetime:
+ mock_datetime.now.return_value = EXPECTED_TIME
+ yield mock_datetime
+
+
+@pytest.fixture
+def mock_storage_blob_upload_from_filename():
+ with patch(
+ "google.cloud.storage.Blob.upload_from_filename"
+ ) as mock_blob_upload_from_filename, patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_upload_from_filename
+
+
+@pytest.fixture
+def mock_storage_client_list_blobs():
+ with patch("google.cloud.storage.Client.list_blobs") as mock_list_blobs:
+ mock_list_blobs.return_value = [
+ storage.Blob(name=f"{GCS_PREFIX}/", bucket=GCS_BUCKET),
+ storage.Blob(name=f"{GCS_PREFIX}/{FAKE_FILENAME}-1", bucket=GCS_BUCKET),
+ storage.Blob(
+ name=f"{GCS_PREFIX}/fake-dir/{FAKE_FILENAME}-2", bucket=GCS_BUCKET
+ ),
+ ]
+ yield mock_list_blobs
+
+
+@pytest.fixture
+def mock_storage_client_list_blob():
+ with patch("google.cloud.storage.Client.list_blobs") as mock_list_blobs:
+ mock_list_blobs.return_value = [
+ storage.Blob(name=f"{GCS_PREFIX}/{FAKE_FILENAME}", bucket=GCS_BUCKET),
+ ]
+ yield mock_list_blobs
+
+
+@pytest.fixture
+def mock_storage_blob_download_to_filename():
+ with patch(
+ "google.cloud.storage.Blob.download_to_filename"
+ ) as mock_blob_download_to_filename:
+ yield mock_blob_download_to_filename
+
+
+def test_invalid_region_raises_with_invalid_region():
+ with pytest.raises(ValueError):
+ aiplatform.utils.validate_region(region="europe-west10")
+
+
+def test_invalid_region_does_not_raise_with_valid_region():
+ aiplatform.utils.validate_region(region="us-central1")
+
+
+@pytest.fixture
+def copy_tree_mock():
+ with mock.patch("shutil.copytree") as copy_tree_mock:
+ yield copy_tree_mock
+
+
+@pytest.mark.parametrize(
+ "resource_noun, project, parse_resource_name_method, format_resource_name_method, parent_resource_name_fields, location, full_name",
+ [
+ (
+ "datasets",
+ "123456",
+ aiplatform.TabularDataset._parse_resource_name,
+ aiplatform.TabularDataset._format_resource_name,
+ None,
+ "us-central1",
+ "projects/123456/locations/us-central1/datasets/987654",
+ ),
+ (
+ "trainingPipelines",
+ "857392",
+ aiplatform.CustomTrainingJob._parse_resource_name,
+ aiplatform.CustomTrainingJob._format_resource_name,
+ None,
+ "us-west20",
+ "projects/857392/locations/us-central1/trainingPipelines/347292",
+ ),
+ (
+ "contexts",
+ "123456",
+ aiplatform.metadata.context.Context._parse_resource_name,
+ aiplatform.metadata.context.Context._format_resource_name,
+ {
+ aiplatform.metadata.metadata_store._MetadataStore._resource_noun: "default"
+ },
+ "europe-west4",
+ "projects/857392/locations/us-central1/metadataStores/default/contexts/123",
+ ),
+ (
+ "timeSeries",
+ "857392",
+ aiplatform.gapic.TensorboardServiceClient.parse_tensorboard_time_series_path,
+ aiplatform.gapic.TensorboardServiceClient.tensorboard_time_series_path,
+ {
+ aiplatform.Tensorboard._resource_noun: "123",
+ "experiments": "456",
+ "runs": "789",
+ },
+ "us-central1",
+ "projects/857392/locations/us-central1/tensorboards/123/experiments/456/runs/789/timeSeries/1",
+ ),
+ ],
+)
+def test_full_resource_name_with_full_name(
+ resource_noun: str,
+ project: str,
+ parse_resource_name_method: Callable[[str], Dict[str, str]],
+ format_resource_name_method: Callable[..., str],
+ parent_resource_name_fields: Optional[Dict[str, str]],
+ location: str,
+ full_name: str,
+):
+ # should ignore issues with other arguments as resource_name is full_name
+ assert (
+ aiplatform.utils.full_resource_name(
+ resource_name=full_name,
+ resource_noun=resource_noun,
+ parse_resource_name_method=parse_resource_name_method,
+ format_resource_name_method=format_resource_name_method,
+ parent_resource_name_fields=parent_resource_name_fields,
+ project=project,
+ location=location,
+ )
+ == full_name
+ )
+
+
+@pytest.mark.parametrize(
+ "partial_name, resource_noun, parse_resource_name_method, format_resource_name_method, parent_resource_name_fields, project, location, full_name",
+ [
+ (
+ "987654",
+ "datasets",
+ aiplatform.TabularDataset._parse_resource_name,
+ aiplatform.TabularDataset._format_resource_name,
+ None,
+ "123456",
+ "us-central1",
+ "projects/123456/locations/us-central1/datasets/987654",
+ ),
+ (
+ "347292",
+ "trainingPipelines",
+ aiplatform.CustomTrainingJob._parse_resource_name,
+ aiplatform.CustomTrainingJob._format_resource_name,
+ None,
+ "857392",
+ "us-central1",
+ "projects/857392/locations/us-central1/trainingPipelines/347292",
+ ),
+ (
+ "123",
+ "contexts",
+ aiplatform.metadata.context.Context._parse_resource_name,
+ aiplatform.metadata.context.Context._format_resource_name,
+ {
+ aiplatform.metadata.metadata_store._MetadataStore._resource_noun: "default"
+ },
+ "857392",
+ "us-central1",
+ "projects/857392/locations/us-central1/metadataStores/default/contexts/123",
+ ),
+ (
+ "1",
+ "timeSeries",
+ aiplatform.gapic.TensorboardServiceClient.parse_tensorboard_time_series_path,
+ aiplatform.gapic.TensorboardServiceClient.tensorboard_time_series_path,
+ {
+ aiplatform.Tensorboard._resource_noun: "123",
+ "experiments": "456",
+ "runs": "789",
+ },
+ "857392",
+ "us-central1",
+ "projects/857392/locations/us-central1/tensorboards/123/experiments/456/runs/789/timeSeries/1",
+ ),
+ ],
+)
+def test_full_resource_name_with_partial_name(
+ partial_name: str,
+ resource_noun: str,
+ parse_resource_name_method: Callable[[str], Dict[str, str]],
+ format_resource_name_method: Callable[..., str],
+ parent_resource_name_fields: Optional[Dict[str, str]],
+ project: str,
+ location: str,
+ full_name: str,
+):
+ assert (
+ aiplatform.utils.full_resource_name(
+ resource_name=partial_name,
+ resource_noun=resource_noun,
+ parse_resource_name_method=parse_resource_name_method,
+ format_resource_name_method=format_resource_name_method,
+ parent_resource_name_fields=parent_resource_name_fields,
+ project=project,
+ location=location,
+ )
+ == full_name
+ )
+
+
+@pytest.mark.parametrize(
+ "partial_name, resource_noun, project, location",
+ [("347292", "trainingPipelines", "857392", "us-west2020")],
+)
+def test_full_resource_name_raises_value_error(
+ partial_name: str,
+ resource_noun: str,
+ project: str,
+ location: str,
+):
+ with pytest.raises(ValueError):
+ aiplatform.utils.full_resource_name(
+ resource_name=partial_name,
+ resource_noun=resource_noun,
+ parse_resource_name_method=aiplatform.CustomTrainingJob._parse_resource_name,
+ format_resource_name_method=aiplatform.CustomTrainingJob._format_resource_name,
+ project=project,
+ location=location,
+ )
+
+
+def test_validate_display_name_raises_length():
+ with pytest.raises(ValueError):
+ aiplatform.utils.validate_display_name(
+ "slanflksdnlikh;likhq290u90rflkasndfkljashndfkl;jhowq2342;iehoiwerhowqihjer34564356o;iqwjr;oijsdalfjasl;kfjas;ldifhja;slkdfsdlkfhj"
+ )
+
+
+def test_validate_display_name():
+ aiplatform.utils.validate_display_name("my_model_abc")
+
+
+def test_validate_labels_raises_value_not_str():
+ with pytest.raises(ValueError):
+ aiplatform.utils.validate_labels({"my_key1": 1, "my_key2": 2})
+
+
+def test_validate_labels_raises_key_not_str():
+ with pytest.raises(ValueError):
+ aiplatform.utils.validate_labels({1: "my_value1", 2: "my_value2"})
+
+
+def test_validate_labels():
+ aiplatform.utils.validate_labels({"my_key1": "my_value1", "my_key2": "my_value2"})
+
+
+@pytest.mark.parametrize(
+ "accelerator_type, expected",
+ [
+ ("NVIDIA_TESLA_K80", True),
+ ("ACCELERATOR_TYPE_UNSPECIFIED", True),
+ ("NONEXISTENT_GPU", False),
+ ("NVIDIA_GALAXY_R7", False),
+ ("", False),
+ (None, False),
+ ],
+)
+def test_validate_accelerator_type(accelerator_type: str, expected: bool):
+ # Invalid type raises specific ValueError
+ if not expected:
+ with pytest.raises(ValueError) as e:
+ utils.validate_accelerator_type(accelerator_type)
+ assert e.match(regexp=r"Given accelerator_type")
+ # Valid type returns True
+ else:
+ assert utils.validate_accelerator_type(accelerator_type)
+
+
+@pytest.mark.parametrize(
+ "gcs_path, expected",
+ [
+ ("gs://example-bucket/path/to/folder", ("example-bucket", "path/to/folder")),
+ ("example-bucket/path/to/folder/", ("example-bucket", "path/to/folder")),
+ ("gs://example-bucket", ("example-bucket", None)),
+ ("gs://example-bucket/", ("example-bucket", None)),
+ ("gs://example-bucket/path", ("example-bucket", "path")),
+ ],
+)
+def test_extract_bucket_and_prefix_from_gcs_path(gcs_path: str, expected: tuple):
+ # Given a GCS path, ensure correct bucket and prefix are extracted
+ assert expected == utils.extract_bucket_and_prefix_from_gcs_path(gcs_path)
+
+
+@pytest.mark.parametrize(
+ "parent, expected",
+ [
+ (
+ "projects/123/locations/us-central1/datasets/456",
+ {"project": "123", "location": "us-central1"},
+ ),
+ (
+ "projects/123/locations/us-central1/",
+ {"project": "123", "location": "us-central1"},
+ ),
+ (
+ "projects/123/locations/us-central1",
+ {"project": "123", "location": "us-central1"},
+ ),
+ ("projects/123/locations/", {}),
+ ("projects/123", {}),
+ ],
+)
+def test_extract_project_and_location_from_parent(parent: str, expected: tuple):
+ # Given a parent resource name, ensure correct project and location are extracted
+ assert expected == utils.extract_project_and_location_from_parent(parent)
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+def test_wrapped_client():
+ test_client_info = gapic_v1.client_info.ClientInfo()
+ test_client_options = client_options.ClientOptions()
+
+ wrapped_client = utils.ClientWithOverride.WrappedClient(
+ client_class=model_service_client_default.ModelServiceClient,
+ client_options=test_client_options,
+ client_info=test_client_info,
+ )
+
+ assert isinstance(
+ wrapped_client.get_model.__self__,
+ model_service_client_default.ModelServiceClient,
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+def test_client_w_override_default_version():
+
+ test_client_info = gapic_v1.client_info.ClientInfo()
+ test_client_options = client_options.ClientOptions()
+
+ client_w_override = utils.ModelClientWithOverride(
+ client_options=test_client_options,
+ client_info=test_client_info,
+ )
+ assert isinstance(
+ client_w_override._clients[
+ client_w_override._default_version
+ ].get_model.__self__,
+ model_service_client_default.ModelServiceClient,
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+def test_client_w_override_select_version():
+
+ test_client_info = gapic_v1.client_info.ClientInfo()
+ test_client_options = client_options.ClientOptions()
+
+ client_w_override = utils.ModelClientWithOverride(
+ client_options=test_client_options,
+ client_info=test_client_info,
+ )
+
+ assert isinstance(
+ client_w_override.select_version(compat.V1BETA1).get_model.__self__,
+ model_service_client_v1beta1.ModelServiceClient,
+ )
+ assert isinstance(
+ client_w_override.select_version(compat.V1).get_model.__self__,
+ model_service_client_v1.ModelServiceClient,
+ )
+
+
+@pytest.mark.parametrize(
+ "year,month,day,hour,minute,second,microsecond,expected_seconds,expected_nanos",
+ [
+ (
+ 2021,
+ 12,
+ 23,
+ 23,
+ 59,
+ 59,
+ 999999,
+ 1640303999,
+ 999000000,
+ ),
+ (
+ 2013,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 199999,
+ 1357002061,
+ 199000000,
+ ),
+ ],
+)
+def test_get_timestamp_proto(
+ year,
+ month,
+ day,
+ hour,
+ minute,
+ second,
+ microsecond,
+ expected_seconds,
+ expected_nanos,
+):
+ time = datetime.datetime(
+ year=year,
+ month=month,
+ day=day,
+ hour=hour,
+ minute=minute,
+ second=second,
+ microsecond=microsecond,
+ )
+ true_timestamp_proto = timestamp_pb2.Timestamp(
+ seconds=expected_seconds, nanos=expected_nanos
+ )
+ assert true_timestamp_proto == utils.get_timestamp_proto(time)
+
+
+def test_timestamped_unique_name():
+ name = utils.timestamped_unique_name()
+ assert re.match(r"\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}-.{5}", name)
+
+
+class TestColumnTransformationsUtils:
+
+ column_transformations = [
+ {"auto": {"column_name": "a"}},
+ {"auto": {"column_name": "b"}},
+ ]
+ column_specs = {"a": "auto", "b": "auto"}
+
+ def test_get_default_column_transformations(self):
+ ds = mock.MagicMock(datasets.TimeSeriesDataset)
+ ds.column_names = ["a", "b", "target"]
+ (
+ transforms,
+ columns,
+ ) = column_transformations_utils.get_default_column_transformations(
+ dataset=ds, target_column="target"
+ )
+ assert transforms == [
+ {"auto": {"column_name": "a"}},
+ {"auto": {"column_name": "b"}},
+ ]
+ assert columns == ["a", "b"]
+
+ def test_validate_transformations_with_multiple_configs(self):
+ with pytest.raises(ValueError):
+ (
+ column_transformations_utils.validate_and_get_column_transformations(
+ column_transformations=self.column_transformations,
+ column_specs=self.column_specs,
+ )
+ )
+
+ def test_validate_transformations_with_column_specs(self):
+ actual = column_transformations_utils.validate_and_get_column_transformations(
+ column_specs=self.column_specs
+ )
+ assert actual == self.column_transformations
+
+ def test_validate_transformations_with_column_transformations(self):
+ actual = column_transformations_utils.validate_and_get_column_transformations(
+ column_transformations=self.column_transformations
+ )
+ assert actual == self.column_transformations
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestGcsUtils:
+ def test_upload_to_gcs(self, json_file, mock_storage_blob_upload_from_filename):
+ gcs_utils.upload_to_gcs(json_file, f"gs://{GCS_BUCKET}/{GCS_PREFIX}")
+ assert mock_storage_blob_upload_from_filename.called_once_with(json_file)
+
+ def test_stage_local_data_in_gcs(
+ self, json_file, mock_datetime, mock_storage_blob_upload_from_filename
+ ):
+ timestamp = EXPECTED_TIME.isoformat(sep="-", timespec="milliseconds")
+ staging_gcs_dir = f"gs://{GCS_BUCKET}/{GCS_PREFIX}"
+ data_uri = gcs_utils.stage_local_data_in_gcs(json_file, staging_gcs_dir)
+ assert mock_storage_blob_upload_from_filename.called_once_with(json_file)
+ assert (
+ data_uri
+ == f"{staging_gcs_dir}/vertex_ai_auto_staging/{timestamp}/test.json"
+ )
+
+ def test_generate_gcs_directory_for_pipeline_artifacts(self):
+ output = gcs_utils.generate_gcs_directory_for_pipeline_artifacts(
+ "project", "us-central1"
+ )
+ assert output == "gs://project-vertex-pipelines-us-central1/output_artifacts/"
+
+ @patch.object(storage.Bucket, "exists", return_value=False)
+ @patch.object(storage, "Client")
+ @patch.object(
+ gcs_utils.resource_manager_utils, "get_project_number", return_value=12345
+ )
+ def test_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist(
+ self, mock_bucket_not_exist, mock_storage_client, mock_get_project_number
+ ):
+ output = (
+ gcs_utils.create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist(
+ project="test-project", location="us-central1"
+ )
+ )
+ assert mock_storage_client.called
+ assert mock_bucket_not_exist.called
+ assert mock_get_project_number.called
+ assert (
+ output == "gs://test-project-vertex-pipelines-us-central1/output_artifacts/"
+ )
+
+ def test_download_from_gcs_dir(
+ self, mock_storage_client_list_blobs, mock_storage_blob_download_to_filename
+ ):
+ with tempfile.TemporaryDirectory() as temp_dir:
+ source_uri = f"gs://{GCS_BUCKET}/{GCS_PREFIX}"
+ destination_path = f"{temp_dir}/test-dir"
+
+ gcs_utils.download_from_gcs(source_uri, destination_path)
+
+ mock_storage_client_list_blobs.assert_called_once_with(
+ bucket_or_name=GCS_BUCKET,
+ prefix=GCS_PREFIX,
+ )
+
+ assert mock_storage_blob_download_to_filename.call_count == 2
+ mock_storage_blob_download_to_filename.assert_any_call(
+ filename=f"{destination_path}/{FAKE_FILENAME}-1"
+ )
+ mock_storage_blob_download_to_filename.assert_any_call(
+ filename=f"{destination_path}/fake-dir/{FAKE_FILENAME}-2"
+ )
+
+ def test_download_from_gcs_file(
+ self, mock_storage_client_list_blob, mock_storage_blob_download_to_filename
+ ):
+ with tempfile.TemporaryDirectory() as temp_dir:
+ source_uri = f"gs://{GCS_BUCKET}/{GCS_PREFIX}/{FAKE_FILENAME}"
+ destination_path = f"{temp_dir}/test-file"
+
+ gcs_utils.download_from_gcs(source_uri, destination_path)
+
+ mock_storage_client_list_blob.assert_called_once_with(
+ bucket_or_name=GCS_BUCKET,
+ prefix=f"{GCS_PREFIX}/{FAKE_FILENAME}",
+ )
+
+ mock_storage_blob_download_to_filename.assert_called_once_with(
+ filename=destination_path
+ )
+
+ def test_download_from_gcs_invalid_source_uri(self):
+ with tempfile.TemporaryDirectory() as temp_dir:
+ source_uri = f"{GCS_BUCKET}/{GCS_PREFIX}"
+ destination_path = f"{temp_dir}/test-dir"
+
+ with pytest.raises(
+ ValueError,
+ match=(
+ f"Invalid GCS path {source_uri}. "
+ "Please provide a valid GCS path starting with 'gs://'"
+ ),
+ ):
+ gcs_utils.download_from_gcs(source_uri, destination_path)
+
+ def test_validate_gcs_path(self):
+ test_valid_path = "gs://test_valid_path"
+ gcs_utils.validate_gcs_path(test_valid_path)
+
+ test_invalid_path = "test_invalid_path"
+ err_msg = re.escape(
+ f"Invalid GCS path {test_invalid_path}. Please provide a valid GCS path starting with 'gs://'"
+ )
+ with pytest.raises(ValueError, match=err_msg):
+ gcs_utils.validate_gcs_path(test_invalid_path)
+
+
+class TestPipelineUtils:
+ SAMPLE_JOB_SPEC = {
+ "pipelineSpec": {
+ "root": {
+ "inputDefinitions": {
+ "parameters": {
+ "string_param": {"type": "STRING"},
+ "int_param": {"type": "INT"},
+ "float_param": {"type": "DOUBLE"},
+ "new_param": {"type": "STRING"},
+ "bool_param": {"type": "STRING"},
+ "dict_param": {"type": "STRING"},
+ "list_param": {"type": "STRING"},
+ }
+ }
+ },
+ "schemaVersion": "2.0.0",
+ },
+ "runtimeConfig": {
+ "gcsOutputDirectory": "path/to/my/root",
+ "parameters": {
+ "string_param": {"stringValue": "test-string"},
+ "int_param": {"intValue": 42},
+ "float_param": {"doubleValue": 3.14},
+ },
+ "inputArtifacts": {},
+ },
+ }
+
+ def test_pipeline_utils_runtime_config_builder_from_values(self):
+ my_builder = pipeline_utils.PipelineRuntimeConfigBuilder(
+ pipeline_root="path/to/my/root",
+ schema_version="2.0.0",
+ parameter_types={
+ "string_param": "STRING",
+ "int_param": "INT",
+ "float_param": "DOUBLE",
+ },
+ parameter_values={
+ "string_param": "test-string",
+ "int_param": 42,
+ "float_param": 3.14,
+ },
+ )
+ actual_runtime_config = my_builder.build()
+ assert True
+
+ expected_runtime_config = self.SAMPLE_JOB_SPEC["runtimeConfig"]
+ assert expected_runtime_config == actual_runtime_config
+
+ def test_pipeline_utils_runtime_config_builder_from_json(self):
+ my_builder = pipeline_utils.PipelineRuntimeConfigBuilder.from_job_spec_json(
+ self.SAMPLE_JOB_SPEC
+ )
+ actual_runtime_config = my_builder.build()
+
+ expected_runtime_config = self.SAMPLE_JOB_SPEC["runtimeConfig"]
+ assert expected_runtime_config == actual_runtime_config
+
+ def test_pipeline_utils_runtime_config_builder_with_no_op_updates(self):
+ my_builder = pipeline_utils.PipelineRuntimeConfigBuilder.from_job_spec_json(
+ self.SAMPLE_JOB_SPEC
+ )
+ my_builder.update_pipeline_root(None)
+ my_builder.update_runtime_parameters(None)
+ actual_runtime_config = my_builder.build()
+
+ expected_runtime_config = self.SAMPLE_JOB_SPEC["runtimeConfig"]
+ assert expected_runtime_config == actual_runtime_config
+
+ @pytest.mark.parametrize(
+ "failure_policy",
+ [
+ (
+ "slow",
+ pipeline_failure_policy.PipelineFailurePolicy.PIPELINE_FAILURE_POLICY_FAIL_SLOW,
+ ),
+ (
+ "fast",
+ pipeline_failure_policy.PipelineFailurePolicy.PIPELINE_FAILURE_POLICY_FAIL_FAST,
+ ),
+ ],
+ )
+ def test_pipeline_utils_runtime_config_builder_with_merge_updates(
+ self, failure_policy
+ ):
+ my_builder = pipeline_utils.PipelineRuntimeConfigBuilder.from_job_spec_json(
+ self.SAMPLE_JOB_SPEC
+ )
+ my_builder.update_pipeline_root("path/to/my/new/root")
+ my_builder.update_runtime_parameters(
+ {
+ "int_param": 888,
+ "new_param": "new-string",
+ "dict_param": {"a": 1},
+ "list_param": [1, 2, 3],
+ "bool_param": True,
+ }
+ )
+ my_builder.update_failure_policy(failure_policy[0])
+ actual_runtime_config = my_builder.build()
+
+ expected_runtime_config = {
+ "gcsOutputDirectory": "path/to/my/new/root",
+ "parameters": {
+ "string_param": {"stringValue": "test-string"},
+ "int_param": {"intValue": 888},
+ "float_param": {"doubleValue": 3.14},
+ "new_param": {"stringValue": "new-string"},
+ "dict_param": {"stringValue": '{"a": 1}'},
+ "list_param": {"stringValue": "[1, 2, 3]"},
+ "bool_param": {"stringValue": "true"},
+ },
+ "inputArtifacts": {},
+ "failurePolicy": failure_policy[1],
+ }
+ assert expected_runtime_config == actual_runtime_config
+
+ def test_pipeline_utils_runtime_config_builder_invalid_failure_policy(self):
+ my_builder = pipeline_utils.PipelineRuntimeConfigBuilder.from_job_spec_json(
+ self.SAMPLE_JOB_SPEC
+ )
+ with pytest.raises(ValueError) as e:
+ my_builder.update_failure_policy("slo")
+
+ assert e.match(
+ regexp=r'failure_policy should be either "slow" or "fast", but got: "slo".'
+ )
+
+ def test_pipeline_utils_runtime_config_builder_parameter_not_found(self):
+ my_builder = pipeline_utils.PipelineRuntimeConfigBuilder.from_job_spec_json(
+ self.SAMPLE_JOB_SPEC
+ )
+ my_builder.update_pipeline_root("path/to/my/new/root")
+ my_builder.update_runtime_parameters({"no_such_param": "new-string"})
+ with pytest.raises(ValueError) as e:
+ my_builder.build()
+
+ assert e.match(regexp=r"The pipeline parameter no_such_param is not found")
+
+
+class TestTensorboardUtils:
+ def test_tensorboard_get_experiment_url(self):
+ actual = tensorboard_utils.get_experiment_url(
+ "projects/123/locations/asia-east1/tensorboards/456/experiments/exp1"
+ )
+ assert actual == (
+ "https://asia-east1.tensorboard."
+ + "googleusercontent.com/experiment/projects+123+locations+asia-east1+tensorboards+456+experiments+exp1"
+ )
+
+ def test_get_experiments_url_bad_experiment_name(self):
+ with pytest.raises(ValueError, match="Invalid experiment name: foo-bar."):
+ tensorboard_utils.get_experiment_url("foo-bar")
+
+ def test_tensorboard_get_experiments_compare_url(self):
+ actual = tensorboard_utils.get_experiments_compare_url(
+ (
+ "projects/123/locations/asia-east1/tensorboards/456/experiments/exp1",
+ "projects/123/locations/asia-east1/tensorboards/456/experiments/exp2",
+ )
+ )
+ assert actual == (
+ "https://asia-east1.tensorboard."
+ + "googleusercontent.com/compare/1-exp1:123+asia-east1+456+exp1,"
+ + "2-exp2:123+asia-east1+456+exp2"
+ )
+
+ def test_tensorboard_get_experiments_compare_url_fail_just_one_exp(self):
+ with pytest.raises(
+ ValueError, match="At least two experiment_names are required."
+ ):
+ tensorboard_utils.get_experiments_compare_url(
+ ("projects/123/locations/asia-east1/tensorboards/456/experiments/exp1",)
+ )
+
+ def test_tensorboard_get_experiments_compare_url_fail_diff_region(self):
+ with pytest.raises(
+ ValueError,
+ match="Got experiments from different locations: asia-east.",
+ ):
+ tensorboard_utils.get_experiments_compare_url(
+ (
+ "projects/123/locations/asia-east1/tensorboards/456/experiments/exp1",
+ "projects/123/locations/asia-east2/tensorboards/456/experiments/exp2",
+ )
+ )
+
+ def test_get_experiments_compare_url_bad_experiment_name(self):
+ with pytest.raises(ValueError, match="Invalid experiment name: foo-bar."):
+ tensorboard_utils.get_experiments_compare_url(("foo-bar", "foo-bar1"))
+
+
+class TestPredictionUtils:
+ SRC_DIR = "user_code"
+ CUSTOM_CLASS_FILE = "custom_class.py"
+ CUSTOM_CLASS_FILE_STEM = "custom_class"
+ CUSTOM_CLASS = "MyClass"
+
+ def _load_module(self, name, location):
+ spec = importlib.util.spec_from_file_location(name, location)
+ return importlib.util.module_from_spec(spec)
+
+ def test_inspect_source_from_class(self, tmp_path):
+ src_dir = tmp_path / self.SRC_DIR
+ src_dir.mkdir()
+ custom_class = src_dir / self.CUSTOM_CLASS_FILE
+ custom_class.write_text(
+ textwrap.dedent(
+ """
+ class {custom_class}:
+ pass
+ """
+ ).format(custom_class=self.CUSTOM_CLASS)
+ )
+ my_custom_class = self._load_module(self.CUSTOM_CLASS, str(custom_class))
+
+ class_import, class_name = prediction_utils.inspect_source_from_class(
+ my_custom_class, str(src_dir)
+ )
+
+ assert class_import == f"{self.CUSTOM_CLASS_FILE_STEM}"
+ assert class_name == self.CUSTOM_CLASS
+
+ def test_inspect_source_from_class_fails_class_not_in_source(self, tmp_path):
+ src_dir = tmp_path / self.SRC_DIR
+ src_dir.mkdir()
+ custom_class = tmp_path / self.CUSTOM_CLASS_FILE
+ custom_class.write_text(
+ textwrap.dedent(
+ """
+ class {custom_class}:
+ pass
+ """
+ ).format(custom_class=self.CUSTOM_CLASS)
+ )
+ my_custom_class = self._load_module(self.CUSTOM_CLASS, str(custom_class))
+ expected_message = (
+ f'The file implementing "{self.CUSTOM_CLASS}" must be in "{src_dir}".'
+ )
+
+ with pytest.raises(ValueError) as exception:
+ _ = prediction_utils.inspect_source_from_class(
+ my_custom_class, str(src_dir)
+ )
+
+ assert str(exception.value) == expected_message
+
+ @pytest.mark.parametrize(
+ "image_uri, expected",
+ [
+ ("gcr.io/myproject/myimage", True),
+ ("us.gcr.io/myproject/myimage", True),
+ ("us-docker.pkg.dev/myproject/myimage", True),
+ ("us-central1-docker.pkg.dev/myproject/myimage", True),
+ ("myproject/myimage", False),
+ ("random.host/myproject/myimage", False),
+ ],
+ )
+ def test_is_registry_uri(self, image_uri, expected):
+ result = prediction_utils.is_registry_uri(image_uri)
+
+ assert result == expected
+
+ def test_get_prediction_aip_http_port(self):
+ ports = [1000, 2000, 3000]
+
+ http_port = prediction_utils.get_prediction_aip_http_port(ports)
+
+ assert http_port == ports[0]
+
+ def test_get_prediction_aip_http_port_default(self):
+ http_port = prediction_utils.get_prediction_aip_http_port(None)
+
+ assert http_port == 8080
+
+ def test_download_model_artifacts(self, mock_storage_client):
+ prediction_utils.download_model_artifacts(f"gs://{GCS_BUCKET}/{GCS_PREFIX}")
+
+ assert mock_storage_client.called
+ mock_storage_client().list_blobs.assert_called_once_with(
+ GCS_BUCKET, prefix=GCS_PREFIX
+ )
+ mock_storage_client().list_blobs.side_effect("")[
+ 0
+ ].download_to_filename.assert_called_once_with(FAKE_FILENAME)
+ assert (
+ not mock_storage_client()
+ .list_blobs.side_effect("")[1]
+ .download_to_filename.called
+ )
+
+ def test_download_model_artifacts_not_gcs_uri(
+ self, mock_storage_client, tmp_path, copy_tree_mock
+ ):
+ model_dir_name = "/tmp/models"
+
+ prediction_utils.download_model_artifacts(model_dir_name)
+
+ assert not mock_storage_client.called
+ copy_tree_mock.assert_called_once_with(model_dir_name, ".", dirs_exist_ok=True)
+
+
+@pytest.fixture(scope="function")
+def yaml_file(tmp_path):
+ data = {"key": "val", "list": ["1", 2, 3.0]}
+ yaml_file_path = os.path.join(tmp_path, "test.yaml")
+ with open(yaml_file_path, "w") as f:
+ yaml.dump(data, f)
+ yield yaml_file_path
+
+
+@pytest.fixture(scope="function")
+def json_file(tmp_path):
+ data = {"key": "val", "list": ["1", 2, 3.0]}
+ json_file_path = os.path.join(tmp_path, "test.json")
+ with open(json_file_path, "w") as f:
+ json.dump(data, f)
+ yield json_file_path
+
+
+@pytest.fixture(scope="function")
+def mock_request_urlopen(request: str) -> Tuple[str, mock.MagicMock]:
+ data = {"key": "val", "list": ["1", 2, 3.0]}
+ with mock.patch.object(urllib_request, "urlopen") as mock_urlopen:
+ mock_read_response = mock.MagicMock()
+ mock_decode_response = mock.MagicMock()
+ mock_decode_response.return_value = json.dumps(data)
+ mock_read_response.return_value.decode = mock_decode_response
+ mock_urlopen.return_value.read = mock_read_response
+ yield request.param, mock_urlopen
+
+
+class TestYamlUtils:
+ def test_load_yaml_from_local_file__with_yaml(self, yaml_file):
+ actual = yaml_utils.load_yaml(yaml_file)
+ expected = {"key": "val", "list": ["1", 2, 3.0]}
+ assert actual == expected
+
+ def test_load_yaml_from_local_file__with_json(self, json_file):
+ actual = yaml_utils.load_yaml(json_file)
+ expected = {"key": "val", "list": ["1", 2, 3.0]}
+ assert actual == expected
+
+ @pytest.mark.parametrize(
+ "mock_request_urlopen",
+ ["https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"],
+ indirect=True,
+ )
+ def test_load_yaml_from_ar_uri_passes_creds(self, mock_request_urlopen):
+ url, mock_urlopen = mock_request_urlopen
+ mock_credentials = mock.create_autospec(credentials.Credentials, instance=True)
+ mock_credentials.valid = True
+ mock_credentials.token = "some_token"
+ actual = yaml_utils.load_yaml(url, credentials=mock_credentials)
+ expected = {"key": "val", "list": ["1", 2, 3.0]}
+ assert actual == expected
+ assert mock_urlopen.call_args[0][0].headers == {
+ "Authorization": "Bearer some_token"
+ }
+
+ @pytest.mark.parametrize(
+ "mock_request_urlopen",
+ [
+ "https://raw.githubusercontent.com/repo/pipeline.json",
+ "https://raw.githubusercontent.com/repo/pipeline.yaml",
+ "https://raw.githubusercontent.com/repo/pipeline.yml",
+ ],
+ indirect=True,
+ )
+ def test_load_yaml_from_https_uri_ignores_creds(self, mock_request_urlopen):
+ url, mock_urlopen = mock_request_urlopen
+ mock_credentials = mock.create_autospec(credentials.Credentials, instance=True)
+ mock_credentials.valid = True
+ mock_credentials.token = "some_token"
+ actual = yaml_utils.load_yaml(url, credentials=mock_credentials)
+ expected = {"key": "val", "list": ["1", 2, 3.0]}
+ assert actual == expected
+ assert mock_urlopen.call_args[0][0].headers == {}
+
+ @pytest.mark.parametrize(
+ "uri",
+ [
+ "https://us-docker.pkg.dev/v2/proj/repo/img/tags/list",
+ "https://example.com/pipeline.exe",
+ "http://example.com/pipeline.yaml",
+ ],
+ )
+ def test_load_yaml_from_invalid_uri(self, uri: str):
+ message = (
+ "Invalid HTTPS URI. If not using Artifact Registry, please "
+ "ensure the URI ends with .json, .yaml, or .yml."
+ )
+ with pytest.raises(ValueError, match=message):
+ yaml_utils.load_yaml(uri)
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_vizier.py b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_vizier.py
new file mode 100644
index 0000000000000000000000000000000000000000..f56d4ce084499a84e52cd5fd6bb972c2c5d03e6d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/aiplatform/test_vizier.py
@@ -0,0 +1,1101 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from importlib import reload
+from unittest import mock
+from unittest.mock import ANY
+from unittest.mock import patch
+
+import attr
+from google.api_core import exceptions
+from google.api_core import operation
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.compat.services import vizier_service_client
+from google.cloud.aiplatform.compat.types import study as study_pb2
+from google.cloud.aiplatform.compat.types import study as gca_study
+from google.cloud.aiplatform.compat.types import (
+ vizier_service as gca_vizier_service,
+)
+from google.cloud.aiplatform.vizier import pyvizier
+from google.cloud.aiplatform.vizier import Study
+from google.cloud.aiplatform.vizier import Trial
+from google.cloud.aiplatform.vizier.pyvizier import proto_converters
+import pytest
+
+from google.protobuf import duration_pb2
+from google.protobuf import struct_pb2
+from google.protobuf import timestamp_pb2
+
+
+# project
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+
+# study
+_TEST_STUDY_ID = "12345"
+_TEST_STUDY_NAME = f"{_TEST_PARENT}/studies/{_TEST_STUDY_ID}"
+
+# trial
+_TEST_TRIAL_ID = "1"
+_TEST_TRIAL_NAME = f"{_TEST_STUDY_NAME}/trials/{_TEST_TRIAL_ID}"
+
+_TEST_METRIC_ID = "pr-auc"
+_TEST_DISPLAY_NAME = "test_study_python_aiplatform"
+
+_TEST_PARAMETER_ID_1 = "learning_rate"
+_TEST_PARAMETER_ID_MIN_VALUE_1 = 1e-05
+_TEST_PARAMETER_ID_MAX_VALUE_1 = 1.0
+
+_TEST_PARAMETER_ID_2 = "optimizer"
+_TEST_PARAMETER_VALUE_2 = ["adagrad", "adam", "experimental"]
+
+_TEST_STUDY = gca_study.Study(
+ display_name=_TEST_DISPLAY_NAME,
+ study_spec=gca_study.StudySpec(
+ algorithm=gca_study.StudySpec.Algorithm.RANDOM_SEARCH,
+ metrics=[
+ gca_study.StudySpec.MetricSpec(
+ metric_id=_TEST_METRIC_ID,
+ goal=gca_study.StudySpec.MetricSpec.GoalType.MAXIMIZE,
+ )
+ ],
+ parameters=[
+ gca_study.StudySpec.ParameterSpec(
+ parameter_id=_TEST_PARAMETER_ID_1,
+ scale_type=gca_study.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
+ double_value_spec=gca_study.StudySpec.ParameterSpec.DoubleValueSpec(
+ min_value=_TEST_PARAMETER_ID_MIN_VALUE_1,
+ max_value=_TEST_PARAMETER_ID_MAX_VALUE_1,
+ ),
+ ),
+ gca_study.StudySpec.ParameterSpec(
+ parameter_id=_TEST_PARAMETER_ID_2,
+ categorical_value_spec=gca_study.StudySpec.ParameterSpec.CategoricalValueSpec(
+ values=_TEST_PARAMETER_VALUE_2
+ ),
+ ),
+ ],
+ ),
+)
+
+
+@pytest.fixture
+def get_study_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "get_study"
+ ) as get_study_mock:
+ get_study_mock.return_value = gca_study.Study(name=_TEST_STUDY_NAME)
+ yield get_study_mock
+
+
+@pytest.fixture
+def get_trial_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "get_trial"
+ ) as get_trial_mock:
+ get_trial_mock.return_value = gca_study.Trial(
+ name=_TEST_TRIAL_NAME,
+ state=gca_study.Trial.State.ACTIVE,
+ parameters=[
+ gca_study.Trial.Parameter(
+ parameter_id=_TEST_PARAMETER_ID_1,
+ value=_TEST_PARAMETER_ID_MIN_VALUE_1,
+ )
+ ],
+ )
+ yield get_trial_mock
+
+
+@pytest.fixture
+def create_study_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "create_study"
+ ) as create_study_mock:
+ create_study_mock.return_value = gca_study.Study(
+ name=_TEST_STUDY_NAME,
+ )
+ yield create_study_mock
+
+
+@pytest.fixture
+def lookup_study_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "lookup_study"
+ ) as lookup_study_mock:
+ lookup_study_mock.return_value = gca_study.Study(
+ name=_TEST_STUDY_NAME,
+ )
+ yield lookup_study_mock
+
+
+@pytest.fixture
+def suggest_trials_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "suggest_trials"
+ ) as suggest_trials_mock:
+ suggest_trials_lro_mock = mock.Mock(operation.Operation)
+ suggest_trials_lro_mock.result.return_value = (
+ gca_vizier_service.SuggestTrialsResponse(
+ trials=[gca_study.Trial(name=_TEST_TRIAL_NAME)]
+ )
+ )
+ suggest_trials_mock.return_value = suggest_trials_lro_mock
+ yield suggest_trials_mock
+
+
+@pytest.fixture
+def list_optimal_trials_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "list_optimal_trials"
+ ) as list_optimal_trials_mock:
+ list_optimal_trials_mock.return_value = (
+ gca_vizier_service.ListOptimalTrialsResponse(
+ optimal_trials=[gca_study.Trial(name=_TEST_TRIAL_NAME)]
+ )
+ )
+ yield list_optimal_trials_mock
+
+
+@pytest.fixture
+def list_trials_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "list_trials"
+ ) as list_trials_mock:
+ list_trials_mock.return_value = gca_vizier_service.ListTrialsResponse(
+ trials=[gca_study.Trial(name=_TEST_TRIAL_NAME)]
+ )
+ yield list_trials_mock
+
+
+@pytest.fixture
+def delete_study_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "delete_study"
+ ) as delete_study_mock:
+ yield delete_study_mock
+
+
+@pytest.fixture
+def delete_trial_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "delete_trial"
+ ) as delete_trial_mock:
+ yield delete_trial_mock
+
+
+@pytest.fixture
+def complete_trial_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "complete_trial"
+ ) as complete_trial_mock:
+ complete_trial_mock.return_value = gca_study.Trial(
+ name=_TEST_TRIAL_NAME,
+ final_measurement=gca_study.Measurement(
+ step_count=3,
+ metrics=[gca_study.Measurement.Metric(metric_id="y", value=5)],
+ ),
+ )
+ yield complete_trial_mock
+
+
+@pytest.fixture
+def complete_trial_empty_measurement_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "complete_trial"
+ ) as complete_trial_empty_measurement_mock:
+ complete_trial_empty_measurement_mock.return_value = gca_study.Trial(
+ name=_TEST_TRIAL_NAME
+ )
+ yield complete_trial_empty_measurement_mock
+
+
+@pytest.fixture
+def should_stop_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "check_trial_early_stopping_state"
+ ) as should_stop_mock:
+ should_stop_lro_mock = mock.Mock(operation.Operation)
+ should_stop_lro_mock.result.return_value = (
+ gca_vizier_service.CheckTrialEarlyStoppingStateResponse(should_stop=True)
+ )
+ should_stop_mock.return_value = should_stop_lro_mock
+ yield should_stop_mock
+
+
+@pytest.fixture
+def create_study_mock_already_exists():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "create_study"
+ ) as create_study_mock:
+ create_study_mock.side_effect = [
+ exceptions.AlreadyExists("Study already exists."),
+ gca_study.Study(
+ name=_TEST_STUDY_NAME,
+ ),
+ ]
+ yield create_study_mock
+
+
+@pytest.fixture
+def add_measurement_mock():
+ with patch.object(
+ vizier_service_client.VizierServiceClient, "add_trial_measurement"
+ ) as add_measurement_mock:
+ yield add_measurement_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestStudy:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures("get_study_mock")
+ def test_create_study(self, create_study_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name=_TEST_METRIC_ID, goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ _TEST_PARAMETER_ID_1,
+ _TEST_PARAMETER_ID_MIN_VALUE_1,
+ _TEST_PARAMETER_ID_MAX_VALUE_1,
+ scale_type=pyvizier.ScaleType.LINEAR,
+ )
+ root.add_categorical_param(_TEST_PARAMETER_ID_2, _TEST_PARAMETER_VALUE_2)
+
+ study = Study.create_or_load(display_name=_TEST_DISPLAY_NAME, problem=sc)
+
+ create_study_mock.assert_called_once_with(
+ parent=_TEST_PARENT, study=_TEST_STUDY
+ )
+ assert isinstance(study, Study)
+
+ @pytest.mark.usefixtures("get_study_mock")
+ def test_create_study_already_exists(
+ self, create_study_mock_already_exists, lookup_study_mock
+ ):
+ aiplatform.init(project=_TEST_PROJECT)
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name=_TEST_METRIC_ID, goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ _TEST_PARAMETER_ID_1,
+ _TEST_PARAMETER_ID_MIN_VALUE_1,
+ _TEST_PARAMETER_ID_MAX_VALUE_1,
+ scale_type=pyvizier.ScaleType.LINEAR,
+ )
+ root.add_categorical_param(_TEST_PARAMETER_ID_2, _TEST_PARAMETER_VALUE_2)
+
+ study = Study.create_or_load(display_name=_TEST_DISPLAY_NAME, problem=sc)
+
+ lookup_study_mock.assert_called_once_with(
+ request={"parent": _TEST_PARENT, "display_name": _TEST_DISPLAY_NAME}
+ )
+ assert isinstance(study, Study)
+
+ @pytest.mark.usefixtures("get_study_mock")
+ def test_materialize_study_config(self, create_study_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name=_TEST_METRIC_ID, goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ _TEST_PARAMETER_ID_1,
+ _TEST_PARAMETER_ID_MIN_VALUE_1,
+ _TEST_PARAMETER_ID_MAX_VALUE_1,
+ scale_type=pyvizier.ScaleType.LINEAR,
+ )
+ root.add_categorical_param(_TEST_PARAMETER_ID_2, _TEST_PARAMETER_VALUE_2)
+ study = Study.create_or_load(display_name=_TEST_DISPLAY_NAME, problem=sc)
+
+ study_config = study.materialize_study_config()
+
+ create_study_mock.assert_called_once_with(
+ parent=_TEST_PARENT, study=_TEST_STUDY
+ )
+ assert isinstance(study_config, pyvizier.StudyConfig)
+
+ @pytest.mark.usefixtures("get_study_mock", "get_trial_mock")
+ def test_suggest(self, create_study_mock, suggest_trials_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name=_TEST_METRIC_ID, goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ _TEST_PARAMETER_ID_1,
+ _TEST_PARAMETER_ID_MIN_VALUE_1,
+ _TEST_PARAMETER_ID_MAX_VALUE_1,
+ scale_type=pyvizier.ScaleType.LINEAR,
+ )
+ root.add_categorical_param(_TEST_PARAMETER_ID_2, _TEST_PARAMETER_VALUE_2)
+ study = Study.create_or_load(display_name=_TEST_DISPLAY_NAME, problem=sc)
+
+ trials = study.suggest(count=5, worker="test_worker")
+
+ suggest_trials_mock.assert_called_once_with(
+ request={
+ "parent": _TEST_STUDY_NAME,
+ "suggestion_count": 5,
+ "client_id": "test_worker",
+ }
+ )
+ assert isinstance(trials[0], Trial)
+
+ @pytest.mark.usefixtures("get_study_mock")
+ def test_from_uid(self):
+ aiplatform.init(project=_TEST_PROJECT)
+
+ study = Study.from_uid(uid=_TEST_STUDY_ID)
+
+ assert isinstance(study, Study)
+ assert study.name == _TEST_STUDY_ID
+
+ @pytest.mark.usefixtures("get_study_mock")
+ def test_delete(self, create_study_mock, delete_study_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name=_TEST_METRIC_ID, goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ _TEST_PARAMETER_ID_1,
+ _TEST_PARAMETER_ID_MIN_VALUE_1,
+ _TEST_PARAMETER_ID_MAX_VALUE_1,
+ scale_type=pyvizier.ScaleType.LINEAR,
+ )
+ root.add_categorical_param(_TEST_PARAMETER_ID_2, _TEST_PARAMETER_VALUE_2)
+ study = Study.create_or_load(display_name=_TEST_DISPLAY_NAME, problem=sc)
+
+ study.delete()
+
+ delete_study_mock.assert_called_once_with(name=_TEST_STUDY_NAME)
+
+ @pytest.mark.usefixtures("get_study_mock", "create_study_mock", "get_trial_mock")
+ def test_optimal_trials(self, list_optimal_trials_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name=_TEST_METRIC_ID, goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ _TEST_PARAMETER_ID_1,
+ _TEST_PARAMETER_ID_MIN_VALUE_1,
+ _TEST_PARAMETER_ID_MAX_VALUE_1,
+ scale_type=pyvizier.ScaleType.LINEAR,
+ )
+ root.add_categorical_param(_TEST_PARAMETER_ID_2, _TEST_PARAMETER_VALUE_2)
+ study = Study.create_or_load(display_name=_TEST_DISPLAY_NAME, problem=sc)
+
+ trials = study.optimal_trials()
+
+ list_optimal_trials_mock.assert_called_once_with(
+ request={"parent": _TEST_STUDY_NAME}
+ )
+ assert isinstance(trials[0], Trial)
+
+ @pytest.mark.usefixtures("get_study_mock", "create_study_mock", "get_trial_mock")
+ def test_list_trials(self, list_trials_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name=_TEST_METRIC_ID, goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ _TEST_PARAMETER_ID_1,
+ _TEST_PARAMETER_ID_MIN_VALUE_1,
+ _TEST_PARAMETER_ID_MAX_VALUE_1,
+ scale_type=pyvizier.ScaleType.LINEAR,
+ )
+ root.add_categorical_param(_TEST_PARAMETER_ID_2, _TEST_PARAMETER_VALUE_2)
+ study = Study.create_or_load(display_name=_TEST_DISPLAY_NAME, problem=sc)
+
+ trials = study.trials()
+
+ list_trials_mock.assert_called_once_with(request={"parent": _TEST_STUDY_NAME})
+ assert isinstance(trials[0], Trial)
+
+ @pytest.mark.usefixtures("get_study_mock", "create_study_mock")
+ def test_get_trial(self, get_trial_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ sc = pyvizier.StudyConfig()
+ sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
+ sc.metric_information.append(
+ pyvizier.MetricInformation(
+ name=_TEST_METRIC_ID, goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
+ )
+ )
+ root = sc.search_space.select_root()
+ root.add_float_param(
+ _TEST_PARAMETER_ID_1,
+ _TEST_PARAMETER_ID_MIN_VALUE_1,
+ _TEST_PARAMETER_ID_MAX_VALUE_1,
+ scale_type=pyvizier.ScaleType.LINEAR,
+ )
+ root.add_categorical_param(_TEST_PARAMETER_ID_2, _TEST_PARAMETER_VALUE_2)
+ study = Study.create_or_load(display_name=_TEST_DISPLAY_NAME, problem=sc)
+
+ trial = study.get_trial(1)
+
+ get_trial_mock.assert_called_once_with(name=_TEST_TRIAL_NAME, retry=ANY)
+ assert isinstance(trial, Trial)
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestTrial:
+ def setup_method(self):
+ reload(initializer)
+ reload(aiplatform)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures("get_trial_mock")
+ def test_delete(self, delete_trial_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ trial = Trial(trial_name=_TEST_TRIAL_NAME)
+
+ trial.delete()
+
+ delete_trial_mock.assert_called_once_with(name=_TEST_TRIAL_NAME)
+ assert isinstance(trial, Trial)
+
+ @pytest.mark.usefixtures("get_trial_mock")
+ def test_complete(self, complete_trial_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ trial = Trial(trial_name=_TEST_TRIAL_NAME)
+ measurement = pyvizier.Measurement()
+ measurement.metrics["y"] = 4
+
+ measurement = trial.complete(
+ measurement=measurement, infeasible_reason="infeasible"
+ )
+
+ complete_trial_mock.assert_called_once_with(
+ request={
+ "name": _TEST_TRIAL_NAME,
+ "infeasible_reason": "infeasible",
+ "trial_infeasible": True,
+ "final_measurement": gca_study.Measurement(
+ elapsed_duration=duration_pb2.Duration(),
+ metrics=[gca_study.Measurement.Metric(metric_id="y", value=4)],
+ ),
+ }
+ )
+ assert isinstance(measurement, pyvizier.Measurement)
+
+ @pytest.mark.usefixtures("get_trial_mock")
+ def test_complete_empty_measurement(self, complete_trial_empty_measurement_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ trial = Trial(trial_name=_TEST_TRIAL_NAME)
+ measurement = pyvizier.Measurement()
+ measurement.metrics["y"] = 4
+
+ measurement = trial.complete(
+ measurement=measurement, infeasible_reason="infeasible"
+ )
+
+ complete_trial_empty_measurement_mock.assert_called_once_with(
+ request={
+ "name": _TEST_TRIAL_NAME,
+ "infeasible_reason": "infeasible",
+ "trial_infeasible": True,
+ "final_measurement": gca_study.Measurement(
+ elapsed_duration=duration_pb2.Duration(),
+ metrics=[gca_study.Measurement.Metric(metric_id="y", value=4)],
+ ),
+ }
+ )
+ assert measurement is None
+
+ @pytest.mark.usefixtures("get_trial_mock")
+ def test_should_stop(self, should_stop_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ trial = Trial(trial_name=_TEST_TRIAL_NAME)
+
+ should_stop = trial.should_stop()
+
+ should_stop_mock.assert_called_once_with(
+ request={"trial_name": _TEST_TRIAL_NAME}
+ )
+ assert should_stop is True
+
+ @pytest.mark.usefixtures("get_trial_mock")
+ def test_add_measurement(self, add_measurement_mock):
+ aiplatform.init(project=_TEST_PROJECT)
+ trial = Trial(trial_name=_TEST_TRIAL_NAME)
+ measurement = pyvizier.Measurement()
+ measurement.metrics["y"] = 4
+
+ add_measurement = trial.add_measurement(measurement=measurement)
+
+ add_measurement_mock.assert_called_once_with(
+ request={
+ "trial_name": _TEST_TRIAL_NAME,
+ "measurement": gca_study.Measurement(
+ elapsed_duration=duration_pb2.Duration(),
+ metrics=[gca_study.Measurement.Metric(metric_id="y", value=4)],
+ ),
+ }
+ )
+ assert add_measurement is None
+
+ @pytest.mark.usefixtures("get_trial_mock")
+ def test_properties(self):
+ aiplatform.init(project=_TEST_PROJECT)
+ trial = Trial(trial_name=_TEST_TRIAL_NAME)
+ measurement = pyvizier.Measurement()
+ measurement.metrics["y"] = 4
+
+ uid = trial.uid
+ status = trial.status
+ parameters = trial.parameters
+
+ assert uid == 1
+ assert status == pyvizier.TrialStatus.ACTIVE
+ assert (
+ parameters.get_value(_TEST_PARAMETER_ID_1) == _TEST_PARAMETER_ID_MIN_VALUE_1
+ )
+
+ @pytest.mark.usefixtures("get_trial_mock")
+ def test_materialize(self):
+ aiplatform.init(project=_TEST_PROJECT)
+ trial = Trial(trial_name=_TEST_TRIAL_NAME)
+ measurement = pyvizier.Measurement()
+ measurement.metrics["y"] = 4
+
+ materialize_trial = trial.materialize()
+
+ assert materialize_trial.id == 1
+ assert (
+ materialize_trial.parameters.get_value(_TEST_PARAMETER_ID_1)
+ == _TEST_PARAMETER_ID_MIN_VALUE_1
+ )
+
+
+class TestMeasurementConverter:
+ def test_measurement_proto_with_empty_named_metric(self):
+ proto = study_pb2.Measurement()
+ proto.metrics.append(study_pb2.Measurement.Metric(metric_id="", value=0.8))
+
+ measurement = proto_converters.MeasurementConverter.from_proto(proto)
+ assert measurement.metrics[""] == pyvizier.Metric(value=0.8)
+
+ def test_measurement_creation(self):
+ measurement = pyvizier.Measurement(
+ metrics={
+ "": pyvizier.Metric(value=0),
+ # The empty metric always exists in Measurement.
+ "pr-auc:": pyvizier.Metric(value=0.8),
+ "latency": pyvizier.Metric(value=32),
+ },
+ elapsed_secs=12,
+ steps=12,
+ )
+ proto = proto_converters.MeasurementConverter.to_proto(measurement)
+ assert attr.asdict(
+ proto_converters.MeasurementConverter.from_proto(proto)
+ ) == attr.asdict(measurement)
+
+
+class TestParameterValueConverter:
+ def test_to_double_proto(self):
+ value = pyvizier.ParameterValue(True)
+ assert proto_converters.ParameterValueConverter.to_proto(
+ value, "aa"
+ ) == study_pb2.Trial.Parameter(
+ parameter_id="aa", value=struct_pb2.Value(number_value=1.0)
+ )
+
+ def test_to_discrete_proto(self):
+ value = pyvizier.ParameterValue(True)
+ assert proto_converters.ParameterValueConverter.to_proto(
+ value, "aa"
+ ) == study_pb2.Trial.Parameter(
+ parameter_id="aa", value=struct_pb2.Value(number_value=1.0)
+ )
+
+ def testto_string_proto(self):
+ value = pyvizier.ParameterValue("category")
+ assert proto_converters.ParameterValueConverter.to_proto(
+ value, "aa"
+ ) == study_pb2.Trial.Parameter(
+ parameter_id="aa", value=struct_pb2.Value(string_value="category")
+ )
+
+ def test_to_integer_proto(self):
+ value = pyvizier.ParameterValue(True)
+ assert proto_converters.ParameterValueConverter.to_proto(
+ value, "aa"
+ ) == study_pb2.Trial.Parameter(
+ parameter_id="aa", value=struct_pb2.Value(number_value=1.0)
+ )
+
+
+class TestTrialConverter:
+ def test_from_proto_completed(self):
+ proto = study_pb2.Trial(name=str(1))
+ proto.state = study_pb2.Trial.State.SUCCEEDED
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="float", value=struct_pb2.Value(number_value=1.0)
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="int", value=struct_pb2.Value(number_value=2)
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="str", value=struct_pb2.Value(string_value="3")
+ )
+ )
+ proto.final_measurement.metrics.append(
+ study_pb2.Measurement.Metric(metric_id="pr-auc", value=0.8)
+ )
+ proto.final_measurement.metrics.append(
+ study_pb2.Measurement.Metric(metric_id="latency", value=32)
+ )
+
+ creation_secs = 1586649600
+ start_time = timestamp_pb2.Timestamp(
+ seconds=int(creation_secs),
+ nanos=int(1e9 * (creation_secs - int(creation_secs))),
+ )
+ setattr(proto, "start_time", start_time)
+
+ completion_secs = 1586649600 + 10
+ end_time = timestamp_pb2.Timestamp(
+ seconds=int(completion_secs),
+ nanos=int(1e9 * (completion_secs - int(completion_secs))),
+ )
+ setattr(proto, "end_time", end_time)
+
+ proto.measurements.append(
+ study_pb2.Measurement(
+ step_count=10, elapsed_duration=duration_pb2.Duration(seconds=15)
+ )
+ )
+ proto.measurements[-1].metrics.append(
+ study_pb2.Measurement.Metric(metric_id="pr-auc", value=0.7)
+ )
+ proto.measurements[-1].metrics.append(
+ study_pb2.Measurement.Metric(metric_id="latency", value=42)
+ )
+
+ proto.measurements.append(
+ study_pb2.Measurement(
+ step_count=20, elapsed_duration=duration_pb2.Duration(seconds=30)
+ )
+ )
+ proto.measurements[-1].metrics.append(
+ study_pb2.Measurement.Metric(metric_id="pr-auc", value=0.75)
+ )
+ proto.measurements[-1].metrics.append(
+ study_pb2.Measurement.Metric(metric_id="latency", value=37)
+ )
+
+ test = proto_converters.TrialConverter.from_proto(proto=proto)
+ assert test.id == 1
+ assert test.status == pyvizier.TrialStatus.COMPLETED
+ assert test.is_completed
+ assert not test.infeasible
+ assert test.infeasibility_reason is None
+ assert len(test.parameters) == 3
+ assert test.parameters["float"].value == 1.0
+ assert test.parameters["int"].value == 2
+ assert test.parameters["str"].value == "3"
+
+ # Final measurement
+ assert len(test.final_measurement.metrics) == 2
+ assert test.final_measurement.metrics["pr-auc"].value == 0.8
+ assert test.final_measurement.metrics["latency"].value == 32
+
+ # Intermediate measurement
+ assert test.measurements[0] == pyvizier.Measurement(
+ metrics={"pr-auc": 0.7, "latency": 42}, steps=10, elapsed_secs=15
+ )
+
+ assert test.measurements[1] == pyvizier.Measurement(
+ metrics={"pr-auc": 0.75, "latency": 37}, steps=20, elapsed_secs=30
+ )
+
+ assert test.id == 1
+
+ assert test.creation_time is not None
+ assert test.creation_time.timestamp() == start_time.seconds
+ assert test.completion_time is not None
+ assert test.completion_time.timestamp() == end_time.seconds
+ assert test.duration.total_seconds() == 10
+
+ assert not test.infeasible
+
+ def test_from_proto_pending(self):
+ proto = study_pb2.Trial(name=str(2))
+ proto.state = study_pb2.Trial.State.ACTIVE
+
+ start_time = timestamp_pb2.Timestamp(seconds=int(1586649600))
+ setattr(proto, "start_time", start_time)
+
+ test = proto_converters.TrialConverter.from_proto(proto=proto)
+ assert test.status == pyvizier.TrialStatus.ACTIVE
+ assert not test.is_completed
+ assert not test.infeasible
+ assert test.infeasibility_reason is None
+ assert test.creation_time is not None
+ assert test.completion_time is None
+ assert test.duration is None
+
+ def test_from_proto_infeasible(self):
+ proto = study_pb2.Trial(name=str(1))
+ proto.state = study_pb2.Trial.State.INFEASIBLE
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="float", value=struct_pb2.Value(number_value=1.0)
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="int", value=struct_pb2.Value(number_value=2)
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="str", value=struct_pb2.Value(string_value="3")
+ )
+ )
+
+ start_time = timestamp_pb2.Timestamp(seconds=int(1586649600))
+ setattr(proto, "start_time", start_time)
+ end_time = timestamp_pb2.Timestamp(seconds=int(1586649600 + 10))
+ setattr(proto, "end_time", end_time)
+ setattr(proto, "infeasible_reason", "A reason")
+
+ test = proto_converters.TrialConverter.from_proto(proto=proto)
+ assert test.status == pyvizier.TrialStatus.COMPLETED
+ assert test.is_completed
+ assert test.infeasible
+ assert test.infeasibility_reason == "A reason"
+
+ def test_from_proto_invalid_trial(self):
+ proto = study_pb2.Trial(name=str(2))
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="float", value=struct_pb2.Value(number_value=1.0)
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="float", value=struct_pb2.Value(number_value=2.0)
+ )
+ )
+ proto.state = study_pb2.Trial.State.ACTIVE
+ start_time = timestamp_pb2.Timestamp(seconds=int(1586649600))
+ setattr(proto, "start_time", start_time)
+ try:
+ proto_converters.TrialConverter.from_proto(proto=proto)
+ except ValueError as e:
+ assert "Invalid trial proto" in str(e)
+
+
+class TestTrialConverterToProto:
+ def _get_single_objective_base_trial(self):
+ proto = study_pb2.Trial(
+ name="owners/my_username/studies/2", id="2", client_id="worker0"
+ )
+
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="activation", value=struct_pb2.Value(string_value="relu")
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="synchronus", value=struct_pb2.Value(string_value="true")
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="batch_size", value=struct_pb2.Value(number_value=32)
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="floating_point_param",
+ value=struct_pb2.Value(number_value=32.0),
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="learning_rate", value=struct_pb2.Value(number_value=0.5)
+ )
+ )
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="units", value=struct_pb2.Value(number_value=50)
+ )
+ )
+ creation_secs = 1630505100
+ start_time = timestamp_pb2.Timestamp(
+ seconds=int(creation_secs),
+ nanos=int(1e9 * (creation_secs - int(creation_secs))),
+ )
+ setattr(proto, "start_time", start_time)
+ return proto
+
+ def test_parameter_back_to_back_conversion(self):
+ proto = self._get_single_objective_base_trial()
+ proto.state = study_pb2.Trial.State.ACTIVE
+ pytrial = proto_converters.TrialConverter.from_proto(proto)
+ got = proto_converters.TrialConverter.to_proto(pytrial)
+ assert proto == got
+
+ def test_final_measurement_back_to_back_conversion(self):
+ proto = study_pb2.Trial(
+ name=str(1),
+ id=str(1),
+ state=study_pb2.Trial.State.SUCCEEDED,
+ final_measurement=gca_study.Measurement(
+ step_count=101, elapsed_duration=duration_pb2.Duration(seconds=67)
+ ),
+ )
+ creation_secs = 12456
+ start_time = timestamp_pb2.Timestamp(
+ seconds=int(creation_secs),
+ nanos=int(1e9 * (creation_secs - int(creation_secs))),
+ )
+ setattr(proto, "start_time", start_time)
+
+ completion_secs = 12456 + 10
+ end_time = timestamp_pb2.Timestamp(
+ seconds=int(completion_secs),
+ nanos=int(1e9 * (completion_secs - int(completion_secs))),
+ )
+ setattr(proto, "end_time", end_time)
+ proto.parameters.append(
+ study_pb2.Trial.Parameter(
+ parameter_id="learning_rate", value=struct_pb2.Value(number_value=0.5)
+ )
+ )
+ proto.final_measurement.metrics.append(
+ study_pb2.Measurement.Metric(metric_id="loss", value=56.8)
+ )
+ proto.final_measurement.metrics.append(
+ study_pb2.Measurement.Metric(metric_id="objective", value=77.7)
+ )
+ proto.final_measurement.metrics.append(
+ study_pb2.Measurement.Metric(metric_id="objective2", value=-0.2)
+ )
+
+ pytrial = proto_converters.TrialConverter.from_proto(proto)
+ got = proto_converters.TrialConverter.to_proto(pytrial)
+ assert proto == got
+
+ def test_measurement_back_to_back_conversion(self):
+ proto = study_pb2.Trial(
+ name=str(2),
+ id=str(2),
+ state=study_pb2.Trial.State.ACTIVE,
+ client_id="worker0",
+ )
+ creation_secs = 1630505100
+ start_time = timestamp_pb2.Timestamp(
+ seconds=int(creation_secs),
+ nanos=int(1e9 * (creation_secs - int(creation_secs))),
+ )
+ setattr(proto, "start_time", start_time)
+ proto.measurements.append(
+ study_pb2.Measurement(
+ step_count=123, elapsed_duration=duration_pb2.Duration(seconds=22)
+ )
+ )
+ proto.measurements[-1].metrics.append(
+ study_pb2.Measurement.Metric(metric_id="objective", value=0.4321)
+ )
+ proto.measurements[-1].metrics.append(
+ study_pb2.Measurement.Metric(metric_id="loss", value=0.001)
+ )
+
+ proto.measurements.append(
+ study_pb2.Measurement(
+ step_count=789, elapsed_duration=duration_pb2.Duration(seconds=55)
+ )
+ )
+ proto.measurements[-1].metrics.append(
+ study_pb2.Measurement.Metric(metric_id="objective", value=0.21)
+ )
+ proto.measurements[-1].metrics.append(
+ study_pb2.Measurement.Metric(metric_id="loss", value=0.0001)
+ )
+
+ pytrial = proto_converters.TrialConverter.from_proto(proto)
+ got = proto_converters.TrialConverter.to_proto(pytrial)
+ assert proto == got
+
+
+class TestParameterConfigConverterToProto:
+ def test_discrete_config_to_proto(self):
+ feasible_values = (-1, 3, 2)
+ child_parameter_config = pyvizier.ParameterConfig.factory(
+ "child", bounds=(-1.0, 1.0)
+ )
+ parameter_config = pyvizier.ParameterConfig.factory(
+ "name",
+ feasible_values=feasible_values,
+ scale_type=pyvizier.ScaleType.LOG,
+ default_value=2,
+ children=[([-1], child_parameter_config)],
+ )
+
+ proto = proto_converters.ParameterConfigConverter.to_proto(parameter_config)
+ assert proto.parameter_id == "name"
+ assert proto.discrete_value_spec.values == [-1.0, 2.0, 3.0]
+ assert proto.discrete_value_spec.default_value == 2
+ assert (
+ proto.scale_type
+ == study_pb2.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE
+ )
+ assert len(proto.conditional_parameter_specs) == 1
+
+ spec = proto.conditional_parameter_specs[0]
+ assert spec.parameter_spec.parameter_id == "child"
+ assert spec.parameter_spec.double_value_spec.min_value == -1.0
+ assert spec.parameter_spec.double_value_spec.max_value == 1.0
+ assert len(spec.parent_discrete_values.values) == 1
+ assert spec.parent_discrete_values.values[0] == -1
+
+ def test_categorical_config_to_proto_with_children(self):
+ feasible_values = ("option_a", "option_b")
+ child_parameter_config = pyvizier.ParameterConfig.factory(
+ "child", bounds=(-1.0, 1.0)
+ )
+ parameter_config = pyvizier.ParameterConfig.factory(
+ "name",
+ feasible_values=feasible_values,
+ children=[(["option_a"], child_parameter_config)],
+ )
+ proto = proto_converters.ParameterConfigConverter.to_proto(parameter_config)
+ assert len(proto.conditional_parameter_specs) == 1
+ spec = proto.conditional_parameter_specs[0]
+ assert len(spec.parent_categorical_values.values) == 1
+ assert spec.parent_categorical_values.values[0] == "option_a"
+
+ def test_integer_config_to_proto_with_children(self):
+ child_parameter_config = pyvizier.ParameterConfig.factory(
+ "child", bounds=(-1.0, 1.0)
+ )
+ parameter_config = pyvizier.ParameterConfig.factory(
+ "name", bounds=(1, 10), children=[([6], child_parameter_config)]
+ )
+ proto = proto_converters.ParameterConfigConverter.to_proto(parameter_config)
+ assert len(proto.conditional_parameter_specs) == 1
+ spec = proto.conditional_parameter_specs[0]
+ assert len(spec.parent_int_values.values) == 1
+ assert spec.parent_int_values.values[0] == 6
+
+
+class TestParameterConfigConverterFromProto:
+ """Test ParameterConfigConverter.from_proto."""
+
+ def test_from_proto_discrete(self):
+ """Test from_proto."""
+ proto = study_pb2.StudySpec.ParameterSpec(
+ parameter_id="name",
+ discrete_value_spec=study_pb2.StudySpec.ParameterSpec.DiscreteValueSpec(
+ values=[1.0, 2.0, 3.0], default_value=2.0
+ ),
+ )
+
+ parameter_config = proto_converters.ParameterConfigConverter.from_proto(proto)
+
+ assert parameter_config.name == proto.parameter_id
+ assert parameter_config.type == pyvizier.ParameterType.DISCRETE
+ assert parameter_config.bounds == (1.0, 3.0)
+ assert parameter_config.feasible_values == [1.0, 2.0, 3.0]
+ assert parameter_config.default_value == 2.0
+ assert parameter_config.external_type == pyvizier.ExternalType.INTERNAL
+
+ def test_from_proto_integer(self):
+ """Test from_proto."""
+ proto = study_pb2.StudySpec.ParameterSpec(
+ parameter_id="name",
+ integer_value_spec=study_pb2.StudySpec.ParameterSpec.IntegerValueSpec(
+ default_value=2, min_value=1, max_value=3
+ ),
+ )
+
+ parameter_config = proto_converters.ParameterConfigConverter.from_proto(proto)
+
+ assert parameter_config.name == proto.parameter_id
+ assert parameter_config.type == pyvizier.ParameterType.INTEGER
+ assert parameter_config.bounds == (1, 3)
+ assert parameter_config.default_value == 2
+ assert parameter_config.external_type == pyvizier.ExternalType.INTEGER
+
+ def test_from_proto_bool(self):
+ """Test from_proto."""
+ proto = study_pb2.StudySpec.ParameterSpec(
+ parameter_id="name",
+ categorical_value_spec=study_pb2.StudySpec.ParameterSpec.CategoricalValueSpec(
+ default_value="True", values=["True", "False"]
+ ),
+ )
+
+ parameter_config = proto_converters.ParameterConfigConverter.from_proto(proto)
+
+ assert parameter_config.name == proto.parameter_id
+ assert parameter_config.type == pyvizier.ParameterType.CATEGORICAL
+ assert parameter_config.feasible_values == ["False", "True"]
+ assert parameter_config.default_value == "True"
+ assert parameter_config.external_type == pyvizier.ExternalType.BOOLEAN
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/enhanced_library/test_value_converter.py b/testbed/googleapis__python-aiplatform/tests/unit/enhanced_library/test_value_converter.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bb24372e2effed43d1c688a2aee6a3e91ebdccd
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/enhanced_library/test_value_converter.py
@@ -0,0 +1,83 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+from google.cloud.aiplatform.utils.enhanced_library import value_converter
+from google.protobuf import json_format
+from google.protobuf.struct_pb2 import Value
+import proto
+
+
+class SomeMessage(proto.Message):
+ test_str = proto.Field(proto.STRING, number=1)
+ test_int64 = proto.Field(proto.INT64, number=2)
+ test_bool = proto.Field(proto.BOOL, number=3)
+
+
+class SomeInType(proto.Message):
+ test_map = proto.MapField(proto.STRING, proto.INT32, number=1)
+
+
+class SomeOutType(proto.Message):
+ test_int = proto.Field(proto.INT32, number=1)
+
+
+input_dict = {
+ "test_str": "Omnia Gallia est divisa",
+ "test_int64": 3,
+ "test_bool": True,
+}
+input_value = json_format.ParseDict(input_dict, Value())
+input_message = SomeMessage(input_dict)
+
+
+def test_convert_message_to_value():
+ actual_to_value_output = value_converter.to_value(input_message)
+ expected_type = Value()
+ assert isinstance(expected_type, type(actual_to_value_output))
+
+ actual_inner_fields = actual_to_value_output.struct_value.fields
+
+ actual_bool_type = actual_inner_fields["test_bool"]
+ assert hasattr(actual_bool_type, "bool_value")
+
+ actual_int64_type = actual_inner_fields["test_int64"]
+ assert hasattr(actual_int64_type, "number_value")
+
+ actual_string_type = actual_inner_fields["test_str"]
+ assert hasattr(actual_string_type, "string_value")
+
+
+def test_convert_value_to_message():
+ actual_from_value_output = value_converter.from_value(SomeMessage, input_value)
+ expected_type = SomeMessage(input_dict)
+
+ assert actual_from_value_output.__class__.__name__ == SomeMessage.__name__
+
+ # Check property-level ("duck-typing") equivalency
+ assert actual_from_value_output.test_str == expected_type.test_str
+ assert actual_from_value_output.test_bool == expected_type.test_bool
+ assert actual_from_value_output.test_int64 == expected_type.test_int64
+
+
+def test_convert_map_to_message():
+ message_with_map = SomeInType()
+ message_with_map.test_map["test_int"] = 42
+ map_composite = message_with_map.test_map
+ actual_output = value_converter.from_map(SomeOutType, map_composite)
+
+ assert actual_output.__class__.__name__ == SomeOutType.__name__
+
+ # Check property-to-key/value equivalency
+ assert actual_output.test_int == map_composite["test_int"]
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/__init__.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6cf068242c282e78ed205a7a66af26b6f1928f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1/test_job_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1/test_job_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb9526aaaabf01f2e6dcb6dc4b6ff84e92d024d7
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1/test_job_service.py
@@ -0,0 +1,42380 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation
+from google.api_core import operation_async # type: ignore
+from google.api_core import operations_v1
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient
+from google.cloud.aiplatform_v1.services.job_service import JobServiceClient
+from google.cloud.aiplatform_v1.services.job_service import pagers
+from google.cloud.aiplatform_v1.services.job_service import transports
+from google.cloud.aiplatform_v1.types import accelerator_type
+from google.cloud.aiplatform_v1.types import batch_prediction_job
+from google.cloud.aiplatform_v1.types import (
+ batch_prediction_job as gca_batch_prediction_job,
+)
+from google.cloud.aiplatform_v1.types import completion_stats
+from google.cloud.aiplatform_v1.types import custom_job
+from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
+from google.cloud.aiplatform_v1.types import data_labeling_job
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import env_var
+from google.cloud.aiplatform_v1.types import explanation
+from google.cloud.aiplatform_v1.types import explanation_metadata
+from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
+from google.cloud.aiplatform_v1.types import (
+ hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
+)
+from google.cloud.aiplatform_v1.types import io
+from google.cloud.aiplatform_v1.types import job_service
+from google.cloud.aiplatform_v1.types import job_state
+from google.cloud.aiplatform_v1.types import machine_resources
+from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job
+from google.cloud.aiplatform_v1.types import (
+ model_deployment_monitoring_job as gca_model_deployment_monitoring_job,
+)
+from google.cloud.aiplatform_v1.types import model_monitoring
+from google.cloud.aiplatform_v1.types import nas_job
+from google.cloud.aiplatform_v1.types import nas_job as gca_nas_job
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.cloud.aiplatform_v1.types import reservation_affinity
+from google.cloud.aiplatform_v1.types import study
+from google.cloud.aiplatform_v1.types import unmanaged_container_model
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import any_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.protobuf import wrappers_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
+from google.type import money_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert JobServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+ )
+ assert (
+ JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+def test__read_environment_variables():
+ assert JobServiceClient._read_environment_variables() == (False, "auto", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert JobServiceClient._read_environment_variables() == (True, "auto", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert JobServiceClient._read_environment_variables() == (False, "auto", None)
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ JobServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert JobServiceClient._read_environment_variables() == (False, "never", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert JobServiceClient._read_environment_variables() == (False, "always", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert JobServiceClient._read_environment_variables() == (False, "auto", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ JobServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert JobServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert JobServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ JobServiceClient._get_client_cert_source(mock_provided_cert_source, False)
+ is None
+ )
+ assert (
+ JobServiceClient._get_client_cert_source(mock_provided_cert_source, True)
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ JobServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ JobServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ JobServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(JobServiceClient),
+)
+@mock.patch.object(
+ JobServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(JobServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = JobServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = JobServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = JobServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ JobServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ JobServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == JobServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ JobServiceClient._get_api_endpoint(None, None, default_universe, "auto")
+ == default_endpoint
+ )
+ assert (
+ JobServiceClient._get_api_endpoint(None, None, default_universe, "always")
+ == JobServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ JobServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == JobServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ JobServiceClient._get_api_endpoint(None, None, mock_universe, "never")
+ == mock_endpoint
+ )
+ assert (
+ JobServiceClient._get_api_endpoint(None, None, default_universe, "never")
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ JobServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ JobServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ JobServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ JobServiceClient._get_universe_domain(None, None)
+ == JobServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ JobServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (JobServiceClient, "grpc"),
+ (JobServiceAsyncClient, "grpc_asyncio"),
+ (JobServiceClient, "rest"),
+ ],
+)
+def test_job_service_client_from_service_account_info(client_class, transport_name):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.JobServiceGrpcTransport, "grpc"),
+ (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.JobServiceRestTransport, "rest"),
+ ],
+)
+def test_job_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (JobServiceClient, "grpc"),
+ (JobServiceAsyncClient, "grpc_asyncio"),
+ (JobServiceClient, "rest"),
+ ],
+)
+def test_job_service_client_from_service_account_file(client_class, transport_name):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_job_service_client_get_transport_class():
+ transport = JobServiceClient.get_transport_class()
+ available_transports = [
+ transports.JobServiceGrpcTransport,
+ transports.JobServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = JobServiceClient.get_transport_class("grpc")
+ assert transport == transports.JobServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
+ (
+ JobServiceAsyncClient,
+ transports.JobServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (JobServiceClient, transports.JobServiceRestTransport, "rest"),
+ ],
+)
+@mock.patch.object(
+ JobServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(JobServiceClient),
+)
+@mock.patch.object(
+ JobServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(JobServiceAsyncClient),
+)
+def test_job_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"),
+ (
+ JobServiceAsyncClient,
+ transports.JobServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"),
+ (
+ JobServiceAsyncClient,
+ transports.JobServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (JobServiceClient, transports.JobServiceRestTransport, "rest", "true"),
+ (JobServiceClient, transports.JobServiceRestTransport, "rest", "false"),
+ ],
+)
+@mock.patch.object(
+ JobServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(JobServiceClient),
+)
+@mock.patch.object(
+ JobServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(JobServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_job_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient])
+@mock.patch.object(
+ JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
+)
+@mock.patch.object(
+ JobServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(JobServiceAsyncClient),
+)
+def test_job_service_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient])
+@mock.patch.object(
+ JobServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(JobServiceClient),
+)
+@mock.patch.object(
+ JobServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(JobServiceAsyncClient),
+)
+def test_job_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = JobServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = JobServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = JobServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
+ (
+ JobServiceAsyncClient,
+ transports.JobServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (JobServiceClient, transports.JobServiceRestTransport, "rest"),
+ ],
+)
+def test_job_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers),
+ (
+ JobServiceAsyncClient,
+ transports.JobServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (JobServiceClient, transports.JobServiceRestTransport, "rest", None),
+ ],
+)
+def test_job_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_job_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1.services.job_service.transports.JobServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = JobServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers),
+ (
+ JobServiceAsyncClient,
+ transports.JobServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_job_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateCustomJobRequest,
+ dict,
+ ],
+)
+def test_create_custom_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.create_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateCustomJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_custom_job.CustomJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_create_custom_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CreateCustomJobRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_custom_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CreateCustomJobRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_custom_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.create_custom_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_custom_job
+ ] = mock_rpc
+ request = {}
+ client.create_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_custom_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_custom_job
+ ] = mock_rpc
+
+ request = {}
+ await client.create_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.create_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_async(
+ transport: str = "grpc_asyncio", request_type=job_service.CreateCustomJobRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.create_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateCustomJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_custom_job.CustomJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_async_from_dict():
+ await test_create_custom_job_async(request_type=dict)
+
+
+def test_create_custom_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateCustomJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ call.return_value = gca_custom_job.CustomJob()
+ client.create_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateCustomJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_custom_job.CustomJob()
+ )
+ await client.create_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_custom_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_custom_job.CustomJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_custom_job(
+ parent="parent_value",
+ custom_job=gca_custom_job.CustomJob(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].custom_job
+ mock_val = gca_custom_job.CustomJob(name="name_value")
+ assert arg == mock_val
+
+
+def test_create_custom_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_custom_job(
+ job_service.CreateCustomJobRequest(),
+ parent="parent_value",
+ custom_job=gca_custom_job.CustomJob(name="name_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_custom_job.CustomJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_custom_job.CustomJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_custom_job(
+ parent="parent_value",
+ custom_job=gca_custom_job.CustomJob(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].custom_job
+ mock_val = gca_custom_job.CustomJob(name="name_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_custom_job(
+ job_service.CreateCustomJobRequest(),
+ parent="parent_value",
+ custom_job=gca_custom_job.CustomJob(name="name_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetCustomJobRequest,
+ dict,
+ ],
+)
+def test_get_custom_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.get_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetCustomJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, custom_job.CustomJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_get_custom_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.GetCustomJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_custom_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.GetCustomJobRequest(
+ name="name_value",
+ )
+
+
+def test_get_custom_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_custom_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_custom_job] = mock_rpc
+ request = {}
+ client.get_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_custom_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_custom_job
+ ] = mock_rpc
+
+ request = {}
+ await client.get_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_async(
+ transport: str = "grpc_asyncio", request_type=job_service.GetCustomJobRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.get_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetCustomJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, custom_job.CustomJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_async_from_dict():
+ await test_get_custom_job_async(request_type=dict)
+
+
+def test_get_custom_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetCustomJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ call.return_value = custom_job.CustomJob()
+ client.get_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetCustomJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ custom_job.CustomJob()
+ )
+ await client.get_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_custom_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = custom_job.CustomJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_custom_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_custom_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_custom_job(
+ job_service.GetCustomJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = custom_job.CustomJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ custom_job.CustomJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_custom_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_custom_job(
+ job_service.GetCustomJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListCustomJobsRequest,
+ dict,
+ ],
+)
+def test_list_custom_jobs(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListCustomJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_custom_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListCustomJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListCustomJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_custom_jobs_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.ListCustomJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_custom_jobs(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.ListCustomJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_custom_jobs_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_custom_jobs in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_custom_jobs
+ ] = mock_rpc
+ request = {}
+ client.list_custom_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_custom_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_custom_jobs
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_custom_jobs
+ ] = mock_rpc
+
+ request = {}
+ await client.list_custom_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_custom_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async(
+ transport: str = "grpc_asyncio", request_type=job_service.ListCustomJobsRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListCustomJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_custom_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListCustomJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListCustomJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_from_dict():
+ await test_list_custom_jobs_async(request_type=dict)
+
+
+def test_list_custom_jobs_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListCustomJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ call.return_value = job_service.ListCustomJobsResponse()
+ client.list_custom_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListCustomJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListCustomJobsResponse()
+ )
+ await client.list_custom_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_custom_jobs_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListCustomJobsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_custom_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_custom_jobs_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_custom_jobs(
+ job_service.ListCustomJobsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListCustomJobsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListCustomJobsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_custom_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_custom_jobs(
+ job_service.ListCustomJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_custom_jobs_pager(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_custom_jobs(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, custom_job.CustomJob) for i in results)
+
+
+def test_list_custom_jobs_pages(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_custom_jobs(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_pager():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_custom_jobs), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_custom_jobs(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, custom_job.CustomJob) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_pages():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_custom_jobs), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_custom_jobs(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteCustomJobRequest,
+ dict,
+ ],
+)
+def test_delete_custom_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteCustomJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_custom_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.DeleteCustomJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_custom_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.DeleteCustomJobRequest(
+ name="name_value",
+ )
+
+
+def test_delete_custom_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_custom_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_custom_job
+ ] = mock_rpc
+ request = {}
+ client.delete_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_custom_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_custom_job
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_async(
+ transport: str = "grpc_asyncio", request_type=job_service.DeleteCustomJobRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteCustomJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_async_from_dict():
+ await test_delete_custom_job_async(request_type=dict)
+
+
+def test_delete_custom_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteCustomJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteCustomJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_custom_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_custom_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_custom_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_custom_job(
+ job_service.DeleteCustomJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_custom_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_custom_job(
+ job_service.DeleteCustomJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelCustomJobRequest,
+ dict,
+ ],
+)
+def test_cancel_custom_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelCustomJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_custom_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CancelCustomJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.cancel_custom_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CancelCustomJobRequest(
+ name="name_value",
+ )
+
+
+def test_cancel_custom_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.cancel_custom_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_custom_job
+ ] = mock_rpc
+ request = {}
+ client.cancel_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.cancel_custom_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.cancel_custom_job
+ ] = mock_rpc
+
+ request = {}
+ await client.cancel_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.cancel_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_async(
+ transport: str = "grpc_asyncio", request_type=job_service.CancelCustomJobRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelCustomJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_async_from_dict():
+ await test_cancel_custom_job_async(request_type=dict)
+
+
+def test_cancel_custom_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelCustomJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelCustomJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_cancel_custom_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.cancel_custom_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_cancel_custom_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_custom_job(
+ job_service.CancelCustomJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.cancel_custom_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.cancel_custom_job(
+ job_service.CancelCustomJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateDataLabelingJobRequest,
+ dict,
+ ],
+)
+def test_create_data_labeling_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+ response = client.create_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateDataLabelingJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.datasets == ["datasets_value"]
+ assert response.labeler_count == 1375
+ assert response.instruction_uri == "instruction_uri_value"
+ assert response.inputs_schema_uri == "inputs_schema_uri_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.labeling_progress == 1810
+ assert response.specialist_pools == ["specialist_pools_value"]
+
+
+def test_create_data_labeling_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CreateDataLabelingJobRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_data_labeling_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CreateDataLabelingJobRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_data_labeling_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_data_labeling_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_data_labeling_job
+ ] = mock_rpc
+ request = {}
+ client.create_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_data_labeling_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_data_labeling_job
+ ] = mock_rpc
+
+ request = {}
+ await client.create_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.create_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.CreateDataLabelingJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+ )
+ response = await client.create_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateDataLabelingJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.datasets == ["datasets_value"]
+ assert response.labeler_count == 1375
+ assert response.instruction_uri == "instruction_uri_value"
+ assert response.inputs_schema_uri == "inputs_schema_uri_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.labeling_progress == 1810
+ assert response.specialist_pools == ["specialist_pools_value"]
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_async_from_dict():
+ await test_create_data_labeling_job_async(request_type=dict)
+
+
+def test_create_data_labeling_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateDataLabelingJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = gca_data_labeling_job.DataLabelingJob()
+ client.create_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateDataLabelingJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_data_labeling_job.DataLabelingJob()
+ )
+ await client.create_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_data_labeling_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_data_labeling_job.DataLabelingJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_data_labeling_job(
+ parent="parent_value",
+ data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].data_labeling_job
+ mock_val = gca_data_labeling_job.DataLabelingJob(name="name_value")
+ assert arg == mock_val
+
+
+def test_create_data_labeling_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_data_labeling_job(
+ job_service.CreateDataLabelingJobRequest(),
+ parent="parent_value",
+ data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_data_labeling_job.DataLabelingJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_data_labeling_job.DataLabelingJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_data_labeling_job(
+ parent="parent_value",
+ data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].data_labeling_job
+ mock_val = gca_data_labeling_job.DataLabelingJob(name="name_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_data_labeling_job(
+ job_service.CreateDataLabelingJobRequest(),
+ parent="parent_value",
+ data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetDataLabelingJobRequest,
+ dict,
+ ],
+)
+def test_get_data_labeling_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+ response = client.get_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetDataLabelingJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, data_labeling_job.DataLabelingJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.datasets == ["datasets_value"]
+ assert response.labeler_count == 1375
+ assert response.instruction_uri == "instruction_uri_value"
+ assert response.inputs_schema_uri == "inputs_schema_uri_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.labeling_progress == 1810
+ assert response.specialist_pools == ["specialist_pools_value"]
+
+
+def test_get_data_labeling_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.GetDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_data_labeling_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.GetDataLabelingJobRequest(
+ name="name_value",
+ )
+
+
+def test_get_data_labeling_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_data_labeling_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_data_labeling_job
+ ] = mock_rpc
+ request = {}
+ client.get_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_data_labeling_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_data_labeling_job
+ ] = mock_rpc
+
+ request = {}
+ await client.get_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_async(
+ transport: str = "grpc_asyncio", request_type=job_service.GetDataLabelingJobRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+ )
+ response = await client.get_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetDataLabelingJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, data_labeling_job.DataLabelingJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.datasets == ["datasets_value"]
+ assert response.labeler_count == 1375
+ assert response.instruction_uri == "instruction_uri_value"
+ assert response.inputs_schema_uri == "inputs_schema_uri_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.labeling_progress == 1810
+ assert response.specialist_pools == ["specialist_pools_value"]
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_async_from_dict():
+ await test_get_data_labeling_job_async(request_type=dict)
+
+
+def test_get_data_labeling_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetDataLabelingJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = data_labeling_job.DataLabelingJob()
+ client.get_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetDataLabelingJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ data_labeling_job.DataLabelingJob()
+ )
+ await client.get_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_data_labeling_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = data_labeling_job.DataLabelingJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_data_labeling_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_data_labeling_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_data_labeling_job(
+ job_service.GetDataLabelingJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = data_labeling_job.DataLabelingJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ data_labeling_job.DataLabelingJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_data_labeling_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_data_labeling_job(
+ job_service.GetDataLabelingJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListDataLabelingJobsRequest,
+ dict,
+ ],
+)
+def test_list_data_labeling_jobs(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListDataLabelingJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_data_labeling_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListDataLabelingJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListDataLabelingJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_data_labeling_jobs_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.ListDataLabelingJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ order_by="order_by_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_data_labeling_jobs(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.ListDataLabelingJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ order_by="order_by_value",
+ )
+
+
+def test_list_data_labeling_jobs_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_data_labeling_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_data_labeling_jobs
+ ] = mock_rpc
+ request = {}
+ client.list_data_labeling_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_data_labeling_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_data_labeling_jobs
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_data_labeling_jobs
+ ] = mock_rpc
+
+ request = {}
+ await client.list_data_labeling_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_data_labeling_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.ListDataLabelingJobsRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListDataLabelingJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_data_labeling_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListDataLabelingJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_from_dict():
+ await test_list_data_labeling_jobs_async(request_type=dict)
+
+
+def test_list_data_labeling_jobs_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListDataLabelingJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ call.return_value = job_service.ListDataLabelingJobsResponse()
+ client.list_data_labeling_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListDataLabelingJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListDataLabelingJobsResponse()
+ )
+ await client.list_data_labeling_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_data_labeling_jobs_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListDataLabelingJobsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_data_labeling_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_data_labeling_jobs_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_data_labeling_jobs(
+ job_service.ListDataLabelingJobsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListDataLabelingJobsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListDataLabelingJobsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_data_labeling_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_data_labeling_jobs(
+ job_service.ListDataLabelingJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_data_labeling_jobs_pager(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_data_labeling_jobs(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, data_labeling_job.DataLabelingJob) for i in results)
+
+
+def test_list_data_labeling_jobs_pages(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_data_labeling_jobs(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_pager():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_data_labeling_jobs(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, data_labeling_job.DataLabelingJob) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_pages():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_data_labeling_jobs(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteDataLabelingJobRequest,
+ dict,
+ ],
+)
+def test_delete_data_labeling_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteDataLabelingJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_data_labeling_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.DeleteDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_data_labeling_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.DeleteDataLabelingJobRequest(
+ name="name_value",
+ )
+
+
+def test_delete_data_labeling_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_data_labeling_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_data_labeling_job
+ ] = mock_rpc
+ request = {}
+ client.delete_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_data_labeling_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_data_labeling_job
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.DeleteDataLabelingJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteDataLabelingJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_async_from_dict():
+ await test_delete_data_labeling_job_async(request_type=dict)
+
+
+def test_delete_data_labeling_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteDataLabelingJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteDataLabelingJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_data_labeling_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_data_labeling_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_data_labeling_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_data_labeling_job(
+ job_service.DeleteDataLabelingJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_data_labeling_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_data_labeling_job(
+ job_service.DeleteDataLabelingJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelDataLabelingJobRequest,
+ dict,
+ ],
+)
+def test_cancel_data_labeling_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelDataLabelingJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_data_labeling_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CancelDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.cancel_data_labeling_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CancelDataLabelingJobRequest(
+ name="name_value",
+ )
+
+
+def test_cancel_data_labeling_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_data_labeling_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_data_labeling_job
+ ] = mock_rpc
+ request = {}
+ client.cancel_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.cancel_data_labeling_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.cancel_data_labeling_job
+ ] = mock_rpc
+
+ request = {}
+ await client.cancel_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.cancel_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.CancelDataLabelingJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelDataLabelingJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_async_from_dict():
+ await test_cancel_data_labeling_job_async(request_type=dict)
+
+
+def test_cancel_data_labeling_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelDataLabelingJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelDataLabelingJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_cancel_data_labeling_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.cancel_data_labeling_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_cancel_data_labeling_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_data_labeling_job(
+ job_service.CancelDataLabelingJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.cancel_data_labeling_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.cancel_data_labeling_job(
+ job_service.CancelDataLabelingJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+def test_create_hyperparameter_tuning_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.create_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateHyperparameterTuningJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_trial_count == 1609
+ assert response.parallel_trial_count == 2128
+ assert response.max_failed_trial_count == 2317
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_create_hyperparameter_tuning_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CreateHyperparameterTuningJobRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_hyperparameter_tuning_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CreateHyperparameterTuningJobRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_hyperparameter_tuning_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_hyperparameter_tuning_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_hyperparameter_tuning_job
+ ] = mock_rpc
+ request = {}
+ client.create_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_hyperparameter_tuning_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_hyperparameter_tuning_job
+ ] = mock_rpc
+
+ request = {}
+ await client.create_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.create_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.CreateHyperparameterTuningJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.create_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateHyperparameterTuningJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_trial_count == 1609
+ assert response.parallel_trial_count == 2128
+ assert response.max_failed_trial_count == 2317
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_async_from_dict():
+ await test_create_hyperparameter_tuning_job_async(request_type=dict)
+
+
+def test_create_hyperparameter_tuning_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateHyperparameterTuningJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+ client.create_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateHyperparameterTuningJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+ )
+ await client.create_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_hyperparameter_tuning_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_hyperparameter_tuning_job(
+ parent="parent_value",
+ hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value"
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].hyperparameter_tuning_job
+ mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value"
+ )
+ assert arg == mock_val
+
+
+def test_create_hyperparameter_tuning_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_hyperparameter_tuning_job(
+ job_service.CreateHyperparameterTuningJobRequest(),
+ parent="parent_value",
+ hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value"
+ ),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_hyperparameter_tuning_job(
+ parent="parent_value",
+ hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value"
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].hyperparameter_tuning_job
+ mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value"
+ )
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_hyperparameter_tuning_job(
+ job_service.CreateHyperparameterTuningJobRequest(),
+ parent="parent_value",
+ hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value"
+ ),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+def test_get_hyperparameter_tuning_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.get_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetHyperparameterTuningJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_trial_count == 1609
+ assert response.parallel_trial_count == 2128
+ assert response.max_failed_trial_count == 2317
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_get_hyperparameter_tuning_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.GetHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_hyperparameter_tuning_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.GetHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+
+def test_get_hyperparameter_tuning_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_hyperparameter_tuning_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_hyperparameter_tuning_job
+ ] = mock_rpc
+ request = {}
+ client.get_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_hyperparameter_tuning_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_hyperparameter_tuning_job
+ ] = mock_rpc
+
+ request = {}
+ await client.get_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.GetHyperparameterTuningJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.get_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetHyperparameterTuningJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_trial_count == 1609
+ assert response.parallel_trial_count == 2128
+ assert response.max_failed_trial_count == 2317
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_async_from_dict():
+ await test_get_hyperparameter_tuning_job_async(request_type=dict)
+
+
+def test_get_hyperparameter_tuning_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetHyperparameterTuningJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
+ client.get_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetHyperparameterTuningJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ hyperparameter_tuning_job.HyperparameterTuningJob()
+ )
+ await client.get_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_hyperparameter_tuning_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_hyperparameter_tuning_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_hyperparameter_tuning_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_hyperparameter_tuning_job(
+ job_service.GetHyperparameterTuningJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ hyperparameter_tuning_job.HyperparameterTuningJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_hyperparameter_tuning_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_hyperparameter_tuning_job(
+ job_service.GetHyperparameterTuningJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListHyperparameterTuningJobsRequest,
+ dict,
+ ],
+)
+def test_list_hyperparameter_tuning_jobs(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListHyperparameterTuningJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListHyperparameterTuningJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListHyperparameterTuningJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_hyperparameter_tuning_jobs_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.ListHyperparameterTuningJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_hyperparameter_tuning_jobs(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.ListHyperparameterTuningJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_hyperparameter_tuning_jobs_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_hyperparameter_tuning_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_hyperparameter_tuning_jobs
+ ] = mock_rpc
+ request = {}
+ client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_hyperparameter_tuning_jobs
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_hyperparameter_tuning_jobs
+ ] = mock_rpc
+
+ request = {}
+ await client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.ListHyperparameterTuningJobsRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListHyperparameterTuningJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListHyperparameterTuningJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_from_dict():
+ await test_list_hyperparameter_tuning_jobs_async(request_type=dict)
+
+
+def test_list_hyperparameter_tuning_jobs_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListHyperparameterTuningJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ call.return_value = job_service.ListHyperparameterTuningJobsResponse()
+ client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListHyperparameterTuningJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListHyperparameterTuningJobsResponse()
+ )
+ await client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_hyperparameter_tuning_jobs_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListHyperparameterTuningJobsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_hyperparameter_tuning_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_hyperparameter_tuning_jobs_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_hyperparameter_tuning_jobs(
+ job_service.ListHyperparameterTuningJobsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListHyperparameterTuningJobsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListHyperparameterTuningJobsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_hyperparameter_tuning_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_hyperparameter_tuning_jobs(
+ job_service.ListHyperparameterTuningJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_hyperparameter_tuning_jobs_pager(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_hyperparameter_tuning_jobs(
+ request={}, retry=retry, timeout=timeout
+ )
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
+ for i in results
+ )
+
+
+def test_list_hyperparameter_tuning_jobs_pages(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_pager():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_hyperparameter_tuning_jobs(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(
+ isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
+ for i in responses
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_pages():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_hyperparameter_tuning_jobs(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+def test_delete_hyperparameter_tuning_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteHyperparameterTuningJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_hyperparameter_tuning_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.DeleteHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_hyperparameter_tuning_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.DeleteHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+
+def test_delete_hyperparameter_tuning_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_hyperparameter_tuning_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_hyperparameter_tuning_job
+ ] = mock_rpc
+ request = {}
+ client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_hyperparameter_tuning_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_hyperparameter_tuning_job
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.DeleteHyperparameterTuningJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteHyperparameterTuningJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_async_from_dict():
+ await test_delete_hyperparameter_tuning_job_async(request_type=dict)
+
+
+def test_delete_hyperparameter_tuning_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteHyperparameterTuningJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteHyperparameterTuningJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_hyperparameter_tuning_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_hyperparameter_tuning_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_hyperparameter_tuning_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_hyperparameter_tuning_job(
+ job_service.DeleteHyperparameterTuningJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_hyperparameter_tuning_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_hyperparameter_tuning_job(
+ job_service.DeleteHyperparameterTuningJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+def test_cancel_hyperparameter_tuning_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelHyperparameterTuningJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_hyperparameter_tuning_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CancelHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.cancel_hyperparameter_tuning_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CancelHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+
+def test_cancel_hyperparameter_tuning_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_hyperparameter_tuning_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_hyperparameter_tuning_job
+ ] = mock_rpc
+ request = {}
+ client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.cancel_hyperparameter_tuning_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.cancel_hyperparameter_tuning_job
+ ] = mock_rpc
+
+ request = {}
+ await client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.CancelHyperparameterTuningJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelHyperparameterTuningJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_async_from_dict():
+ await test_cancel_hyperparameter_tuning_job_async(request_type=dict)
+
+
+def test_cancel_hyperparameter_tuning_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelHyperparameterTuningJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelHyperparameterTuningJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_cancel_hyperparameter_tuning_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.cancel_hyperparameter_tuning_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_cancel_hyperparameter_tuning_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_hyperparameter_tuning_job(
+ job_service.CancelHyperparameterTuningJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.cancel_hyperparameter_tuning_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.cancel_hyperparameter_tuning_job(
+ job_service.CancelHyperparameterTuningJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateNasJobRequest,
+ dict,
+ ],
+)
+def test_create_nas_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.create_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateNasJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_nas_job.NasJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.enable_restricted_image_training is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_create_nas_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CreateNasJobRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_nas_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CreateNasJobRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_nas_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.create_nas_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.create_nas_job] = mock_rpc
+ request = {}
+ client.create_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_nas_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_nas_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_nas_job
+ ] = mock_rpc
+
+ request = {}
+ await client.create_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.create_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_nas_job_async(
+ transport: str = "grpc_asyncio", request_type=job_service.CreateNasJobRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.create_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateNasJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_nas_job.NasJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.enable_restricted_image_training is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_create_nas_job_async_from_dict():
+ await test_create_nas_job_async(request_type=dict)
+
+
+def test_create_nas_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateNasJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ call.return_value = gca_nas_job.NasJob()
+ client.create_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_nas_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateNasJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_nas_job.NasJob())
+ await client.create_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_nas_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_nas_job.NasJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_nas_job(
+ parent="parent_value",
+ nas_job=gca_nas_job.NasJob(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].nas_job
+ mock_val = gca_nas_job.NasJob(name="name_value")
+ assert arg == mock_val
+
+
+def test_create_nas_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_nas_job(
+ job_service.CreateNasJobRequest(),
+ parent="parent_value",
+ nas_job=gca_nas_job.NasJob(name="name_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_nas_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_nas_job.NasJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_nas_job.NasJob())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_nas_job(
+ parent="parent_value",
+ nas_job=gca_nas_job.NasJob(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].nas_job
+ mock_val = gca_nas_job.NasJob(name="name_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_nas_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_nas_job(
+ job_service.CreateNasJobRequest(),
+ parent="parent_value",
+ nas_job=gca_nas_job.NasJob(name="name_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetNasJobRequest,
+ dict,
+ ],
+)
+def test_get_nas_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.get_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetNasJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, nas_job.NasJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.enable_restricted_image_training is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_get_nas_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.GetNasJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_nas_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.GetNasJobRequest(
+ name="name_value",
+ )
+
+
+def test_get_nas_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_nas_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_nas_job] = mock_rpc
+ request = {}
+ client.get_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_nas_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_nas_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_nas_job
+ ] = mock_rpc
+
+ request = {}
+ await client.get_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_nas_job_async(
+ transport: str = "grpc_asyncio", request_type=job_service.GetNasJobRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.get_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetNasJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, nas_job.NasJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.enable_restricted_image_training is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_get_nas_job_async_from_dict():
+ await test_get_nas_job_async(request_type=dict)
+
+
+def test_get_nas_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetNasJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ call.return_value = nas_job.NasJob()
+ client.get_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_nas_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetNasJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(nas_job.NasJob())
+ await client.get_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_nas_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = nas_job.NasJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_nas_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_nas_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_nas_job(
+ job_service.GetNasJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_nas_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = nas_job.NasJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(nas_job.NasJob())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_nas_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_nas_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_nas_job(
+ job_service.GetNasJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListNasJobsRequest,
+ dict,
+ ],
+)
+def test_list_nas_jobs(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListNasJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_nas_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListNasJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListNasJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_nas_jobs_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.ListNasJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_nas_jobs(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.ListNasJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_nas_jobs_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_nas_jobs in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.list_nas_jobs] = mock_rpc
+ request = {}
+ client.list_nas_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_nas_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_nas_jobs_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_nas_jobs
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_nas_jobs
+ ] = mock_rpc
+
+ request = {}
+ await client.list_nas_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_nas_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_nas_jobs_async(
+ transport: str = "grpc_asyncio", request_type=job_service.ListNasJobsRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListNasJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_nas_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListNasJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListNasJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_nas_jobs_async_from_dict():
+ await test_list_nas_jobs_async(request_type=dict)
+
+
+def test_list_nas_jobs_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListNasJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ call.return_value = job_service.ListNasJobsResponse()
+ client.list_nas_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_nas_jobs_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListNasJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListNasJobsResponse()
+ )
+ await client.list_nas_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_nas_jobs_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListNasJobsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_nas_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_nas_jobs_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_nas_jobs(
+ job_service.ListNasJobsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_nas_jobs_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListNasJobsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListNasJobsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_nas_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_nas_jobs_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_nas_jobs(
+ job_service.ListNasJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_nas_jobs_pager(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_nas_jobs(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, nas_job.NasJob) for i in results)
+
+
+def test_list_nas_jobs_pages(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_nas_jobs(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_nas_jobs_async_pager():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_jobs), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_nas_jobs(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, nas_job.NasJob) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_nas_jobs_async_pages():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_jobs), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_nas_jobs(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteNasJobRequest,
+ dict,
+ ],
+)
+def test_delete_nas_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteNasJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_nas_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.DeleteNasJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_nas_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.DeleteNasJobRequest(
+ name="name_value",
+ )
+
+
+def test_delete_nas_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_nas_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.delete_nas_job] = mock_rpc
+ request = {}
+ client.delete_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_nas_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_nas_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_nas_job
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_nas_job_async(
+ transport: str = "grpc_asyncio", request_type=job_service.DeleteNasJobRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteNasJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_nas_job_async_from_dict():
+ await test_delete_nas_job_async(request_type=dict)
+
+
+def test_delete_nas_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteNasJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_nas_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteNasJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_nas_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_nas_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_nas_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_nas_job(
+ job_service.DeleteNasJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_nas_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_nas_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_nas_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_nas_job(
+ job_service.DeleteNasJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelNasJobRequest,
+ dict,
+ ],
+)
+def test_cancel_nas_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelNasJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_nas_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CancelNasJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.cancel_nas_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CancelNasJobRequest(
+ name="name_value",
+ )
+
+
+def test_cancel_nas_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.cancel_nas_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.cancel_nas_job] = mock_rpc
+ request = {}
+ client.cancel_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_nas_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.cancel_nas_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.cancel_nas_job
+ ] = mock_rpc
+
+ request = {}
+ await client.cancel_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.cancel_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_nas_job_async(
+ transport: str = "grpc_asyncio", request_type=job_service.CancelNasJobRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelNasJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_nas_job_async_from_dict():
+ await test_cancel_nas_job_async(request_type=dict)
+
+
+def test_cancel_nas_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelNasJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ call.return_value = None
+ client.cancel_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_nas_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelNasJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_cancel_nas_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.cancel_nas_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_cancel_nas_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_nas_job(
+ job_service.CancelNasJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_cancel_nas_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.cancel_nas_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_cancel_nas_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.cancel_nas_job(
+ job_service.CancelNasJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetNasTrialDetailRequest,
+ dict,
+ ],
+)
+def test_get_nas_trial_detail(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = nas_job.NasTrialDetail(
+ name="name_value",
+ parameters="parameters_value",
+ )
+ response = client.get_nas_trial_detail(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetNasTrialDetailRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, nas_job.NasTrialDetail)
+ assert response.name == "name_value"
+ assert response.parameters == "parameters_value"
+
+
+def test_get_nas_trial_detail_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.GetNasTrialDetailRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_nas_trial_detail(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.GetNasTrialDetailRequest(
+ name="name_value",
+ )
+
+
+def test_get_nas_trial_detail_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_nas_trial_detail in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_nas_trial_detail
+ ] = mock_rpc
+ request = {}
+ client.get_nas_trial_detail(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_nas_trial_detail(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_nas_trial_detail_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_nas_trial_detail
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_nas_trial_detail
+ ] = mock_rpc
+
+ request = {}
+ await client.get_nas_trial_detail(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_nas_trial_detail(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_nas_trial_detail_async(
+ transport: str = "grpc_asyncio", request_type=job_service.GetNasTrialDetailRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ nas_job.NasTrialDetail(
+ name="name_value",
+ parameters="parameters_value",
+ )
+ )
+ response = await client.get_nas_trial_detail(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetNasTrialDetailRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, nas_job.NasTrialDetail)
+ assert response.name == "name_value"
+ assert response.parameters == "parameters_value"
+
+
+@pytest.mark.asyncio
+async def test_get_nas_trial_detail_async_from_dict():
+ await test_get_nas_trial_detail_async(request_type=dict)
+
+
+def test_get_nas_trial_detail_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetNasTrialDetailRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ call.return_value = nas_job.NasTrialDetail()
+ client.get_nas_trial_detail(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_nas_trial_detail_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetNasTrialDetailRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ nas_job.NasTrialDetail()
+ )
+ await client.get_nas_trial_detail(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_nas_trial_detail_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = nas_job.NasTrialDetail()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_nas_trial_detail(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_nas_trial_detail_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_nas_trial_detail(
+ job_service.GetNasTrialDetailRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_nas_trial_detail_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = nas_job.NasTrialDetail()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ nas_job.NasTrialDetail()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_nas_trial_detail(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_nas_trial_detail_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_nas_trial_detail(
+ job_service.GetNasTrialDetailRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListNasTrialDetailsRequest,
+ dict,
+ ],
+)
+def test_list_nas_trial_details(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListNasTrialDetailsResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_nas_trial_details(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListNasTrialDetailsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListNasTrialDetailsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_nas_trial_details_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.ListNasTrialDetailsRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_nas_trial_details(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.ListNasTrialDetailsRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_nas_trial_details_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_nas_trial_details
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_nas_trial_details
+ ] = mock_rpc
+ request = {}
+ client.list_nas_trial_details(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_nas_trial_details(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_nas_trial_details
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_nas_trial_details
+ ] = mock_rpc
+
+ request = {}
+ await client.list_nas_trial_details(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_nas_trial_details(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_async(
+ transport: str = "grpc_asyncio", request_type=job_service.ListNasTrialDetailsRequest
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListNasTrialDetailsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_nas_trial_details(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListNasTrialDetailsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListNasTrialDetailsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_async_from_dict():
+ await test_list_nas_trial_details_async(request_type=dict)
+
+
+def test_list_nas_trial_details_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListNasTrialDetailsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ call.return_value = job_service.ListNasTrialDetailsResponse()
+ client.list_nas_trial_details(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListNasTrialDetailsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListNasTrialDetailsResponse()
+ )
+ await client.list_nas_trial_details(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_nas_trial_details_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListNasTrialDetailsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_nas_trial_details(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_nas_trial_details_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_nas_trial_details(
+ job_service.ListNasTrialDetailsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListNasTrialDetailsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListNasTrialDetailsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_nas_trial_details(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_nas_trial_details(
+ job_service.ListNasTrialDetailsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_nas_trial_details_pager(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_nas_trial_details(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, nas_job.NasTrialDetail) for i in results)
+
+
+def test_list_nas_trial_details_pages(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_nas_trial_details(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_async_pager():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_nas_trial_details(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, nas_job.NasTrialDetail) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_async_pages():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_nas_trial_details(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateBatchPredictionJobRequest,
+ dict,
+ ],
+)
+def test_create_batch_prediction_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.create_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateBatchPredictionJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.service_account == "service_account_value"
+ assert response.generate_explanation is True
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.disable_container_logging is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_create_batch_prediction_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CreateBatchPredictionJobRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_batch_prediction_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CreateBatchPredictionJobRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_batch_prediction_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_batch_prediction_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_batch_prediction_job
+ ] = mock_rpc
+ request = {}
+ client.create_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_batch_prediction_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_batch_prediction_job
+ ] = mock_rpc
+
+ request = {}
+ await client.create_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.create_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.CreateBatchPredictionJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.create_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateBatchPredictionJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.service_account == "service_account_value"
+ assert response.generate_explanation is True
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.disable_container_logging is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_async_from_dict():
+ await test_create_batch_prediction_job_async(request_type=dict)
+
+
+def test_create_batch_prediction_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateBatchPredictionJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = gca_batch_prediction_job.BatchPredictionJob()
+ client.create_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateBatchPredictionJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_batch_prediction_job.BatchPredictionJob()
+ )
+ await client.create_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_batch_prediction_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_batch_prediction_job.BatchPredictionJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_batch_prediction_job(
+ parent="parent_value",
+ batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value"
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].batch_prediction_job
+ mock_val = gca_batch_prediction_job.BatchPredictionJob(name="name_value")
+ assert arg == mock_val
+
+
+def test_create_batch_prediction_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_batch_prediction_job(
+ job_service.CreateBatchPredictionJobRequest(),
+ parent="parent_value",
+ batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value"
+ ),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_batch_prediction_job.BatchPredictionJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_batch_prediction_job.BatchPredictionJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_batch_prediction_job(
+ parent="parent_value",
+ batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value"
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].batch_prediction_job
+ mock_val = gca_batch_prediction_job.BatchPredictionJob(name="name_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_batch_prediction_job(
+ job_service.CreateBatchPredictionJobRequest(),
+ parent="parent_value",
+ batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value"
+ ),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetBatchPredictionJobRequest,
+ dict,
+ ],
+)
+def test_get_batch_prediction_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.get_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetBatchPredictionJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, batch_prediction_job.BatchPredictionJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.service_account == "service_account_value"
+ assert response.generate_explanation is True
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.disable_container_logging is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_get_batch_prediction_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.GetBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_batch_prediction_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.GetBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+
+def test_get_batch_prediction_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_batch_prediction_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_batch_prediction_job
+ ] = mock_rpc
+ request = {}
+ client.get_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_batch_prediction_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_batch_prediction_job
+ ] = mock_rpc
+
+ request = {}
+ await client.get_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.GetBatchPredictionJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.get_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetBatchPredictionJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, batch_prediction_job.BatchPredictionJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.service_account == "service_account_value"
+ assert response.generate_explanation is True
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.disable_container_logging is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_async_from_dict():
+ await test_get_batch_prediction_job_async(request_type=dict)
+
+
+def test_get_batch_prediction_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetBatchPredictionJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = batch_prediction_job.BatchPredictionJob()
+ client.get_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetBatchPredictionJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ batch_prediction_job.BatchPredictionJob()
+ )
+ await client.get_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_batch_prediction_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = batch_prediction_job.BatchPredictionJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_batch_prediction_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_batch_prediction_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_batch_prediction_job(
+ job_service.GetBatchPredictionJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = batch_prediction_job.BatchPredictionJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ batch_prediction_job.BatchPredictionJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_batch_prediction_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_batch_prediction_job(
+ job_service.GetBatchPredictionJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListBatchPredictionJobsRequest,
+ dict,
+ ],
+)
+def test_list_batch_prediction_jobs(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListBatchPredictionJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_batch_prediction_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListBatchPredictionJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListBatchPredictionJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_batch_prediction_jobs_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.ListBatchPredictionJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_batch_prediction_jobs(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.ListBatchPredictionJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_batch_prediction_jobs_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_batch_prediction_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_batch_prediction_jobs
+ ] = mock_rpc
+ request = {}
+ client.list_batch_prediction_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_batch_prediction_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_batch_prediction_jobs
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_batch_prediction_jobs
+ ] = mock_rpc
+
+ request = {}
+ await client.list_batch_prediction_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_batch_prediction_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.ListBatchPredictionJobsRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListBatchPredictionJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_batch_prediction_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListBatchPredictionJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_from_dict():
+ await test_list_batch_prediction_jobs_async(request_type=dict)
+
+
+def test_list_batch_prediction_jobs_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListBatchPredictionJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ call.return_value = job_service.ListBatchPredictionJobsResponse()
+ client.list_batch_prediction_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListBatchPredictionJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListBatchPredictionJobsResponse()
+ )
+ await client.list_batch_prediction_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_batch_prediction_jobs_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListBatchPredictionJobsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_batch_prediction_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_batch_prediction_jobs_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_batch_prediction_jobs(
+ job_service.ListBatchPredictionJobsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListBatchPredictionJobsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListBatchPredictionJobsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_batch_prediction_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_batch_prediction_jobs(
+ job_service.ListBatchPredictionJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_batch_prediction_jobs_pager(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_batch_prediction_jobs(
+ request={}, retry=retry, timeout=timeout
+ )
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(i, batch_prediction_job.BatchPredictionJob) for i in results
+ )
+
+
+def test_list_batch_prediction_jobs_pages(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_batch_prediction_jobs(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_pager():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_batch_prediction_jobs(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(
+ isinstance(i, batch_prediction_job.BatchPredictionJob) for i in responses
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_pages():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_batch_prediction_jobs(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteBatchPredictionJobRequest,
+ dict,
+ ],
+)
+def test_delete_batch_prediction_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteBatchPredictionJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_batch_prediction_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.DeleteBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_batch_prediction_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.DeleteBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+
+def test_delete_batch_prediction_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_batch_prediction_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_batch_prediction_job
+ ] = mock_rpc
+ request = {}
+ client.delete_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_batch_prediction_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_batch_prediction_job
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.DeleteBatchPredictionJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteBatchPredictionJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_async_from_dict():
+ await test_delete_batch_prediction_job_async(request_type=dict)
+
+
+def test_delete_batch_prediction_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteBatchPredictionJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteBatchPredictionJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_batch_prediction_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_batch_prediction_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_batch_prediction_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_batch_prediction_job(
+ job_service.DeleteBatchPredictionJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_batch_prediction_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_batch_prediction_job(
+ job_service.DeleteBatchPredictionJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelBatchPredictionJobRequest,
+ dict,
+ ],
+)
+def test_cancel_batch_prediction_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelBatchPredictionJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_batch_prediction_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CancelBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.cancel_batch_prediction_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CancelBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+
+def test_cancel_batch_prediction_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_batch_prediction_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_batch_prediction_job
+ ] = mock_rpc
+ request = {}
+ client.cancel_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.cancel_batch_prediction_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.cancel_batch_prediction_job
+ ] = mock_rpc
+
+ request = {}
+ await client.cancel_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.cancel_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.CancelBatchPredictionJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CancelBatchPredictionJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_async_from_dict():
+ await test_cancel_batch_prediction_job_async(request_type=dict)
+
+
+def test_cancel_batch_prediction_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelBatchPredictionJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CancelBatchPredictionJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_cancel_batch_prediction_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.cancel_batch_prediction_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_cancel_batch_prediction_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_batch_prediction_job(
+ job_service.CancelBatchPredictionJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.cancel_batch_prediction_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.cancel_batch_prediction_job(
+ job_service.CancelBatchPredictionJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_create_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.create_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob
+ )
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.endpoint == "endpoint_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert (
+ response.schedule_state
+ == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+ )
+ assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
+ assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
+ assert response.enable_monitoring_pipeline_logs is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_create_model_deployment_monitoring_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.CreateModelDeploymentMonitoringJobRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_model_deployment_monitoring_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_model_deployment_monitoring_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_model_deployment_monitoring_job
+ ] = mock_rpc
+ request = {}
+ client.create_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_model_deployment_monitoring_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ await client.create_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.create_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.CreateModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.create_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.CreateModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob
+ )
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.endpoint == "endpoint_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert (
+ response.schedule_state
+ == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+ )
+ assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
+ assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
+ assert response.enable_monitoring_pipeline_logs is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_async_from_dict():
+ await test_create_model_deployment_monitoring_job_async(request_type=dict)
+
+
+def test_create_model_deployment_monitoring_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateModelDeploymentMonitoringJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ client.create_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.CreateModelDeploymentMonitoringJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ await client.create_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_model_deployment_monitoring_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_model_deployment_monitoring_job(
+ parent="parent_value",
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].model_deployment_monitoring_job
+ mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ )
+ assert arg == mock_val
+
+
+def test_create_model_deployment_monitoring_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_model_deployment_monitoring_job(
+ job_service.CreateModelDeploymentMonitoringJobRequest(),
+ parent="parent_value",
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_model_deployment_monitoring_job(
+ parent="parent_value",
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].model_deployment_monitoring_job
+ mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ )
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_model_deployment_monitoring_job(
+ job_service.CreateModelDeploymentMonitoringJobRequest(),
+ parent="parent_value",
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
+ dict,
+ ],
+)
+def test_search_model_deployment_monitoring_stats_anomalies(
+ request_type, transport: str = "grpc"
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager
+ )
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ feature_display_name="feature_display_name_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.search_model_deployment_monitoring_stats_anomalies(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[
+ 0
+ ] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ feature_display_name="feature_display_name_value",
+ page_token="page_token_value",
+ )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.search_model_deployment_monitoring_stats_anomalies
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.search_model_deployment_monitoring_stats_anomalies
+ ] = mock_rpc
+ request = {}
+ client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.search_model_deployment_monitoring_stats_anomalies
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.search_model_deployment_monitoring_stats_anomalies
+ ] = mock_rpc
+
+ request = {}
+ await client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.search_model_deployment_monitoring_stats_anomalies(
+ request
+ )
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager
+ )
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict():
+ await test_search_model_deployment_monitoring_stats_anomalies_async(
+ request_type=dict
+ )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+ request.model_deployment_monitoring_job = "model_deployment_monitoring_job_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ call.return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+ client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "model_deployment_monitoring_job=model_deployment_monitoring_job_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+ request.model_deployment_monitoring_job = "model_deployment_monitoring_job_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+ await client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "model_deployment_monitoring_job=model_deployment_monitoring_job_value",
+ ) in kw["metadata"]
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.search_model_deployment_monitoring_stats_anomalies(
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].model_deployment_monitoring_job
+ mock_val = "model_deployment_monitoring_job_value"
+ assert arg == mock_val
+ arg = args[0].deployed_model_id
+ mock_val = "deployed_model_id_value"
+ assert arg == mock_val
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.search_model_deployment_monitoring_stats_anomalies(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.search_model_deployment_monitoring_stats_anomalies(
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].model_deployment_monitoring_job
+ mock_val = "model_deployment_monitoring_job_value"
+ assert arg == mock_val
+ arg = args[0].deployed_model_id
+ mock_val = "deployed_model_id_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.search_model_deployment_monitoring_stats_anomalies(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_pager(
+ transport_name: str = "grpc",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[],
+ next_page_token="def",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("model_deployment_monitoring_job", ""),)
+ ),
+ )
+ pager = client.search_model_deployment_monitoring_stats_anomalies(
+ request={}, retry=retry, timeout=timeout
+ )
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(
+ i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies
+ )
+ for i in results
+ )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_pages(
+ transport_name: str = "grpc",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[],
+ next_page_token="def",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(
+ client.search_model_deployment_monitoring_stats_anomalies(request={}).pages
+ )
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_pager():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[],
+ next_page_token="def",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.search_model_deployment_monitoring_stats_anomalies(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(
+ isinstance(
+ i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies
+ )
+ for i in responses
+ )
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_pages():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[],
+ next_page_token="def",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.search_model_deployment_monitoring_stats_anomalies(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_get_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob
+ )
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.endpoint == "endpoint_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert (
+ response.schedule_state
+ == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+ )
+ assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
+ assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
+ assert response.enable_monitoring_pipeline_logs is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_get_model_deployment_monitoring_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.GetModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_model_deployment_monitoring_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+
+def test_get_model_deployment_monitoring_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_model_deployment_monitoring_job
+ ] = mock_rpc
+ request = {}
+ client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_model_deployment_monitoring_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ await client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.GetModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.GetModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob
+ )
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.endpoint == "endpoint_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert (
+ response.schedule_state
+ == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+ )
+ assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
+ assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
+ assert response.enable_monitoring_pipeline_logs is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_async_from_dict():
+ await test_get_model_deployment_monitoring_job_async(request_type=dict)
+
+
+def test_get_model_deployment_monitoring_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetModelDeploymentMonitoringJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = (
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.GetModelDeploymentMonitoringJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ await client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_model_deployment_monitoring_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_model_deployment_monitoring_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_model_deployment_monitoring_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_model_deployment_monitoring_job(
+ job_service.GetModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_model_deployment_monitoring_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_model_deployment_monitoring_job(
+ job_service.GetModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListModelDeploymentMonitoringJobsRequest,
+ dict,
+ ],
+)
+def test_list_model_deployment_monitoring_jobs(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListModelDeploymentMonitoringJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_model_deployment_monitoring_jobs_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.ListModelDeploymentMonitoringJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_model_deployment_monitoring_jobs(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_model_deployment_monitoring_jobs_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_model_deployment_monitoring_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_model_deployment_monitoring_jobs
+ ] = mock_rpc
+ request = {}
+ client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_model_deployment_monitoring_jobs
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_model_deployment_monitoring_jobs
+ ] = mock_rpc
+
+ request = {}
+ await client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.ListModelDeploymentMonitoringJobsRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ListModelDeploymentMonitoringJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async_from_dict():
+ await test_list_model_deployment_monitoring_jobs_async(request_type=dict)
+
+
+def test_list_model_deployment_monitoring_jobs_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListModelDeploymentMonitoringJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+ client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ListModelDeploymentMonitoringJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListModelDeploymentMonitoringJobsResponse()
+ )
+ await client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_model_deployment_monitoring_jobs_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_model_deployment_monitoring_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_model_deployment_monitoring_jobs_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_model_deployment_monitoring_jobs(
+ job_service.ListModelDeploymentMonitoringJobsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListModelDeploymentMonitoringJobsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_model_deployment_monitoring_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_model_deployment_monitoring_jobs(
+ job_service.ListModelDeploymentMonitoringJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_model_deployment_monitoring_jobs_pager(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_model_deployment_monitoring_jobs(
+ request={}, retry=retry, timeout=timeout
+ )
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+ for i in results
+ )
+
+
+def test_list_model_deployment_monitoring_jobs_pages(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async_pager():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_model_deployment_monitoring_jobs(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(
+ isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+ for i in responses
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async_pages():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_model_deployment_monitoring_jobs(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.UpdateModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_update_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.update_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.UpdateModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_update_model_deployment_monitoring_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.UpdateModelDeploymentMonitoringJobRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.update_model_deployment_monitoring_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest()
+
+
+def test_update_model_deployment_monitoring_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.update_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_model_deployment_monitoring_job
+ ] = mock_rpc
+ request = {}
+ client.update_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.update_model_deployment_monitoring_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.update_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ await client.update_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.update_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.UpdateModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.update_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.UpdateModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_async_from_dict():
+ await test_update_model_deployment_monitoring_job_async(request_type=dict)
+
+
+def test_update_model_deployment_monitoring_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.UpdateModelDeploymentMonitoringJobRequest()
+
+ request.model_deployment_monitoring_job.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "model_deployment_monitoring_job.name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.UpdateModelDeploymentMonitoringJobRequest()
+
+ request.model_deployment_monitoring_job.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.update_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "model_deployment_monitoring_job.name=name_value",
+ ) in kw["metadata"]
+
+
+def test_update_model_deployment_monitoring_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_model_deployment_monitoring_job(
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].model_deployment_monitoring_job
+ mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ )
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+def test_update_model_deployment_monitoring_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_model_deployment_monitoring_job(
+ job_service.UpdateModelDeploymentMonitoringJobRequest(),
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_model_deployment_monitoring_job(
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].model_deployment_monitoring_job
+ mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ )
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_model_deployment_monitoring_job(
+ job_service.UpdateModelDeploymentMonitoringJobRequest(),
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_delete_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_model_deployment_monitoring_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.DeleteModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_model_deployment_monitoring_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+
+def test_delete_model_deployment_monitoring_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_model_deployment_monitoring_job
+ ] = mock_rpc
+ request = {}
+ client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_model_deployment_monitoring_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.DeleteModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.DeleteModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_async_from_dict():
+ await test_delete_model_deployment_monitoring_job_async(request_type=dict)
+
+
+def test_delete_model_deployment_monitoring_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteModelDeploymentMonitoringJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.DeleteModelDeploymentMonitoringJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_model_deployment_monitoring_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_model_deployment_monitoring_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_model_deployment_monitoring_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_model_deployment_monitoring_job(
+ job_service.DeleteModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_model_deployment_monitoring_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_model_deployment_monitoring_job(
+ job_service.DeleteModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.PauseModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_pause_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.PauseModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_pause_model_deployment_monitoring_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.PauseModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.pause_model_deployment_monitoring_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+
+def test_pause_model_deployment_monitoring_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.pause_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.pause_model_deployment_monitoring_job
+ ] = mock_rpc
+ request = {}
+ client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.pause_model_deployment_monitoring_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.pause_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ await client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.PauseModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.PauseModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_async_from_dict():
+ await test_pause_model_deployment_monitoring_job_async(request_type=dict)
+
+
+def test_pause_model_deployment_monitoring_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.PauseModelDeploymentMonitoringJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.PauseModelDeploymentMonitoringJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_pause_model_deployment_monitoring_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.pause_model_deployment_monitoring_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_pause_model_deployment_monitoring_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.pause_model_deployment_monitoring_job(
+ job_service.PauseModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.pause_model_deployment_monitoring_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.pause_model_deployment_monitoring_job(
+ job_service.PauseModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ResumeModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_resume_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ResumeModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_resume_model_deployment_monitoring_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = job_service.ResumeModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.resume_model_deployment_monitoring_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+
+def test_resume_model_deployment_monitoring_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.resume_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.resume_model_deployment_monitoring_job
+ ] = mock_rpc
+ request = {}
+ client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_resume_model_deployment_monitoring_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.resume_model_deployment_monitoring_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.resume_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ await client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_resume_model_deployment_monitoring_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=job_service.ResumeModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = job_service.ResumeModelDeploymentMonitoringJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_resume_model_deployment_monitoring_job_async_from_dict():
+ await test_resume_model_deployment_monitoring_job_async(request_type=dict)
+
+
+def test_resume_model_deployment_monitoring_job_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ResumeModelDeploymentMonitoringJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_resume_model_deployment_monitoring_job_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = job_service.ResumeModelDeploymentMonitoringJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_resume_model_deployment_monitoring_job_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.resume_model_deployment_monitoring_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_resume_model_deployment_monitoring_job_flattened_error():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.resume_model_deployment_monitoring_job(
+ job_service.ResumeModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_resume_model_deployment_monitoring_job_flattened_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.resume_model_deployment_monitoring_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_resume_model_deployment_monitoring_job_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.resume_model_deployment_monitoring_job(
+ job_service.ResumeModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+def test_create_custom_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.create_custom_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_custom_job
+ ] = mock_rpc
+
+ request = {}
+ client.create_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_custom_job_rest_required_fields(
+ request_type=job_service.CreateCustomJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_custom_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_custom_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_custom_job.CustomJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_custom_job.CustomJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_custom_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_custom_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_custom_job._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "customJob",
+ )
+ )
+ )
+
+
+def test_create_custom_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_custom_job.CustomJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ custom_job=gca_custom_job.CustomJob(name="name_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = gca_custom_job.CustomJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_custom_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/customJobs" % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_custom_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_custom_job(
+ job_service.CreateCustomJobRequest(),
+ parent="parent_value",
+ custom_job=gca_custom_job.CustomJob(name="name_value"),
+ )
+
+
+def test_get_custom_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_custom_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_custom_job] = mock_rpc
+
+ request = {}
+ client.get_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_custom_job_rest_required_fields(
+ request_type=job_service.GetCustomJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_custom_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_custom_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = custom_job.CustomJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = custom_job.CustomJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_custom_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_custom_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_custom_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_custom_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = custom_job.CustomJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/customJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = custom_job.CustomJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_custom_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/customJobs/*}" % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_custom_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_custom_job(
+ job_service.GetCustomJobRequest(),
+ name="name_value",
+ )
+
+
+def test_list_custom_jobs_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_custom_jobs in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_custom_jobs
+ ] = mock_rpc
+
+ request = {}
+ client.list_custom_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_custom_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_custom_jobs_rest_required_fields(
+ request_type=job_service.ListCustomJobsRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_custom_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_custom_jobs._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "page_size",
+ "page_token",
+ "read_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListCustomJobsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListCustomJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_custom_jobs(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_custom_jobs_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_custom_jobs._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "pageSize",
+ "pageToken",
+ "readMask",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_custom_jobs_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListCustomJobsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = job_service.ListCustomJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_custom_jobs(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/customJobs" % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_custom_jobs_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_custom_jobs(
+ job_service.ListCustomJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_custom_jobs_rest_pager(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListCustomJobsResponse(
+ custom_jobs=[
+ custom_job.CustomJob(),
+ custom_job.CustomJob(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ job_service.ListCustomJobsResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_custom_jobs(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, custom_job.CustomJob) for i in results)
+
+ pages = list(client.list_custom_jobs(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_custom_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_custom_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_custom_job
+ ] = mock_rpc
+
+ request = {}
+ client.delete_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_custom_job_rest_required_fields(
+ request_type=job_service.DeleteCustomJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_custom_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_custom_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_custom_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_custom_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_custom_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_custom_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/customJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_custom_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/customJobs/*}" % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_custom_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_custom_job(
+ job_service.DeleteCustomJobRequest(),
+ name="name_value",
+ )
+
+
+def test_cancel_custom_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.cancel_custom_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_custom_job
+ ] = mock_rpc
+
+ request = {}
+ client.cancel_custom_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_custom_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_cancel_custom_job_rest_required_fields(
+ request_type=job_service.CancelCustomJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_custom_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_custom_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_custom_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_cancel_custom_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.cancel_custom_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_cancel_custom_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/customJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.cancel_custom_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/customJobs/*}:cancel"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_cancel_custom_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_custom_job(
+ job_service.CancelCustomJobRequest(),
+ name="name_value",
+ )
+
+
+def test_create_data_labeling_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_data_labeling_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_data_labeling_job
+ ] = mock_rpc
+
+ request = {}
+ client.create_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_data_labeling_job_rest_required_fields(
+ request_type=job_service.CreateDataLabelingJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_data_labeling_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_data_labeling_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_data_labeling_job.DataLabelingJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_data_labeling_job.DataLabelingJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_data_labeling_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_data_labeling_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_data_labeling_job._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "dataLabelingJob",
+ )
+ )
+ )
+
+
+def test_create_data_labeling_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_data_labeling_job.DataLabelingJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = gca_data_labeling_job.DataLabelingJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_data_labeling_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/dataLabelingJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_data_labeling_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_data_labeling_job(
+ job_service.CreateDataLabelingJobRequest(),
+ parent="parent_value",
+ data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
+ )
+
+
+def test_get_data_labeling_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_data_labeling_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_data_labeling_job
+ ] = mock_rpc
+
+ request = {}
+ client.get_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_data_labeling_job_rest_required_fields(
+ request_type=job_service.GetDataLabelingJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_data_labeling_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_data_labeling_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = data_labeling_job.DataLabelingJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = data_labeling_job.DataLabelingJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_data_labeling_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_data_labeling_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_data_labeling_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_data_labeling_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = data_labeling_job.DataLabelingJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = data_labeling_job.DataLabelingJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_data_labeling_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/dataLabelingJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_data_labeling_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_data_labeling_job(
+ job_service.GetDataLabelingJobRequest(),
+ name="name_value",
+ )
+
+
+def test_list_data_labeling_jobs_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_data_labeling_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_data_labeling_jobs
+ ] = mock_rpc
+
+ request = {}
+ client.list_data_labeling_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_data_labeling_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_data_labeling_jobs_rest_required_fields(
+ request_type=job_service.ListDataLabelingJobsRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_data_labeling_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_data_labeling_jobs._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "order_by",
+ "page_size",
+ "page_token",
+ "read_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListDataLabelingJobsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListDataLabelingJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_data_labeling_jobs(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_data_labeling_jobs_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_data_labeling_jobs._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "orderBy",
+ "pageSize",
+ "pageToken",
+ "readMask",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_data_labeling_jobs_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListDataLabelingJobsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = job_service.ListDataLabelingJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_data_labeling_jobs(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/dataLabelingJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_data_labeling_jobs_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_data_labeling_jobs(
+ job_service.ListDataLabelingJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_data_labeling_jobs_rest_pager(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListDataLabelingJobsResponse(
+ data_labeling_jobs=[
+ data_labeling_job.DataLabelingJob(),
+ data_labeling_job.DataLabelingJob(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ job_service.ListDataLabelingJobsResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_data_labeling_jobs(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, data_labeling_job.DataLabelingJob) for i in results)
+
+ pages = list(client.list_data_labeling_jobs(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_data_labeling_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_data_labeling_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_data_labeling_job
+ ] = mock_rpc
+
+ request = {}
+ client.delete_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_data_labeling_job_rest_required_fields(
+ request_type=job_service.DeleteDataLabelingJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_data_labeling_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_data_labeling_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_data_labeling_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_data_labeling_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_data_labeling_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_data_labeling_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_data_labeling_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/dataLabelingJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_data_labeling_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_data_labeling_job(
+ job_service.DeleteDataLabelingJobRequest(),
+ name="name_value",
+ )
+
+
+def test_cancel_data_labeling_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_data_labeling_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_data_labeling_job
+ ] = mock_rpc
+
+ request = {}
+ client.cancel_data_labeling_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_data_labeling_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_cancel_data_labeling_job_rest_required_fields(
+ request_type=job_service.CancelDataLabelingJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_data_labeling_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_data_labeling_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_data_labeling_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_cancel_data_labeling_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.cancel_data_labeling_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_cancel_data_labeling_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.cancel_data_labeling_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/dataLabelingJobs/*}:cancel"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_cancel_data_labeling_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_data_labeling_job(
+ job_service.CancelDataLabelingJobRequest(),
+ name="name_value",
+ )
+
+
+def test_create_hyperparameter_tuning_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_hyperparameter_tuning_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_hyperparameter_tuning_job
+ ] = mock_rpc
+
+ request = {}
+ client.create_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_hyperparameter_tuning_job_rest_required_fields(
+ request_type=job_service.CreateHyperparameterTuningJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_hyperparameter_tuning_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_hyperparameter_tuning_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_hyperparameter_tuning_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_hyperparameter_tuning_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.create_hyperparameter_tuning_job._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "hyperparameterTuningJob",
+ )
+ )
+ )
+
+
+def test_create_hyperparameter_tuning_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value"
+ ),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_hyperparameter_tuning_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/hyperparameterTuningJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_hyperparameter_tuning_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_hyperparameter_tuning_job(
+ job_service.CreateHyperparameterTuningJobRequest(),
+ parent="parent_value",
+ hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value"
+ ),
+ )
+
+
+def test_get_hyperparameter_tuning_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_hyperparameter_tuning_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_hyperparameter_tuning_job
+ ] = mock_rpc
+
+ request = {}
+ client.get_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_hyperparameter_tuning_job_rest_required_fields(
+ request_type=job_service.GetHyperparameterTuningJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_hyperparameter_tuning_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_hyperparameter_tuning_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_hyperparameter_tuning_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_hyperparameter_tuning_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_hyperparameter_tuning_job._get_unset_required_fields(
+ {}
+ )
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_hyperparameter_tuning_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_hyperparameter_tuning_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_hyperparameter_tuning_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_hyperparameter_tuning_job(
+ job_service.GetHyperparameterTuningJobRequest(),
+ name="name_value",
+ )
+
+
+def test_list_hyperparameter_tuning_jobs_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_hyperparameter_tuning_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_hyperparameter_tuning_jobs
+ ] = mock_rpc
+
+ request = {}
+ client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_hyperparameter_tuning_jobs_rest_required_fields(
+ request_type=job_service.ListHyperparameterTuningJobsRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_hyperparameter_tuning_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_hyperparameter_tuning_jobs._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "page_size",
+ "page_token",
+ "read_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListHyperparameterTuningJobsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListHyperparameterTuningJobsResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_hyperparameter_tuning_jobs(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_hyperparameter_tuning_jobs_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_hyperparameter_tuning_jobs._get_unset_required_fields(
+ {}
+ )
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "pageSize",
+ "pageToken",
+ "readMask",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_hyperparameter_tuning_jobs_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListHyperparameterTuningJobsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = job_service.ListHyperparameterTuningJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_hyperparameter_tuning_jobs(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/hyperparameterTuningJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_hyperparameter_tuning_jobs_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_hyperparameter_tuning_jobs(
+ job_service.ListHyperparameterTuningJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_hyperparameter_tuning_jobs_rest_pager(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListHyperparameterTuningJobsResponse(
+ hyperparameter_tuning_jobs=[
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ hyperparameter_tuning_job.HyperparameterTuningJob(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ job_service.ListHyperparameterTuningJobsResponse.to_json(x)
+ for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_hyperparameter_tuning_jobs(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
+ for i in results
+ )
+
+ pages = list(
+ client.list_hyperparameter_tuning_jobs(request=sample_request).pages
+ )
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_hyperparameter_tuning_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_hyperparameter_tuning_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_hyperparameter_tuning_job
+ ] = mock_rpc
+
+ request = {}
+ client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_hyperparameter_tuning_job_rest_required_fields(
+ request_type=job_service.DeleteHyperparameterTuningJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_hyperparameter_tuning_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_hyperparameter_tuning_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_hyperparameter_tuning_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_hyperparameter_tuning_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.delete_hyperparameter_tuning_job._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_hyperparameter_tuning_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_hyperparameter_tuning_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_hyperparameter_tuning_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_hyperparameter_tuning_job(
+ job_service.DeleteHyperparameterTuningJobRequest(),
+ name="name_value",
+ )
+
+
+def test_cancel_hyperparameter_tuning_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_hyperparameter_tuning_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_hyperparameter_tuning_job
+ ] = mock_rpc
+
+ request = {}
+ client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_cancel_hyperparameter_tuning_job_rest_required_fields(
+ request_type=job_service.CancelHyperparameterTuningJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_hyperparameter_tuning_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_hyperparameter_tuning_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_hyperparameter_tuning_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_cancel_hyperparameter_tuning_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.cancel_hyperparameter_tuning_job._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_cancel_hyperparameter_tuning_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.cancel_hyperparameter_tuning_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}:cancel"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_cancel_hyperparameter_tuning_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_hyperparameter_tuning_job(
+ job_service.CancelHyperparameterTuningJobRequest(),
+ name="name_value",
+ )
+
+
+def test_create_nas_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.create_nas_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.create_nas_job] = mock_rpc
+
+ request = {}
+ client.create_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_nas_job_rest_required_fields(
+ request_type=job_service.CreateNasJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_nas_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_nas_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_nas_job.NasJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_nas_job.NasJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_nas_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_nas_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_nas_job._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "nasJob",
+ )
+ )
+ )
+
+
+def test_create_nas_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_nas_job.NasJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ nas_job=gca_nas_job.NasJob(name="name_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = gca_nas_job.NasJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_nas_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/nasJobs" % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_nas_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_nas_job(
+ job_service.CreateNasJobRequest(),
+ parent="parent_value",
+ nas_job=gca_nas_job.NasJob(name="name_value"),
+ )
+
+
+def test_get_nas_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_nas_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_nas_job] = mock_rpc
+
+ request = {}
+ client.get_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_nas_job_rest_required_fields(request_type=job_service.GetNasJobRequest):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_nas_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_nas_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = nas_job.NasJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = nas_job.NasJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_nas_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_nas_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_nas_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_nas_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = nas_job.NasJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = nas_job.NasJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_nas_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/nasJobs/*}" % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_nas_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_nas_job(
+ job_service.GetNasJobRequest(),
+ name="name_value",
+ )
+
+
+def test_list_nas_jobs_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_nas_jobs in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.list_nas_jobs] = mock_rpc
+
+ request = {}
+ client.list_nas_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_nas_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_nas_jobs_rest_required_fields(
+ request_type=job_service.ListNasJobsRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_nas_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_nas_jobs._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "page_size",
+ "page_token",
+ "read_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListNasJobsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListNasJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_nas_jobs(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_nas_jobs_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_nas_jobs._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "pageSize",
+ "pageToken",
+ "readMask",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_nas_jobs_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListNasJobsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = job_service.ListNasJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_nas_jobs(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/nasJobs" % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_nas_jobs_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_nas_jobs(
+ job_service.ListNasJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_nas_jobs_rest_pager(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasJobsResponse(
+ nas_jobs=[
+ nas_job.NasJob(),
+ nas_job.NasJob(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(job_service.ListNasJobsResponse.to_json(x) for x in response)
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_nas_jobs(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, nas_job.NasJob) for i in results)
+
+ pages = list(client.list_nas_jobs(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_nas_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_nas_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.delete_nas_job] = mock_rpc
+
+ request = {}
+ client.delete_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_nas_job_rest_required_fields(
+ request_type=job_service.DeleteNasJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_nas_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_nas_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_nas_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_nas_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_nas_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_nas_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_nas_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/nasJobs/*}" % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_nas_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_nas_job(
+ job_service.DeleteNasJobRequest(),
+ name="name_value",
+ )
+
+
+def test_cancel_nas_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.cancel_nas_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.cancel_nas_job] = mock_rpc
+
+ request = {}
+ client.cancel_nas_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_nas_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_cancel_nas_job_rest_required_fields(
+ request_type=job_service.CancelNasJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_nas_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_nas_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_nas_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_cancel_nas_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.cancel_nas_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_cancel_nas_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.cancel_nas_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/nasJobs/*}:cancel"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_cancel_nas_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_nas_job(
+ job_service.CancelNasJobRequest(),
+ name="name_value",
+ )
+
+
+def test_get_nas_trial_detail_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_nas_trial_detail in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_nas_trial_detail
+ ] = mock_rpc
+
+ request = {}
+ client.get_nas_trial_detail(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_nas_trial_detail(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_nas_trial_detail_rest_required_fields(
+ request_type=job_service.GetNasTrialDetailRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_nas_trial_detail._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_nas_trial_detail._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = nas_job.NasTrialDetail()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = nas_job.NasTrialDetail.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_nas_trial_detail(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_nas_trial_detail_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_nas_trial_detail._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_nas_trial_detail_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = nas_job.NasTrialDetail()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/nasJobs/sample3/nasTrialDetails/sample4"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = nas_job.NasTrialDetail.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_nas_trial_detail(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/nasJobs/*/nasTrialDetails/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_nas_trial_detail_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_nas_trial_detail(
+ job_service.GetNasTrialDetailRequest(),
+ name="name_value",
+ )
+
+
+def test_list_nas_trial_details_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_nas_trial_details
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_nas_trial_details
+ ] = mock_rpc
+
+ request = {}
+ client.list_nas_trial_details(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_nas_trial_details(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_nas_trial_details_rest_required_fields(
+ request_type=job_service.ListNasTrialDetailsRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_nas_trial_details._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_nas_trial_details._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "page_size",
+ "page_token",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListNasTrialDetailsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListNasTrialDetailsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_nas_trial_details(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_nas_trial_details_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_nas_trial_details._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "pageSize",
+ "pageToken",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_nas_trial_details_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListNasTrialDetailsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "parent": "projects/sample1/locations/sample2/nasJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = job_service.ListNasTrialDetailsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_nas_trial_details(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*/nasJobs/*}/nasTrialDetails"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_nas_trial_details_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_nas_trial_details(
+ job_service.ListNasTrialDetailsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_nas_trial_details_rest_pager(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[],
+ next_page_token="def",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListNasTrialDetailsResponse(
+ nas_trial_details=[
+ nas_job.NasTrialDetail(),
+ nas_job.NasTrialDetail(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ job_service.ListNasTrialDetailsResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {
+ "parent": "projects/sample1/locations/sample2/nasJobs/sample3"
+ }
+
+ pager = client.list_nas_trial_details(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, nas_job.NasTrialDetail) for i in results)
+
+ pages = list(client.list_nas_trial_details(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_create_batch_prediction_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_batch_prediction_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_batch_prediction_job
+ ] = mock_rpc
+
+ request = {}
+ client.create_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_batch_prediction_job_rest_required_fields(
+ request_type=job_service.CreateBatchPredictionJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_batch_prediction_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_batch_prediction_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_batch_prediction_job.BatchPredictionJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_batch_prediction_job.BatchPredictionJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_batch_prediction_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_batch_prediction_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_batch_prediction_job._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "batchPredictionJob",
+ )
+ )
+ )
+
+
+def test_create_batch_prediction_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_batch_prediction_job.BatchPredictionJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value"
+ ),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = gca_batch_prediction_job.BatchPredictionJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_batch_prediction_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/batchPredictionJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_batch_prediction_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_batch_prediction_job(
+ job_service.CreateBatchPredictionJobRequest(),
+ parent="parent_value",
+ batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value"
+ ),
+ )
+
+
+def test_get_batch_prediction_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_batch_prediction_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_batch_prediction_job
+ ] = mock_rpc
+
+ request = {}
+ client.get_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_batch_prediction_job_rest_required_fields(
+ request_type=job_service.GetBatchPredictionJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_batch_prediction_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_batch_prediction_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = batch_prediction_job.BatchPredictionJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = batch_prediction_job.BatchPredictionJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_batch_prediction_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_batch_prediction_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_batch_prediction_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_batch_prediction_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = batch_prediction_job.BatchPredictionJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = batch_prediction_job.BatchPredictionJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_batch_prediction_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/batchPredictionJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_batch_prediction_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_batch_prediction_job(
+ job_service.GetBatchPredictionJobRequest(),
+ name="name_value",
+ )
+
+
+def test_list_batch_prediction_jobs_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_batch_prediction_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_batch_prediction_jobs
+ ] = mock_rpc
+
+ request = {}
+ client.list_batch_prediction_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_batch_prediction_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_batch_prediction_jobs_rest_required_fields(
+ request_type=job_service.ListBatchPredictionJobsRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_batch_prediction_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_batch_prediction_jobs._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "page_size",
+ "page_token",
+ "read_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListBatchPredictionJobsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListBatchPredictionJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_batch_prediction_jobs(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_batch_prediction_jobs_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_batch_prediction_jobs._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "pageSize",
+ "pageToken",
+ "readMask",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_batch_prediction_jobs_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListBatchPredictionJobsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = job_service.ListBatchPredictionJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_batch_prediction_jobs(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/batchPredictionJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_batch_prediction_jobs_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_batch_prediction_jobs(
+ job_service.ListBatchPredictionJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_batch_prediction_jobs_rest_pager(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListBatchPredictionJobsResponse(
+ batch_prediction_jobs=[
+ batch_prediction_job.BatchPredictionJob(),
+ batch_prediction_job.BatchPredictionJob(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ job_service.ListBatchPredictionJobsResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_batch_prediction_jobs(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(i, batch_prediction_job.BatchPredictionJob) for i in results
+ )
+
+ pages = list(client.list_batch_prediction_jobs(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_batch_prediction_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_batch_prediction_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_batch_prediction_job
+ ] = mock_rpc
+
+ request = {}
+ client.delete_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_batch_prediction_job_rest_required_fields(
+ request_type=job_service.DeleteBatchPredictionJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_batch_prediction_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_batch_prediction_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_batch_prediction_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_batch_prediction_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_batch_prediction_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_batch_prediction_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_batch_prediction_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/batchPredictionJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_batch_prediction_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_batch_prediction_job(
+ job_service.DeleteBatchPredictionJobRequest(),
+ name="name_value",
+ )
+
+
+def test_cancel_batch_prediction_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_batch_prediction_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_batch_prediction_job
+ ] = mock_rpc
+
+ request = {}
+ client.cancel_batch_prediction_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_batch_prediction_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_cancel_batch_prediction_job_rest_required_fields(
+ request_type=job_service.CancelBatchPredictionJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_batch_prediction_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_batch_prediction_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_batch_prediction_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_cancel_batch_prediction_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.cancel_batch_prediction_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_cancel_batch_prediction_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.cancel_batch_prediction_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/batchPredictionJobs/*}:cancel"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_cancel_batch_prediction_job_rest_flattened_error(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_batch_prediction_job(
+ job_service.CancelBatchPredictionJobRequest(),
+ name="name_value",
+ )
+
+
+def test_create_model_deployment_monitoring_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ client.create_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_model_deployment_monitoring_job_rest_required_fields(
+ request_type=job_service.CreateModelDeploymentMonitoringJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_model_deployment_monitoring_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_model_deployment_monitoring_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.create_model_deployment_monitoring_job._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "modelDeploymentMonitoringJob",
+ )
+ )
+ )
+
+
+def test_create_model_deployment_monitoring_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_model_deployment_monitoring_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/modelDeploymentMonitoringJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_model_deployment_monitoring_job_rest_flattened_error(
+ transport: str = "rest",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_model_deployment_monitoring_job(
+ job_service.CreateModelDeploymentMonitoringJobRequest(),
+ parent="parent_value",
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.search_model_deployment_monitoring_stats_anomalies
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.search_model_deployment_monitoring_stats_anomalies
+ ] = mock_rpc
+
+ request = {}
+ client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_rest_required_fields(
+ request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["model_deployment_monitoring_job"] = ""
+ request_init["deployed_model_id"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).search_model_deployment_monitoring_stats_anomalies._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request[
+ "modelDeploymentMonitoringJob"
+ ] = "model_deployment_monitoring_job_value"
+ jsonified_request["deployedModelId"] = "deployed_model_id_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).search_model_deployment_monitoring_stats_anomalies._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "modelDeploymentMonitoringJob" in jsonified_request
+ assert (
+ jsonified_request["modelDeploymentMonitoringJob"]
+ == "model_deployment_monitoring_job_value"
+ )
+ assert "deployedModelId" in jsonified_request
+ assert jsonified_request["deployedModelId"] == "deployed_model_id_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.search_model_deployment_monitoring_stats_anomalies(
+ request
+ )
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.search_model_deployment_monitoring_stats_anomalies._get_unset_required_fields(
+ {}
+ )
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "modelDeploymentMonitoringJob",
+ "deployedModelId",
+ "objectives",
+ )
+ )
+ )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "model_deployment_monitoring_job": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.search_model_deployment_monitoring_stats_anomalies(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{model_deployment_monitoring_job=projects/*/locations/*/modelDeploymentMonitoringJobs/*}:searchModelDeploymentMonitoringStatsAnomalies"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_rest_flattened_error(
+ transport: str = "rest",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.search_model_deployment_monitoring_stats_anomalies(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_rest_pager(
+ transport: str = "rest",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[],
+ next_page_token="def",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.to_json(x)
+ for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {
+ "model_deployment_monitoring_job": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+
+ pager = client.search_model_deployment_monitoring_stats_anomalies(
+ request=sample_request
+ )
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(
+ i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies
+ )
+ for i in results
+ )
+
+ pages = list(
+ client.search_model_deployment_monitoring_stats_anomalies(
+ request=sample_request
+ ).pages
+ )
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_get_model_deployment_monitoring_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_model_deployment_monitoring_job_rest_required_fields(
+ request_type=job_service.GetModelDeploymentMonitoringJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_model_deployment_monitoring_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_model_deployment_monitoring_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_model_deployment_monitoring_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_model_deployment_monitoring_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.get_model_deployment_monitoring_job._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_model_deployment_monitoring_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_model_deployment_monitoring_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_model_deployment_monitoring_job_rest_flattened_error(
+ transport: str = "rest",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_model_deployment_monitoring_job(
+ job_service.GetModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+def test_list_model_deployment_monitoring_jobs_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_model_deployment_monitoring_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_model_deployment_monitoring_jobs
+ ] = mock_rpc
+
+ request = {}
+ client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_model_deployment_monitoring_jobs_rest_required_fields(
+ request_type=job_service.ListModelDeploymentMonitoringJobsRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_model_deployment_monitoring_jobs._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_model_deployment_monitoring_jobs._get_unset_required_fields(
+ jsonified_request
+ )
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "page_size",
+ "page_token",
+ "read_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_model_deployment_monitoring_jobs(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_model_deployment_monitoring_jobs_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.list_model_deployment_monitoring_jobs._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "pageSize",
+ "pageToken",
+ "readMask",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_model_deployment_monitoring_jobs_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_model_deployment_monitoring_jobs(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{parent=projects/*/locations/*}/modelDeploymentMonitoringJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_model_deployment_monitoring_jobs_rest_flattened_error(
+ transport: str = "rest",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_model_deployment_monitoring_jobs(
+ job_service.ListModelDeploymentMonitoringJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_model_deployment_monitoring_jobs_rest_pager(transport: str = "rest"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="abc",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[],
+ next_page_token="def",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ model_deployment_monitoring_jobs=[
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ job_service.ListModelDeploymentMonitoringJobsResponse.to_json(x)
+ for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_model_deployment_monitoring_jobs(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+ for i in results
+ )
+
+ pages = list(
+ client.list_model_deployment_monitoring_jobs(request=sample_request).pages
+ )
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_update_model_deployment_monitoring_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.update_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ client.update_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_update_model_deployment_monitoring_job_rest_required_fields(
+ request_type=job_service.UpdateModelDeploymentMonitoringJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("update_mask",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "patch",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.update_model_deployment_monitoring_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_update_model_deployment_monitoring_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.update_model_deployment_monitoring_job._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (
+ set(("updateMask",))
+ & set(
+ (
+ "modelDeploymentMonitoringJob",
+ "updateMask",
+ )
+ )
+ )
+
+
+def test_update_model_deployment_monitoring_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "model_deployment_monitoring_job": {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.update_model_deployment_monitoring_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{model_deployment_monitoring_job.name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_update_model_deployment_monitoring_job_rest_flattened_error(
+ transport: str = "rest",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_model_deployment_monitoring_job(
+ job_service.UpdateModelDeploymentMonitoringJobRequest(),
+ model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+def test_delete_model_deployment_monitoring_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_model_deployment_monitoring_job_rest_required_fields(
+ request_type=job_service.DeleteModelDeploymentMonitoringJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_model_deployment_monitoring_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_model_deployment_monitoring_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.delete_model_deployment_monitoring_job._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_model_deployment_monitoring_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_model_deployment_monitoring_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_model_deployment_monitoring_job_rest_flattened_error(
+ transport: str = "rest",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_model_deployment_monitoring_job(
+ job_service.DeleteModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+def test_pause_model_deployment_monitoring_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.pause_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.pause_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_pause_model_deployment_monitoring_job_rest_required_fields(
+ request_type=job_service.PauseModelDeploymentMonitoringJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).pause_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).pause_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.pause_model_deployment_monitoring_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_pause_model_deployment_monitoring_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.pause_model_deployment_monitoring_job._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_pause_model_deployment_monitoring_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.pause_model_deployment_monitoring_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}:pause"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_pause_model_deployment_monitoring_job_rest_flattened_error(
+ transport: str = "rest",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.pause_model_deployment_monitoring_job(
+ job_service.PauseModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+def test_resume_model_deployment_monitoring_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.resume_model_deployment_monitoring_job
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.resume_model_deployment_monitoring_job
+ ] = mock_rpc
+
+ request = {}
+ client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_resume_model_deployment_monitoring_job_rest_required_fields(
+ request_type=job_service.ResumeModelDeploymentMonitoringJobRequest,
+):
+ transport_class = transports.JobServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).resume_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).resume_model_deployment_monitoring_job._get_unset_required_fields(
+ jsonified_request
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.resume_model_deployment_monitoring_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_resume_model_deployment_monitoring_job_rest_unset_required_fields():
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = (
+ transport.resume_model_deployment_monitoring_job._get_unset_required_fields({})
+ )
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_resume_model_deployment_monitoring_job_rest_flattened():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.resume_model_deployment_monitoring_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}:resume"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_resume_model_deployment_monitoring_job_rest_flattened_error(
+ transport: str = "rest",
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.resume_model_deployment_monitoring_job(
+ job_service.ResumeModelDeploymentMonitoringJobRequest(),
+ name="name_value",
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.JobServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.JobServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = JobServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.JobServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = JobServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = JobServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.JobServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = JobServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.JobServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = JobServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.JobServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.JobServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.JobServiceGrpcTransport,
+ transports.JobServiceGrpcAsyncIOTransport,
+ transports.JobServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = JobServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_custom_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ call.return_value = gca_custom_job.CustomJob()
+ client.create_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_custom_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ call.return_value = custom_job.CustomJob()
+ client.get_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_custom_jobs_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ call.return_value = job_service.ListCustomJobsResponse()
+ client.list_custom_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListCustomJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_custom_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_custom_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_data_labeling_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = gca_data_labeling_job.DataLabelingJob()
+ client.create_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_data_labeling_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = data_labeling_job.DataLabelingJob()
+ client.get_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_data_labeling_jobs_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ call.return_value = job_service.ListDataLabelingJobsResponse()
+ client.list_data_labeling_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListDataLabelingJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_data_labeling_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_data_labeling_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_hyperparameter_tuning_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+ client.create_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_hyperparameter_tuning_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
+ client.get_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_hyperparameter_tuning_jobs_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ call.return_value = job_service.ListHyperparameterTuningJobsResponse()
+ client.list_hyperparameter_tuning_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListHyperparameterTuningJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_hyperparameter_tuning_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_hyperparameter_tuning_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_nas_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ call.return_value = gca_nas_job.NasJob()
+ client.create_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_nas_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ call.return_value = nas_job.NasJob()
+ client.get_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_nas_jobs_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ call.return_value = job_service.ListNasJobsResponse()
+ client.list_nas_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListNasJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_nas_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_nas_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ call.return_value = None
+ client.cancel_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_nas_trial_detail_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ call.return_value = nas_job.NasTrialDetail()
+ client.get_nas_trial_detail(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetNasTrialDetailRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_nas_trial_details_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ call.return_value = job_service.ListNasTrialDetailsResponse()
+ client.list_nas_trial_details(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListNasTrialDetailsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_batch_prediction_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = gca_batch_prediction_job.BatchPredictionJob()
+ client.create_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_batch_prediction_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = batch_prediction_job.BatchPredictionJob()
+ client.get_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_batch_prediction_jobs_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ call.return_value = job_service.ListBatchPredictionJobsResponse()
+ client.list_batch_prediction_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListBatchPredictionJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_batch_prediction_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_batch_prediction_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_model_deployment_monitoring_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ client.create_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_search_model_deployment_monitoring_stats_anomalies_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ call.return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+ client.search_model_deployment_monitoring_stats_anomalies(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_model_deployment_monitoring_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = (
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ client.get_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_model_deployment_monitoring_jobs_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+ client.list_model_deployment_monitoring_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListModelDeploymentMonitoringJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_model_deployment_monitoring_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.UpdateModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_model_deployment_monitoring_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_pause_model_deployment_monitoring_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.pause_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.PauseModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_resume_model_deployment_monitoring_job_empty_call_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.resume_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ResumeModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = JobServiceAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_custom_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.create_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_custom_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.get_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_custom_jobs_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListCustomJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_custom_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListCustomJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_custom_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_custom_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+ )
+ await client.create_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+ )
+ await client.get_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListDataLabelingJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_data_labeling_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListDataLabelingJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.create_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.get_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListHyperparameterTuningJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_hyperparameter_tuning_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListHyperparameterTuningJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_nas_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.create_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_nas_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.get_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_nas_jobs_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListNasJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_nas_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListNasJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_nas_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_nas_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_nas_trial_detail_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ nas_job.NasTrialDetail(
+ name="name_value",
+ parameters="parameters_value",
+ )
+ )
+ await client.get_nas_trial_detail(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetNasTrialDetailRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListNasTrialDetailsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_nas_trial_details(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListNasTrialDetailsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.create_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.get_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListBatchPredictionJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_batch_prediction_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListBatchPredictionJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.create_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.search_model_deployment_monitoring_stats_anomalies(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.get_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ job_service.ListModelDeploymentMonitoringJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_model_deployment_monitoring_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListModelDeploymentMonitoringJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.update_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.UpdateModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.pause_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.PauseModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_resume_model_deployment_monitoring_job_empty_call_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.resume_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ResumeModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = JobServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_create_custom_job_rest_bad_request(
+ request_type=job_service.CreateCustomJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_custom_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateCustomJobRequest,
+ dict,
+ ],
+)
+def test_create_custom_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["custom_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "job_spec": {
+ "persistent_resource_id": "persistent_resource_id_value",
+ "worker_pool_specs": [
+ {
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [{"name": "name_value", "value": "value_value"}],
+ },
+ "python_package_spec": {
+ "executor_image_uri": "executor_image_uri_value",
+ "package_uris": ["package_uris_value1", "package_uris_value2"],
+ "python_module": "python_module_value",
+ "args": ["args_value1", "args_value2"],
+ "env": {},
+ },
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ }
+ ],
+ "scheduling": {
+ "timeout": {"seconds": 751, "nanos": 543},
+ "restart_job_on_worker_restart": True,
+ "strategy": 1,
+ "disable_retries": True,
+ "max_wait_duration": {},
+ },
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "base_output_directory": {"output_uri_prefix": "output_uri_prefix_value"},
+ "protected_artifact_location_id": "protected_artifact_location_id_value",
+ "tensorboard": "tensorboard_value",
+ "enable_web_access": True,
+ "enable_dashboard_access": True,
+ "experiment": "experiment_value",
+ "experiment_run": "experiment_run_value",
+ "models": ["models_value1", "models_value2"],
+ },
+ "state": 1,
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "web_access_uris": {},
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateCustomJobRequest.meta.fields["custom_job"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["custom_job"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["custom_job"][field])):
+ del request_init["custom_job"][field][i][subfield]
+ else:
+ del request_init["custom_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_custom_job.CustomJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_custom_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_custom_job.CustomJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_custom_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_create_custom_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_create_custom_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateCustomJobRequest.pb(
+ job_service.CreateCustomJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_custom_job.CustomJob.to_json(gca_custom_job.CustomJob())
+ req.return_value.content = return_value
+
+ request = job_service.CreateCustomJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_custom_job.CustomJob()
+
+ client.create_custom_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_custom_job_rest_bad_request(request_type=job_service.GetCustomJobRequest):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_custom_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetCustomJobRequest,
+ dict,
+ ],
+)
+def test_get_custom_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = custom_job.CustomJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_custom_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, custom_job.CustomJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_custom_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_get_custom_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_get_custom_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetCustomJobRequest.pb(
+ job_service.GetCustomJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = custom_job.CustomJob.to_json(custom_job.CustomJob())
+ req.return_value.content = return_value
+
+ request = job_service.GetCustomJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = custom_job.CustomJob()
+
+ client.get_custom_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_custom_jobs_rest_bad_request(
+ request_type=job_service.ListCustomJobsRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_custom_jobs(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListCustomJobsRequest,
+ dict,
+ ],
+)
+def test_list_custom_jobs_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListCustomJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListCustomJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_custom_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListCustomJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_custom_jobs_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_list_custom_jobs"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_list_custom_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListCustomJobsRequest.pb(
+ job_service.ListCustomJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListCustomJobsResponse.to_json(
+ job_service.ListCustomJobsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.ListCustomJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListCustomJobsResponse()
+
+ client.list_custom_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_custom_job_rest_bad_request(
+ request_type=job_service.DeleteCustomJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_custom_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteCustomJobRequest,
+ dict,
+ ],
+)
+def test_delete_custom_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_custom_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_custom_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_delete_custom_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_delete_custom_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteCustomJobRequest.pb(
+ job_service.DeleteCustomJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = job_service.DeleteCustomJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_custom_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_cancel_custom_job_rest_bad_request(
+ request_type=job_service.CancelCustomJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_custom_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelCustomJobRequest,
+ dict,
+ ],
+)
+def test_cancel_custom_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.cancel_custom_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_cancel_custom_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_cancel_custom_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelCustomJobRequest.pb(
+ job_service.CancelCustomJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelCustomJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.cancel_custom_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_create_data_labeling_job_rest_bad_request(
+ request_type=job_service.CreateDataLabelingJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_data_labeling_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateDataLabelingJobRequest,
+ dict,
+ ],
+)
+def test_create_data_labeling_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["data_labeling_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "datasets": ["datasets_value1", "datasets_value2"],
+ "annotation_labels": {},
+ "labeler_count": 1375,
+ "instruction_uri": "instruction_uri_value",
+ "inputs_schema_uri": "inputs_schema_uri_value",
+ "inputs": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "state": 1,
+ "labeling_progress": 1810,
+ "current_spend": {
+ "currency_code": "currency_code_value",
+ "units": 563,
+ "nanos": 543,
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "labels": {},
+ "specialist_pools": ["specialist_pools_value1", "specialist_pools_value2"],
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "active_learning_config": {
+ "max_data_item_count": 2005,
+ "max_data_item_percentage": 2506,
+ "sample_config": {
+ "initial_batch_sample_percentage": 3241,
+ "following_batch_sample_percentage": 3472,
+ "sample_strategy": 1,
+ },
+ "training_config": {"timeout_training_milli_hours": 3016},
+ },
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateDataLabelingJobRequest.meta.fields[
+ "data_labeling_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["data_labeling_job"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["data_labeling_job"][field])):
+ del request_init["data_labeling_job"][field][i][subfield]
+ else:
+ del request_init["data_labeling_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_data_labeling_job.DataLabelingJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_data_labeling_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.datasets == ["datasets_value"]
+ assert response.labeler_count == 1375
+ assert response.instruction_uri == "instruction_uri_value"
+ assert response.inputs_schema_uri == "inputs_schema_uri_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.labeling_progress == 1810
+ assert response.specialist_pools == ["specialist_pools_value"]
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_data_labeling_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_create_data_labeling_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_create_data_labeling_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateDataLabelingJobRequest.pb(
+ job_service.CreateDataLabelingJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_data_labeling_job.DataLabelingJob.to_json(
+ gca_data_labeling_job.DataLabelingJob()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.CreateDataLabelingJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_data_labeling_job.DataLabelingJob()
+
+ client.create_data_labeling_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_data_labeling_job_rest_bad_request(
+ request_type=job_service.GetDataLabelingJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_data_labeling_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetDataLabelingJobRequest,
+ dict,
+ ],
+)
+def test_get_data_labeling_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = data_labeling_job.DataLabelingJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_data_labeling_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, data_labeling_job.DataLabelingJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.datasets == ["datasets_value"]
+ assert response.labeler_count == 1375
+ assert response.instruction_uri == "instruction_uri_value"
+ assert response.inputs_schema_uri == "inputs_schema_uri_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.labeling_progress == 1810
+ assert response.specialist_pools == ["specialist_pools_value"]
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_data_labeling_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_get_data_labeling_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_get_data_labeling_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetDataLabelingJobRequest.pb(
+ job_service.GetDataLabelingJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = data_labeling_job.DataLabelingJob.to_json(
+ data_labeling_job.DataLabelingJob()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.GetDataLabelingJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = data_labeling_job.DataLabelingJob()
+
+ client.get_data_labeling_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_data_labeling_jobs_rest_bad_request(
+ request_type=job_service.ListDataLabelingJobsRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_data_labeling_jobs(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListDataLabelingJobsRequest,
+ dict,
+ ],
+)
+def test_list_data_labeling_jobs_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListDataLabelingJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListDataLabelingJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_data_labeling_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListDataLabelingJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_data_labeling_jobs_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_list_data_labeling_jobs"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_list_data_labeling_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListDataLabelingJobsRequest.pb(
+ job_service.ListDataLabelingJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListDataLabelingJobsResponse.to_json(
+ job_service.ListDataLabelingJobsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.ListDataLabelingJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListDataLabelingJobsResponse()
+
+ client.list_data_labeling_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_data_labeling_job_rest_bad_request(
+ request_type=job_service.DeleteDataLabelingJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_data_labeling_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteDataLabelingJobRequest,
+ dict,
+ ],
+)
+def test_delete_data_labeling_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_data_labeling_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_data_labeling_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_delete_data_labeling_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_delete_data_labeling_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteDataLabelingJobRequest.pb(
+ job_service.DeleteDataLabelingJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = job_service.DeleteDataLabelingJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_data_labeling_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_cancel_data_labeling_job_rest_bad_request(
+ request_type=job_service.CancelDataLabelingJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_data_labeling_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelDataLabelingJobRequest,
+ dict,
+ ],
+)
+def test_cancel_data_labeling_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.cancel_data_labeling_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_cancel_data_labeling_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_cancel_data_labeling_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelDataLabelingJobRequest.pb(
+ job_service.CancelDataLabelingJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelDataLabelingJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.cancel_data_labeling_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_create_hyperparameter_tuning_job_rest_bad_request(
+ request_type=job_service.CreateHyperparameterTuningJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_hyperparameter_tuning_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+def test_create_hyperparameter_tuning_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["hyperparameter_tuning_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "study_spec": {
+ "decay_curve_stopping_spec": {"use_elapsed_duration": True},
+ "median_automated_stopping_spec": {"use_elapsed_duration": True},
+ "convex_automated_stopping_spec": {
+ "max_step_count": 1513,
+ "min_step_count": 1511,
+ "min_measurement_count": 2257,
+ "learning_rate_parameter_name": "learning_rate_parameter_name_value",
+ "use_elapsed_duration": True,
+ "update_all_stopped_trials": True,
+ },
+ "metrics": [
+ {
+ "metric_id": "metric_id_value",
+ "goal": 1,
+ "safety_config": {
+ "safety_threshold": 0.17200000000000001,
+ "desired_min_safe_trials_fraction": 0.33640000000000003,
+ },
+ }
+ ],
+ "parameters": [
+ {
+ "double_value_spec": {
+ "min_value": 0.96,
+ "max_value": 0.962,
+ "default_value": 0.13770000000000002,
+ },
+ "integer_value_spec": {
+ "min_value": 960,
+ "max_value": 962,
+ "default_value": 1377,
+ },
+ "categorical_value_spec": {
+ "values": ["values_value1", "values_value2"],
+ "default_value": "default_value_value",
+ },
+ "discrete_value_spec": {
+ "values": [0.657, 0.658],
+ "default_value": 0.13770000000000002,
+ },
+ "parameter_id": "parameter_id_value",
+ "scale_type": 1,
+ "conditional_parameter_specs": [
+ {
+ "parent_discrete_values": {"values": [0.657, 0.658]},
+ "parent_int_values": {"values": [657, 658]},
+ "parent_categorical_values": {
+ "values": ["values_value1", "values_value2"]
+ },
+ "parameter_spec": {},
+ }
+ ],
+ }
+ ],
+ "algorithm": 2,
+ "observation_noise": 1,
+ "measurement_selection_type": 1,
+ "study_stopping_config": {
+ "should_stop_asap": {"value": True},
+ "minimum_runtime_constraint": {
+ "max_duration": {"seconds": 751, "nanos": 543},
+ "end_time": {"seconds": 751, "nanos": 543},
+ },
+ "maximum_runtime_constraint": {},
+ "min_num_trials": {"value": 541},
+ "max_num_trials": {},
+ "max_num_trials_no_progress": {},
+ "max_duration_no_progress": {},
+ },
+ },
+ "max_trial_count": 1609,
+ "parallel_trial_count": 2128,
+ "max_failed_trial_count": 2317,
+ "trial_job_spec": {
+ "persistent_resource_id": "persistent_resource_id_value",
+ "worker_pool_specs": [
+ {
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [{"name": "name_value", "value": "value_value"}],
+ },
+ "python_package_spec": {
+ "executor_image_uri": "executor_image_uri_value",
+ "package_uris": ["package_uris_value1", "package_uris_value2"],
+ "python_module": "python_module_value",
+ "args": ["args_value1", "args_value2"],
+ "env": {},
+ },
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ }
+ ],
+ "scheduling": {
+ "timeout": {},
+ "restart_job_on_worker_restart": True,
+ "strategy": 1,
+ "disable_retries": True,
+ "max_wait_duration": {},
+ },
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "base_output_directory": {"output_uri_prefix": "output_uri_prefix_value"},
+ "protected_artifact_location_id": "protected_artifact_location_id_value",
+ "tensorboard": "tensorboard_value",
+ "enable_web_access": True,
+ "enable_dashboard_access": True,
+ "experiment": "experiment_value",
+ "experiment_run": "experiment_run_value",
+ "models": ["models_value1", "models_value2"],
+ },
+ "trials": [
+ {
+ "name": "name_value",
+ "id": "id_value",
+ "state": 1,
+ "parameters": [
+ {
+ "parameter_id": "parameter_id_value",
+ "value": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ }
+ ],
+ "final_measurement": {
+ "elapsed_duration": {},
+ "step_count": 1092,
+ "metrics": [{"metric_id": "metric_id_value", "value": 0.541}],
+ },
+ "measurements": {},
+ "start_time": {},
+ "end_time": {},
+ "client_id": "client_id_value",
+ "infeasible_reason": "infeasible_reason_value",
+ "custom_job": "custom_job_value",
+ "web_access_uris": {},
+ }
+ ],
+ "state": 1,
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateHyperparameterTuningJobRequest.meta.fields[
+ "hyperparameter_tuning_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init[
+ "hyperparameter_tuning_job"
+ ].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(
+ 0, len(request_init["hyperparameter_tuning_job"][field])
+ ):
+ del request_init["hyperparameter_tuning_job"][field][i][subfield]
+ else:
+ del request_init["hyperparameter_tuning_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_hyperparameter_tuning_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_trial_count == 1609
+ assert response.parallel_trial_count == 2128
+ assert response.max_failed_trial_count == 2317
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_hyperparameter_tuning_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_create_hyperparameter_tuning_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_create_hyperparameter_tuning_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateHyperparameterTuningJobRequest.pb(
+ job_service.CreateHyperparameterTuningJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob.to_json(
+ gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.CreateHyperparameterTuningJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+
+ client.create_hyperparameter_tuning_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_hyperparameter_tuning_job_rest_bad_request(
+ request_type=job_service.GetHyperparameterTuningJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_hyperparameter_tuning_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+def test_get_hyperparameter_tuning_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_hyperparameter_tuning_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_trial_count == 1609
+ assert response.parallel_trial_count == 2128
+ assert response.max_failed_trial_count == 2317
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_hyperparameter_tuning_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_get_hyperparameter_tuning_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_get_hyperparameter_tuning_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetHyperparameterTuningJobRequest.pb(
+ job_service.GetHyperparameterTuningJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob.to_json(
+ hyperparameter_tuning_job.HyperparameterTuningJob()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.GetHyperparameterTuningJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
+
+ client.get_hyperparameter_tuning_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_hyperparameter_tuning_jobs_rest_bad_request(
+ request_type=job_service.ListHyperparameterTuningJobsRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_hyperparameter_tuning_jobs(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListHyperparameterTuningJobsRequest,
+ dict,
+ ],
+)
+def test_list_hyperparameter_tuning_jobs_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListHyperparameterTuningJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListHyperparameterTuningJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListHyperparameterTuningJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_hyperparameter_tuning_jobs_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_list_hyperparameter_tuning_jobs"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_list_hyperparameter_tuning_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListHyperparameterTuningJobsRequest.pb(
+ job_service.ListHyperparameterTuningJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListHyperparameterTuningJobsResponse.to_json(
+ job_service.ListHyperparameterTuningJobsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.ListHyperparameterTuningJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListHyperparameterTuningJobsResponse()
+
+ client.list_hyperparameter_tuning_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_hyperparameter_tuning_job_rest_bad_request(
+ request_type=job_service.DeleteHyperparameterTuningJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_hyperparameter_tuning_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+def test_delete_hyperparameter_tuning_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_hyperparameter_tuning_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_delete_hyperparameter_tuning_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_delete_hyperparameter_tuning_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteHyperparameterTuningJobRequest.pb(
+ job_service.DeleteHyperparameterTuningJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = job_service.DeleteHyperparameterTuningJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_hyperparameter_tuning_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_cancel_hyperparameter_tuning_job_rest_bad_request(
+ request_type=job_service.CancelHyperparameterTuningJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_hyperparameter_tuning_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+def test_cancel_hyperparameter_tuning_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_cancel_hyperparameter_tuning_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_cancel_hyperparameter_tuning_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelHyperparameterTuningJobRequest.pb(
+ job_service.CancelHyperparameterTuningJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelHyperparameterTuningJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.cancel_hyperparameter_tuning_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_create_nas_job_rest_bad_request(request_type=job_service.CreateNasJobRequest):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_nas_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateNasJobRequest,
+ dict,
+ ],
+)
+def test_create_nas_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["nas_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "nas_job_spec": {
+ "multi_trial_algorithm_spec": {
+ "multi_trial_algorithm": 1,
+ "metric": {"metric_id": "metric_id_value", "goal": 1},
+ "search_trial_spec": {
+ "search_trial_job_spec": {
+ "persistent_resource_id": "persistent_resource_id_value",
+ "worker_pool_specs": [
+ {
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [
+ {"name": "name_value", "value": "value_value"}
+ ],
+ },
+ "python_package_spec": {
+ "executor_image_uri": "executor_image_uri_value",
+ "package_uris": [
+ "package_uris_value1",
+ "package_uris_value2",
+ ],
+ "python_module": "python_module_value",
+ "args": ["args_value1", "args_value2"],
+ "env": {},
+ },
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ }
+ ],
+ "scheduling": {
+ "timeout": {"seconds": 751, "nanos": 543},
+ "restart_job_on_worker_restart": True,
+ "strategy": 1,
+ "disable_retries": True,
+ "max_wait_duration": {},
+ },
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "base_output_directory": {
+ "output_uri_prefix": "output_uri_prefix_value"
+ },
+ "protected_artifact_location_id": "protected_artifact_location_id_value",
+ "tensorboard": "tensorboard_value",
+ "enable_web_access": True,
+ "enable_dashboard_access": True,
+ "experiment": "experiment_value",
+ "experiment_run": "experiment_run_value",
+ "models": ["models_value1", "models_value2"],
+ },
+ "max_trial_count": 1609,
+ "max_parallel_trial_count": 2549,
+ "max_failed_trial_count": 2317,
+ },
+ "train_trial_spec": {
+ "train_trial_job_spec": {},
+ "max_parallel_trial_count": 2549,
+ "frequency": 978,
+ },
+ },
+ "resume_nas_job_id": "resume_nas_job_id_value",
+ "search_space_spec": "search_space_spec_value",
+ },
+ "nas_job_output": {
+ "multi_trial_job_output": {
+ "search_trials": [
+ {
+ "id": "id_value",
+ "state": 1,
+ "final_measurement": {
+ "elapsed_duration": {},
+ "step_count": 1092,
+ "metrics": [
+ {"metric_id": "metric_id_value", "value": 0.541}
+ ],
+ },
+ "start_time": {"seconds": 751, "nanos": 543},
+ "end_time": {},
+ }
+ ],
+ "train_trials": {},
+ }
+ },
+ "state": 1,
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "enable_restricted_image_training": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateNasJobRequest.meta.fields["nas_job"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["nas_job"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["nas_job"][field])):
+ del request_init["nas_job"][field][i][subfield]
+ else:
+ del request_init["nas_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_nas_job.NasJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_nas_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_nas_job.NasJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.enable_restricted_image_training is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_nas_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_create_nas_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_create_nas_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateNasJobRequest.pb(
+ job_service.CreateNasJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_nas_job.NasJob.to_json(gca_nas_job.NasJob())
+ req.return_value.content = return_value
+
+ request = job_service.CreateNasJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_nas_job.NasJob()
+
+ client.create_nas_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_nas_job_rest_bad_request(request_type=job_service.GetNasJobRequest):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_nas_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetNasJobRequest,
+ dict,
+ ],
+)
+def test_get_nas_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = nas_job.NasJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_nas_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, nas_job.NasJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.enable_restricted_image_training is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_nas_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_get_nas_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_get_nas_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetNasJobRequest.pb(job_service.GetNasJobRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = nas_job.NasJob.to_json(nas_job.NasJob())
+ req.return_value.content = return_value
+
+ request = job_service.GetNasJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = nas_job.NasJob()
+
+ client.get_nas_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_nas_jobs_rest_bad_request(request_type=job_service.ListNasJobsRequest):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_nas_jobs(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListNasJobsRequest,
+ dict,
+ ],
+)
+def test_list_nas_jobs_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListNasJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListNasJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_nas_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListNasJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_nas_jobs_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_list_nas_jobs"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_list_nas_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListNasJobsRequest.pb(job_service.ListNasJobsRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListNasJobsResponse.to_json(
+ job_service.ListNasJobsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.ListNasJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListNasJobsResponse()
+
+ client.list_nas_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_nas_job_rest_bad_request(request_type=job_service.DeleteNasJobRequest):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_nas_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteNasJobRequest,
+ dict,
+ ],
+)
+def test_delete_nas_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_nas_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_nas_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_delete_nas_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_delete_nas_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteNasJobRequest.pb(
+ job_service.DeleteNasJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = job_service.DeleteNasJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_nas_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_cancel_nas_job_rest_bad_request(request_type=job_service.CancelNasJobRequest):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_nas_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelNasJobRequest,
+ dict,
+ ],
+)
+def test_cancel_nas_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.cancel_nas_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_cancel_nas_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_cancel_nas_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelNasJobRequest.pb(
+ job_service.CancelNasJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelNasJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.cancel_nas_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_get_nas_trial_detail_rest_bad_request(
+ request_type=job_service.GetNasTrialDetailRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/nasJobs/sample3/nasTrialDetails/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_nas_trial_detail(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetNasTrialDetailRequest,
+ dict,
+ ],
+)
+def test_get_nas_trial_detail_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/nasJobs/sample3/nasTrialDetails/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = nas_job.NasTrialDetail(
+ name="name_value",
+ parameters="parameters_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = nas_job.NasTrialDetail.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_nas_trial_detail(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, nas_job.NasTrialDetail)
+ assert response.name == "name_value"
+ assert response.parameters == "parameters_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_nas_trial_detail_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_get_nas_trial_detail"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_get_nas_trial_detail"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetNasTrialDetailRequest.pb(
+ job_service.GetNasTrialDetailRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = nas_job.NasTrialDetail.to_json(nas_job.NasTrialDetail())
+ req.return_value.content = return_value
+
+ request = job_service.GetNasTrialDetailRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = nas_job.NasTrialDetail()
+
+ client.get_nas_trial_detail(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_nas_trial_details_rest_bad_request(
+ request_type=job_service.ListNasTrialDetailsRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_nas_trial_details(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListNasTrialDetailsRequest,
+ dict,
+ ],
+)
+def test_list_nas_trial_details_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListNasTrialDetailsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListNasTrialDetailsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_nas_trial_details(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListNasTrialDetailsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_nas_trial_details_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_list_nas_trial_details"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_list_nas_trial_details"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListNasTrialDetailsRequest.pb(
+ job_service.ListNasTrialDetailsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListNasTrialDetailsResponse.to_json(
+ job_service.ListNasTrialDetailsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.ListNasTrialDetailsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListNasTrialDetailsResponse()
+
+ client.list_nas_trial_details(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_create_batch_prediction_job_rest_bad_request(
+ request_type=job_service.CreateBatchPredictionJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_batch_prediction_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateBatchPredictionJobRequest,
+ dict,
+ ],
+)
+def test_create_batch_prediction_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["batch_prediction_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "model": "model_value",
+ "model_version_id": "model_version_id_value",
+ "unmanaged_container_model": {
+ "artifact_uri": "artifact_uri_value",
+ "predict_schemata": {
+ "instance_schema_uri": "instance_schema_uri_value",
+ "parameters_schema_uri": "parameters_schema_uri_value",
+ "prediction_schema_uri": "prediction_schema_uri_value",
+ },
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [{"name": "name_value", "value": "value_value"}],
+ "ports": [{"container_port": 1511}],
+ "predict_route": "predict_route_value",
+ "health_route": "health_route_value",
+ "grpc_ports": {},
+ "deployment_timeout": {"seconds": 751, "nanos": 543},
+ "shared_memory_size_mb": 2231,
+ "startup_probe": {
+ "exec_": {"command": ["command_value1", "command_value2"]},
+ "period_seconds": 1489,
+ "timeout_seconds": 1621,
+ },
+ "health_probe": {},
+ },
+ },
+ "input_config": {
+ "gcs_source": {"uris": ["uris_value1", "uris_value2"]},
+ "bigquery_source": {"input_uri": "input_uri_value"},
+ "instances_format": "instances_format_value",
+ },
+ "instance_config": {
+ "instance_type": "instance_type_value",
+ "key_field": "key_field_value",
+ "included_fields": ["included_fields_value1", "included_fields_value2"],
+ "excluded_fields": ["excluded_fields_value1", "excluded_fields_value2"],
+ },
+ "model_parameters": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "output_config": {
+ "gcs_destination": {"output_uri_prefix": "output_uri_prefix_value"},
+ "bigquery_destination": {"output_uri": "output_uri_value"},
+ "predictions_format": "predictions_format_value",
+ },
+ "dedicated_resources": {
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "starting_replica_count": 2355,
+ "max_replica_count": 1805,
+ },
+ "service_account": "service_account_value",
+ "manual_batch_tuning_parameters": {"batch_size": 1052},
+ "generate_explanation": True,
+ "explanation_spec": {
+ "parameters": {
+ "sampled_shapley_attribution": {"path_count": 1077},
+ "integrated_gradients_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {
+ "noise_sigma": 0.11660000000000001,
+ "feature_noise_sigma": {
+ "noise_sigma": [{"name": "name_value", "sigma": 0.529}]
+ },
+ "noisy_sample_count": 1947,
+ },
+ "blur_baseline_config": {"max_blur_sigma": 0.1482},
+ },
+ "xrai_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {},
+ "blur_baseline_config": {},
+ },
+ "examples": {
+ "example_gcs_source": {"data_format": 1, "gcs_source": {}},
+ "nearest_neighbor_search_config": {},
+ "presets": {"query": 1, "modality": 1},
+ "neighbor_count": 1494,
+ },
+ "top_k": 541,
+ "output_indices": {},
+ },
+ "metadata": {
+ "inputs": {},
+ "outputs": {},
+ "feature_attributions_schema_uri": "feature_attributions_schema_uri_value",
+ "latent_space_source": "latent_space_source_value",
+ },
+ },
+ "output_info": {
+ "gcs_output_directory": "gcs_output_directory_value",
+ "bigquery_output_dataset": "bigquery_output_dataset_value",
+ "bigquery_output_table": "bigquery_output_table_value",
+ },
+ "state": 1,
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "partial_failures": {},
+ "resources_consumed": {"replica_hours": 0.13920000000000002},
+ "completion_stats": {
+ "successful_count": 1736,
+ "failed_count": 1261,
+ "incomplete_count": 1720,
+ "successful_forecast_point_count": 3335,
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "disable_container_logging": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateBatchPredictionJobRequest.meta.fields[
+ "batch_prediction_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init[
+ "batch_prediction_job"
+ ].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["batch_prediction_job"][field])):
+ del request_init["batch_prediction_job"][field][i][subfield]
+ else:
+ del request_init["batch_prediction_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_batch_prediction_job.BatchPredictionJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_batch_prediction_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.service_account == "service_account_value"
+ assert response.generate_explanation is True
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.disable_container_logging is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_batch_prediction_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_create_batch_prediction_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_create_batch_prediction_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateBatchPredictionJobRequest.pb(
+ job_service.CreateBatchPredictionJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_batch_prediction_job.BatchPredictionJob.to_json(
+ gca_batch_prediction_job.BatchPredictionJob()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.CreateBatchPredictionJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_batch_prediction_job.BatchPredictionJob()
+
+ client.create_batch_prediction_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_batch_prediction_job_rest_bad_request(
+ request_type=job_service.GetBatchPredictionJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_batch_prediction_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetBatchPredictionJobRequest,
+ dict,
+ ],
+)
+def test_get_batch_prediction_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = batch_prediction_job.BatchPredictionJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_batch_prediction_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, batch_prediction_job.BatchPredictionJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.service_account == "service_account_value"
+ assert response.generate_explanation is True
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.disable_container_logging is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_batch_prediction_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_get_batch_prediction_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_get_batch_prediction_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetBatchPredictionJobRequest.pb(
+ job_service.GetBatchPredictionJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = batch_prediction_job.BatchPredictionJob.to_json(
+ batch_prediction_job.BatchPredictionJob()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.GetBatchPredictionJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = batch_prediction_job.BatchPredictionJob()
+
+ client.get_batch_prediction_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_batch_prediction_jobs_rest_bad_request(
+ request_type=job_service.ListBatchPredictionJobsRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_batch_prediction_jobs(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListBatchPredictionJobsRequest,
+ dict,
+ ],
+)
+def test_list_batch_prediction_jobs_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListBatchPredictionJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListBatchPredictionJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_batch_prediction_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListBatchPredictionJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_batch_prediction_jobs_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_list_batch_prediction_jobs"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_list_batch_prediction_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListBatchPredictionJobsRequest.pb(
+ job_service.ListBatchPredictionJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListBatchPredictionJobsResponse.to_json(
+ job_service.ListBatchPredictionJobsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.ListBatchPredictionJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListBatchPredictionJobsResponse()
+
+ client.list_batch_prediction_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_batch_prediction_job_rest_bad_request(
+ request_type=job_service.DeleteBatchPredictionJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_batch_prediction_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteBatchPredictionJobRequest,
+ dict,
+ ],
+)
+def test_delete_batch_prediction_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_batch_prediction_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_batch_prediction_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_delete_batch_prediction_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_delete_batch_prediction_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteBatchPredictionJobRequest.pb(
+ job_service.DeleteBatchPredictionJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = job_service.DeleteBatchPredictionJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_batch_prediction_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_cancel_batch_prediction_job_rest_bad_request(
+ request_type=job_service.CancelBatchPredictionJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_batch_prediction_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelBatchPredictionJobRequest,
+ dict,
+ ],
+)
+def test_cancel_batch_prediction_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.cancel_batch_prediction_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_cancel_batch_prediction_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_cancel_batch_prediction_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelBatchPredictionJobRequest.pb(
+ job_service.CancelBatchPredictionJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelBatchPredictionJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.cancel_batch_prediction_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_create_model_deployment_monitoring_job_rest_bad_request(
+ request_type=job_service.CreateModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_create_model_deployment_monitoring_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["model_deployment_monitoring_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "endpoint": "endpoint_value",
+ "state": 1,
+ "schedule_state": 1,
+ "latest_monitoring_pipeline_metadata": {
+ "run_time": {"seconds": 751, "nanos": 543},
+ "status": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ },
+ "model_deployment_monitoring_objective_configs": [
+ {
+ "deployed_model_id": "deployed_model_id_value",
+ "objective_config": {
+ "training_dataset": {
+ "dataset": "dataset_value",
+ "gcs_source": {"uris": ["uris_value1", "uris_value2"]},
+ "bigquery_source": {"input_uri": "input_uri_value"},
+ "data_format": "data_format_value",
+ "target_field": "target_field_value",
+ "logging_sampling_strategy": {
+ "random_sample_config": {"sample_rate": 0.1165}
+ },
+ },
+ "training_prediction_skew_detection_config": {
+ "skew_thresholds": {},
+ "attribution_score_skew_thresholds": {},
+ "default_skew_threshold": {"value": 0.541},
+ },
+ "prediction_drift_detection_config": {
+ "drift_thresholds": {},
+ "attribution_score_drift_thresholds": {},
+ "default_drift_threshold": {},
+ },
+ "explanation_config": {
+ "enable_feature_attributes": True,
+ "explanation_baseline": {
+ "gcs": {"output_uri_prefix": "output_uri_prefix_value"},
+ "bigquery": {"output_uri": "output_uri_value"},
+ "prediction_format": 2,
+ },
+ },
+ },
+ }
+ ],
+ "model_deployment_monitoring_schedule_config": {
+ "monitor_interval": {"seconds": 751, "nanos": 543},
+ "monitor_window": {},
+ },
+ "logging_sampling_strategy": {},
+ "model_monitoring_alert_config": {
+ "email_alert_config": {
+ "user_emails": ["user_emails_value1", "user_emails_value2"]
+ },
+ "enable_logging": True,
+ "notification_channels": [
+ "notification_channels_value1",
+ "notification_channels_value2",
+ ],
+ },
+ "predict_instance_schema_uri": "predict_instance_schema_uri_value",
+ "sample_predict_instance": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "analysis_instance_schema_uri": "analysis_instance_schema_uri_value",
+ "bigquery_tables": [
+ {
+ "log_source": 1,
+ "log_type": 1,
+ "bigquery_table_path": "bigquery_table_path_value",
+ "request_response_logging_schema_version": "request_response_logging_schema_version_value",
+ }
+ ],
+ "log_ttl": {},
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "next_schedule_time": {},
+ "stats_anomalies_base_directory": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "enable_monitoring_pipeline_logs": True,
+ "error": {},
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateModelDeploymentMonitoringJobRequest.meta.fields[
+ "model_deployment_monitoring_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init[
+ "model_deployment_monitoring_job"
+ ].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(
+ 0, len(request_init["model_deployment_monitoring_job"][field])
+ ):
+ del request_init["model_deployment_monitoring_job"][field][i][
+ subfield
+ ]
+ else:
+ del request_init["model_deployment_monitoring_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob
+ )
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.endpoint == "endpoint_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert (
+ response.schedule_state
+ == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+ )
+ assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
+ assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
+ assert response.enable_monitoring_pipeline_logs is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_model_deployment_monitoring_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "post_create_model_deployment_monitoring_job",
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "pre_create_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateModelDeploymentMonitoringJobRequest.pb(
+ job_service.CreateModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.to_json(
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ )
+ req.return_value.content = return_value
+
+ request = job_service.CreateModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+
+ client.create_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_rest_bad_request(
+ request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "model_deployment_monitoring_job": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.search_model_deployment_monitoring_stats_anomalies(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
+ dict,
+ ],
+)
+def test_search_model_deployment_monitoring_stats_anomalies_rest_call_success(
+ request_type,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "model_deployment_monitoring_job": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.search_model_deployment_monitoring_stats_anomalies(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager
+ )
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_search_model_deployment_monitoring_stats_anomalies_rest_interceptors(
+ null_interceptor,
+):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "post_search_model_deployment_monitoring_stats_anomalies",
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "pre_search_model_deployment_monitoring_stats_anomalies",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.pb(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+ )
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.to_json(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+ )
+ req.return_value.content = return_value
+
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+
+ client.search_model_deployment_monitoring_stats_anomalies(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_model_deployment_monitoring_job_rest_bad_request(
+ request_type=job_service.GetModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_get_model_deployment_monitoring_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob
+ )
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.endpoint == "endpoint_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert (
+ response.schedule_state
+ == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+ )
+ assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
+ assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
+ assert response.enable_monitoring_pipeline_logs is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_model_deployment_monitoring_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor, "post_get_model_deployment_monitoring_job"
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor, "pre_get_model_deployment_monitoring_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetModelDeploymentMonitoringJobRequest.pb(
+ job_service.GetModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob.to_json(
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ )
+ req.return_value.content = return_value
+
+ request = job_service.GetModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+
+ client.get_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_model_deployment_monitoring_jobs_rest_bad_request(
+ request_type=job_service.ListModelDeploymentMonitoringJobsRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_model_deployment_monitoring_jobs(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListModelDeploymentMonitoringJobsRequest,
+ dict,
+ ],
+)
+def test_list_model_deployment_monitoring_jobs_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_model_deployment_monitoring_jobs_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "post_list_model_deployment_monitoring_jobs",
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "pre_list_model_deployment_monitoring_jobs",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListModelDeploymentMonitoringJobsRequest.pb(
+ job_service.ListModelDeploymentMonitoringJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse.to_json(
+ job_service.ListModelDeploymentMonitoringJobsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = job_service.ListModelDeploymentMonitoringJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+
+ client.list_model_deployment_monitoring_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_update_model_deployment_monitoring_job_rest_bad_request(
+ request_type=job_service.UpdateModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "model_deployment_monitoring_job": {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.update_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.UpdateModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_update_model_deployment_monitoring_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "model_deployment_monitoring_job": {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ }
+ request_init["model_deployment_monitoring_job"] = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3",
+ "display_name": "display_name_value",
+ "endpoint": "endpoint_value",
+ "state": 1,
+ "schedule_state": 1,
+ "latest_monitoring_pipeline_metadata": {
+ "run_time": {"seconds": 751, "nanos": 543},
+ "status": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ },
+ "model_deployment_monitoring_objective_configs": [
+ {
+ "deployed_model_id": "deployed_model_id_value",
+ "objective_config": {
+ "training_dataset": {
+ "dataset": "dataset_value",
+ "gcs_source": {"uris": ["uris_value1", "uris_value2"]},
+ "bigquery_source": {"input_uri": "input_uri_value"},
+ "data_format": "data_format_value",
+ "target_field": "target_field_value",
+ "logging_sampling_strategy": {
+ "random_sample_config": {"sample_rate": 0.1165}
+ },
+ },
+ "training_prediction_skew_detection_config": {
+ "skew_thresholds": {},
+ "attribution_score_skew_thresholds": {},
+ "default_skew_threshold": {"value": 0.541},
+ },
+ "prediction_drift_detection_config": {
+ "drift_thresholds": {},
+ "attribution_score_drift_thresholds": {},
+ "default_drift_threshold": {},
+ },
+ "explanation_config": {
+ "enable_feature_attributes": True,
+ "explanation_baseline": {
+ "gcs": {"output_uri_prefix": "output_uri_prefix_value"},
+ "bigquery": {"output_uri": "output_uri_value"},
+ "prediction_format": 2,
+ },
+ },
+ },
+ }
+ ],
+ "model_deployment_monitoring_schedule_config": {
+ "monitor_interval": {"seconds": 751, "nanos": 543},
+ "monitor_window": {},
+ },
+ "logging_sampling_strategy": {},
+ "model_monitoring_alert_config": {
+ "email_alert_config": {
+ "user_emails": ["user_emails_value1", "user_emails_value2"]
+ },
+ "enable_logging": True,
+ "notification_channels": [
+ "notification_channels_value1",
+ "notification_channels_value2",
+ ],
+ },
+ "predict_instance_schema_uri": "predict_instance_schema_uri_value",
+ "sample_predict_instance": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "analysis_instance_schema_uri": "analysis_instance_schema_uri_value",
+ "bigquery_tables": [
+ {
+ "log_source": 1,
+ "log_type": 1,
+ "bigquery_table_path": "bigquery_table_path_value",
+ "request_response_logging_schema_version": "request_response_logging_schema_version_value",
+ }
+ ],
+ "log_ttl": {},
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "next_schedule_time": {},
+ "stats_anomalies_base_directory": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "enable_monitoring_pipeline_logs": True,
+ "error": {},
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.UpdateModelDeploymentMonitoringJobRequest.meta.fields[
+ "model_deployment_monitoring_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init[
+ "model_deployment_monitoring_job"
+ ].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(
+ 0, len(request_init["model_deployment_monitoring_job"][field])
+ ):
+ del request_init["model_deployment_monitoring_job"][field][i][
+ subfield
+ ]
+ else:
+ del request_init["model_deployment_monitoring_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.update_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_model_deployment_monitoring_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "post_update_model_deployment_monitoring_job",
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "pre_update_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.UpdateModelDeploymentMonitoringJobRequest.pb(
+ job_service.UpdateModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = job_service.UpdateModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.update_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_model_deployment_monitoring_job_rest_bad_request(
+ request_type=job_service.DeleteModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_delete_model_deployment_monitoring_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_model_deployment_monitoring_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "post_delete_model_deployment_monitoring_job",
+ ) as post, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "pre_delete_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteModelDeploymentMonitoringJobRequest.pb(
+ job_service.DeleteModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = job_service.DeleteModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_pause_model_deployment_monitoring_job_rest_bad_request(
+ request_type=job_service.PauseModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.pause_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.PauseModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_pause_model_deployment_monitoring_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_pause_model_deployment_monitoring_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "pre_pause_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.PauseModelDeploymentMonitoringJobRequest.pb(
+ job_service.PauseModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.PauseModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.pause_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_resume_model_deployment_monitoring_job_rest_bad_request(
+ request_type=job_service.ResumeModelDeploymentMonitoringJobRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.resume_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ResumeModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+def test_resume_model_deployment_monitoring_job_rest_call_success(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_resume_model_deployment_monitoring_job_rest_interceptors(null_interceptor):
+ transport = transports.JobServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.JobServiceRestInterceptor(),
+ )
+ client = JobServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.JobServiceRestInterceptor,
+ "pre_resume_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.ResumeModelDeploymentMonitoringJobRequest.pb(
+ job_service.ResumeModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.ResumeModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.resume_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_custom_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ client.create_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_custom_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ client.get_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_custom_jobs_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ client.list_custom_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListCustomJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_custom_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ client.delete_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_custom_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ client.cancel_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_data_labeling_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ client.create_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_data_labeling_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ client.get_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_data_labeling_jobs_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ client.list_data_labeling_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListDataLabelingJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_data_labeling_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ client.delete_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_data_labeling_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ client.cancel_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_hyperparameter_tuning_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ client.create_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_hyperparameter_tuning_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ client.get_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_hyperparameter_tuning_jobs_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ client.list_hyperparameter_tuning_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListHyperparameterTuningJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_hyperparameter_tuning_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ client.delete_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_hyperparameter_tuning_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ client.cancel_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_nas_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ client.create_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_nas_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ client.get_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_nas_jobs_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ client.list_nas_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListNasJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_nas_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ client.delete_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_nas_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ client.cancel_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_nas_trial_detail_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ client.get_nas_trial_detail(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetNasTrialDetailRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_nas_trial_details_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ client.list_nas_trial_details(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListNasTrialDetailsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_batch_prediction_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ client.create_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_batch_prediction_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ client.get_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_batch_prediction_jobs_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ client.list_batch_prediction_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListBatchPredictionJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_batch_prediction_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ client.delete_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_batch_prediction_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ client.cancel_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_model_deployment_monitoring_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ client.create_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_search_model_deployment_monitoring_stats_anomalies_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ client.search_model_deployment_monitoring_stats_anomalies(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_model_deployment_monitoring_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ client.get_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_model_deployment_monitoring_jobs_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ client.list_model_deployment_monitoring_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListModelDeploymentMonitoringJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_model_deployment_monitoring_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ client.update_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.UpdateModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_model_deployment_monitoring_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ client.delete_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_pause_model_deployment_monitoring_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ client.pause_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.PauseModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_resume_model_deployment_monitoring_job_empty_call_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ client.resume_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ResumeModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+def test_job_service_rest_lro_client():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AbstractOperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = JobServiceAsyncClient.get_transport_class("rest_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_rest_asyncio_bad_request(
+ request_type=job_service.CreateCustomJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_custom_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateCustomJobRequest,
+ dict,
+ ],
+)
+async def test_create_custom_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["custom_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "job_spec": {
+ "persistent_resource_id": "persistent_resource_id_value",
+ "worker_pool_specs": [
+ {
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [{"name": "name_value", "value": "value_value"}],
+ },
+ "python_package_spec": {
+ "executor_image_uri": "executor_image_uri_value",
+ "package_uris": ["package_uris_value1", "package_uris_value2"],
+ "python_module": "python_module_value",
+ "args": ["args_value1", "args_value2"],
+ "env": {},
+ },
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ }
+ ],
+ "scheduling": {
+ "timeout": {"seconds": 751, "nanos": 543},
+ "restart_job_on_worker_restart": True,
+ "strategy": 1,
+ "disable_retries": True,
+ "max_wait_duration": {},
+ },
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "base_output_directory": {"output_uri_prefix": "output_uri_prefix_value"},
+ "protected_artifact_location_id": "protected_artifact_location_id_value",
+ "tensorboard": "tensorboard_value",
+ "enable_web_access": True,
+ "enable_dashboard_access": True,
+ "experiment": "experiment_value",
+ "experiment_run": "experiment_run_value",
+ "models": ["models_value1", "models_value2"],
+ },
+ "state": 1,
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "web_access_uris": {},
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateCustomJobRequest.meta.fields["custom_job"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["custom_job"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["custom_job"][field])):
+ del request_init["custom_job"][field][i][subfield]
+ else:
+ del request_init["custom_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_custom_job.CustomJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_custom_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_custom_job.CustomJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_custom_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_create_custom_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_create_custom_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateCustomJobRequest.pb(
+ job_service.CreateCustomJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_custom_job.CustomJob.to_json(gca_custom_job.CustomJob())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.CreateCustomJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_custom_job.CustomJob()
+
+ await client.create_custom_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_rest_asyncio_bad_request(
+ request_type=job_service.GetCustomJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_custom_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetCustomJobRequest,
+ dict,
+ ],
+)
+async def test_get_custom_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = custom_job.CustomJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = custom_job.CustomJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_custom_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, custom_job.CustomJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_custom_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_get_custom_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_get_custom_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetCustomJobRequest.pb(
+ job_service.GetCustomJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = custom_job.CustomJob.to_json(custom_job.CustomJob())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.GetCustomJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = custom_job.CustomJob()
+
+ await client.get_custom_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_rest_asyncio_bad_request(
+ request_type=job_service.ListCustomJobsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_custom_jobs(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListCustomJobsRequest,
+ dict,
+ ],
+)
+async def test_list_custom_jobs_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListCustomJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListCustomJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_custom_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListCustomJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_custom_jobs_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_list_custom_jobs"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_list_custom_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListCustomJobsRequest.pb(
+ job_service.ListCustomJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListCustomJobsResponse.to_json(
+ job_service.ListCustomJobsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.ListCustomJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListCustomJobsResponse()
+
+ await client.list_custom_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_rest_asyncio_bad_request(
+ request_type=job_service.DeleteCustomJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_custom_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteCustomJobRequest,
+ dict,
+ ],
+)
+async def test_delete_custom_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_custom_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_custom_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_delete_custom_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_delete_custom_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteCustomJobRequest.pb(
+ job_service.DeleteCustomJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.DeleteCustomJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_custom_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_rest_asyncio_bad_request(
+ request_type=job_service.CancelCustomJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_custom_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelCustomJobRequest,
+ dict,
+ ],
+)
+async def test_cancel_custom_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/customJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.cancel_custom_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_cancel_custom_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_cancel_custom_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelCustomJobRequest.pb(
+ job_service.CancelCustomJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelCustomJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.cancel_custom_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_rest_asyncio_bad_request(
+ request_type=job_service.CreateDataLabelingJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_data_labeling_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateDataLabelingJobRequest,
+ dict,
+ ],
+)
+async def test_create_data_labeling_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["data_labeling_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "datasets": ["datasets_value1", "datasets_value2"],
+ "annotation_labels": {},
+ "labeler_count": 1375,
+ "instruction_uri": "instruction_uri_value",
+ "inputs_schema_uri": "inputs_schema_uri_value",
+ "inputs": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "state": 1,
+ "labeling_progress": 1810,
+ "current_spend": {
+ "currency_code": "currency_code_value",
+ "units": 563,
+ "nanos": 543,
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "labels": {},
+ "specialist_pools": ["specialist_pools_value1", "specialist_pools_value2"],
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "active_learning_config": {
+ "max_data_item_count": 2005,
+ "max_data_item_percentage": 2506,
+ "sample_config": {
+ "initial_batch_sample_percentage": 3241,
+ "following_batch_sample_percentage": 3472,
+ "sample_strategy": 1,
+ },
+ "training_config": {"timeout_training_milli_hours": 3016},
+ },
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateDataLabelingJobRequest.meta.fields[
+ "data_labeling_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["data_labeling_job"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["data_labeling_job"][field])):
+ del request_init["data_labeling_job"][field][i][subfield]
+ else:
+ del request_init["data_labeling_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_data_labeling_job.DataLabelingJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_data_labeling_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.datasets == ["datasets_value"]
+ assert response.labeler_count == 1375
+ assert response.instruction_uri == "instruction_uri_value"
+ assert response.inputs_schema_uri == "inputs_schema_uri_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.labeling_progress == 1810
+ assert response.specialist_pools == ["specialist_pools_value"]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_data_labeling_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_create_data_labeling_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_create_data_labeling_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateDataLabelingJobRequest.pb(
+ job_service.CreateDataLabelingJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_data_labeling_job.DataLabelingJob.to_json(
+ gca_data_labeling_job.DataLabelingJob()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.CreateDataLabelingJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_data_labeling_job.DataLabelingJob()
+
+ await client.create_data_labeling_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_rest_asyncio_bad_request(
+ request_type=job_service.GetDataLabelingJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_data_labeling_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetDataLabelingJobRequest,
+ dict,
+ ],
+)
+async def test_get_data_labeling_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = data_labeling_job.DataLabelingJob(
+ name="name_value",
+ display_name="display_name_value",
+ datasets=["datasets_value"],
+ labeler_count=1375,
+ instruction_uri="instruction_uri_value",
+ inputs_schema_uri="inputs_schema_uri_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ labeling_progress=1810,
+ specialist_pools=["specialist_pools_value"],
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = data_labeling_job.DataLabelingJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_data_labeling_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, data_labeling_job.DataLabelingJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.datasets == ["datasets_value"]
+ assert response.labeler_count == 1375
+ assert response.instruction_uri == "instruction_uri_value"
+ assert response.inputs_schema_uri == "inputs_schema_uri_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.labeling_progress == 1810
+ assert response.specialist_pools == ["specialist_pools_value"]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_data_labeling_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_get_data_labeling_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_get_data_labeling_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetDataLabelingJobRequest.pb(
+ job_service.GetDataLabelingJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = data_labeling_job.DataLabelingJob.to_json(
+ data_labeling_job.DataLabelingJob()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.GetDataLabelingJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = data_labeling_job.DataLabelingJob()
+
+ await client.get_data_labeling_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_rest_asyncio_bad_request(
+ request_type=job_service.ListDataLabelingJobsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_data_labeling_jobs(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListDataLabelingJobsRequest,
+ dict,
+ ],
+)
+async def test_list_data_labeling_jobs_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListDataLabelingJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListDataLabelingJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_data_labeling_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_data_labeling_jobs_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_list_data_labeling_jobs"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_list_data_labeling_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListDataLabelingJobsRequest.pb(
+ job_service.ListDataLabelingJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListDataLabelingJobsResponse.to_json(
+ job_service.ListDataLabelingJobsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.ListDataLabelingJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListDataLabelingJobsResponse()
+
+ await client.list_data_labeling_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_rest_asyncio_bad_request(
+ request_type=job_service.DeleteDataLabelingJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_data_labeling_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteDataLabelingJobRequest,
+ dict,
+ ],
+)
+async def test_delete_data_labeling_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_data_labeling_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_data_labeling_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_delete_data_labeling_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_delete_data_labeling_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteDataLabelingJobRequest.pb(
+ job_service.DeleteDataLabelingJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.DeleteDataLabelingJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_data_labeling_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_rest_asyncio_bad_request(
+ request_type=job_service.CancelDataLabelingJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_data_labeling_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelDataLabelingJobRequest,
+ dict,
+ ],
+)
+async def test_cancel_data_labeling_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/dataLabelingJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.cancel_data_labeling_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_cancel_data_labeling_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_cancel_data_labeling_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelDataLabelingJobRequest.pb(
+ job_service.CancelDataLabelingJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelDataLabelingJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.cancel_data_labeling_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_rest_asyncio_bad_request(
+ request_type=job_service.CreateHyperparameterTuningJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_hyperparameter_tuning_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+async def test_create_hyperparameter_tuning_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["hyperparameter_tuning_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "study_spec": {
+ "decay_curve_stopping_spec": {"use_elapsed_duration": True},
+ "median_automated_stopping_spec": {"use_elapsed_duration": True},
+ "convex_automated_stopping_spec": {
+ "max_step_count": 1513,
+ "min_step_count": 1511,
+ "min_measurement_count": 2257,
+ "learning_rate_parameter_name": "learning_rate_parameter_name_value",
+ "use_elapsed_duration": True,
+ "update_all_stopped_trials": True,
+ },
+ "metrics": [
+ {
+ "metric_id": "metric_id_value",
+ "goal": 1,
+ "safety_config": {
+ "safety_threshold": 0.17200000000000001,
+ "desired_min_safe_trials_fraction": 0.33640000000000003,
+ },
+ }
+ ],
+ "parameters": [
+ {
+ "double_value_spec": {
+ "min_value": 0.96,
+ "max_value": 0.962,
+ "default_value": 0.13770000000000002,
+ },
+ "integer_value_spec": {
+ "min_value": 960,
+ "max_value": 962,
+ "default_value": 1377,
+ },
+ "categorical_value_spec": {
+ "values": ["values_value1", "values_value2"],
+ "default_value": "default_value_value",
+ },
+ "discrete_value_spec": {
+ "values": [0.657, 0.658],
+ "default_value": 0.13770000000000002,
+ },
+ "parameter_id": "parameter_id_value",
+ "scale_type": 1,
+ "conditional_parameter_specs": [
+ {
+ "parent_discrete_values": {"values": [0.657, 0.658]},
+ "parent_int_values": {"values": [657, 658]},
+ "parent_categorical_values": {
+ "values": ["values_value1", "values_value2"]
+ },
+ "parameter_spec": {},
+ }
+ ],
+ }
+ ],
+ "algorithm": 2,
+ "observation_noise": 1,
+ "measurement_selection_type": 1,
+ "study_stopping_config": {
+ "should_stop_asap": {"value": True},
+ "minimum_runtime_constraint": {
+ "max_duration": {"seconds": 751, "nanos": 543},
+ "end_time": {"seconds": 751, "nanos": 543},
+ },
+ "maximum_runtime_constraint": {},
+ "min_num_trials": {"value": 541},
+ "max_num_trials": {},
+ "max_num_trials_no_progress": {},
+ "max_duration_no_progress": {},
+ },
+ },
+ "max_trial_count": 1609,
+ "parallel_trial_count": 2128,
+ "max_failed_trial_count": 2317,
+ "trial_job_spec": {
+ "persistent_resource_id": "persistent_resource_id_value",
+ "worker_pool_specs": [
+ {
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [{"name": "name_value", "value": "value_value"}],
+ },
+ "python_package_spec": {
+ "executor_image_uri": "executor_image_uri_value",
+ "package_uris": ["package_uris_value1", "package_uris_value2"],
+ "python_module": "python_module_value",
+ "args": ["args_value1", "args_value2"],
+ "env": {},
+ },
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ }
+ ],
+ "scheduling": {
+ "timeout": {},
+ "restart_job_on_worker_restart": True,
+ "strategy": 1,
+ "disable_retries": True,
+ "max_wait_duration": {},
+ },
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "base_output_directory": {"output_uri_prefix": "output_uri_prefix_value"},
+ "protected_artifact_location_id": "protected_artifact_location_id_value",
+ "tensorboard": "tensorboard_value",
+ "enable_web_access": True,
+ "enable_dashboard_access": True,
+ "experiment": "experiment_value",
+ "experiment_run": "experiment_run_value",
+ "models": ["models_value1", "models_value2"],
+ },
+ "trials": [
+ {
+ "name": "name_value",
+ "id": "id_value",
+ "state": 1,
+ "parameters": [
+ {
+ "parameter_id": "parameter_id_value",
+ "value": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ }
+ ],
+ "final_measurement": {
+ "elapsed_duration": {},
+ "step_count": 1092,
+ "metrics": [{"metric_id": "metric_id_value", "value": 0.541}],
+ },
+ "measurements": {},
+ "start_time": {},
+ "end_time": {},
+ "client_id": "client_id_value",
+ "infeasible_reason": "infeasible_reason_value",
+ "custom_job": "custom_job_value",
+ "web_access_uris": {},
+ }
+ ],
+ "state": 1,
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateHyperparameterTuningJobRequest.meta.fields[
+ "hyperparameter_tuning_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init[
+ "hyperparameter_tuning_job"
+ ].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(
+ 0, len(request_init["hyperparameter_tuning_job"][field])
+ ):
+ del request_init["hyperparameter_tuning_job"][field][i][subfield]
+ else:
+ del request_init["hyperparameter_tuning_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_hyperparameter_tuning_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_trial_count == 1609
+ assert response.parallel_trial_count == 2128
+ assert response.max_failed_trial_count == 2317
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_hyperparameter_tuning_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "post_create_hyperparameter_tuning_job",
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_create_hyperparameter_tuning_job",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateHyperparameterTuningJobRequest.pb(
+ job_service.CreateHyperparameterTuningJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob.to_json(
+ gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.CreateHyperparameterTuningJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+
+ await client.create_hyperparameter_tuning_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_rest_asyncio_bad_request(
+ request_type=job_service.GetHyperparameterTuningJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_hyperparameter_tuning_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+async def test_get_hyperparameter_tuning_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob(
+ name="name_value",
+ display_name="display_name_value",
+ max_trial_count=1609,
+ parallel_trial_count=2128,
+ max_failed_trial_count=2317,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_hyperparameter_tuning_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_trial_count == 1609
+ assert response.parallel_trial_count == 2128
+ assert response.max_failed_trial_count == 2317
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_hyperparameter_tuning_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_get_hyperparameter_tuning_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_get_hyperparameter_tuning_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetHyperparameterTuningJobRequest.pb(
+ job_service.GetHyperparameterTuningJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = hyperparameter_tuning_job.HyperparameterTuningJob.to_json(
+ hyperparameter_tuning_job.HyperparameterTuningJob()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.GetHyperparameterTuningJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
+
+ await client.get_hyperparameter_tuning_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_rest_asyncio_bad_request(
+ request_type=job_service.ListHyperparameterTuningJobsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_hyperparameter_tuning_jobs(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListHyperparameterTuningJobsRequest,
+ dict,
+ ],
+)
+async def test_list_hyperparameter_tuning_jobs_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListHyperparameterTuningJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListHyperparameterTuningJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_hyperparameter_tuning_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_hyperparameter_tuning_jobs_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "post_list_hyperparameter_tuning_jobs",
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_list_hyperparameter_tuning_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListHyperparameterTuningJobsRequest.pb(
+ job_service.ListHyperparameterTuningJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListHyperparameterTuningJobsResponse.to_json(
+ job_service.ListHyperparameterTuningJobsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.ListHyperparameterTuningJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListHyperparameterTuningJobsResponse()
+
+ await client.list_hyperparameter_tuning_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_rest_asyncio_bad_request(
+ request_type=job_service.DeleteHyperparameterTuningJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_hyperparameter_tuning_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+async def test_delete_hyperparameter_tuning_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_hyperparameter_tuning_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_hyperparameter_tuning_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "post_delete_hyperparameter_tuning_job",
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_delete_hyperparameter_tuning_job",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteHyperparameterTuningJobRequest.pb(
+ job_service.DeleteHyperparameterTuningJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.DeleteHyperparameterTuningJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_hyperparameter_tuning_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_rest_asyncio_bad_request(
+ request_type=job_service.CancelHyperparameterTuningJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_hyperparameter_tuning_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelHyperparameterTuningJobRequest,
+ dict,
+ ],
+)
+async def test_cancel_hyperparameter_tuning_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/hyperparameterTuningJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.cancel_hyperparameter_tuning_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_cancel_hyperparameter_tuning_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_cancel_hyperparameter_tuning_job",
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelHyperparameterTuningJobRequest.pb(
+ job_service.CancelHyperparameterTuningJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelHyperparameterTuningJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.cancel_hyperparameter_tuning_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_create_nas_job_rest_asyncio_bad_request(
+ request_type=job_service.CreateNasJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_nas_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateNasJobRequest,
+ dict,
+ ],
+)
+async def test_create_nas_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["nas_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "nas_job_spec": {
+ "multi_trial_algorithm_spec": {
+ "multi_trial_algorithm": 1,
+ "metric": {"metric_id": "metric_id_value", "goal": 1},
+ "search_trial_spec": {
+ "search_trial_job_spec": {
+ "persistent_resource_id": "persistent_resource_id_value",
+ "worker_pool_specs": [
+ {
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [
+ {"name": "name_value", "value": "value_value"}
+ ],
+ },
+ "python_package_spec": {
+ "executor_image_uri": "executor_image_uri_value",
+ "package_uris": [
+ "package_uris_value1",
+ "package_uris_value2",
+ ],
+ "python_module": "python_module_value",
+ "args": ["args_value1", "args_value2"],
+ "env": {},
+ },
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ }
+ ],
+ "scheduling": {
+ "timeout": {"seconds": 751, "nanos": 543},
+ "restart_job_on_worker_restart": True,
+ "strategy": 1,
+ "disable_retries": True,
+ "max_wait_duration": {},
+ },
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "base_output_directory": {
+ "output_uri_prefix": "output_uri_prefix_value"
+ },
+ "protected_artifact_location_id": "protected_artifact_location_id_value",
+ "tensorboard": "tensorboard_value",
+ "enable_web_access": True,
+ "enable_dashboard_access": True,
+ "experiment": "experiment_value",
+ "experiment_run": "experiment_run_value",
+ "models": ["models_value1", "models_value2"],
+ },
+ "max_trial_count": 1609,
+ "max_parallel_trial_count": 2549,
+ "max_failed_trial_count": 2317,
+ },
+ "train_trial_spec": {
+ "train_trial_job_spec": {},
+ "max_parallel_trial_count": 2549,
+ "frequency": 978,
+ },
+ },
+ "resume_nas_job_id": "resume_nas_job_id_value",
+ "search_space_spec": "search_space_spec_value",
+ },
+ "nas_job_output": {
+ "multi_trial_job_output": {
+ "search_trials": [
+ {
+ "id": "id_value",
+ "state": 1,
+ "final_measurement": {
+ "elapsed_duration": {},
+ "step_count": 1092,
+ "metrics": [
+ {"metric_id": "metric_id_value", "value": 0.541}
+ ],
+ },
+ "start_time": {"seconds": 751, "nanos": 543},
+ "end_time": {},
+ }
+ ],
+ "train_trials": {},
+ }
+ },
+ "state": 1,
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "enable_restricted_image_training": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateNasJobRequest.meta.fields["nas_job"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["nas_job"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["nas_job"][field])):
+ del request_init["nas_job"][field][i][subfield]
+ else:
+ del request_init["nas_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_nas_job.NasJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_nas_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_nas_job.NasJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.enable_restricted_image_training is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_nas_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_create_nas_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_create_nas_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateNasJobRequest.pb(
+ job_service.CreateNasJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_nas_job.NasJob.to_json(gca_nas_job.NasJob())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.CreateNasJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_nas_job.NasJob()
+
+ await client.create_nas_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_nas_job_rest_asyncio_bad_request(
+ request_type=job_service.GetNasJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_nas_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetNasJobRequest,
+ dict,
+ ],
+)
+async def test_get_nas_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = nas_job.NasJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ enable_restricted_image_training=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = nas_job.NasJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_nas_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, nas_job.NasJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.enable_restricted_image_training is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_nas_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_get_nas_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_get_nas_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetNasJobRequest.pb(job_service.GetNasJobRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = nas_job.NasJob.to_json(nas_job.NasJob())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.GetNasJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = nas_job.NasJob()
+
+ await client.get_nas_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_nas_jobs_rest_asyncio_bad_request(
+ request_type=job_service.ListNasJobsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_nas_jobs(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListNasJobsRequest,
+ dict,
+ ],
+)
+async def test_list_nas_jobs_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListNasJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListNasJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_nas_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListNasJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_nas_jobs_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_list_nas_jobs"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_list_nas_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListNasJobsRequest.pb(job_service.ListNasJobsRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListNasJobsResponse.to_json(
+ job_service.ListNasJobsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.ListNasJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListNasJobsResponse()
+
+ await client.list_nas_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_nas_job_rest_asyncio_bad_request(
+ request_type=job_service.DeleteNasJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_nas_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteNasJobRequest,
+ dict,
+ ],
+)
+async def test_delete_nas_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_nas_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_nas_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_delete_nas_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_delete_nas_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteNasJobRequest.pb(
+ job_service.DeleteNasJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.DeleteNasJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_nas_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_cancel_nas_job_rest_asyncio_bad_request(
+ request_type=job_service.CancelNasJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_nas_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelNasJobRequest,
+ dict,
+ ],
+)
+async def test_cancel_nas_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.cancel_nas_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_cancel_nas_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_cancel_nas_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelNasJobRequest.pb(
+ job_service.CancelNasJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelNasJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.cancel_nas_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_nas_trial_detail_rest_asyncio_bad_request(
+ request_type=job_service.GetNasTrialDetailRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/nasJobs/sample3/nasTrialDetails/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_nas_trial_detail(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetNasTrialDetailRequest,
+ dict,
+ ],
+)
+async def test_get_nas_trial_detail_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/nasJobs/sample3/nasTrialDetails/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = nas_job.NasTrialDetail(
+ name="name_value",
+ parameters="parameters_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = nas_job.NasTrialDetail.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_nas_trial_detail(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, nas_job.NasTrialDetail)
+ assert response.name == "name_value"
+ assert response.parameters == "parameters_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_nas_trial_detail_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_get_nas_trial_detail"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_get_nas_trial_detail"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetNasTrialDetailRequest.pb(
+ job_service.GetNasTrialDetailRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = nas_job.NasTrialDetail.to_json(nas_job.NasTrialDetail())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.GetNasTrialDetailRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = nas_job.NasTrialDetail()
+
+ await client.get_nas_trial_detail(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_rest_asyncio_bad_request(
+ request_type=job_service.ListNasTrialDetailsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_nas_trial_details(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListNasTrialDetailsRequest,
+ dict,
+ ],
+)
+async def test_list_nas_trial_details_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/nasJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListNasTrialDetailsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListNasTrialDetailsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_nas_trial_details(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListNasTrialDetailsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_nas_trial_details_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_list_nas_trial_details"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_list_nas_trial_details"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListNasTrialDetailsRequest.pb(
+ job_service.ListNasTrialDetailsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListNasTrialDetailsResponse.to_json(
+ job_service.ListNasTrialDetailsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.ListNasTrialDetailsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListNasTrialDetailsResponse()
+
+ await client.list_nas_trial_details(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_rest_asyncio_bad_request(
+ request_type=job_service.CreateBatchPredictionJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_batch_prediction_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateBatchPredictionJobRequest,
+ dict,
+ ],
+)
+async def test_create_batch_prediction_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["batch_prediction_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "model": "model_value",
+ "model_version_id": "model_version_id_value",
+ "unmanaged_container_model": {
+ "artifact_uri": "artifact_uri_value",
+ "predict_schemata": {
+ "instance_schema_uri": "instance_schema_uri_value",
+ "parameters_schema_uri": "parameters_schema_uri_value",
+ "prediction_schema_uri": "prediction_schema_uri_value",
+ },
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [{"name": "name_value", "value": "value_value"}],
+ "ports": [{"container_port": 1511}],
+ "predict_route": "predict_route_value",
+ "health_route": "health_route_value",
+ "grpc_ports": {},
+ "deployment_timeout": {"seconds": 751, "nanos": 543},
+ "shared_memory_size_mb": 2231,
+ "startup_probe": {
+ "exec_": {"command": ["command_value1", "command_value2"]},
+ "period_seconds": 1489,
+ "timeout_seconds": 1621,
+ },
+ "health_probe": {},
+ },
+ },
+ "input_config": {
+ "gcs_source": {"uris": ["uris_value1", "uris_value2"]},
+ "bigquery_source": {"input_uri": "input_uri_value"},
+ "instances_format": "instances_format_value",
+ },
+ "instance_config": {
+ "instance_type": "instance_type_value",
+ "key_field": "key_field_value",
+ "included_fields": ["included_fields_value1", "included_fields_value2"],
+ "excluded_fields": ["excluded_fields_value1", "excluded_fields_value2"],
+ },
+ "model_parameters": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "output_config": {
+ "gcs_destination": {"output_uri_prefix": "output_uri_prefix_value"},
+ "bigquery_destination": {"output_uri": "output_uri_value"},
+ "predictions_format": "predictions_format_value",
+ },
+ "dedicated_resources": {
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "starting_replica_count": 2355,
+ "max_replica_count": 1805,
+ },
+ "service_account": "service_account_value",
+ "manual_batch_tuning_parameters": {"batch_size": 1052},
+ "generate_explanation": True,
+ "explanation_spec": {
+ "parameters": {
+ "sampled_shapley_attribution": {"path_count": 1077},
+ "integrated_gradients_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {
+ "noise_sigma": 0.11660000000000001,
+ "feature_noise_sigma": {
+ "noise_sigma": [{"name": "name_value", "sigma": 0.529}]
+ },
+ "noisy_sample_count": 1947,
+ },
+ "blur_baseline_config": {"max_blur_sigma": 0.1482},
+ },
+ "xrai_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {},
+ "blur_baseline_config": {},
+ },
+ "examples": {
+ "example_gcs_source": {"data_format": 1, "gcs_source": {}},
+ "nearest_neighbor_search_config": {},
+ "presets": {"query": 1, "modality": 1},
+ "neighbor_count": 1494,
+ },
+ "top_k": 541,
+ "output_indices": {},
+ },
+ "metadata": {
+ "inputs": {},
+ "outputs": {},
+ "feature_attributions_schema_uri": "feature_attributions_schema_uri_value",
+ "latent_space_source": "latent_space_source_value",
+ },
+ },
+ "output_info": {
+ "gcs_output_directory": "gcs_output_directory_value",
+ "bigquery_output_dataset": "bigquery_output_dataset_value",
+ "bigquery_output_table": "bigquery_output_table_value",
+ },
+ "state": 1,
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "partial_failures": {},
+ "resources_consumed": {"replica_hours": 0.13920000000000002},
+ "completion_stats": {
+ "successful_count": 1736,
+ "failed_count": 1261,
+ "incomplete_count": 1720,
+ "successful_forecast_point_count": 3335,
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "disable_container_logging": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateBatchPredictionJobRequest.meta.fields[
+ "batch_prediction_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init[
+ "batch_prediction_job"
+ ].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["batch_prediction_job"][field])):
+ del request_init["batch_prediction_job"][field][i][subfield]
+ else:
+ del request_init["batch_prediction_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_batch_prediction_job.BatchPredictionJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_batch_prediction_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.service_account == "service_account_value"
+ assert response.generate_explanation is True
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.disable_container_logging is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_batch_prediction_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_create_batch_prediction_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_create_batch_prediction_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateBatchPredictionJobRequest.pb(
+ job_service.CreateBatchPredictionJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_batch_prediction_job.BatchPredictionJob.to_json(
+ gca_batch_prediction_job.BatchPredictionJob()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.CreateBatchPredictionJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_batch_prediction_job.BatchPredictionJob()
+
+ await client.create_batch_prediction_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_rest_asyncio_bad_request(
+ request_type=job_service.GetBatchPredictionJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_batch_prediction_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetBatchPredictionJobRequest,
+ dict,
+ ],
+)
+async def test_get_batch_prediction_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = batch_prediction_job.BatchPredictionJob(
+ name="name_value",
+ display_name="display_name_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ service_account="service_account_value",
+ generate_explanation=True,
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ disable_container_logging=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = batch_prediction_job.BatchPredictionJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_batch_prediction_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, batch_prediction_job.BatchPredictionJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.service_account == "service_account_value"
+ assert response.generate_explanation is True
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert response.disable_container_logging is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_batch_prediction_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_get_batch_prediction_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_get_batch_prediction_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetBatchPredictionJobRequest.pb(
+ job_service.GetBatchPredictionJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = batch_prediction_job.BatchPredictionJob.to_json(
+ batch_prediction_job.BatchPredictionJob()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.GetBatchPredictionJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = batch_prediction_job.BatchPredictionJob()
+
+ await client.get_batch_prediction_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_rest_asyncio_bad_request(
+ request_type=job_service.ListBatchPredictionJobsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_batch_prediction_jobs(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListBatchPredictionJobsRequest,
+ dict,
+ ],
+)
+async def test_list_batch_prediction_jobs_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListBatchPredictionJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListBatchPredictionJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_batch_prediction_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_batch_prediction_jobs_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_list_batch_prediction_jobs"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_list_batch_prediction_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListBatchPredictionJobsRequest.pb(
+ job_service.ListBatchPredictionJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListBatchPredictionJobsResponse.to_json(
+ job_service.ListBatchPredictionJobsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.ListBatchPredictionJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListBatchPredictionJobsResponse()
+
+ await client.list_batch_prediction_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_rest_asyncio_bad_request(
+ request_type=job_service.DeleteBatchPredictionJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_batch_prediction_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteBatchPredictionJobRequest,
+ dict,
+ ],
+)
+async def test_delete_batch_prediction_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_batch_prediction_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_batch_prediction_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "post_delete_batch_prediction_job"
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_delete_batch_prediction_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteBatchPredictionJobRequest.pb(
+ job_service.DeleteBatchPredictionJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.DeleteBatchPredictionJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_batch_prediction_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_rest_asyncio_bad_request(
+ request_type=job_service.CancelBatchPredictionJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_batch_prediction_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CancelBatchPredictionJobRequest,
+ dict,
+ ],
+)
+async def test_cancel_batch_prediction_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/batchPredictionJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.cancel_batch_prediction_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_cancel_batch_prediction_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor, "pre_cancel_batch_prediction_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.CancelBatchPredictionJobRequest.pb(
+ job_service.CancelBatchPredictionJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.CancelBatchPredictionJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.cancel_batch_prediction_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_rest_asyncio_bad_request(
+ request_type=job_service.CreateModelDeploymentMonitoringJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.CreateModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+async def test_create_model_deployment_monitoring_job_rest_asyncio_call_success(
+ request_type,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["model_deployment_monitoring_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "endpoint": "endpoint_value",
+ "state": 1,
+ "schedule_state": 1,
+ "latest_monitoring_pipeline_metadata": {
+ "run_time": {"seconds": 751, "nanos": 543},
+ "status": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ },
+ "model_deployment_monitoring_objective_configs": [
+ {
+ "deployed_model_id": "deployed_model_id_value",
+ "objective_config": {
+ "training_dataset": {
+ "dataset": "dataset_value",
+ "gcs_source": {"uris": ["uris_value1", "uris_value2"]},
+ "bigquery_source": {"input_uri": "input_uri_value"},
+ "data_format": "data_format_value",
+ "target_field": "target_field_value",
+ "logging_sampling_strategy": {
+ "random_sample_config": {"sample_rate": 0.1165}
+ },
+ },
+ "training_prediction_skew_detection_config": {
+ "skew_thresholds": {},
+ "attribution_score_skew_thresholds": {},
+ "default_skew_threshold": {"value": 0.541},
+ },
+ "prediction_drift_detection_config": {
+ "drift_thresholds": {},
+ "attribution_score_drift_thresholds": {},
+ "default_drift_threshold": {},
+ },
+ "explanation_config": {
+ "enable_feature_attributes": True,
+ "explanation_baseline": {
+ "gcs": {"output_uri_prefix": "output_uri_prefix_value"},
+ "bigquery": {"output_uri": "output_uri_value"},
+ "prediction_format": 2,
+ },
+ },
+ },
+ }
+ ],
+ "model_deployment_monitoring_schedule_config": {
+ "monitor_interval": {"seconds": 751, "nanos": 543},
+ "monitor_window": {},
+ },
+ "logging_sampling_strategy": {},
+ "model_monitoring_alert_config": {
+ "email_alert_config": {
+ "user_emails": ["user_emails_value1", "user_emails_value2"]
+ },
+ "enable_logging": True,
+ "notification_channels": [
+ "notification_channels_value1",
+ "notification_channels_value2",
+ ],
+ },
+ "predict_instance_schema_uri": "predict_instance_schema_uri_value",
+ "sample_predict_instance": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "analysis_instance_schema_uri": "analysis_instance_schema_uri_value",
+ "bigquery_tables": [
+ {
+ "log_source": 1,
+ "log_type": 1,
+ "bigquery_table_path": "bigquery_table_path_value",
+ "request_response_logging_schema_version": "request_response_logging_schema_version_value",
+ }
+ ],
+ "log_ttl": {},
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "next_schedule_time": {},
+ "stats_anomalies_base_directory": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "enable_monitoring_pipeline_logs": True,
+ "error": {},
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.CreateModelDeploymentMonitoringJobRequest.meta.fields[
+ "model_deployment_monitoring_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init[
+ "model_deployment_monitoring_job"
+ ].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(
+ 0, len(request_init["model_deployment_monitoring_job"][field])
+ ):
+ del request_init["model_deployment_monitoring_job"][field][i][
+ subfield
+ ]
+ else:
+ del request_init["model_deployment_monitoring_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob
+ )
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.endpoint == "endpoint_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert (
+ response.schedule_state
+ == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+ )
+ assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
+ assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
+ assert response.enable_monitoring_pipeline_logs is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_model_deployment_monitoring_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "post_create_model_deployment_monitoring_job",
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_create_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.CreateModelDeploymentMonitoringJobRequest.pb(
+ job_service.CreateModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.to_json(
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.CreateModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+
+ await client.create_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_rest_asyncio_bad_request(
+ request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "model_deployment_monitoring_job": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.search_model_deployment_monitoring_stats_anomalies(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
+ dict,
+ ],
+)
+async def test_search_model_deployment_monitoring_stats_anomalies_rest_asyncio_call_success(
+ request_type,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "model_deployment_monitoring_job": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.search_model_deployment_monitoring_stats_anomalies(
+ request
+ )
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager
+ )
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_search_model_deployment_monitoring_stats_anomalies_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "post_search_model_deployment_monitoring_stats_anomalies",
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_search_model_deployment_monitoring_stats_anomalies",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.pb(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+ )
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.to_json(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+ )
+
+ await client.search_model_deployment_monitoring_stats_anomalies(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_rest_asyncio_bad_request(
+ request_type=job_service.GetModelDeploymentMonitoringJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.GetModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+async def test_get_model_deployment_monitoring_job_rest_asyncio_call_success(
+ request_type,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name="name_value",
+ display_name="display_name_value",
+ endpoint="endpoint_value",
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri="predict_instance_schema_uri_value",
+ analysis_instance_schema_uri="analysis_instance_schema_uri_value",
+ enable_monitoring_pipeline_logs=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob
+ )
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.endpoint == "endpoint_value"
+ assert response.state == job_state.JobState.JOB_STATE_QUEUED
+ assert (
+ response.schedule_state
+ == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+ )
+ assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
+ assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
+ assert response.enable_monitoring_pipeline_logs is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_model_deployment_monitoring_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "post_get_model_deployment_monitoring_job",
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_get_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.GetModelDeploymentMonitoringJobRequest.pb(
+ job_service.GetModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob.to_json(
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.GetModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+ )
+
+ await client.get_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_rest_asyncio_bad_request(
+ request_type=job_service.ListModelDeploymentMonitoringJobsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_model_deployment_monitoring_jobs(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ListModelDeploymentMonitoringJobsRequest,
+ dict,
+ ],
+)
+async def test_list_model_deployment_monitoring_jobs_rest_asyncio_call_success(
+ request_type,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_model_deployment_monitoring_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_model_deployment_monitoring_jobs_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "post_list_model_deployment_monitoring_jobs",
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_list_model_deployment_monitoring_jobs",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.ListModelDeploymentMonitoringJobsRequest.pb(
+ job_service.ListModelDeploymentMonitoringJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = job_service.ListModelDeploymentMonitoringJobsResponse.to_json(
+ job_service.ListModelDeploymentMonitoringJobsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.ListModelDeploymentMonitoringJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+
+ await client.list_model_deployment_monitoring_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_rest_asyncio_bad_request(
+ request_type=job_service.UpdateModelDeploymentMonitoringJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "model_deployment_monitoring_job": {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.update_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.UpdateModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+async def test_update_model_deployment_monitoring_job_rest_asyncio_call_success(
+ request_type,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "model_deployment_monitoring_job": {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ }
+ request_init["model_deployment_monitoring_job"] = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3",
+ "display_name": "display_name_value",
+ "endpoint": "endpoint_value",
+ "state": 1,
+ "schedule_state": 1,
+ "latest_monitoring_pipeline_metadata": {
+ "run_time": {"seconds": 751, "nanos": 543},
+ "status": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ },
+ "model_deployment_monitoring_objective_configs": [
+ {
+ "deployed_model_id": "deployed_model_id_value",
+ "objective_config": {
+ "training_dataset": {
+ "dataset": "dataset_value",
+ "gcs_source": {"uris": ["uris_value1", "uris_value2"]},
+ "bigquery_source": {"input_uri": "input_uri_value"},
+ "data_format": "data_format_value",
+ "target_field": "target_field_value",
+ "logging_sampling_strategy": {
+ "random_sample_config": {"sample_rate": 0.1165}
+ },
+ },
+ "training_prediction_skew_detection_config": {
+ "skew_thresholds": {},
+ "attribution_score_skew_thresholds": {},
+ "default_skew_threshold": {"value": 0.541},
+ },
+ "prediction_drift_detection_config": {
+ "drift_thresholds": {},
+ "attribution_score_drift_thresholds": {},
+ "default_drift_threshold": {},
+ },
+ "explanation_config": {
+ "enable_feature_attributes": True,
+ "explanation_baseline": {
+ "gcs": {"output_uri_prefix": "output_uri_prefix_value"},
+ "bigquery": {"output_uri": "output_uri_value"},
+ "prediction_format": 2,
+ },
+ },
+ },
+ }
+ ],
+ "model_deployment_monitoring_schedule_config": {
+ "monitor_interval": {"seconds": 751, "nanos": 543},
+ "monitor_window": {},
+ },
+ "logging_sampling_strategy": {},
+ "model_monitoring_alert_config": {
+ "email_alert_config": {
+ "user_emails": ["user_emails_value1", "user_emails_value2"]
+ },
+ "enable_logging": True,
+ "notification_channels": [
+ "notification_channels_value1",
+ "notification_channels_value2",
+ ],
+ },
+ "predict_instance_schema_uri": "predict_instance_schema_uri_value",
+ "sample_predict_instance": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "analysis_instance_schema_uri": "analysis_instance_schema_uri_value",
+ "bigquery_tables": [
+ {
+ "log_source": 1,
+ "log_type": 1,
+ "bigquery_table_path": "bigquery_table_path_value",
+ "request_response_logging_schema_version": "request_response_logging_schema_version_value",
+ }
+ ],
+ "log_ttl": {},
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "next_schedule_time": {},
+ "stats_anomalies_base_directory": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "enable_monitoring_pipeline_logs": True,
+ "error": {},
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = job_service.UpdateModelDeploymentMonitoringJobRequest.meta.fields[
+ "model_deployment_monitoring_job"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init[
+ "model_deployment_monitoring_job"
+ ].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(
+ 0, len(request_init["model_deployment_monitoring_job"][field])
+ ):
+ del request_init["model_deployment_monitoring_job"][field][i][
+ subfield
+ ]
+ else:
+ del request_init["model_deployment_monitoring_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.update_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_update_model_deployment_monitoring_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "post_update_model_deployment_monitoring_job",
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_update_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.UpdateModelDeploymentMonitoringJobRequest.pb(
+ job_service.UpdateModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.UpdateModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.update_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_rest_asyncio_bad_request(
+ request_type=job_service.DeleteModelDeploymentMonitoringJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.DeleteModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+async def test_delete_model_deployment_monitoring_job_rest_asyncio_call_success(
+ request_type,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_model_deployment_monitoring_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "post_delete_model_deployment_monitoring_job",
+ ) as post, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_delete_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = job_service.DeleteModelDeploymentMonitoringJobRequest.pb(
+ job_service.DeleteModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = job_service.DeleteModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_rest_asyncio_bad_request(
+ request_type=job_service.PauseModelDeploymentMonitoringJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.pause_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.PauseModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+async def test_pause_model_deployment_monitoring_job_rest_asyncio_call_success(
+ request_type,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.pause_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_pause_model_deployment_monitoring_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_pause_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.PauseModelDeploymentMonitoringJobRequest.pb(
+ job_service.PauseModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.PauseModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.pause_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_resume_model_deployment_monitoring_job_rest_asyncio_bad_request(
+ request_type=job_service.ResumeModelDeploymentMonitoringJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.resume_model_deployment_monitoring_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ job_service.ResumeModelDeploymentMonitoringJobRequest,
+ dict,
+ ],
+)
+async def test_resume_model_deployment_monitoring_job_rest_asyncio_call_success(
+ request_type,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/modelDeploymentMonitoringJobs/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.resume_model_deployment_monitoring_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_resume_model_deployment_monitoring_job_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncJobServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncJobServiceRestInterceptor(),
+ )
+ client = JobServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncJobServiceRestInterceptor,
+ "pre_resume_model_deployment_monitoring_job",
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = job_service.ResumeModelDeploymentMonitoringJobRequest.pb(
+ job_service.ResumeModelDeploymentMonitoringJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = job_service.ResumeModelDeploymentMonitoringJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.resume_model_deployment_monitoring_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_custom_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_custom_job), "__call__"
+ ) as call:
+ await client.create_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_custom_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
+ await client.get_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_custom_jobs_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
+ await client.list_custom_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListCustomJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_custom_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_custom_job), "__call__"
+ ) as call:
+ await client.delete_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_custom_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_custom_job), "__call__"
+ ) as call:
+ await client.cancel_custom_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelCustomJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_data_labeling_job), "__call__"
+ ) as call:
+ await client.create_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_data_labeling_job), "__call__"
+ ) as call:
+ await client.get_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_labeling_jobs), "__call__"
+ ) as call:
+ await client.list_data_labeling_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListDataLabelingJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_data_labeling_job), "__call__"
+ ) as call:
+ await client.delete_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_data_labeling_job), "__call__"
+ ) as call:
+ await client.cancel_data_labeling_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelDataLabelingJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ await client.create_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ await client.get_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
+ ) as call:
+ await client.list_hyperparameter_tuning_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListHyperparameterTuningJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ await client.delete_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
+ ) as call:
+ await client.cancel_hyperparameter_tuning_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelHyperparameterTuningJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_nas_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_nas_job), "__call__") as call:
+ await client.create_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_nas_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_nas_job), "__call__") as call:
+ await client.get_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_nas_jobs_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_nas_jobs), "__call__") as call:
+ await client.list_nas_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListNasJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_nas_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_nas_job), "__call__") as call:
+ await client.delete_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_nas_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_nas_job), "__call__") as call:
+ await client.cancel_nas_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelNasJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_nas_trial_detail_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_nas_trial_detail), "__call__"
+ ) as call:
+ await client.get_nas_trial_detail(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetNasTrialDetailRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_nas_trial_details_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_nas_trial_details), "__call__"
+ ) as call:
+ await client.list_nas_trial_details(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListNasTrialDetailsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_batch_prediction_job), "__call__"
+ ) as call:
+ await client.create_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_batch_prediction_job), "__call__"
+ ) as call:
+ await client.get_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batch_prediction_jobs), "__call__"
+ ) as call:
+ await client.list_batch_prediction_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListBatchPredictionJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_batch_prediction_job), "__call__"
+ ) as call:
+ await client.delete_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_batch_prediction_job), "__call__"
+ ) as call:
+ await client.cancel_batch_prediction_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CancelBatchPredictionJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ await client.create_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.CreateModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ "__call__",
+ ) as call:
+ await client.search_model_deployment_monitoring_stats_anomalies(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ await client.get_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.GetModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
+ ) as call:
+ await client.list_model_deployment_monitoring_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ListModelDeploymentMonitoringJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ await client.update_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.UpdateModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ await client.delete_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.DeleteModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.pause_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ await client.pause_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.PauseModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_resume_model_deployment_monitoring_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.resume_model_deployment_monitoring_job), "__call__"
+ ) as call:
+ await client.resume_model_deployment_monitoring_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = job_service.ResumeModelDeploymentMonitoringJobRequest()
+
+ assert args[0] == request_msg
+
+
+def test_job_service_rest_asyncio_lro_client():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AsyncOperationsRestClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.JobServiceGrpcTransport,
+ )
+
+
+def test_job_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.JobServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_job_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.JobServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "create_custom_job",
+ "get_custom_job",
+ "list_custom_jobs",
+ "delete_custom_job",
+ "cancel_custom_job",
+ "create_data_labeling_job",
+ "get_data_labeling_job",
+ "list_data_labeling_jobs",
+ "delete_data_labeling_job",
+ "cancel_data_labeling_job",
+ "create_hyperparameter_tuning_job",
+ "get_hyperparameter_tuning_job",
+ "list_hyperparameter_tuning_jobs",
+ "delete_hyperparameter_tuning_job",
+ "cancel_hyperparameter_tuning_job",
+ "create_nas_job",
+ "get_nas_job",
+ "list_nas_jobs",
+ "delete_nas_job",
+ "cancel_nas_job",
+ "get_nas_trial_detail",
+ "list_nas_trial_details",
+ "create_batch_prediction_job",
+ "get_batch_prediction_job",
+ "list_batch_prediction_jobs",
+ "delete_batch_prediction_job",
+ "cancel_batch_prediction_job",
+ "create_model_deployment_monitoring_job",
+ "search_model_deployment_monitoring_stats_anomalies",
+ "get_model_deployment_monitoring_job",
+ "list_model_deployment_monitoring_jobs",
+ "update_model_deployment_monitoring_job",
+ "delete_model_deployment_monitoring_job",
+ "pause_model_deployment_monitoring_job",
+ "resume_model_deployment_monitoring_job",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_job_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.JobServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ quota_project_id="octopus",
+ )
+
+
+def test_job_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.JobServiceTransport()
+ adc.assert_called_once()
+
+
+def test_job_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ JobServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.JobServiceGrpcTransport,
+ transports.JobServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_job_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.JobServiceGrpcTransport,
+ transports.JobServiceGrpcAsyncIOTransport,
+ transports.JobServiceRestTransport,
+ ],
+)
+def test_job_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.JobServiceGrpcTransport, grpc_helpers),
+ (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_job_service_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
+)
+def test_job_service_grpc_transport_client_cert_source_for_mtls(transport_class):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_job_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.JobServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_job_service_host_no_port(transport_name):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_job_service_host_with_port(transport_name):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_job_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = JobServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = JobServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.create_custom_job._session
+ session2 = client2.transport.create_custom_job._session
+ assert session1 != session2
+ session1 = client1.transport.get_custom_job._session
+ session2 = client2.transport.get_custom_job._session
+ assert session1 != session2
+ session1 = client1.transport.list_custom_jobs._session
+ session2 = client2.transport.list_custom_jobs._session
+ assert session1 != session2
+ session1 = client1.transport.delete_custom_job._session
+ session2 = client2.transport.delete_custom_job._session
+ assert session1 != session2
+ session1 = client1.transport.cancel_custom_job._session
+ session2 = client2.transport.cancel_custom_job._session
+ assert session1 != session2
+ session1 = client1.transport.create_data_labeling_job._session
+ session2 = client2.transport.create_data_labeling_job._session
+ assert session1 != session2
+ session1 = client1.transport.get_data_labeling_job._session
+ session2 = client2.transport.get_data_labeling_job._session
+ assert session1 != session2
+ session1 = client1.transport.list_data_labeling_jobs._session
+ session2 = client2.transport.list_data_labeling_jobs._session
+ assert session1 != session2
+ session1 = client1.transport.delete_data_labeling_job._session
+ session2 = client2.transport.delete_data_labeling_job._session
+ assert session1 != session2
+ session1 = client1.transport.cancel_data_labeling_job._session
+ session2 = client2.transport.cancel_data_labeling_job._session
+ assert session1 != session2
+ session1 = client1.transport.create_hyperparameter_tuning_job._session
+ session2 = client2.transport.create_hyperparameter_tuning_job._session
+ assert session1 != session2
+ session1 = client1.transport.get_hyperparameter_tuning_job._session
+ session2 = client2.transport.get_hyperparameter_tuning_job._session
+ assert session1 != session2
+ session1 = client1.transport.list_hyperparameter_tuning_jobs._session
+ session2 = client2.transport.list_hyperparameter_tuning_jobs._session
+ assert session1 != session2
+ session1 = client1.transport.delete_hyperparameter_tuning_job._session
+ session2 = client2.transport.delete_hyperparameter_tuning_job._session
+ assert session1 != session2
+ session1 = client1.transport.cancel_hyperparameter_tuning_job._session
+ session2 = client2.transport.cancel_hyperparameter_tuning_job._session
+ assert session1 != session2
+ session1 = client1.transport.create_nas_job._session
+ session2 = client2.transport.create_nas_job._session
+ assert session1 != session2
+ session1 = client1.transport.get_nas_job._session
+ session2 = client2.transport.get_nas_job._session
+ assert session1 != session2
+ session1 = client1.transport.list_nas_jobs._session
+ session2 = client2.transport.list_nas_jobs._session
+ assert session1 != session2
+ session1 = client1.transport.delete_nas_job._session
+ session2 = client2.transport.delete_nas_job._session
+ assert session1 != session2
+ session1 = client1.transport.cancel_nas_job._session
+ session2 = client2.transport.cancel_nas_job._session
+ assert session1 != session2
+ session1 = client1.transport.get_nas_trial_detail._session
+ session2 = client2.transport.get_nas_trial_detail._session
+ assert session1 != session2
+ session1 = client1.transport.list_nas_trial_details._session
+ session2 = client2.transport.list_nas_trial_details._session
+ assert session1 != session2
+ session1 = client1.transport.create_batch_prediction_job._session
+ session2 = client2.transport.create_batch_prediction_job._session
+ assert session1 != session2
+ session1 = client1.transport.get_batch_prediction_job._session
+ session2 = client2.transport.get_batch_prediction_job._session
+ assert session1 != session2
+ session1 = client1.transport.list_batch_prediction_jobs._session
+ session2 = client2.transport.list_batch_prediction_jobs._session
+ assert session1 != session2
+ session1 = client1.transport.delete_batch_prediction_job._session
+ session2 = client2.transport.delete_batch_prediction_job._session
+ assert session1 != session2
+ session1 = client1.transport.cancel_batch_prediction_job._session
+ session2 = client2.transport.cancel_batch_prediction_job._session
+ assert session1 != session2
+ session1 = client1.transport.create_model_deployment_monitoring_job._session
+ session2 = client2.transport.create_model_deployment_monitoring_job._session
+ assert session1 != session2
+ session1 = (
+ client1.transport.search_model_deployment_monitoring_stats_anomalies._session
+ )
+ session2 = (
+ client2.transport.search_model_deployment_monitoring_stats_anomalies._session
+ )
+ assert session1 != session2
+ session1 = client1.transport.get_model_deployment_monitoring_job._session
+ session2 = client2.transport.get_model_deployment_monitoring_job._session
+ assert session1 != session2
+ session1 = client1.transport.list_model_deployment_monitoring_jobs._session
+ session2 = client2.transport.list_model_deployment_monitoring_jobs._session
+ assert session1 != session2
+ session1 = client1.transport.update_model_deployment_monitoring_job._session
+ session2 = client2.transport.update_model_deployment_monitoring_job._session
+ assert session1 != session2
+ session1 = client1.transport.delete_model_deployment_monitoring_job._session
+ session2 = client2.transport.delete_model_deployment_monitoring_job._session
+ assert session1 != session2
+ session1 = client1.transport.pause_model_deployment_monitoring_job._session
+ session2 = client2.transport.pause_model_deployment_monitoring_job._session
+ assert session1 != session2
+ session1 = client1.transport.resume_model_deployment_monitoring_job._session
+ session2 = client2.transport.resume_model_deployment_monitoring_job._session
+ assert session1 != session2
+
+
+def test_job_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.JobServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_job_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.JobServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
+)
+def test_job_service_transport_channel_mtls_with_client_cert_source(transport_class):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
+)
+def test_job_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_job_service_grpc_lro_client():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_job_service_grpc_lro_async_client():
+ client = JobServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_batch_prediction_job_path():
+ project = "squid"
+ location = "clam"
+ batch_prediction_job = "whelk"
+ expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(
+ project=project,
+ location=location,
+ batch_prediction_job=batch_prediction_job,
+ )
+ actual = JobServiceClient.batch_prediction_job_path(
+ project, location, batch_prediction_job
+ )
+ assert expected == actual
+
+
+def test_parse_batch_prediction_job_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "batch_prediction_job": "nudibranch",
+ }
+ path = JobServiceClient.batch_prediction_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_batch_prediction_job_path(path)
+ assert expected == actual
+
+
+def test_context_path():
+ project = "cuttlefish"
+ location = "mussel"
+ metadata_store = "winkle"
+ context = "nautilus"
+ expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(
+ project=project,
+ location=location,
+ metadata_store=metadata_store,
+ context=context,
+ )
+ actual = JobServiceClient.context_path(project, location, metadata_store, context)
+ assert expected == actual
+
+
+def test_parse_context_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ "metadata_store": "squid",
+ "context": "clam",
+ }
+ path = JobServiceClient.context_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_context_path(path)
+ assert expected == actual
+
+
+def test_custom_job_path():
+ project = "whelk"
+ location = "octopus"
+ custom_job = "oyster"
+ expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(
+ project=project,
+ location=location,
+ custom_job=custom_job,
+ )
+ actual = JobServiceClient.custom_job_path(project, location, custom_job)
+ assert expected == actual
+
+
+def test_parse_custom_job_path():
+ expected = {
+ "project": "nudibranch",
+ "location": "cuttlefish",
+ "custom_job": "mussel",
+ }
+ path = JobServiceClient.custom_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_custom_job_path(path)
+ assert expected == actual
+
+
+def test_data_labeling_job_path():
+ project = "winkle"
+ location = "nautilus"
+ data_labeling_job = "scallop"
+ expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(
+ project=project,
+ location=location,
+ data_labeling_job=data_labeling_job,
+ )
+ actual = JobServiceClient.data_labeling_job_path(
+ project, location, data_labeling_job
+ )
+ assert expected == actual
+
+
+def test_parse_data_labeling_job_path():
+ expected = {
+ "project": "abalone",
+ "location": "squid",
+ "data_labeling_job": "clam",
+ }
+ path = JobServiceClient.data_labeling_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_data_labeling_job_path(path)
+ assert expected == actual
+
+
+def test_dataset_path():
+ project = "whelk"
+ location = "octopus"
+ dataset = "oyster"
+ expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
+ project=project,
+ location=location,
+ dataset=dataset,
+ )
+ actual = JobServiceClient.dataset_path(project, location, dataset)
+ assert expected == actual
+
+
+def test_parse_dataset_path():
+ expected = {
+ "project": "nudibranch",
+ "location": "cuttlefish",
+ "dataset": "mussel",
+ }
+ path = JobServiceClient.dataset_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_dataset_path(path)
+ assert expected == actual
+
+
+def test_endpoint_path():
+ project = "winkle"
+ location = "nautilus"
+ endpoint = "scallop"
+ expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+ project=project,
+ location=location,
+ endpoint=endpoint,
+ )
+ actual = JobServiceClient.endpoint_path(project, location, endpoint)
+ assert expected == actual
+
+
+def test_parse_endpoint_path():
+ expected = {
+ "project": "abalone",
+ "location": "squid",
+ "endpoint": "clam",
+ }
+ path = JobServiceClient.endpoint_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_endpoint_path(path)
+ assert expected == actual
+
+
+def test_hyperparameter_tuning_job_path():
+ project = "whelk"
+ location = "octopus"
+ hyperparameter_tuning_job = "oyster"
+ expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(
+ project=project,
+ location=location,
+ hyperparameter_tuning_job=hyperparameter_tuning_job,
+ )
+ actual = JobServiceClient.hyperparameter_tuning_job_path(
+ project, location, hyperparameter_tuning_job
+ )
+ assert expected == actual
+
+
+def test_parse_hyperparameter_tuning_job_path():
+ expected = {
+ "project": "nudibranch",
+ "location": "cuttlefish",
+ "hyperparameter_tuning_job": "mussel",
+ }
+ path = JobServiceClient.hyperparameter_tuning_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path)
+ assert expected == actual
+
+
+def test_model_path():
+ project = "winkle"
+ location = "nautilus"
+ model = "scallop"
+ expected = "projects/{project}/locations/{location}/models/{model}".format(
+ project=project,
+ location=location,
+ model=model,
+ )
+ actual = JobServiceClient.model_path(project, location, model)
+ assert expected == actual
+
+
+def test_parse_model_path():
+ expected = {
+ "project": "abalone",
+ "location": "squid",
+ "model": "clam",
+ }
+ path = JobServiceClient.model_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_model_path(path)
+ assert expected == actual
+
+
+def test_model_deployment_monitoring_job_path():
+ project = "whelk"
+ location = "octopus"
+ model_deployment_monitoring_job = "oyster"
+ expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(
+ project=project,
+ location=location,
+ model_deployment_monitoring_job=model_deployment_monitoring_job,
+ )
+ actual = JobServiceClient.model_deployment_monitoring_job_path(
+ project, location, model_deployment_monitoring_job
+ )
+ assert expected == actual
+
+
+def test_parse_model_deployment_monitoring_job_path():
+ expected = {
+ "project": "nudibranch",
+ "location": "cuttlefish",
+ "model_deployment_monitoring_job": "mussel",
+ }
+ path = JobServiceClient.model_deployment_monitoring_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path)
+ assert expected == actual
+
+
+def test_nas_job_path():
+ project = "winkle"
+ location = "nautilus"
+ nas_job = "scallop"
+ expected = "projects/{project}/locations/{location}/nasJobs/{nas_job}".format(
+ project=project,
+ location=location,
+ nas_job=nas_job,
+ )
+ actual = JobServiceClient.nas_job_path(project, location, nas_job)
+ assert expected == actual
+
+
+def test_parse_nas_job_path():
+ expected = {
+ "project": "abalone",
+ "location": "squid",
+ "nas_job": "clam",
+ }
+ path = JobServiceClient.nas_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_nas_job_path(path)
+ assert expected == actual
+
+
+def test_nas_trial_detail_path():
+ project = "whelk"
+ location = "octopus"
+ nas_job = "oyster"
+ nas_trial_detail = "nudibranch"
+ expected = "projects/{project}/locations/{location}/nasJobs/{nas_job}/nasTrialDetails/{nas_trial_detail}".format(
+ project=project,
+ location=location,
+ nas_job=nas_job,
+ nas_trial_detail=nas_trial_detail,
+ )
+ actual = JobServiceClient.nas_trial_detail_path(
+ project, location, nas_job, nas_trial_detail
+ )
+ assert expected == actual
+
+
+def test_parse_nas_trial_detail_path():
+ expected = {
+ "project": "cuttlefish",
+ "location": "mussel",
+ "nas_job": "winkle",
+ "nas_trial_detail": "nautilus",
+ }
+ path = JobServiceClient.nas_trial_detail_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_nas_trial_detail_path(path)
+ assert expected == actual
+
+
+def test_network_path():
+ project = "scallop"
+ network = "abalone"
+ expected = "projects/{project}/global/networks/{network}".format(
+ project=project,
+ network=network,
+ )
+ actual = JobServiceClient.network_path(project, network)
+ assert expected == actual
+
+
+def test_parse_network_path():
+ expected = {
+ "project": "squid",
+ "network": "clam",
+ }
+ path = JobServiceClient.network_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_network_path(path)
+ assert expected == actual
+
+
+def test_notification_channel_path():
+ project = "whelk"
+ notification_channel = "octopus"
+ expected = "projects/{project}/notificationChannels/{notification_channel}".format(
+ project=project,
+ notification_channel=notification_channel,
+ )
+ actual = JobServiceClient.notification_channel_path(project, notification_channel)
+ assert expected == actual
+
+
+def test_parse_notification_channel_path():
+ expected = {
+ "project": "oyster",
+ "notification_channel": "nudibranch",
+ }
+ path = JobServiceClient.notification_channel_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_notification_channel_path(path)
+ assert expected == actual
+
+
+def test_persistent_resource_path():
+ project = "cuttlefish"
+ location = "mussel"
+ persistent_resource = "winkle"
+ expected = "projects/{project}/locations/{location}/persistentResources/{persistent_resource}".format(
+ project=project,
+ location=location,
+ persistent_resource=persistent_resource,
+ )
+ actual = JobServiceClient.persistent_resource_path(
+ project, location, persistent_resource
+ )
+ assert expected == actual
+
+
+def test_parse_persistent_resource_path():
+ expected = {
+ "project": "nautilus",
+ "location": "scallop",
+ "persistent_resource": "abalone",
+ }
+ path = JobServiceClient.persistent_resource_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_persistent_resource_path(path)
+ assert expected == actual
+
+
+def test_reservation_path():
+ project_id_or_number = "squid"
+ zone = "clam"
+ reservation_name = "whelk"
+ expected = "projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}".format(
+ project_id_or_number=project_id_or_number,
+ zone=zone,
+ reservation_name=reservation_name,
+ )
+ actual = JobServiceClient.reservation_path(
+ project_id_or_number, zone, reservation_name
+ )
+ assert expected == actual
+
+
+def test_parse_reservation_path():
+ expected = {
+ "project_id_or_number": "octopus",
+ "zone": "oyster",
+ "reservation_name": "nudibranch",
+ }
+ path = JobServiceClient.reservation_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_reservation_path(path)
+ assert expected == actual
+
+
+def test_tensorboard_path():
+ project = "cuttlefish"
+ location = "mussel"
+ tensorboard = "winkle"
+ expected = (
+ "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(
+ project=project,
+ location=location,
+ tensorboard=tensorboard,
+ )
+ )
+ actual = JobServiceClient.tensorboard_path(project, location, tensorboard)
+ assert expected == actual
+
+
+def test_parse_tensorboard_path():
+ expected = {
+ "project": "nautilus",
+ "location": "scallop",
+ "tensorboard": "abalone",
+ }
+ path = JobServiceClient.tensorboard_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_tensorboard_path(path)
+ assert expected == actual
+
+
+def test_trial_path():
+ project = "squid"
+ location = "clam"
+ study = "whelk"
+ trial = "octopus"
+ expected = (
+ "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(
+ project=project,
+ location=location,
+ study=study,
+ trial=trial,
+ )
+ )
+ actual = JobServiceClient.trial_path(project, location, study, trial)
+ assert expected == actual
+
+
+def test_parse_trial_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ "study": "cuttlefish",
+ "trial": "mussel",
+ }
+ path = JobServiceClient.trial_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_trial_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "winkle"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = JobServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "nautilus",
+ }
+ path = JobServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "scallop"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = JobServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "abalone",
+ }
+ path = JobServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "squid"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = JobServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "clam",
+ }
+ path = JobServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "whelk"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = JobServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "octopus",
+ }
+ path = JobServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "oyster"
+ location = "nudibranch"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = JobServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "cuttlefish",
+ "location": "mussel",
+ }
+ path = JobServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.JobServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.JobServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = JobServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = JobServiceAsyncClient(credentials=async_anonymous_credentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = JobServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (JobServiceClient, transports.JobServiceGrpcTransport),
+ (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..85353fcbb87284bb87b85293192c09e115cdf263
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py
@@ -0,0 +1,6234 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.match_service import (
+ MatchServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.match_service import MatchServiceClient
+from google.cloud.aiplatform_v1beta1.services.match_service import transports
+from google.cloud.aiplatform_v1beta1.types import index
+from google.cloud.aiplatform_v1beta1.types import match_service
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert MatchServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ MatchServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+ )
+ assert (
+ MatchServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ MatchServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ MatchServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert MatchServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+def test__read_environment_variables():
+ assert MatchServiceClient._read_environment_variables() == (False, "auto", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert MatchServiceClient._read_environment_variables() == (True, "auto", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert MatchServiceClient._read_environment_variables() == (False, "auto", None)
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ MatchServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert MatchServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert MatchServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert MatchServiceClient._read_environment_variables() == (False, "auto", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ MatchServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert MatchServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert MatchServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ MatchServiceClient._get_client_cert_source(mock_provided_cert_source, False)
+ is None
+ )
+ assert (
+ MatchServiceClient._get_client_cert_source(mock_provided_cert_source, True)
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ MatchServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ MatchServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ MatchServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(MatchServiceClient),
+)
+@mock.patch.object(
+ MatchServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(MatchServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = MatchServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = MatchServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = MatchServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ MatchServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ MatchServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == MatchServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ MatchServiceClient._get_api_endpoint(None, None, default_universe, "auto")
+ == default_endpoint
+ )
+ assert (
+ MatchServiceClient._get_api_endpoint(None, None, default_universe, "always")
+ == MatchServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ MatchServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == MatchServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ MatchServiceClient._get_api_endpoint(None, None, mock_universe, "never")
+ == mock_endpoint
+ )
+ assert (
+ MatchServiceClient._get_api_endpoint(None, None, default_universe, "never")
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ MatchServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ MatchServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ MatchServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ MatchServiceClient._get_universe_domain(None, None)
+ == MatchServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ MatchServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (MatchServiceClient, "grpc"),
+ (MatchServiceAsyncClient, "grpc_asyncio"),
+ (MatchServiceClient, "rest"),
+ ],
+)
+def test_match_service_client_from_service_account_info(client_class, transport_name):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.MatchServiceGrpcTransport, "grpc"),
+ (transports.MatchServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.MatchServiceRestTransport, "rest"),
+ ],
+)
+def test_match_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (MatchServiceClient, "grpc"),
+ (MatchServiceAsyncClient, "grpc_asyncio"),
+ (MatchServiceClient, "rest"),
+ ],
+)
+def test_match_service_client_from_service_account_file(client_class, transport_name):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_match_service_client_get_transport_class():
+ transport = MatchServiceClient.get_transport_class()
+ available_transports = [
+ transports.MatchServiceGrpcTransport,
+ transports.MatchServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = MatchServiceClient.get_transport_class("grpc")
+ assert transport == transports.MatchServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (MatchServiceClient, transports.MatchServiceGrpcTransport, "grpc"),
+ (
+ MatchServiceAsyncClient,
+ transports.MatchServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (MatchServiceClient, transports.MatchServiceRestTransport, "rest"),
+ ],
+)
+@mock.patch.object(
+ MatchServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(MatchServiceClient),
+)
+@mock.patch.object(
+ MatchServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(MatchServiceAsyncClient),
+)
+def test_match_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(MatchServiceClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(MatchServiceClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (MatchServiceClient, transports.MatchServiceGrpcTransport, "grpc", "true"),
+ (
+ MatchServiceAsyncClient,
+ transports.MatchServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (MatchServiceClient, transports.MatchServiceGrpcTransport, "grpc", "false"),
+ (
+ MatchServiceAsyncClient,
+ transports.MatchServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (MatchServiceClient, transports.MatchServiceRestTransport, "rest", "true"),
+ (MatchServiceClient, transports.MatchServiceRestTransport, "rest", "false"),
+ ],
+)
+@mock.patch.object(
+ MatchServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(MatchServiceClient),
+)
+@mock.patch.object(
+ MatchServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(MatchServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_match_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize("client_class", [MatchServiceClient, MatchServiceAsyncClient])
+@mock.patch.object(
+ MatchServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MatchServiceClient)
+)
+@mock.patch.object(
+ MatchServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(MatchServiceAsyncClient),
+)
+def test_match_service_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize("client_class", [MatchServiceClient, MatchServiceAsyncClient])
+@mock.patch.object(
+ MatchServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(MatchServiceClient),
+)
+@mock.patch.object(
+ MatchServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(MatchServiceAsyncClient),
+)
+def test_match_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = MatchServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = MatchServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = MatchServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (MatchServiceClient, transports.MatchServiceGrpcTransport, "grpc"),
+ (
+ MatchServiceAsyncClient,
+ transports.MatchServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (MatchServiceClient, transports.MatchServiceRestTransport, "rest"),
+ ],
+)
+def test_match_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ MatchServiceClient,
+ transports.MatchServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ MatchServiceAsyncClient,
+ transports.MatchServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (MatchServiceClient, transports.MatchServiceRestTransport, "rest", None),
+ ],
+)
+def test_match_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_match_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.match_service.transports.MatchServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = MatchServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ MatchServiceClient,
+ transports.MatchServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ MatchServiceAsyncClient,
+ transports.MatchServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_match_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ match_service.FindNeighborsRequest,
+ dict,
+ ],
+)
+def test_find_neighbors(request_type, transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.find_neighbors), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = match_service.FindNeighborsResponse()
+ response = client.find_neighbors(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = match_service.FindNeighborsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, match_service.FindNeighborsResponse)
+
+
+def test_find_neighbors_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = match_service.FindNeighborsRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index_id="deployed_index_id_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.find_neighbors), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.find_neighbors(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == match_service.FindNeighborsRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index_id="deployed_index_id_value",
+ )
+
+
+def test_find_neighbors_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.find_neighbors in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.find_neighbors] = mock_rpc
+ request = {}
+ client.find_neighbors(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.find_neighbors(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_find_neighbors_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.find_neighbors
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.find_neighbors
+ ] = mock_rpc
+
+ request = {}
+ await client.find_neighbors(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.find_neighbors(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_find_neighbors_async(
+ transport: str = "grpc_asyncio", request_type=match_service.FindNeighborsRequest
+):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.find_neighbors), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ match_service.FindNeighborsResponse()
+ )
+ response = await client.find_neighbors(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = match_service.FindNeighborsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, match_service.FindNeighborsResponse)
+
+
+@pytest.mark.asyncio
+async def test_find_neighbors_async_from_dict():
+ await test_find_neighbors_async(request_type=dict)
+
+
+def test_find_neighbors_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = match_service.FindNeighborsRequest()
+
+ request.index_endpoint = "index_endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.find_neighbors), "__call__") as call:
+ call.return_value = match_service.FindNeighborsResponse()
+ client.find_neighbors(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "index_endpoint=index_endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_find_neighbors_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = match_service.FindNeighborsRequest()
+
+ request.index_endpoint = "index_endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.find_neighbors), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ match_service.FindNeighborsResponse()
+ )
+ await client.find_neighbors(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "index_endpoint=index_endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ match_service.ReadIndexDatapointsRequest,
+ dict,
+ ],
+)
+def test_read_index_datapoints(request_type, transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.read_index_datapoints), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = match_service.ReadIndexDatapointsResponse()
+ response = client.read_index_datapoints(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = match_service.ReadIndexDatapointsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, match_service.ReadIndexDatapointsResponse)
+
+
+def test_read_index_datapoints_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = match_service.ReadIndexDatapointsRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index_id="deployed_index_id_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.read_index_datapoints), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.read_index_datapoints(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == match_service.ReadIndexDatapointsRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index_id="deployed_index_id_value",
+ )
+
+
+def test_read_index_datapoints_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.read_index_datapoints
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.read_index_datapoints
+ ] = mock_rpc
+ request = {}
+ client.read_index_datapoints(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.read_index_datapoints(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_read_index_datapoints_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.read_index_datapoints
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.read_index_datapoints
+ ] = mock_rpc
+
+ request = {}
+ await client.read_index_datapoints(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.read_index_datapoints(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_read_index_datapoints_async(
+ transport: str = "grpc_asyncio",
+ request_type=match_service.ReadIndexDatapointsRequest,
+):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.read_index_datapoints), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ match_service.ReadIndexDatapointsResponse()
+ )
+ response = await client.read_index_datapoints(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = match_service.ReadIndexDatapointsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, match_service.ReadIndexDatapointsResponse)
+
+
+@pytest.mark.asyncio
+async def test_read_index_datapoints_async_from_dict():
+ await test_read_index_datapoints_async(request_type=dict)
+
+
+def test_read_index_datapoints_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = match_service.ReadIndexDatapointsRequest()
+
+ request.index_endpoint = "index_endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.read_index_datapoints), "__call__"
+ ) as call:
+ call.return_value = match_service.ReadIndexDatapointsResponse()
+ client.read_index_datapoints(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "index_endpoint=index_endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_read_index_datapoints_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = match_service.ReadIndexDatapointsRequest()
+
+ request.index_endpoint = "index_endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.read_index_datapoints), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ match_service.ReadIndexDatapointsResponse()
+ )
+ await client.read_index_datapoints(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "index_endpoint=index_endpoint_value",
+ ) in kw["metadata"]
+
+
+def test_find_neighbors_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.find_neighbors in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.find_neighbors] = mock_rpc
+
+ request = {}
+ client.find_neighbors(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.find_neighbors(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_find_neighbors_rest_required_fields(
+ request_type=match_service.FindNeighborsRequest,
+):
+ transport_class = transports.MatchServiceRestTransport
+
+ request_init = {}
+ request_init["index_endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).find_neighbors._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["indexEndpoint"] = "index_endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).find_neighbors._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "indexEndpoint" in jsonified_request
+ assert jsonified_request["indexEndpoint"] == "index_endpoint_value"
+
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = match_service.FindNeighborsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = match_service.FindNeighborsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.find_neighbors(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_find_neighbors_rest_unset_required_fields():
+ transport = transports.MatchServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.find_neighbors._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("indexEndpoint",)))
+
+
+def test_read_index_datapoints_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.read_index_datapoints
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.read_index_datapoints
+ ] = mock_rpc
+
+ request = {}
+ client.read_index_datapoints(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.read_index_datapoints(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_read_index_datapoints_rest_required_fields(
+ request_type=match_service.ReadIndexDatapointsRequest,
+):
+ transport_class = transports.MatchServiceRestTransport
+
+ request_init = {}
+ request_init["index_endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).read_index_datapoints._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["indexEndpoint"] = "index_endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).read_index_datapoints._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "indexEndpoint" in jsonified_request
+ assert jsonified_request["indexEndpoint"] == "index_endpoint_value"
+
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = match_service.ReadIndexDatapointsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = match_service.ReadIndexDatapointsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.read_index_datapoints(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_read_index_datapoints_rest_unset_required_fields():
+ transport = transports.MatchServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.read_index_datapoints._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("indexEndpoint",)))
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.MatchServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.MatchServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = MatchServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.MatchServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = MatchServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = MatchServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.MatchServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = MatchServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.MatchServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = MatchServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.MatchServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.MatchServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.MatchServiceGrpcTransport,
+ transports.MatchServiceGrpcAsyncIOTransport,
+ transports.MatchServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = MatchServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_find_neighbors_empty_call_grpc():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.find_neighbors), "__call__") as call:
+ call.return_value = match_service.FindNeighborsResponse()
+ client.find_neighbors(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = match_service.FindNeighborsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_read_index_datapoints_empty_call_grpc():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.read_index_datapoints), "__call__"
+ ) as call:
+ call.return_value = match_service.ReadIndexDatapointsResponse()
+ client.read_index_datapoints(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = match_service.ReadIndexDatapointsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = MatchServiceAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_find_neighbors_empty_call_grpc_asyncio():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.find_neighbors), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ match_service.FindNeighborsResponse()
+ )
+ await client.find_neighbors(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = match_service.FindNeighborsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_read_index_datapoints_empty_call_grpc_asyncio():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.read_index_datapoints), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ match_service.ReadIndexDatapointsResponse()
+ )
+ await client.read_index_datapoints(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = match_service.ReadIndexDatapointsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = MatchServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_find_neighbors_rest_bad_request(
+ request_type=match_service.FindNeighborsRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "index_endpoint": "projects/sample1/locations/sample2/indexEndpoints/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.find_neighbors(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ match_service.FindNeighborsRequest,
+ dict,
+ ],
+)
+def test_find_neighbors_rest_call_success(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "index_endpoint": "projects/sample1/locations/sample2/indexEndpoints/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = match_service.FindNeighborsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = match_service.FindNeighborsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.find_neighbors(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, match_service.FindNeighborsResponse)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_find_neighbors_rest_interceptors(null_interceptor):
+ transport = transports.MatchServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.MatchServiceRestInterceptor(),
+ )
+ client = MatchServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.MatchServiceRestInterceptor, "post_find_neighbors"
+ ) as post, mock.patch.object(
+ transports.MatchServiceRestInterceptor, "pre_find_neighbors"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = match_service.FindNeighborsRequest.pb(
+ match_service.FindNeighborsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = match_service.FindNeighborsResponse.to_json(
+ match_service.FindNeighborsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = match_service.FindNeighborsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = match_service.FindNeighborsResponse()
+
+ client.find_neighbors(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_read_index_datapoints_rest_bad_request(
+ request_type=match_service.ReadIndexDatapointsRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "index_endpoint": "projects/sample1/locations/sample2/indexEndpoints/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.read_index_datapoints(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ match_service.ReadIndexDatapointsRequest,
+ dict,
+ ],
+)
+def test_read_index_datapoints_rest_call_success(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "index_endpoint": "projects/sample1/locations/sample2/indexEndpoints/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = match_service.ReadIndexDatapointsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = match_service.ReadIndexDatapointsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.read_index_datapoints(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, match_service.ReadIndexDatapointsResponse)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_read_index_datapoints_rest_interceptors(null_interceptor):
+ transport = transports.MatchServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.MatchServiceRestInterceptor(),
+ )
+ client = MatchServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.MatchServiceRestInterceptor, "post_read_index_datapoints"
+ ) as post, mock.patch.object(
+ transports.MatchServiceRestInterceptor, "pre_read_index_datapoints"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = match_service.ReadIndexDatapointsRequest.pb(
+ match_service.ReadIndexDatapointsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = match_service.ReadIndexDatapointsResponse.to_json(
+ match_service.ReadIndexDatapointsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = match_service.ReadIndexDatapointsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = match_service.ReadIndexDatapointsResponse()
+
+ client.read_index_datapoints(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_find_neighbors_empty_call_rest():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.find_neighbors), "__call__") as call:
+ client.find_neighbors(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = match_service.FindNeighborsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_read_index_datapoints_empty_call_rest():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.read_index_datapoints), "__call__"
+ ) as call:
+ client.read_index_datapoints(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = match_service.ReadIndexDatapointsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = MatchServiceAsyncClient.get_transport_class("rest_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_find_neighbors_rest_asyncio_bad_request(
+ request_type=match_service.FindNeighborsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "index_endpoint": "projects/sample1/locations/sample2/indexEndpoints/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.find_neighbors(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ match_service.FindNeighborsRequest,
+ dict,
+ ],
+)
+async def test_find_neighbors_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "index_endpoint": "projects/sample1/locations/sample2/indexEndpoints/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = match_service.FindNeighborsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = match_service.FindNeighborsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.find_neighbors(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, match_service.FindNeighborsResponse)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_find_neighbors_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncMatchServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncMatchServiceRestInterceptor(),
+ )
+ client = MatchServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncMatchServiceRestInterceptor, "post_find_neighbors"
+ ) as post, mock.patch.object(
+ transports.AsyncMatchServiceRestInterceptor, "pre_find_neighbors"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = match_service.FindNeighborsRequest.pb(
+ match_service.FindNeighborsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = match_service.FindNeighborsResponse.to_json(
+ match_service.FindNeighborsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = match_service.FindNeighborsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = match_service.FindNeighborsResponse()
+
+ await client.find_neighbors(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_read_index_datapoints_rest_asyncio_bad_request(
+ request_type=match_service.ReadIndexDatapointsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "index_endpoint": "projects/sample1/locations/sample2/indexEndpoints/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.read_index_datapoints(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ match_service.ReadIndexDatapointsRequest,
+ dict,
+ ],
+)
+async def test_read_index_datapoints_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "index_endpoint": "projects/sample1/locations/sample2/indexEndpoints/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = match_service.ReadIndexDatapointsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = match_service.ReadIndexDatapointsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.read_index_datapoints(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, match_service.ReadIndexDatapointsResponse)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_read_index_datapoints_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncMatchServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncMatchServiceRestInterceptor(),
+ )
+ client = MatchServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncMatchServiceRestInterceptor, "post_read_index_datapoints"
+ ) as post, mock.patch.object(
+ transports.AsyncMatchServiceRestInterceptor, "pre_read_index_datapoints"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = match_service.ReadIndexDatapointsRequest.pb(
+ match_service.ReadIndexDatapointsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = match_service.ReadIndexDatapointsResponse.to_json(
+ match_service.ReadIndexDatapointsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = match_service.ReadIndexDatapointsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = match_service.ReadIndexDatapointsResponse()
+
+ await client.read_index_datapoints(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_find_neighbors_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.find_neighbors), "__call__") as call:
+ await client.find_neighbors(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = match_service.FindNeighborsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_read_index_datapoints_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.read_index_datapoints), "__call__"
+ ) as call:
+ await client.read_index_datapoints(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = match_service.ReadIndexDatapointsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.MatchServiceGrpcTransport,
+ )
+
+
+def test_match_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.MatchServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_match_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.match_service.transports.MatchServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.MatchServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "find_neighbors",
+ "read_index_datapoints",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_match_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.match_service.transports.MatchServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.MatchServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_match_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.match_service.transports.MatchServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.MatchServiceTransport()
+ adc.assert_called_once()
+
+
+def test_match_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ MatchServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.MatchServiceGrpcTransport,
+ transports.MatchServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_match_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.MatchServiceGrpcTransport,
+ transports.MatchServiceGrpcAsyncIOTransport,
+ transports.MatchServiceRestTransport,
+ ],
+)
+def test_match_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.MatchServiceGrpcTransport, grpc_helpers),
+ (transports.MatchServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_match_service_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.MatchServiceGrpcTransport, transports.MatchServiceGrpcAsyncIOTransport],
+)
+def test_match_service_grpc_transport_client_cert_source_for_mtls(transport_class):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_match_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.MatchServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_match_service_host_no_port(transport_name):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_match_service_host_with_port(transport_name):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_match_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = MatchServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = MatchServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.find_neighbors._session
+ session2 = client2.transport.find_neighbors._session
+ assert session1 != session2
+ session1 = client1.transport.read_index_datapoints._session
+ session2 = client2.transport.read_index_datapoints._session
+ assert session1 != session2
+
+
+def test_match_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.MatchServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_match_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.MatchServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.MatchServiceGrpcTransport, transports.MatchServiceGrpcAsyncIOTransport],
+)
+def test_match_service_transport_channel_mtls_with_client_cert_source(transport_class):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [transports.MatchServiceGrpcTransport, transports.MatchServiceGrpcAsyncIOTransport],
+)
+def test_match_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_index_endpoint_path():
+ project = "squid"
+ location = "clam"
+ index_endpoint = "whelk"
+ expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(
+ project=project,
+ location=location,
+ index_endpoint=index_endpoint,
+ )
+ actual = MatchServiceClient.index_endpoint_path(project, location, index_endpoint)
+ assert expected == actual
+
+
+def test_parse_index_endpoint_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "index_endpoint": "nudibranch",
+ }
+ path = MatchServiceClient.index_endpoint_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = MatchServiceClient.parse_index_endpoint_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = MatchServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = MatchServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = MatchServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "winkle"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = MatchServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = MatchServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = MatchServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "scallop"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = MatchServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = MatchServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = MatchServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "squid"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = MatchServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = MatchServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = MatchServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = MatchServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = MatchServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = MatchServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.MatchServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.MatchServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = MatchServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = MatchServiceClient(credentials=ga_credentials.AnonymousCredentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = MatchServiceAsyncClient(credentials=async_anonymous_credentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = MatchServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = MatchServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (MatchServiceClient, transports.MatchServiceGrpcTransport),
+ (MatchServiceAsyncClient, transports.MatchServiceGrpcAsyncIOTransport),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..745f1cd69b9605dc3b3579251b3b2b9a1c5dec6c
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py
@@ -0,0 +1,11562 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation
+from google.api_core import operation_async # type: ignore
+from google.api_core import operations_v1
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import (
+ PersistentResourceServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import (
+ PersistentResourceServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import pagers
+from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import (
+ transports,
+)
+from google.cloud.aiplatform_v1beta1.types import accelerator_type
+from google.cloud.aiplatform_v1beta1.types import encryption_spec
+from google.cloud.aiplatform_v1beta1.types import machine_resources
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.cloud.aiplatform_v1beta1.types import persistent_resource
+from google.cloud.aiplatform_v1beta1.types import (
+ persistent_resource as gca_persistent_resource,
+)
+from google.cloud.aiplatform_v1beta1.types import persistent_resource_service
+from google.cloud.aiplatform_v1beta1.types import reservation_affinity
+from google.cloud.aiplatform_v1beta1.types import service_networking
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import any_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert PersistentResourceServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ PersistentResourceServiceClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ PersistentResourceServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ PersistentResourceServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ PersistentResourceServiceClient._get_default_mtls_endpoint(
+ sandbox_mtls_endpoint
+ )
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ PersistentResourceServiceClient._get_default_mtls_endpoint(non_googleapi)
+ == non_googleapi
+ )
+
+
+def test__read_environment_variables():
+ assert PersistentResourceServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert PersistentResourceServiceClient._read_environment_variables() == (
+ True,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert PersistentResourceServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ PersistentResourceServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert PersistentResourceServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert PersistentResourceServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert PersistentResourceServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ PersistentResourceServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert PersistentResourceServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert PersistentResourceServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ PersistentResourceServiceClient._get_client_cert_source(
+ mock_provided_cert_source, False
+ )
+ is None
+ )
+ assert (
+ PersistentResourceServiceClient._get_client_cert_source(
+ mock_provided_cert_source, True
+ )
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ PersistentResourceServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ PersistentResourceServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ PersistentResourceServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PersistentResourceServiceClient),
+)
+@mock.patch.object(
+ PersistentResourceServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PersistentResourceServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = PersistentResourceServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = (
+ PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ PersistentResourceServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ PersistentResourceServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == PersistentResourceServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ PersistentResourceServiceClient._get_api_endpoint(
+ None, None, default_universe, "auto"
+ )
+ == default_endpoint
+ )
+ assert (
+ PersistentResourceServiceClient._get_api_endpoint(
+ None, None, default_universe, "always"
+ )
+ == PersistentResourceServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ PersistentResourceServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == PersistentResourceServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ PersistentResourceServiceClient._get_api_endpoint(
+ None, None, mock_universe, "never"
+ )
+ == mock_endpoint
+ )
+ assert (
+ PersistentResourceServiceClient._get_api_endpoint(
+ None, None, default_universe, "never"
+ )
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ PersistentResourceServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ PersistentResourceServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ PersistentResourceServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ PersistentResourceServiceClient._get_universe_domain(None, None)
+ == PersistentResourceServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ PersistentResourceServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (PersistentResourceServiceClient, "grpc"),
+ (PersistentResourceServiceAsyncClient, "grpc_asyncio"),
+ (PersistentResourceServiceClient, "rest"),
+ ],
+)
+def test_persistent_resource_service_client_from_service_account_info(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.PersistentResourceServiceGrpcTransport, "grpc"),
+ (transports.PersistentResourceServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.PersistentResourceServiceRestTransport, "rest"),
+ ],
+)
+def test_persistent_resource_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (PersistentResourceServiceClient, "grpc"),
+ (PersistentResourceServiceAsyncClient, "grpc_asyncio"),
+ (PersistentResourceServiceClient, "rest"),
+ ],
+)
+def test_persistent_resource_service_client_from_service_account_file(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_persistent_resource_service_client_get_transport_class():
+ transport = PersistentResourceServiceClient.get_transport_class()
+ available_transports = [
+ transports.PersistentResourceServiceGrpcTransport,
+ transports.PersistentResourceServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = PersistentResourceServiceClient.get_transport_class("grpc")
+ assert transport == transports.PersistentResourceServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ PersistentResourceServiceAsyncClient,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+@mock.patch.object(
+ PersistentResourceServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PersistentResourceServiceClient),
+)
+@mock.patch.object(
+ PersistentResourceServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PersistentResourceServiceAsyncClient),
+)
+def test_persistent_resource_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(
+ PersistentResourceServiceClient, "get_transport_class"
+ ) as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(
+ PersistentResourceServiceClient, "get_transport_class"
+ ) as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ PersistentResourceServiceAsyncClient,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ PersistentResourceServiceAsyncClient,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceRestTransport,
+ "rest",
+ "true",
+ ),
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceRestTransport,
+ "rest",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ PersistentResourceServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PersistentResourceServiceClient),
+)
+@mock.patch.object(
+ PersistentResourceServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PersistentResourceServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_persistent_resource_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class",
+ [PersistentResourceServiceClient, PersistentResourceServiceAsyncClient],
+)
+@mock.patch.object(
+ PersistentResourceServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(PersistentResourceServiceClient),
+)
+@mock.patch.object(
+ PersistentResourceServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(PersistentResourceServiceAsyncClient),
+)
+def test_persistent_resource_service_client_get_mtls_endpoint_and_cert_source(
+ client_class,
+):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class",
+ [PersistentResourceServiceClient, PersistentResourceServiceAsyncClient],
+)
+@mock.patch.object(
+ PersistentResourceServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PersistentResourceServiceClient),
+)
+@mock.patch.object(
+ PersistentResourceServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PersistentResourceServiceAsyncClient),
+)
+def test_persistent_resource_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = PersistentResourceServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = (
+ PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ PersistentResourceServiceAsyncClient,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+def test_persistent_resource_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PersistentResourceServiceAsyncClient,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceRestTransport,
+ "rest",
+ None,
+ ),
+ ],
+)
+def test_persistent_resource_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_persistent_resource_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.persistent_resource_service.transports.PersistentResourceServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = PersistentResourceServiceClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PersistentResourceServiceAsyncClient,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_persistent_resource_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.CreatePersistentResourceRequest,
+ dict,
+ ],
+)
+def test_create_persistent_resource(request_type, transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.create_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.CreatePersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_create_persistent_resource_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = persistent_resource_service.CreatePersistentResourceRequest(
+ parent="parent_value",
+ persistent_resource_id="persistent_resource_id_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_persistent_resource(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == persistent_resource_service.CreatePersistentResourceRequest(
+ parent="parent_value",
+ persistent_resource_id="persistent_resource_id_value",
+ )
+
+
+def test_create_persistent_resource_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_persistent_resource
+ ] = mock_rpc
+ request = {}
+ client.create_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_persistent_resource_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_persistent_resource
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ await client.create_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.create_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_persistent_resource_async(
+ transport: str = "grpc_asyncio",
+ request_type=persistent_resource_service.CreatePersistentResourceRequest,
+):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.create_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.CreatePersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_create_persistent_resource_async_from_dict():
+ await test_create_persistent_resource_async(request_type=dict)
+
+
+def test_create_persistent_resource_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.CreatePersistentResourceRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_persistent_resource_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.CreatePersistentResourceRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.create_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_persistent_resource_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_persistent_resource(
+ parent="parent_value",
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ persistent_resource_id="persistent_resource_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].persistent_resource
+ mock_val = gca_persistent_resource.PersistentResource(name="name_value")
+ assert arg == mock_val
+ arg = args[0].persistent_resource_id
+ mock_val = "persistent_resource_id_value"
+ assert arg == mock_val
+
+
+def test_create_persistent_resource_flattened_error():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_persistent_resource(
+ persistent_resource_service.CreatePersistentResourceRequest(),
+ parent="parent_value",
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ persistent_resource_id="persistent_resource_id_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_persistent_resource_flattened_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_persistent_resource(
+ parent="parent_value",
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ persistent_resource_id="persistent_resource_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].persistent_resource
+ mock_val = gca_persistent_resource.PersistentResource(name="name_value")
+ assert arg == mock_val
+ arg = args[0].persistent_resource_id
+ mock_val = "persistent_resource_id_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_persistent_resource_flattened_error_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_persistent_resource(
+ persistent_resource_service.CreatePersistentResourceRequest(),
+ parent="parent_value",
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ persistent_resource_id="persistent_resource_id_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.GetPersistentResourceRequest,
+ dict,
+ ],
+)
+def test_get_persistent_resource(request_type, transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = persistent_resource.PersistentResource(
+ name="name_value",
+ display_name="display_name_value",
+ state=persistent_resource.PersistentResource.State.PROVISIONING,
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ response = client.get_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.GetPersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, persistent_resource.PersistentResource)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == persistent_resource.PersistentResource.State.PROVISIONING
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+def test_get_persistent_resource_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = persistent_resource_service.GetPersistentResourceRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_persistent_resource(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == persistent_resource_service.GetPersistentResourceRequest(
+ name="name_value",
+ )
+
+
+def test_get_persistent_resource_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_persistent_resource
+ ] = mock_rpc
+ request = {}
+ client.get_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_persistent_resource_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_persistent_resource
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ await client.get_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_persistent_resource_async(
+ transport: str = "grpc_asyncio",
+ request_type=persistent_resource_service.GetPersistentResourceRequest,
+):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ persistent_resource.PersistentResource(
+ name="name_value",
+ display_name="display_name_value",
+ state=persistent_resource.PersistentResource.State.PROVISIONING,
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ response = await client.get_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.GetPersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, persistent_resource.PersistentResource)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == persistent_resource.PersistentResource.State.PROVISIONING
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+async def test_get_persistent_resource_async_from_dict():
+ await test_get_persistent_resource_async(request_type=dict)
+
+
+def test_get_persistent_resource_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.GetPersistentResourceRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = persistent_resource.PersistentResource()
+ client.get_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_persistent_resource_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.GetPersistentResourceRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ persistent_resource.PersistentResource()
+ )
+ await client.get_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_persistent_resource_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = persistent_resource.PersistentResource()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_persistent_resource(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_persistent_resource_flattened_error():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_persistent_resource(
+ persistent_resource_service.GetPersistentResourceRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_persistent_resource_flattened_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = persistent_resource.PersistentResource()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ persistent_resource.PersistentResource()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_persistent_resource(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_persistent_resource_flattened_error_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_persistent_resource(
+ persistent_resource_service.GetPersistentResourceRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.ListPersistentResourcesRequest,
+ dict,
+ ],
+)
+def test_list_persistent_resources(request_type, transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = persistent_resource_service.ListPersistentResourcesResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_persistent_resources(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.ListPersistentResourcesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListPersistentResourcesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_persistent_resources_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = persistent_resource_service.ListPersistentResourcesRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_persistent_resources(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == persistent_resource_service.ListPersistentResourcesRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_persistent_resources_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_persistent_resources
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_persistent_resources
+ ] = mock_rpc
+ request = {}
+ client.list_persistent_resources(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_persistent_resources(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_persistent_resources_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_persistent_resources
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_persistent_resources
+ ] = mock_rpc
+
+ request = {}
+ await client.list_persistent_resources(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_persistent_resources(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_persistent_resources_async(
+ transport: str = "grpc_asyncio",
+ request_type=persistent_resource_service.ListPersistentResourcesRequest,
+):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ persistent_resource_service.ListPersistentResourcesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_persistent_resources(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.ListPersistentResourcesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListPersistentResourcesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_persistent_resources_async_from_dict():
+ await test_list_persistent_resources_async(request_type=dict)
+
+
+def test_list_persistent_resources_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.ListPersistentResourcesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ call.return_value = (
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+ client.list_persistent_resources(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_persistent_resources_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.ListPersistentResourcesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+ await client.list_persistent_resources(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_persistent_resources_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_persistent_resources(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_persistent_resources_flattened_error():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_persistent_resources(
+ persistent_resource_service.ListPersistentResourcesRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_persistent_resources_flattened_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_persistent_resources(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_persistent_resources_flattened_error_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_persistent_resources(
+ persistent_resource_service.ListPersistentResourcesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_persistent_resources_pager(transport_name: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="abc",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[],
+ next_page_token="def",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="ghi",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_persistent_resources(
+ request={}, retry=retry, timeout=timeout
+ )
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(i, persistent_resource.PersistentResource) for i in results
+ )
+
+
+def test_list_persistent_resources_pages(transport_name: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="abc",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[],
+ next_page_token="def",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="ghi",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_persistent_resources(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_persistent_resources_async_pager():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="abc",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[],
+ next_page_token="def",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="ghi",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_persistent_resources(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(
+ isinstance(i, persistent_resource.PersistentResource) for i in responses
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_persistent_resources_async_pages():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="abc",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[],
+ next_page_token="def",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="ghi",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_persistent_resources(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.DeletePersistentResourceRequest,
+ dict,
+ ],
+)
+def test_delete_persistent_resource(request_type, transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.DeletePersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_persistent_resource_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = persistent_resource_service.DeletePersistentResourceRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_persistent_resource(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == persistent_resource_service.DeletePersistentResourceRequest(
+ name="name_value",
+ )
+
+
+def test_delete_persistent_resource_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_persistent_resource
+ ] = mock_rpc
+ request = {}
+ client.delete_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_persistent_resource_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_persistent_resource
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_persistent_resource_async(
+ transport: str = "grpc_asyncio",
+ request_type=persistent_resource_service.DeletePersistentResourceRequest,
+):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.DeletePersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_persistent_resource_async_from_dict():
+ await test_delete_persistent_resource_async(request_type=dict)
+
+
+def test_delete_persistent_resource_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.DeletePersistentResourceRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_persistent_resource_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.DeletePersistentResourceRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_persistent_resource_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_persistent_resource(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_persistent_resource_flattened_error():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_persistent_resource(
+ persistent_resource_service.DeletePersistentResourceRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_persistent_resource_flattened_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_persistent_resource(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_persistent_resource_flattened_error_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_persistent_resource(
+ persistent_resource_service.DeletePersistentResourceRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.UpdatePersistentResourceRequest,
+ dict,
+ ],
+)
+def test_update_persistent_resource(request_type, transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.update_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.UpdatePersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_update_persistent_resource_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = persistent_resource_service.UpdatePersistentResourceRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.update_persistent_resource(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == persistent_resource_service.UpdatePersistentResourceRequest()
+
+
+def test_update_persistent_resource_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.update_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_persistent_resource
+ ] = mock_rpc
+ request = {}
+ client.update_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_persistent_resource_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.update_persistent_resource
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.update_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ await client.update_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.update_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_persistent_resource_async(
+ transport: str = "grpc_asyncio",
+ request_type=persistent_resource_service.UpdatePersistentResourceRequest,
+):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.update_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.UpdatePersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_update_persistent_resource_async_from_dict():
+ await test_update_persistent_resource_async(request_type=dict)
+
+
+def test_update_persistent_resource_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.UpdatePersistentResourceRequest()
+
+ request.persistent_resource.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "persistent_resource.name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_update_persistent_resource_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.UpdatePersistentResourceRequest()
+
+ request.persistent_resource.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.update_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "persistent_resource.name=name_value",
+ ) in kw["metadata"]
+
+
+def test_update_persistent_resource_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_persistent_resource(
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].persistent_resource
+ mock_val = gca_persistent_resource.PersistentResource(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+def test_update_persistent_resource_flattened_error():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_persistent_resource(
+ persistent_resource_service.UpdatePersistentResourceRequest(),
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_persistent_resource_flattened_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_persistent_resource(
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].persistent_resource
+ mock_val = gca_persistent_resource.PersistentResource(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_update_persistent_resource_flattened_error_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_persistent_resource(
+ persistent_resource_service.UpdatePersistentResourceRequest(),
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.RebootPersistentResourceRequest,
+ dict,
+ ],
+)
+def test_reboot_persistent_resource(request_type, transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.reboot_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.RebootPersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_reboot_persistent_resource_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = persistent_resource_service.RebootPersistentResourceRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.reboot_persistent_resource(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == persistent_resource_service.RebootPersistentResourceRequest(
+ name="name_value",
+ )
+
+
+def test_reboot_persistent_resource_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.reboot_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.reboot_persistent_resource
+ ] = mock_rpc
+ request = {}
+ client.reboot_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.reboot_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_reboot_persistent_resource_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.reboot_persistent_resource
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.reboot_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ await client.reboot_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.reboot_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_reboot_persistent_resource_async(
+ transport: str = "grpc_asyncio",
+ request_type=persistent_resource_service.RebootPersistentResourceRequest,
+):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.reboot_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = persistent_resource_service.RebootPersistentResourceRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_reboot_persistent_resource_async_from_dict():
+ await test_reboot_persistent_resource_async(request_type=dict)
+
+
+def test_reboot_persistent_resource_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.RebootPersistentResourceRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.reboot_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_reboot_persistent_resource_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = persistent_resource_service.RebootPersistentResourceRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.reboot_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_reboot_persistent_resource_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.reboot_persistent_resource(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_reboot_persistent_resource_flattened_error():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.reboot_persistent_resource(
+ persistent_resource_service.RebootPersistentResourceRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_reboot_persistent_resource_flattened_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.reboot_persistent_resource(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_reboot_persistent_resource_flattened_error_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.reboot_persistent_resource(
+ persistent_resource_service.RebootPersistentResourceRequest(),
+ name="name_value",
+ )
+
+
+def test_create_persistent_resource_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ client.create_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_persistent_resource_rest_required_fields(
+ request_type=persistent_resource_service.CreatePersistentResourceRequest,
+):
+ transport_class = transports.PersistentResourceServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request_init["persistent_resource_id"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+ assert "persistentResourceId" not in jsonified_request
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_persistent_resource._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+ assert "persistentResourceId" in jsonified_request
+ assert (
+ jsonified_request["persistentResourceId"]
+ == request_init["persistent_resource_id"]
+ )
+
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["persistentResourceId"] = "persistent_resource_id_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_persistent_resource._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("persistent_resource_id",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "persistentResourceId" in jsonified_request
+ assert jsonified_request["persistentResourceId"] == "persistent_resource_id_value"
+
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_persistent_resource(request)
+
+ expected_params = [
+ (
+ "persistentResourceId",
+ "",
+ ),
+ ("$alt", "json;enum-encoding=int"),
+ ]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_persistent_resource_rest_unset_required_fields():
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_persistent_resource._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(("persistentResourceId",))
+ & set(
+ (
+ "parent",
+ "persistentResource",
+ "persistentResourceId",
+ )
+ )
+ )
+
+
+def test_create_persistent_resource_rest_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ persistent_resource_id="persistent_resource_id_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_persistent_resource(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/persistentResources"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_persistent_resource_rest_flattened_error(transport: str = "rest"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_persistent_resource(
+ persistent_resource_service.CreatePersistentResourceRequest(),
+ parent="parent_value",
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ persistent_resource_id="persistent_resource_id_value",
+ )
+
+
+def test_get_persistent_resource_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ client.get_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_persistent_resource_rest_required_fields(
+ request_type=persistent_resource_service.GetPersistentResourceRequest,
+):
+ transport_class = transports.PersistentResourceServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_persistent_resource._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_persistent_resource._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = persistent_resource.PersistentResource()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = persistent_resource.PersistentResource.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_persistent_resource(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_persistent_resource_rest_unset_required_fields():
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_persistent_resource._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_persistent_resource_rest_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = persistent_resource.PersistentResource()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = persistent_resource.PersistentResource.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_persistent_resource(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/persistentResources/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_persistent_resource_rest_flattened_error(transport: str = "rest"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_persistent_resource(
+ persistent_resource_service.GetPersistentResourceRequest(),
+ name="name_value",
+ )
+
+
+def test_list_persistent_resources_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_persistent_resources
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_persistent_resources
+ ] = mock_rpc
+
+ request = {}
+ client.list_persistent_resources(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_persistent_resources(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_persistent_resources_rest_required_fields(
+ request_type=persistent_resource_service.ListPersistentResourcesRequest,
+):
+ transport_class = transports.PersistentResourceServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_persistent_resources._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_persistent_resources._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "page_size",
+ "page_token",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = persistent_resource_service.ListPersistentResourcesResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ persistent_resource_service.ListPersistentResourcesResponse.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_persistent_resources(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_persistent_resources_rest_unset_required_fields():
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_persistent_resources._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "pageSize",
+ "pageToken",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_persistent_resources_rest_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = persistent_resource_service.ListPersistentResourcesResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = persistent_resource_service.ListPersistentResourcesResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_persistent_resources(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/persistentResources"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_persistent_resources_rest_flattened_error(transport: str = "rest"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_persistent_resources(
+ persistent_resource_service.ListPersistentResourcesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_persistent_resources_rest_pager(transport: str = "rest"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="abc",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[],
+ next_page_token="def",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ ],
+ next_page_token="ghi",
+ ),
+ persistent_resource_service.ListPersistentResourcesResponse(
+ persistent_resources=[
+ persistent_resource.PersistentResource(),
+ persistent_resource.PersistentResource(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ persistent_resource_service.ListPersistentResourcesResponse.to_json(x)
+ for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_persistent_resources(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(
+ isinstance(i, persistent_resource.PersistentResource) for i in results
+ )
+
+ pages = list(client.list_persistent_resources(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_persistent_resource_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ client.delete_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_persistent_resource_rest_required_fields(
+ request_type=persistent_resource_service.DeletePersistentResourceRequest,
+):
+ transport_class = transports.PersistentResourceServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_persistent_resource._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_persistent_resource._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_persistent_resource(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_persistent_resource_rest_unset_required_fields():
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_persistent_resource._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_persistent_resource_rest_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_persistent_resource(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/persistentResources/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_persistent_resource_rest_flattened_error(transport: str = "rest"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_persistent_resource(
+ persistent_resource_service.DeletePersistentResourceRequest(),
+ name="name_value",
+ )
+
+
+def test_update_persistent_resource_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.update_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ client.update_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_update_persistent_resource_rest_required_fields(
+ request_type=persistent_resource_service.UpdatePersistentResourceRequest,
+):
+ transport_class = transports.PersistentResourceServiceRestTransport
+
+ request_init = {}
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_persistent_resource._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_persistent_resource._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("update_mask",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "patch",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.update_persistent_resource(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_update_persistent_resource_rest_unset_required_fields():
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.update_persistent_resource._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(("updateMask",))
+ & set(
+ (
+ "persistentResource",
+ "updateMask",
+ )
+ )
+ )
+
+
+def test_update_persistent_resource_rest_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "persistent_resource": {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.update_persistent_resource(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{persistent_resource.name=projects/*/locations/*/persistentResources/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_update_persistent_resource_rest_flattened_error(transport: str = "rest"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_persistent_resource(
+ persistent_resource_service.UpdatePersistentResourceRequest(),
+ persistent_resource=gca_persistent_resource.PersistentResource(
+ name="name_value"
+ ),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+def test_reboot_persistent_resource_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.reboot_persistent_resource
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.reboot_persistent_resource
+ ] = mock_rpc
+
+ request = {}
+ client.reboot_persistent_resource(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.reboot_persistent_resource(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_reboot_persistent_resource_rest_required_fields(
+ request_type=persistent_resource_service.RebootPersistentResourceRequest,
+):
+ transport_class = transports.PersistentResourceServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).reboot_persistent_resource._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).reboot_persistent_resource._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.reboot_persistent_resource(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_reboot_persistent_resource_rest_unset_required_fields():
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.reboot_persistent_resource._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_reboot_persistent_resource_rest_flattened():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.reboot_persistent_resource(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/persistentResources/*}:reboot"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_reboot_persistent_resource_rest_flattened_error(transport: str = "rest"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.reboot_persistent_resource(
+ persistent_resource_service.RebootPersistentResourceRequest(),
+ name="name_value",
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.PersistentResourceServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.PersistentResourceServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PersistentResourceServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.PersistentResourceServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = PersistentResourceServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = PersistentResourceServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.PersistentResourceServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PersistentResourceServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.PersistentResourceServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = PersistentResourceServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.PersistentResourceServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.PersistentResourceServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PersistentResourceServiceGrpcTransport,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ transports.PersistentResourceServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = PersistentResourceServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_persistent_resource_empty_call_grpc():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.CreatePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_persistent_resource_empty_call_grpc():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = persistent_resource.PersistentResource()
+ client.get_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.GetPersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_persistent_resources_empty_call_grpc():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ call.return_value = (
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+ client.list_persistent_resources(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.ListPersistentResourcesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_persistent_resource_empty_call_grpc():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.DeletePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_persistent_resource_empty_call_grpc():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.UpdatePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_reboot_persistent_resource_empty_call_grpc():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.reboot_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.RebootPersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = PersistentResourceServiceAsyncClient.get_transport_class(
+ "grpc_asyncio"
+ )(credentials=async_anonymous_credentials())
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_persistent_resource_empty_call_grpc_asyncio():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.create_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.CreatePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_persistent_resource_empty_call_grpc_asyncio():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ persistent_resource.PersistentResource(
+ name="name_value",
+ display_name="display_name_value",
+ state=persistent_resource.PersistentResource.State.PROVISIONING,
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+ )
+ await client.get_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.GetPersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_persistent_resources_empty_call_grpc_asyncio():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ persistent_resource_service.ListPersistentResourcesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_persistent_resources(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.ListPersistentResourcesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_persistent_resource_empty_call_grpc_asyncio():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.DeletePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_persistent_resource_empty_call_grpc_asyncio():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.update_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.UpdatePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_reboot_persistent_resource_empty_call_grpc_asyncio():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.reboot_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.RebootPersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = PersistentResourceServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_create_persistent_resource_rest_bad_request(
+ request_type=persistent_resource_service.CreatePersistentResourceRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_persistent_resource(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.CreatePersistentResourceRequest,
+ dict,
+ ],
+)
+def test_create_persistent_resource_rest_call_success(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["persistent_resource"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "resource_pools": [
+ {
+ "id": "id_value",
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ "used_replica_count": 1912,
+ "autoscaling_spec": {
+ "min_replica_count": 1803,
+ "max_replica_count": 1805,
+ },
+ }
+ ],
+ "state": 1,
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "update_time": {},
+ "labels": {},
+ "network": "network_value",
+ "psc_interface_config": {"network_attachment": "network_attachment_value"},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "resource_runtime_spec": {
+ "service_account_spec": {
+ "enable_custom_service_account": True,
+ "service_account": "service_account_value",
+ },
+ "ray_spec": {
+ "image_uri": "image_uri_value",
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "resource_pool_images": {},
+ "head_node_resource_pool_id": "head_node_resource_pool_id_value",
+ "ray_metric_spec": {"disabled": True},
+ "ray_logs_spec": {"disabled": True},
+ },
+ },
+ "resource_runtime": {
+ "access_uris": {},
+ "notebook_runtime_template": "notebook_runtime_template_value",
+ },
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = (
+ persistent_resource_service.CreatePersistentResourceRequest.meta.fields[
+ "persistent_resource"
+ ]
+ )
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["persistent_resource"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["persistent_resource"][field])):
+ del request_init["persistent_resource"][field][i][subfield]
+ else:
+ del request_init["persistent_resource"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_persistent_resource_rest_interceptors(null_interceptor):
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "post_create_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "pre_create_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.CreatePersistentResourceRequest.pb(
+ persistent_resource_service.CreatePersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = persistent_resource_service.CreatePersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.create_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_persistent_resource_rest_bad_request(
+ request_type=persistent_resource_service.GetPersistentResourceRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_persistent_resource(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.GetPersistentResourceRequest,
+ dict,
+ ],
+)
+def test_get_persistent_resource_rest_call_success(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = persistent_resource.PersistentResource(
+ name="name_value",
+ display_name="display_name_value",
+ state=persistent_resource.PersistentResource.State.PROVISIONING,
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = persistent_resource.PersistentResource.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, persistent_resource.PersistentResource)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == persistent_resource.PersistentResource.State.PROVISIONING
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_persistent_resource_rest_interceptors(null_interceptor):
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "post_get_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "pre_get_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.GetPersistentResourceRequest.pb(
+ persistent_resource_service.GetPersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = persistent_resource.PersistentResource.to_json(
+ persistent_resource.PersistentResource()
+ )
+ req.return_value.content = return_value
+
+ request = persistent_resource_service.GetPersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = persistent_resource.PersistentResource()
+
+ client.get_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_persistent_resources_rest_bad_request(
+ request_type=persistent_resource_service.ListPersistentResourcesRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_persistent_resources(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.ListPersistentResourcesRequest,
+ dict,
+ ],
+)
+def test_list_persistent_resources_rest_call_success(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = persistent_resource_service.ListPersistentResourcesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = persistent_resource_service.ListPersistentResourcesResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_persistent_resources(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListPersistentResourcesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_persistent_resources_rest_interceptors(null_interceptor):
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "post_list_persistent_resources",
+ ) as post, mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "pre_list_persistent_resources",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.ListPersistentResourcesRequest.pb(
+ persistent_resource_service.ListPersistentResourcesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ persistent_resource_service.ListPersistentResourcesResponse.to_json(
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+ )
+ req.return_value.content = return_value
+
+ request = persistent_resource_service.ListPersistentResourcesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+
+ client.list_persistent_resources(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_persistent_resource_rest_bad_request(
+ request_type=persistent_resource_service.DeletePersistentResourceRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_persistent_resource(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.DeletePersistentResourceRequest,
+ dict,
+ ],
+)
+def test_delete_persistent_resource_rest_call_success(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_persistent_resource_rest_interceptors(null_interceptor):
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "post_delete_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "pre_delete_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.DeletePersistentResourceRequest.pb(
+ persistent_resource_service.DeletePersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = persistent_resource_service.DeletePersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_update_persistent_resource_rest_bad_request(
+ request_type=persistent_resource_service.UpdatePersistentResourceRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "persistent_resource": {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.update_persistent_resource(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.UpdatePersistentResourceRequest,
+ dict,
+ ],
+)
+def test_update_persistent_resource_rest_call_success(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "persistent_resource": {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ }
+ request_init["persistent_resource"] = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3",
+ "display_name": "display_name_value",
+ "resource_pools": [
+ {
+ "id": "id_value",
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ "used_replica_count": 1912,
+ "autoscaling_spec": {
+ "min_replica_count": 1803,
+ "max_replica_count": 1805,
+ },
+ }
+ ],
+ "state": 1,
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "update_time": {},
+ "labels": {},
+ "network": "network_value",
+ "psc_interface_config": {"network_attachment": "network_attachment_value"},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "resource_runtime_spec": {
+ "service_account_spec": {
+ "enable_custom_service_account": True,
+ "service_account": "service_account_value",
+ },
+ "ray_spec": {
+ "image_uri": "image_uri_value",
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "resource_pool_images": {},
+ "head_node_resource_pool_id": "head_node_resource_pool_id_value",
+ "ray_metric_spec": {"disabled": True},
+ "ray_logs_spec": {"disabled": True},
+ },
+ },
+ "resource_runtime": {
+ "access_uris": {},
+ "notebook_runtime_template": "notebook_runtime_template_value",
+ },
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = (
+ persistent_resource_service.UpdatePersistentResourceRequest.meta.fields[
+ "persistent_resource"
+ ]
+ )
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["persistent_resource"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["persistent_resource"][field])):
+ del request_init["persistent_resource"][field][i][subfield]
+ else:
+ del request_init["persistent_resource"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.update_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_persistent_resource_rest_interceptors(null_interceptor):
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "post_update_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "pre_update_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.UpdatePersistentResourceRequest.pb(
+ persistent_resource_service.UpdatePersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = persistent_resource_service.UpdatePersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.update_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_reboot_persistent_resource_rest_bad_request(
+ request_type=persistent_resource_service.RebootPersistentResourceRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.reboot_persistent_resource(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.RebootPersistentResourceRequest,
+ dict,
+ ],
+)
+def test_reboot_persistent_resource_rest_call_success(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.reboot_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_reboot_persistent_resource_rest_interceptors(null_interceptor):
+ transport = transports.PersistentResourceServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "post_reboot_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.PersistentResourceServiceRestInterceptor,
+ "pre_reboot_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.RebootPersistentResourceRequest.pb(
+ persistent_resource_service.RebootPersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = persistent_resource_service.RebootPersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.reboot_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_persistent_resource_empty_call_rest():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ client.create_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.CreatePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_persistent_resource_empty_call_rest():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ client.get_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.GetPersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_persistent_resources_empty_call_rest():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ client.list_persistent_resources(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.ListPersistentResourcesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_persistent_resource_empty_call_rest():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ client.delete_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.DeletePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_persistent_resource_empty_call_rest():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ client.update_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.UpdatePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_reboot_persistent_resource_empty_call_rest():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ client.reboot_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.RebootPersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+def test_persistent_resource_service_rest_lro_client():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AbstractOperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = PersistentResourceServiceAsyncClient.get_transport_class(
+ "rest_asyncio"
+ )(credentials=async_anonymous_credentials())
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_create_persistent_resource_rest_asyncio_bad_request(
+ request_type=persistent_resource_service.CreatePersistentResourceRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_persistent_resource(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.CreatePersistentResourceRequest,
+ dict,
+ ],
+)
+async def test_create_persistent_resource_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["persistent_resource"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "resource_pools": [
+ {
+ "id": "id_value",
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ "used_replica_count": 1912,
+ "autoscaling_spec": {
+ "min_replica_count": 1803,
+ "max_replica_count": 1805,
+ },
+ }
+ ],
+ "state": 1,
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "update_time": {},
+ "labels": {},
+ "network": "network_value",
+ "psc_interface_config": {"network_attachment": "network_attachment_value"},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "resource_runtime_spec": {
+ "service_account_spec": {
+ "enable_custom_service_account": True,
+ "service_account": "service_account_value",
+ },
+ "ray_spec": {
+ "image_uri": "image_uri_value",
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "resource_pool_images": {},
+ "head_node_resource_pool_id": "head_node_resource_pool_id_value",
+ "ray_metric_spec": {"disabled": True},
+ "ray_logs_spec": {"disabled": True},
+ },
+ },
+ "resource_runtime": {
+ "access_uris": {},
+ "notebook_runtime_template": "notebook_runtime_template_value",
+ },
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = (
+ persistent_resource_service.CreatePersistentResourceRequest.meta.fields[
+ "persistent_resource"
+ ]
+ )
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["persistent_resource"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["persistent_resource"][field])):
+ del request_init["persistent_resource"][field][i][subfield]
+ else:
+ del request_init["persistent_resource"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_persistent_resource_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPersistentResourceServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "post_create_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "pre_create_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.CreatePersistentResourceRequest.pb(
+ persistent_resource_service.CreatePersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = persistent_resource_service.CreatePersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.create_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_persistent_resource_rest_asyncio_bad_request(
+ request_type=persistent_resource_service.GetPersistentResourceRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_persistent_resource(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.GetPersistentResourceRequest,
+ dict,
+ ],
+)
+async def test_get_persistent_resource_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = persistent_resource.PersistentResource(
+ name="name_value",
+ display_name="display_name_value",
+ state=persistent_resource.PersistentResource.State.PROVISIONING,
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = persistent_resource.PersistentResource.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, persistent_resource.PersistentResource)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == persistent_resource.PersistentResource.State.PROVISIONING
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_persistent_resource_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPersistentResourceServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "post_get_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "pre_get_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.GetPersistentResourceRequest.pb(
+ persistent_resource_service.GetPersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = persistent_resource.PersistentResource.to_json(
+ persistent_resource.PersistentResource()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = persistent_resource_service.GetPersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = persistent_resource.PersistentResource()
+
+ await client.get_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_persistent_resources_rest_asyncio_bad_request(
+ request_type=persistent_resource_service.ListPersistentResourcesRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_persistent_resources(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.ListPersistentResourcesRequest,
+ dict,
+ ],
+)
+async def test_list_persistent_resources_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = persistent_resource_service.ListPersistentResourcesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = persistent_resource_service.ListPersistentResourcesResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_persistent_resources(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListPersistentResourcesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_persistent_resources_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPersistentResourceServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "post_list_persistent_resources",
+ ) as post, mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "pre_list_persistent_resources",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.ListPersistentResourcesRequest.pb(
+ persistent_resource_service.ListPersistentResourcesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ persistent_resource_service.ListPersistentResourcesResponse.to_json(
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = persistent_resource_service.ListPersistentResourcesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ persistent_resource_service.ListPersistentResourcesResponse()
+ )
+
+ await client.list_persistent_resources(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_persistent_resource_rest_asyncio_bad_request(
+ request_type=persistent_resource_service.DeletePersistentResourceRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_persistent_resource(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.DeletePersistentResourceRequest,
+ dict,
+ ],
+)
+async def test_delete_persistent_resource_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_persistent_resource_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPersistentResourceServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "post_delete_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "pre_delete_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.DeletePersistentResourceRequest.pb(
+ persistent_resource_service.DeletePersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = persistent_resource_service.DeletePersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_update_persistent_resource_rest_asyncio_bad_request(
+ request_type=persistent_resource_service.UpdatePersistentResourceRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "persistent_resource": {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.update_persistent_resource(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.UpdatePersistentResourceRequest,
+ dict,
+ ],
+)
+async def test_update_persistent_resource_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "persistent_resource": {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ }
+ request_init["persistent_resource"] = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3",
+ "display_name": "display_name_value",
+ "resource_pools": [
+ {
+ "id": "id_value",
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": ["values_value1", "values_value2"],
+ },
+ },
+ "replica_count": 1384,
+ "disk_spec": {
+ "boot_disk_type": "boot_disk_type_value",
+ "boot_disk_size_gb": 1792,
+ },
+ "used_replica_count": 1912,
+ "autoscaling_spec": {
+ "min_replica_count": 1803,
+ "max_replica_count": 1805,
+ },
+ }
+ ],
+ "state": 1,
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "update_time": {},
+ "labels": {},
+ "network": "network_value",
+ "psc_interface_config": {"network_attachment": "network_attachment_value"},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "resource_runtime_spec": {
+ "service_account_spec": {
+ "enable_custom_service_account": True,
+ "service_account": "service_account_value",
+ },
+ "ray_spec": {
+ "image_uri": "image_uri_value",
+ "nfs_mounts": [
+ {
+ "server": "server_value",
+ "path": "path_value",
+ "mount_point": "mount_point_value",
+ }
+ ],
+ "resource_pool_images": {},
+ "head_node_resource_pool_id": "head_node_resource_pool_id_value",
+ "ray_metric_spec": {"disabled": True},
+ "ray_logs_spec": {"disabled": True},
+ },
+ },
+ "resource_runtime": {
+ "access_uris": {},
+ "notebook_runtime_template": "notebook_runtime_template_value",
+ },
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = (
+ persistent_resource_service.UpdatePersistentResourceRequest.meta.fields[
+ "persistent_resource"
+ ]
+ )
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["persistent_resource"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["persistent_resource"][field])):
+ del request_init["persistent_resource"][field][i][subfield]
+ else:
+ del request_init["persistent_resource"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.update_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_update_persistent_resource_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPersistentResourceServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "post_update_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "pre_update_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.UpdatePersistentResourceRequest.pb(
+ persistent_resource_service.UpdatePersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = persistent_resource_service.UpdatePersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.update_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_reboot_persistent_resource_rest_asyncio_bad_request(
+ request_type=persistent_resource_service.RebootPersistentResourceRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.reboot_persistent_resource(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ persistent_resource_service.RebootPersistentResourceRequest,
+ dict,
+ ],
+)
+async def test_reboot_persistent_resource_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/persistentResources/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.reboot_persistent_resource(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_reboot_persistent_resource_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPersistentResourceServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPersistentResourceServiceRestInterceptor(),
+ )
+ client = PersistentResourceServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "post_reboot_persistent_resource",
+ ) as post, mock.patch.object(
+ transports.AsyncPersistentResourceServiceRestInterceptor,
+ "pre_reboot_persistent_resource",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = persistent_resource_service.RebootPersistentResourceRequest.pb(
+ persistent_resource_service.RebootPersistentResourceRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = persistent_resource_service.RebootPersistentResourceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.reboot_persistent_resource(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_persistent_resource_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_persistent_resource), "__call__"
+ ) as call:
+ await client.create_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.CreatePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_persistent_resource_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_persistent_resource), "__call__"
+ ) as call:
+ await client.get_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.GetPersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_persistent_resources_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_persistent_resources), "__call__"
+ ) as call:
+ await client.list_persistent_resources(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.ListPersistentResourcesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_persistent_resource_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_persistent_resource), "__call__"
+ ) as call:
+ await client.delete_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.DeletePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_persistent_resource_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_persistent_resource), "__call__"
+ ) as call:
+ await client.update_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.UpdatePersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_reboot_persistent_resource_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.reboot_persistent_resource), "__call__"
+ ) as call:
+ await client.reboot_persistent_resource(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = persistent_resource_service.RebootPersistentResourceRequest()
+
+ assert args[0] == request_msg
+
+
+def test_persistent_resource_service_rest_asyncio_lro_client():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AsyncOperationsRestClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.PersistentResourceServiceGrpcTransport,
+ )
+
+
+def test_persistent_resource_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.PersistentResourceServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_persistent_resource_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.persistent_resource_service.transports.PersistentResourceServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.PersistentResourceServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "create_persistent_resource",
+ "get_persistent_resource",
+ "list_persistent_resources",
+ "delete_persistent_resource",
+ "update_persistent_resource",
+ "reboot_persistent_resource",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_persistent_resource_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.persistent_resource_service.transports.PersistentResourceServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PersistentResourceServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_persistent_resource_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.persistent_resource_service.transports.PersistentResourceServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PersistentResourceServiceTransport()
+ adc.assert_called_once()
+
+
+def test_persistent_resource_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ PersistentResourceServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PersistentResourceServiceGrpcTransport,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_persistent_resource_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PersistentResourceServiceGrpcTransport,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ transports.PersistentResourceServiceRestTransport,
+ ],
+)
+def test_persistent_resource_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.PersistentResourceServiceGrpcTransport, grpc_helpers),
+ (transports.PersistentResourceServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_persistent_resource_service_transport_create_channel(
+ transport_class, grpc_helpers
+):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PersistentResourceServiceGrpcTransport,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_persistent_resource_service_grpc_transport_client_cert_source_for_mtls(
+ transport_class,
+):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_persistent_resource_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.PersistentResourceServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_persistent_resource_service_host_no_port(transport_name):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_persistent_resource_service_host_with_port(transport_name):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_persistent_resource_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = PersistentResourceServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = PersistentResourceServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.create_persistent_resource._session
+ session2 = client2.transport.create_persistent_resource._session
+ assert session1 != session2
+ session1 = client1.transport.get_persistent_resource._session
+ session2 = client2.transport.get_persistent_resource._session
+ assert session1 != session2
+ session1 = client1.transport.list_persistent_resources._session
+ session2 = client2.transport.list_persistent_resources._session
+ assert session1 != session2
+ session1 = client1.transport.delete_persistent_resource._session
+ session2 = client2.transport.delete_persistent_resource._session
+ assert session1 != session2
+ session1 = client1.transport.update_persistent_resource._session
+ session2 = client2.transport.update_persistent_resource._session
+ assert session1 != session2
+ session1 = client1.transport.reboot_persistent_resource._session
+ session2 = client2.transport.reboot_persistent_resource._session
+ assert session1 != session2
+
+
+def test_persistent_resource_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PersistentResourceServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_persistent_resource_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PersistentResourceServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PersistentResourceServiceGrpcTransport,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_persistent_resource_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PersistentResourceServiceGrpcTransport,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_persistent_resource_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_persistent_resource_service_grpc_lro_client():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_persistent_resource_service_grpc_lro_async_client():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_network_path():
+ project = "squid"
+ network = "clam"
+ expected = "projects/{project}/global/networks/{network}".format(
+ project=project,
+ network=network,
+ )
+ actual = PersistentResourceServiceClient.network_path(project, network)
+ assert expected == actual
+
+
+def test_parse_network_path():
+ expected = {
+ "project": "whelk",
+ "network": "octopus",
+ }
+ path = PersistentResourceServiceClient.network_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_network_path(path)
+ assert expected == actual
+
+
+def test_network_attachment_path():
+ project = "oyster"
+ region = "nudibranch"
+ networkattachment = "cuttlefish"
+ expected = "projects/{project}/regions/{region}/networkAttachments/{networkattachment}".format(
+ project=project,
+ region=region,
+ networkattachment=networkattachment,
+ )
+ actual = PersistentResourceServiceClient.network_attachment_path(
+ project, region, networkattachment
+ )
+ assert expected == actual
+
+
+def test_parse_network_attachment_path():
+ expected = {
+ "project": "mussel",
+ "region": "winkle",
+ "networkattachment": "nautilus",
+ }
+ path = PersistentResourceServiceClient.network_attachment_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_network_attachment_path(path)
+ assert expected == actual
+
+
+def test_notebook_runtime_template_path():
+ project = "scallop"
+ location = "abalone"
+ notebook_runtime_template = "squid"
+ expected = "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format(
+ project=project,
+ location=location,
+ notebook_runtime_template=notebook_runtime_template,
+ )
+ actual = PersistentResourceServiceClient.notebook_runtime_template_path(
+ project, location, notebook_runtime_template
+ )
+ assert expected == actual
+
+
+def test_parse_notebook_runtime_template_path():
+ expected = {
+ "project": "clam",
+ "location": "whelk",
+ "notebook_runtime_template": "octopus",
+ }
+ path = PersistentResourceServiceClient.notebook_runtime_template_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_notebook_runtime_template_path(path)
+ assert expected == actual
+
+
+def test_persistent_resource_path():
+ project = "oyster"
+ location = "nudibranch"
+ persistent_resource = "cuttlefish"
+ expected = "projects/{project}/locations/{location}/persistentResources/{persistent_resource}".format(
+ project=project,
+ location=location,
+ persistent_resource=persistent_resource,
+ )
+ actual = PersistentResourceServiceClient.persistent_resource_path(
+ project, location, persistent_resource
+ )
+ assert expected == actual
+
+
+def test_parse_persistent_resource_path():
+ expected = {
+ "project": "mussel",
+ "location": "winkle",
+ "persistent_resource": "nautilus",
+ }
+ path = PersistentResourceServiceClient.persistent_resource_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_persistent_resource_path(path)
+ assert expected == actual
+
+
+def test_reservation_path():
+ project_id_or_number = "scallop"
+ zone = "abalone"
+ reservation_name = "squid"
+ expected = "projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}".format(
+ project_id_or_number=project_id_or_number,
+ zone=zone,
+ reservation_name=reservation_name,
+ )
+ actual = PersistentResourceServiceClient.reservation_path(
+ project_id_or_number, zone, reservation_name
+ )
+ assert expected == actual
+
+
+def test_parse_reservation_path():
+ expected = {
+ "project_id_or_number": "clam",
+ "zone": "whelk",
+ "reservation_name": "octopus",
+ }
+ path = PersistentResourceServiceClient.reservation_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_reservation_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "oyster"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = PersistentResourceServiceClient.common_billing_account_path(
+ billing_account
+ )
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "nudibranch",
+ }
+ path = PersistentResourceServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "cuttlefish"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = PersistentResourceServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "mussel",
+ }
+ path = PersistentResourceServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "winkle"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = PersistentResourceServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "nautilus",
+ }
+ path = PersistentResourceServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "scallop"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = PersistentResourceServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "abalone",
+ }
+ path = PersistentResourceServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "squid"
+ location = "clam"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = PersistentResourceServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "whelk",
+ "location": "octopus",
+ }
+ path = PersistentResourceServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PersistentResourceServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.PersistentResourceServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.PersistentResourceServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = PersistentResourceServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PersistentResourceServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (
+ PersistentResourceServiceClient,
+ transports.PersistentResourceServiceGrpcTransport,
+ ),
+ (
+ PersistentResourceServiceAsyncClient,
+ transports.PersistentResourceServiceGrpcAsyncIOTransport,
+ ),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff5d304edd097110f9beeab1994b3e9dce33afe0
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py
@@ -0,0 +1,17624 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation
+from google.api_core import operation_async # type: ignore
+from google.api_core import operations_v1
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.pipeline_service import (
+ PipelineServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.pipeline_service import (
+ PipelineServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers
+from google.cloud.aiplatform_v1beta1.services.pipeline_service import transports
+from google.cloud.aiplatform_v1beta1.types import artifact
+from google.cloud.aiplatform_v1beta1.types import context
+from google.cloud.aiplatform_v1beta1.types import deployed_model_ref
+from google.cloud.aiplatform_v1beta1.types import encryption_spec
+from google.cloud.aiplatform_v1beta1.types import env_var
+from google.cloud.aiplatform_v1beta1.types import execution
+from google.cloud.aiplatform_v1beta1.types import explanation
+from google.cloud.aiplatform_v1beta1.types import explanation_metadata
+from google.cloud.aiplatform_v1beta1.types import io
+from google.cloud.aiplatform_v1beta1.types import model
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.cloud.aiplatform_v1beta1.types import pipeline_failure_policy
+from google.cloud.aiplatform_v1beta1.types import pipeline_job
+from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job
+from google.cloud.aiplatform_v1beta1.types import pipeline_service
+from google.cloud.aiplatform_v1beta1.types import pipeline_state
+from google.cloud.aiplatform_v1beta1.types import service_networking
+from google.cloud.aiplatform_v1beta1.types import training_pipeline
+from google.cloud.aiplatform_v1beta1.types import (
+ training_pipeline as gca_training_pipeline,
+)
+from google.cloud.aiplatform_v1beta1.types import ui_pipeline_spec
+from google.cloud.aiplatform_v1beta1.types import value
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import any_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert PipelineServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ PipelineServiceClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+ )
+
+
+def test__read_environment_variables():
+ assert PipelineServiceClient._read_environment_variables() == (False, "auto", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert PipelineServiceClient._read_environment_variables() == (
+ True,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert PipelineServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ PipelineServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert PipelineServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert PipelineServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert PipelineServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ PipelineServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert PipelineServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert PipelineServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ PipelineServiceClient._get_client_cert_source(mock_provided_cert_source, False)
+ is None
+ )
+ assert (
+ PipelineServiceClient._get_client_cert_source(mock_provided_cert_source, True)
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ PipelineServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ PipelineServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ PipelineServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PipelineServiceClient),
+)
+@mock.patch.object(
+ PipelineServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PipelineServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = PipelineServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = PipelineServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = PipelineServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ PipelineServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ PipelineServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == PipelineServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ PipelineServiceClient._get_api_endpoint(None, None, default_universe, "auto")
+ == default_endpoint
+ )
+ assert (
+ PipelineServiceClient._get_api_endpoint(None, None, default_universe, "always")
+ == PipelineServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ PipelineServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == PipelineServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ PipelineServiceClient._get_api_endpoint(None, None, mock_universe, "never")
+ == mock_endpoint
+ )
+ assert (
+ PipelineServiceClient._get_api_endpoint(None, None, default_universe, "never")
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ PipelineServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ PipelineServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ PipelineServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ PipelineServiceClient._get_universe_domain(None, None)
+ == PipelineServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ PipelineServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (PipelineServiceClient, "grpc"),
+ (PipelineServiceAsyncClient, "grpc_asyncio"),
+ (PipelineServiceClient, "rest"),
+ ],
+)
+def test_pipeline_service_client_from_service_account_info(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.PipelineServiceGrpcTransport, "grpc"),
+ (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.PipelineServiceRestTransport, "rest"),
+ ],
+)
+def test_pipeline_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (PipelineServiceClient, "grpc"),
+ (PipelineServiceAsyncClient, "grpc_asyncio"),
+ (PipelineServiceClient, "rest"),
+ ],
+)
+def test_pipeline_service_client_from_service_account_file(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_pipeline_service_client_get_transport_class():
+ transport = PipelineServiceClient.get_transport_class()
+ available_transports = [
+ transports.PipelineServiceGrpcTransport,
+ transports.PipelineServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = PipelineServiceClient.get_transport_class("grpc")
+ assert transport == transports.PipelineServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"),
+ (
+ PipelineServiceAsyncClient,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (PipelineServiceClient, transports.PipelineServiceRestTransport, "rest"),
+ ],
+)
+@mock.patch.object(
+ PipelineServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PipelineServiceClient),
+)
+@mock.patch.object(
+ PipelineServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PipelineServiceAsyncClient),
+)
+def test_pipeline_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(PipelineServiceClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(PipelineServiceClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ PipelineServiceClient,
+ transports.PipelineServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ PipelineServiceAsyncClient,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ PipelineServiceClient,
+ transports.PipelineServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ PipelineServiceAsyncClient,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (
+ PipelineServiceClient,
+ transports.PipelineServiceRestTransport,
+ "rest",
+ "true",
+ ),
+ (
+ PipelineServiceClient,
+ transports.PipelineServiceRestTransport,
+ "rest",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ PipelineServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PipelineServiceClient),
+)
+@mock.patch.object(
+ PipelineServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PipelineServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_pipeline_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [PipelineServiceClient, PipelineServiceAsyncClient]
+)
+@mock.patch.object(
+ PipelineServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(PipelineServiceClient),
+)
+@mock.patch.object(
+ PipelineServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(PipelineServiceAsyncClient),
+)
+def test_pipeline_service_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [PipelineServiceClient, PipelineServiceAsyncClient]
+)
+@mock.patch.object(
+ PipelineServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PipelineServiceClient),
+)
+@mock.patch.object(
+ PipelineServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PipelineServiceAsyncClient),
+)
+def test_pipeline_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = PipelineServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = PipelineServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = PipelineServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"),
+ (
+ PipelineServiceAsyncClient,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (PipelineServiceClient, transports.PipelineServiceRestTransport, "rest"),
+ ],
+)
+def test_pipeline_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PipelineServiceClient,
+ transports.PipelineServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PipelineServiceAsyncClient,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (PipelineServiceClient, transports.PipelineServiceRestTransport, "rest", None),
+ ],
+)
+def test_pipeline_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_pipeline_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = PipelineServiceClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PipelineServiceClient,
+ transports.PipelineServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PipelineServiceAsyncClient,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_pipeline_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CreateTrainingPipelineRequest,
+ dict,
+ ],
+)
+def test_create_training_pipeline(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+ response = client.create_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.CreateTrainingPipelineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_training_pipeline.TrainingPipeline)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.training_task_definition == "training_task_definition_value"
+ assert response.model_id == "model_id_value"
+ assert response.parent_model == "parent_model_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+def test_create_training_pipeline_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.CreateTrainingPipelineRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_training_pipeline(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.CreateTrainingPipelineRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_training_pipeline_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_training_pipeline
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_training_pipeline
+ ] = mock_rpc
+ request = {}
+ client.create_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_training_pipeline
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_training_pipeline
+ ] = mock_rpc
+
+ request = {}
+ await client.create_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.create_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.CreateTrainingPipelineRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+ )
+ response = await client.create_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.CreateTrainingPipelineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_training_pipeline.TrainingPipeline)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.training_task_definition == "training_task_definition_value"
+ assert response.model_id == "model_id_value"
+ assert response.parent_model == "parent_model_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_async_from_dict():
+ await test_create_training_pipeline_async(request_type=dict)
+
+
+def test_create_training_pipeline_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CreateTrainingPipelineRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = gca_training_pipeline.TrainingPipeline()
+ client.create_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CreateTrainingPipelineRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_training_pipeline.TrainingPipeline()
+ )
+ await client.create_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_training_pipeline_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_training_pipeline.TrainingPipeline()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_training_pipeline(
+ parent="parent_value",
+ training_pipeline=gca_training_pipeline.TrainingPipeline(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].training_pipeline
+ mock_val = gca_training_pipeline.TrainingPipeline(name="name_value")
+ assert arg == mock_val
+
+
+def test_create_training_pipeline_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_training_pipeline(
+ pipeline_service.CreateTrainingPipelineRequest(),
+ parent="parent_value",
+ training_pipeline=gca_training_pipeline.TrainingPipeline(name="name_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_training_pipeline.TrainingPipeline()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_training_pipeline.TrainingPipeline()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_training_pipeline(
+ parent="parent_value",
+ training_pipeline=gca_training_pipeline.TrainingPipeline(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].training_pipeline
+ mock_val = gca_training_pipeline.TrainingPipeline(name="name_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_training_pipeline(
+ pipeline_service.CreateTrainingPipelineRequest(),
+ parent="parent_value",
+ training_pipeline=gca_training_pipeline.TrainingPipeline(name="name_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.GetTrainingPipelineRequest,
+ dict,
+ ],
+)
+def test_get_training_pipeline(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+ response = client.get_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.GetTrainingPipelineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, training_pipeline.TrainingPipeline)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.training_task_definition == "training_task_definition_value"
+ assert response.model_id == "model_id_value"
+ assert response.parent_model == "parent_model_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+def test_get_training_pipeline_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.GetTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_training_pipeline(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.GetTrainingPipelineRequest(
+ name="name_value",
+ )
+
+
+def test_get_training_pipeline_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_training_pipeline
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_training_pipeline
+ ] = mock_rpc
+ request = {}
+ client.get_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_training_pipeline
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_training_pipeline
+ ] = mock_rpc
+
+ request = {}
+ await client.get_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.GetTrainingPipelineRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+ )
+ response = await client.get_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.GetTrainingPipelineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, training_pipeline.TrainingPipeline)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.training_task_definition == "training_task_definition_value"
+ assert response.model_id == "model_id_value"
+ assert response.parent_model == "parent_model_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_async_from_dict():
+ await test_get_training_pipeline_async(request_type=dict)
+
+
+def test_get_training_pipeline_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.GetTrainingPipelineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = training_pipeline.TrainingPipeline()
+ client.get_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.GetTrainingPipelineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ training_pipeline.TrainingPipeline()
+ )
+ await client.get_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_training_pipeline_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = training_pipeline.TrainingPipeline()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_training_pipeline(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_training_pipeline_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_training_pipeline(
+ pipeline_service.GetTrainingPipelineRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = training_pipeline.TrainingPipeline()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ training_pipeline.TrainingPipeline()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_training_pipeline(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_training_pipeline(
+ pipeline_service.GetTrainingPipelineRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.ListTrainingPipelinesRequest,
+ dict,
+ ],
+)
+def test_list_training_pipelines(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_service.ListTrainingPipelinesResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_training_pipelines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.ListTrainingPipelinesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTrainingPipelinesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_training_pipelines_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.ListTrainingPipelinesRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_training_pipelines(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.ListTrainingPipelinesRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_training_pipelines_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_training_pipelines
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_training_pipelines
+ ] = mock_rpc
+ request = {}
+ client.list_training_pipelines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_training_pipelines(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_training_pipelines
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_training_pipelines
+ ] = mock_rpc
+
+ request = {}
+ await client.list_training_pipelines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_training_pipelines(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.ListTrainingPipelinesRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListTrainingPipelinesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_training_pipelines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.ListTrainingPipelinesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_from_dict():
+ await test_list_training_pipelines_async(request_type=dict)
+
+
+def test_list_training_pipelines_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.ListTrainingPipelinesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ call.return_value = pipeline_service.ListTrainingPipelinesResponse()
+ client.list_training_pipelines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.ListTrainingPipelinesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListTrainingPipelinesResponse()
+ )
+ await client.list_training_pipelines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_training_pipelines_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_service.ListTrainingPipelinesResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_training_pipelines(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_training_pipelines_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_training_pipelines(
+ pipeline_service.ListTrainingPipelinesRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_service.ListTrainingPipelinesResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListTrainingPipelinesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_training_pipelines(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_training_pipelines(
+ pipeline_service.ListTrainingPipelinesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_training_pipelines_pager(transport_name: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_training_pipelines(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, training_pipeline.TrainingPipeline) for i in results)
+
+
+def test_list_training_pipelines_pages(transport_name: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_training_pipelines(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_pager():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_training_pipelines(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, training_pipeline.TrainingPipeline) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_pages():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_training_pipelines(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.DeleteTrainingPipelineRequest,
+ dict,
+ ],
+)
+def test_delete_training_pipeline(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.DeleteTrainingPipelineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_training_pipeline_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.DeleteTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_training_pipeline(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.DeleteTrainingPipelineRequest(
+ name="name_value",
+ )
+
+
+def test_delete_training_pipeline_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_training_pipeline
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_training_pipeline
+ ] = mock_rpc
+ request = {}
+ client.delete_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_training_pipeline
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_training_pipeline
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.DeleteTrainingPipelineRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.DeleteTrainingPipelineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_async_from_dict():
+ await test_delete_training_pipeline_async(request_type=dict)
+
+
+def test_delete_training_pipeline_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.DeleteTrainingPipelineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.DeleteTrainingPipelineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_training_pipeline_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_training_pipeline(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_training_pipeline_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_training_pipeline(
+ pipeline_service.DeleteTrainingPipelineRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_training_pipeline(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_training_pipeline(
+ pipeline_service.DeleteTrainingPipelineRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CancelTrainingPipelineRequest,
+ dict,
+ ],
+)
+def test_cancel_training_pipeline(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.CancelTrainingPipelineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_training_pipeline_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.CancelTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.cancel_training_pipeline(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.CancelTrainingPipelineRequest(
+ name="name_value",
+ )
+
+
+def test_cancel_training_pipeline_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_training_pipeline
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_training_pipeline
+ ] = mock_rpc
+ request = {}
+ client.cancel_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.cancel_training_pipeline
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.cancel_training_pipeline
+ ] = mock_rpc
+
+ request = {}
+ await client.cancel_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.cancel_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.CancelTrainingPipelineRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.CancelTrainingPipelineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_async_from_dict():
+ await test_cancel_training_pipeline_async(request_type=dict)
+
+
+def test_cancel_training_pipeline_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CancelTrainingPipelineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CancelTrainingPipelineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_cancel_training_pipeline_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.cancel_training_pipeline(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_cancel_training_pipeline_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_training_pipeline(
+ pipeline_service.CancelTrainingPipelineRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.cancel_training_pipeline(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.cancel_training_pipeline(
+ pipeline_service.CancelTrainingPipelineRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CreatePipelineJobRequest,
+ dict,
+ ],
+)
+def test_create_pipeline_job(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+ response = client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.CreatePipelineJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_pipeline_job.PipelineJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == "service_account_value"
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.template_uri == "template_uri_value"
+ assert response.schedule_name == "schedule_name_value"
+ assert response.preflight_validations is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+ assert response.original_pipeline_job_id == 2512
+
+
+def test_create_pipeline_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.CreatePipelineJobRequest(
+ parent="parent_value",
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_pipeline_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.CreatePipelineJobRequest(
+ parent="parent_value",
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+
+def test_create_pipeline_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_pipeline_job in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_pipeline_job
+ ] = mock_rpc
+ request = {}
+ client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_pipeline_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_pipeline_job
+ ] = mock_rpc
+
+ request = {}
+ await client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.create_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.CreatePipelineJobRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+ )
+ response = await client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.CreatePipelineJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_pipeline_job.PipelineJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == "service_account_value"
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.template_uri == "template_uri_value"
+ assert response.schedule_name == "schedule_name_value"
+ assert response.preflight_validations is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+ assert response.original_pipeline_job_id == 2512
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_async_from_dict():
+ await test_create_pipeline_job_async(request_type=dict)
+
+
+def test_create_pipeline_job_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CreatePipelineJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ call.return_value = gca_pipeline_job.PipelineJob()
+ client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CreatePipelineJobRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_pipeline_job.PipelineJob()
+ )
+ await client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_pipeline_job_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_pipeline_job.PipelineJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_pipeline_job(
+ parent="parent_value",
+ pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].pipeline_job
+ mock_val = gca_pipeline_job.PipelineJob(name="name_value")
+ assert arg == mock_val
+ arg = args[0].pipeline_job_id
+ mock_val = "pipeline_job_id_value"
+ assert arg == mock_val
+
+
+def test_create_pipeline_job_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_pipeline_job(
+ pipeline_service.CreatePipelineJobRequest(),
+ parent="parent_value",
+ pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_pipeline_job.PipelineJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_pipeline_job.PipelineJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_pipeline_job(
+ parent="parent_value",
+ pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].pipeline_job
+ mock_val = gca_pipeline_job.PipelineJob(name="name_value")
+ assert arg == mock_val
+ arg = args[0].pipeline_job_id
+ mock_val = "pipeline_job_id_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_pipeline_job(
+ pipeline_service.CreatePipelineJobRequest(),
+ parent="parent_value",
+ pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.GetPipelineJobRequest,
+ dict,
+ ],
+)
+def test_get_pipeline_job(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+ response = client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.GetPipelineJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pipeline_job.PipelineJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == "service_account_value"
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.template_uri == "template_uri_value"
+ assert response.schedule_name == "schedule_name_value"
+ assert response.preflight_validations is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+ assert response.original_pipeline_job_id == 2512
+
+
+def test_get_pipeline_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.GetPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_pipeline_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.GetPipelineJobRequest(
+ name="name_value",
+ )
+
+
+def test_get_pipeline_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_pipeline_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_pipeline_job
+ ] = mock_rpc
+ request = {}
+ client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_pipeline_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_pipeline_job
+ ] = mock_rpc
+
+ request = {}
+ await client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_async(
+ transport: str = "grpc_asyncio", request_type=pipeline_service.GetPipelineJobRequest
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+ )
+ response = await client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.GetPipelineJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pipeline_job.PipelineJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == "service_account_value"
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.template_uri == "template_uri_value"
+ assert response.schedule_name == "schedule_name_value"
+ assert response.preflight_validations is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+ assert response.original_pipeline_job_id == 2512
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_async_from_dict():
+ await test_get_pipeline_job_async(request_type=dict)
+
+
+def test_get_pipeline_job_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.GetPipelineJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ call.return_value = pipeline_job.PipelineJob()
+ client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.GetPipelineJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_job.PipelineJob()
+ )
+ await client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_pipeline_job_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_job.PipelineJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_pipeline_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_pipeline_job_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_pipeline_job(
+ pipeline_service.GetPipelineJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_job.PipelineJob()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_job.PipelineJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_pipeline_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_pipeline_job(
+ pipeline_service.GetPipelineJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.ListPipelineJobsRequest,
+ dict,
+ ],
+)
+def test_list_pipeline_jobs(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_service.ListPipelineJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.ListPipelineJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListPipelineJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_pipeline_jobs_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.ListPipelineJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ order_by="order_by_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_pipeline_jobs(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.ListPipelineJobsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ order_by="order_by_value",
+ )
+
+
+def test_list_pipeline_jobs_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_pipeline_jobs in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_pipeline_jobs
+ ] = mock_rpc
+ request = {}
+ client.list_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_pipeline_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_pipeline_jobs
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_pipeline_jobs
+ ] = mock_rpc
+
+ request = {}
+ await client.list_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_pipeline_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.ListPipelineJobsRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListPipelineJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.ListPipelineJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListPipelineJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_from_dict():
+ await test_list_pipeline_jobs_async(request_type=dict)
+
+
+def test_list_pipeline_jobs_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.ListPipelineJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value = pipeline_service.ListPipelineJobsResponse()
+ client.list_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.ListPipelineJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListPipelineJobsResponse()
+ )
+ await client.list_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_pipeline_jobs_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_service.ListPipelineJobsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_pipeline_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_pipeline_jobs_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_pipeline_jobs(
+ pipeline_service.ListPipelineJobsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_service.ListPipelineJobsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListPipelineJobsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_pipeline_jobs(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_pipeline_jobs(
+ pipeline_service.ListPipelineJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_pipeline_jobs_pager(transport_name: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_pipeline_jobs(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, pipeline_job.PipelineJob) for i in results)
+
+
+def test_list_pipeline_jobs_pages(transport_name: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_pipeline_jobs(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pager():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_pipeline_jobs(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, pipeline_job.PipelineJob) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pages():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_pipeline_jobs(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.DeletePipelineJobRequest,
+ dict,
+ ],
+)
+def test_delete_pipeline_job(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.DeletePipelineJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_pipeline_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.DeletePipelineJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_pipeline_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.DeletePipelineJobRequest(
+ name="name_value",
+ )
+
+
+def test_delete_pipeline_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_pipeline_job in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_pipeline_job
+ ] = mock_rpc
+ request = {}
+ client.delete_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_pipeline_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_pipeline_job
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.DeletePipelineJobRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.DeletePipelineJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_async_from_dict():
+ await test_delete_pipeline_job_async(request_type=dict)
+
+
+def test_delete_pipeline_job_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.DeletePipelineJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.DeletePipelineJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_pipeline_job_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_pipeline_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_pipeline_job_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_pipeline_job(
+ pipeline_service.DeletePipelineJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_pipeline_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_pipeline_job(
+ pipeline_service.DeletePipelineJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.BatchDeletePipelineJobsRequest,
+ dict,
+ ],
+)
+def test_batch_delete_pipeline_jobs(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.batch_delete_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.BatchDeletePipelineJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_batch_delete_pipeline_jobs_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.BatchDeletePipelineJobsRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.batch_delete_pipeline_jobs(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.BatchDeletePipelineJobsRequest(
+ parent="parent_value",
+ )
+
+
+def test_batch_delete_pipeline_jobs_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.batch_delete_pipeline_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.batch_delete_pipeline_jobs
+ ] = mock_rpc
+ request = {}
+ client.batch_delete_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.batch_delete_pipeline_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_pipeline_jobs_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.batch_delete_pipeline_jobs
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.batch_delete_pipeline_jobs
+ ] = mock_rpc
+
+ request = {}
+ await client.batch_delete_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.batch_delete_pipeline_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_pipeline_jobs_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.BatchDeletePipelineJobsRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.batch_delete_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.BatchDeletePipelineJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_pipeline_jobs_async_from_dict():
+ await test_batch_delete_pipeline_jobs_async(request_type=dict)
+
+
+def test_batch_delete_pipeline_jobs_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.BatchDeletePipelineJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.batch_delete_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_pipeline_jobs_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.BatchDeletePipelineJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.batch_delete_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_batch_delete_pipeline_jobs_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.batch_delete_pipeline_jobs(
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].names
+ mock_val = ["names_value"]
+ assert arg == mock_val
+
+
+def test_batch_delete_pipeline_jobs_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.batch_delete_pipeline_jobs(
+ pipeline_service.BatchDeletePipelineJobsRequest(),
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_pipeline_jobs_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.batch_delete_pipeline_jobs(
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].names
+ mock_val = ["names_value"]
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_pipeline_jobs_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.batch_delete_pipeline_jobs(
+ pipeline_service.BatchDeletePipelineJobsRequest(),
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CancelPipelineJobRequest,
+ dict,
+ ],
+)
+def test_cancel_pipeline_job(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.CancelPipelineJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_pipeline_job_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.CancelPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.cancel_pipeline_job(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.CancelPipelineJobRequest(
+ name="name_value",
+ )
+
+
+def test_cancel_pipeline_job_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_pipeline_job in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_pipeline_job
+ ] = mock_rpc
+ request = {}
+ client.cancel_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.cancel_pipeline_job
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.cancel_pipeline_job
+ ] = mock_rpc
+
+ request = {}
+ await client.cancel_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.cancel_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.CancelPipelineJobRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.CancelPipelineJobRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_async_from_dict():
+ await test_cancel_pipeline_job_async(request_type=dict)
+
+
+def test_cancel_pipeline_job_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CancelPipelineJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CancelPipelineJobRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_cancel_pipeline_job_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.cancel_pipeline_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_cancel_pipeline_job_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_pipeline_job(
+ pipeline_service.CancelPipelineJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.cancel_pipeline_job(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.cancel_pipeline_job(
+ pipeline_service.CancelPipelineJobRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.BatchCancelPipelineJobsRequest,
+ dict,
+ ],
+)
+def test_batch_cancel_pipeline_jobs(request_type, transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.BatchCancelPipelineJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_batch_cancel_pipeline_jobs_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = pipeline_service.BatchCancelPipelineJobsRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.batch_cancel_pipeline_jobs(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.BatchCancelPipelineJobsRequest(
+ parent="parent_value",
+ )
+
+
+def test_batch_cancel_pipeline_jobs_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.batch_cancel_pipeline_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.batch_cancel_pipeline_jobs
+ ] = mock_rpc
+ request = {}
+ client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_batch_cancel_pipeline_jobs_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.batch_cancel_pipeline_jobs
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.batch_cancel_pipeline_jobs
+ ] = mock_rpc
+
+ request = {}
+ await client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_batch_cancel_pipeline_jobs_async(
+ transport: str = "grpc_asyncio",
+ request_type=pipeline_service.BatchCancelPipelineJobsRequest,
+):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = pipeline_service.BatchCancelPipelineJobsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_batch_cancel_pipeline_jobs_async_from_dict():
+ await test_batch_cancel_pipeline_jobs_async(request_type=dict)
+
+
+def test_batch_cancel_pipeline_jobs_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.BatchCancelPipelineJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_batch_cancel_pipeline_jobs_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.BatchCancelPipelineJobsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_batch_cancel_pipeline_jobs_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.batch_cancel_pipeline_jobs(
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].names
+ mock_val = ["names_value"]
+ assert arg == mock_val
+
+
+def test_batch_cancel_pipeline_jobs_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.batch_cancel_pipeline_jobs(
+ pipeline_service.BatchCancelPipelineJobsRequest(),
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+
+@pytest.mark.asyncio
+async def test_batch_cancel_pipeline_jobs_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.batch_cancel_pipeline_jobs(
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].names
+ mock_val = ["names_value"]
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_batch_cancel_pipeline_jobs_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.batch_cancel_pipeline_jobs(
+ pipeline_service.BatchCancelPipelineJobsRequest(),
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+
+def test_create_training_pipeline_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_training_pipeline
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_training_pipeline
+ ] = mock_rpc
+
+ request = {}
+ client.create_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_training_pipeline_rest_required_fields(
+ request_type=pipeline_service.CreateTrainingPipelineRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_training_pipeline._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_training_pipeline._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_training_pipeline.TrainingPipeline()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_training_pipeline.TrainingPipeline.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_training_pipeline(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_training_pipeline_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_training_pipeline._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "trainingPipeline",
+ )
+ )
+ )
+
+
+def test_create_training_pipeline_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_training_pipeline.TrainingPipeline()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ training_pipeline=gca_training_pipeline.TrainingPipeline(name="name_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = gca_training_pipeline.TrainingPipeline.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_training_pipeline(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/trainingPipelines"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_training_pipeline_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_training_pipeline(
+ pipeline_service.CreateTrainingPipelineRequest(),
+ parent="parent_value",
+ training_pipeline=gca_training_pipeline.TrainingPipeline(name="name_value"),
+ )
+
+
+def test_get_training_pipeline_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_training_pipeline
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_training_pipeline
+ ] = mock_rpc
+
+ request = {}
+ client.get_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_training_pipeline_rest_required_fields(
+ request_type=pipeline_service.GetTrainingPipelineRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_training_pipeline._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_training_pipeline._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = training_pipeline.TrainingPipeline()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = training_pipeline.TrainingPipeline.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_training_pipeline(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_training_pipeline_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_training_pipeline._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_training_pipeline_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = training_pipeline.TrainingPipeline()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = training_pipeline.TrainingPipeline.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_training_pipeline(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_training_pipeline_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_training_pipeline(
+ pipeline_service.GetTrainingPipelineRequest(),
+ name="name_value",
+ )
+
+
+def test_list_training_pipelines_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_training_pipelines
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_training_pipelines
+ ] = mock_rpc
+
+ request = {}
+ client.list_training_pipelines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_training_pipelines(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_training_pipelines_rest_required_fields(
+ request_type=pipeline_service.ListTrainingPipelinesRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_training_pipelines._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_training_pipelines._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "page_size",
+ "page_token",
+ "read_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_service.ListTrainingPipelinesResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = pipeline_service.ListTrainingPipelinesResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_training_pipelines(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_training_pipelines_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_training_pipelines._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "pageSize",
+ "pageToken",
+ "readMask",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_training_pipelines_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_service.ListTrainingPipelinesResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = pipeline_service.ListTrainingPipelinesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_training_pipelines(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/trainingPipelines"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_training_pipelines_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_training_pipelines(
+ pipeline_service.ListTrainingPipelinesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_training_pipelines_rest_pager(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListTrainingPipelinesResponse(
+ training_pipelines=[
+ training_pipeline.TrainingPipeline(),
+ training_pipeline.TrainingPipeline(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ pipeline_service.ListTrainingPipelinesResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_training_pipelines(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, training_pipeline.TrainingPipeline) for i in results)
+
+ pages = list(client.list_training_pipelines(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_training_pipeline_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_training_pipeline
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_training_pipeline
+ ] = mock_rpc
+
+ request = {}
+ client.delete_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_training_pipeline_rest_required_fields(
+ request_type=pipeline_service.DeleteTrainingPipelineRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_training_pipeline._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_training_pipeline._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_training_pipeline(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_training_pipeline_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_training_pipeline._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_training_pipeline_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_training_pipeline(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_training_pipeline_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_training_pipeline(
+ pipeline_service.DeleteTrainingPipelineRequest(),
+ name="name_value",
+ )
+
+
+def test_cancel_training_pipeline_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_training_pipeline
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_training_pipeline
+ ] = mock_rpc
+
+ request = {}
+ client.cancel_training_pipeline(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_training_pipeline(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_cancel_training_pipeline_rest_required_fields(
+ request_type=pipeline_service.CancelTrainingPipelineRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_training_pipeline._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_training_pipeline._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_training_pipeline(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_cancel_training_pipeline_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.cancel_training_pipeline._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_cancel_training_pipeline_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.cancel_training_pipeline(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}:cancel"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_cancel_training_pipeline_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_training_pipeline(
+ pipeline_service.CancelTrainingPipelineRequest(),
+ name="name_value",
+ )
+
+
+def test_create_pipeline_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_pipeline_job in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_pipeline_job
+ ] = mock_rpc
+
+ request = {}
+ client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_pipeline_job_rest_required_fields(
+ request_type=pipeline_service.CreatePipelineJobRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_pipeline_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_pipeline_job._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("pipeline_job_id",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_pipeline_job.PipelineJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_pipeline_job.PipelineJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_pipeline_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_pipeline_job_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_pipeline_job._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(("pipelineJobId",))
+ & set(
+ (
+ "parent",
+ "pipelineJob",
+ )
+ )
+ )
+
+
+def test_create_pipeline_job_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_pipeline_job.PipelineJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
+ pipeline_job_id="pipeline_job_id_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = gca_pipeline_job.PipelineJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_pipeline_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/pipelineJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_pipeline_job_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_pipeline_job(
+ pipeline_service.CreatePipelineJobRequest(),
+ parent="parent_value",
+ pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+
+def test_get_pipeline_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_pipeline_job in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_pipeline_job
+ ] = mock_rpc
+
+ request = {}
+ client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_pipeline_job_rest_required_fields(
+ request_type=pipeline_service.GetPipelineJobRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_pipeline_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_pipeline_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_job.PipelineJob()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = pipeline_job.PipelineJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_pipeline_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_pipeline_job_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_pipeline_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_pipeline_job_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_job.PipelineJob()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/pipelineJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = pipeline_job.PipelineJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_pipeline_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_pipeline_job_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_pipeline_job(
+ pipeline_service.GetPipelineJobRequest(),
+ name="name_value",
+ )
+
+
+def test_list_pipeline_jobs_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_pipeline_jobs in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_pipeline_jobs
+ ] = mock_rpc
+
+ request = {}
+ client.list_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_pipeline_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_pipeline_jobs_rest_required_fields(
+ request_type=pipeline_service.ListPipelineJobsRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_pipeline_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_pipeline_jobs._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "order_by",
+ "page_size",
+ "page_token",
+ "read_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_service.ListPipelineJobsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = pipeline_service.ListPipelineJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_pipeline_jobs(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_pipeline_jobs_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_pipeline_jobs._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "orderBy",
+ "pageSize",
+ "pageToken",
+ "readMask",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_pipeline_jobs_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_service.ListPipelineJobsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = pipeline_service.ListPipelineJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_pipeline_jobs(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/pipelineJobs"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_pipeline_jobs_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_pipeline_jobs(
+ pipeline_service.ListPipelineJobsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_pipeline_jobs_rest_pager(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[],
+ next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ pipeline_service.ListPipelineJobsResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_pipeline_jobs(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, pipeline_job.PipelineJob) for i in results)
+
+ pages = list(client.list_pipeline_jobs(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_pipeline_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_pipeline_job in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_pipeline_job
+ ] = mock_rpc
+
+ request = {}
+ client.delete_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_pipeline_job_rest_required_fields(
+ request_type=pipeline_service.DeletePipelineJobRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_pipeline_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_pipeline_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_pipeline_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_pipeline_job_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_pipeline_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_pipeline_job_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/pipelineJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_pipeline_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_pipeline_job_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_pipeline_job(
+ pipeline_service.DeletePipelineJobRequest(),
+ name="name_value",
+ )
+
+
+def test_batch_delete_pipeline_jobs_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.batch_delete_pipeline_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.batch_delete_pipeline_jobs
+ ] = mock_rpc
+
+ request = {}
+ client.batch_delete_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.batch_delete_pipeline_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_batch_delete_pipeline_jobs_rest_required_fields(
+ request_type=pipeline_service.BatchDeletePipelineJobsRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request_init["names"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).batch_delete_pipeline_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["names"] = "names_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).batch_delete_pipeline_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "names" in jsonified_request
+ assert jsonified_request["names"] == "names_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.batch_delete_pipeline_jobs(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_batch_delete_pipeline_jobs_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.batch_delete_pipeline_jobs._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "names",
+ )
+ )
+ )
+
+
+def test_batch_delete_pipeline_jobs_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ names=["names_value"],
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.batch_delete_pipeline_jobs(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/pipelineJobs:batchDelete"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_batch_delete_pipeline_jobs_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.batch_delete_pipeline_jobs(
+ pipeline_service.BatchDeletePipelineJobsRequest(),
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+
+def test_cancel_pipeline_job_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.cancel_pipeline_job in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.cancel_pipeline_job
+ ] = mock_rpc
+
+ request = {}
+ client.cancel_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.cancel_pipeline_job(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_cancel_pipeline_job_rest_required_fields(
+ request_type=pipeline_service.CancelPipelineJobRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_pipeline_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).cancel_pipeline_job._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_pipeline_job(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_cancel_pipeline_job_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.cancel_pipeline_job._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_cancel_pipeline_job_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/pipelineJobs/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.cancel_pipeline_job(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}:cancel"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_cancel_pipeline_job_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_pipeline_job(
+ pipeline_service.CancelPipelineJobRequest(),
+ name="name_value",
+ )
+
+
+def test_batch_cancel_pipeline_jobs_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.batch_cancel_pipeline_jobs
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.batch_cancel_pipeline_jobs
+ ] = mock_rpc
+
+ request = {}
+ client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_batch_cancel_pipeline_jobs_rest_required_fields(
+ request_type=pipeline_service.BatchCancelPipelineJobsRequest,
+):
+ transport_class = transports.PipelineServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request_init["names"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).batch_cancel_pipeline_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["names"] = "names_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).batch_cancel_pipeline_jobs._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "names" in jsonified_request
+ assert jsonified_request["names"] == "names_value"
+
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.batch_cancel_pipeline_jobs(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_batch_cancel_pipeline_jobs_rest_unset_required_fields():
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.batch_cancel_pipeline_jobs._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "names",
+ )
+ )
+ )
+
+
+def test_batch_cancel_pipeline_jobs_rest_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ names=["names_value"],
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.batch_cancel_pipeline_jobs(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/pipelineJobs:batchCancel"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_batch_cancel_pipeline_jobs_rest_flattened_error(transport: str = "rest"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.batch_cancel_pipeline_jobs(
+ pipeline_service.BatchCancelPipelineJobsRequest(),
+ parent="parent_value",
+ names=["names_value"],
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.PipelineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.PipelineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PipelineServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.PipelineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = PipelineServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = PipelineServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.PipelineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PipelineServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.PipelineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = PipelineServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.PipelineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.PipelineServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PipelineServiceGrpcTransport,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ transports.PipelineServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = PipelineServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_training_pipeline_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = gca_training_pipeline.TrainingPipeline()
+ client.create_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CreateTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_training_pipeline_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = training_pipeline.TrainingPipeline()
+ client.get_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.GetTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_training_pipelines_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ call.return_value = pipeline_service.ListTrainingPipelinesResponse()
+ client.list_training_pipelines(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.ListTrainingPipelinesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_training_pipeline_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.DeleteTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_training_pipeline_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CancelTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_pipeline_job_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ call.return_value = gca_pipeline_job.PipelineJob()
+ client.create_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CreatePipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_pipeline_job_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ call.return_value = pipeline_job.PipelineJob()
+ client.get_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.GetPipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_pipeline_jobs_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value = pipeline_service.ListPipelineJobsResponse()
+ client.list_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.ListPipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_pipeline_job_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.DeletePipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_batch_delete_pipeline_jobs_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.batch_delete_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.BatchDeletePipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_pipeline_job_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ call.return_value = None
+ client.cancel_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CancelPipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_batch_cancel_pipeline_jobs_empty_call_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.batch_cancel_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.BatchCancelPipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = PipelineServiceAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_training_pipeline_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+ )
+ await client.create_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CreateTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_training_pipeline_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+ )
+ await client.get_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.GetTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_training_pipelines_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListTrainingPipelinesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_training_pipelines(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.ListTrainingPipelinesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.DeleteTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CancelTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_pipeline_job_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+ )
+ await client.create_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CreatePipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_pipeline_job_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+ )
+ await client.get_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.GetPipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListPipelineJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.ListPipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.DeletePipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_batch_delete_pipeline_jobs_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.batch_delete_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.BatchDeletePipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CancelPipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_batch_cancel_pipeline_jobs_empty_call_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.batch_cancel_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.BatchCancelPipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = PipelineServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_create_training_pipeline_rest_bad_request(
+ request_type=pipeline_service.CreateTrainingPipelineRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_training_pipeline(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CreateTrainingPipelineRequest,
+ dict,
+ ],
+)
+def test_create_training_pipeline_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["training_pipeline"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "input_data_config": {
+ "fraction_split": {
+ "training_fraction": 0.1809,
+ "validation_fraction": 0.2016,
+ "test_fraction": 0.13970000000000002,
+ },
+ "filter_split": {
+ "training_filter": "training_filter_value",
+ "validation_filter": "validation_filter_value",
+ "test_filter": "test_filter_value",
+ },
+ "predefined_split": {"key": "key_value"},
+ "timestamp_split": {
+ "training_fraction": 0.1809,
+ "validation_fraction": 0.2016,
+ "test_fraction": 0.13970000000000002,
+ "key": "key_value",
+ },
+ "stratified_split": {
+ "training_fraction": 0.1809,
+ "validation_fraction": 0.2016,
+ "test_fraction": 0.13970000000000002,
+ "key": "key_value",
+ },
+ "gcs_destination": {"output_uri_prefix": "output_uri_prefix_value"},
+ "bigquery_destination": {"output_uri": "output_uri_value"},
+ "dataset_id": "dataset_id_value",
+ "annotations_filter": "annotations_filter_value",
+ "annotation_schema_uri": "annotation_schema_uri_value",
+ "saved_query_id": "saved_query_id_value",
+ "persist_ml_use_assignment": True,
+ },
+ "training_task_definition": "training_task_definition_value",
+ "training_task_inputs": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "training_task_metadata": {},
+ "model_to_upload": {
+ "name": "name_value",
+ "version_id": "version_id_value",
+ "version_aliases": ["version_aliases_value1", "version_aliases_value2"],
+ "version_create_time": {"seconds": 751, "nanos": 543},
+ "version_update_time": {},
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "version_description": "version_description_value",
+ "predict_schemata": {
+ "instance_schema_uri": "instance_schema_uri_value",
+ "parameters_schema_uri": "parameters_schema_uri_value",
+ "prediction_schema_uri": "prediction_schema_uri_value",
+ },
+ "metadata_schema_uri": "metadata_schema_uri_value",
+ "metadata": {},
+ "supported_export_formats": [
+ {"id": "id_value", "exportable_contents": [1]}
+ ],
+ "training_pipeline": "training_pipeline_value",
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [{"name": "name_value", "value": "value_value"}],
+ "ports": [{"container_port": 1511}],
+ "predict_route": "predict_route_value",
+ "health_route": "health_route_value",
+ "grpc_ports": {},
+ "deployment_timeout": {"seconds": 751, "nanos": 543},
+ "shared_memory_size_mb": 2231,
+ "startup_probe": {
+ "exec_": {"command": ["command_value1", "command_value2"]},
+ "period_seconds": 1489,
+ "timeout_seconds": 1621,
+ },
+ "health_probe": {},
+ },
+ "artifact_uri": "artifact_uri_value",
+ "supported_deployment_resources_types": [1],
+ "supported_input_storage_formats": [
+ "supported_input_storage_formats_value1",
+ "supported_input_storage_formats_value2",
+ ],
+ "supported_output_storage_formats": [
+ "supported_output_storage_formats_value1",
+ "supported_output_storage_formats_value2",
+ ],
+ "create_time": {},
+ "update_time": {},
+ "deployed_models": [
+ {
+ "endpoint": "endpoint_value",
+ "deployed_model_id": "deployed_model_id_value",
+ }
+ ],
+ "explanation_spec": {
+ "parameters": {
+ "sampled_shapley_attribution": {"path_count": 1077},
+ "integrated_gradients_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {
+ "noise_sigma": 0.11660000000000001,
+ "feature_noise_sigma": {
+ "noise_sigma": [{"name": "name_value", "sigma": 0.529}]
+ },
+ "noisy_sample_count": 1947,
+ },
+ "blur_baseline_config": {"max_blur_sigma": 0.1482},
+ },
+ "xrai_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {},
+ "blur_baseline_config": {},
+ },
+ "examples": {
+ "example_gcs_source": {
+ "data_format": 1,
+ "gcs_source": {"uris": ["uris_value1", "uris_value2"]},
+ },
+ "nearest_neighbor_search_config": {},
+ "presets": {"query": 1, "modality": 1},
+ "gcs_source": {},
+ "neighbor_count": 1494,
+ },
+ "top_k": 541,
+ "output_indices": {},
+ },
+ "metadata": {
+ "inputs": {},
+ "outputs": {},
+ "feature_attributions_schema_uri": "feature_attributions_schema_uri_value",
+ "latent_space_source": "latent_space_source_value",
+ },
+ },
+ "etag": "etag_value",
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "model_source_info": {"source_type": 1, "copy": True},
+ "original_model_info": {"model": "model_value"},
+ "metadata_artifact": "metadata_artifact_value",
+ "base_model_source": {
+ "model_garden_source": {"public_model_name": "public_model_name_value"},
+ "genie_source": {"base_model_uri": "base_model_uri_value"},
+ },
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ },
+ "model_id": "model_id_value",
+ "parent_model": "parent_model_value",
+ "state": 1,
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "labels": {},
+ "encryption_spec": {},
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = pipeline_service.CreateTrainingPipelineRequest.meta.fields[
+ "training_pipeline"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["training_pipeline"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["training_pipeline"][field])):
+ del request_init["training_pipeline"][field][i][subfield]
+ else:
+ del request_init["training_pipeline"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_training_pipeline.TrainingPipeline.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_training_pipeline(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_training_pipeline.TrainingPipeline)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.training_task_definition == "training_task_definition_value"
+ assert response.model_id == "model_id_value"
+ assert response.parent_model == "parent_model_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_training_pipeline_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_create_training_pipeline"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_create_training_pipeline"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.CreateTrainingPipelineRequest.pb(
+ pipeline_service.CreateTrainingPipelineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_training_pipeline.TrainingPipeline.to_json(
+ gca_training_pipeline.TrainingPipeline()
+ )
+ req.return_value.content = return_value
+
+ request = pipeline_service.CreateTrainingPipelineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_training_pipeline.TrainingPipeline()
+
+ client.create_training_pipeline(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_training_pipeline_rest_bad_request(
+ request_type=pipeline_service.GetTrainingPipelineRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_training_pipeline(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.GetTrainingPipelineRequest,
+ dict,
+ ],
+)
+def test_get_training_pipeline_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = training_pipeline.TrainingPipeline.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_training_pipeline(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, training_pipeline.TrainingPipeline)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.training_task_definition == "training_task_definition_value"
+ assert response.model_id == "model_id_value"
+ assert response.parent_model == "parent_model_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_training_pipeline_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_get_training_pipeline"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_get_training_pipeline"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.GetTrainingPipelineRequest.pb(
+ pipeline_service.GetTrainingPipelineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = training_pipeline.TrainingPipeline.to_json(
+ training_pipeline.TrainingPipeline()
+ )
+ req.return_value.content = return_value
+
+ request = pipeline_service.GetTrainingPipelineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = training_pipeline.TrainingPipeline()
+
+ client.get_training_pipeline(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_training_pipelines_rest_bad_request(
+ request_type=pipeline_service.ListTrainingPipelinesRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_training_pipelines(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.ListTrainingPipelinesRequest,
+ dict,
+ ],
+)
+def test_list_training_pipelines_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_service.ListTrainingPipelinesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = pipeline_service.ListTrainingPipelinesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_training_pipelines(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTrainingPipelinesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_training_pipelines_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_list_training_pipelines"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_list_training_pipelines"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.ListTrainingPipelinesRequest.pb(
+ pipeline_service.ListTrainingPipelinesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = pipeline_service.ListTrainingPipelinesResponse.to_json(
+ pipeline_service.ListTrainingPipelinesResponse()
+ )
+ req.return_value.content = return_value
+
+ request = pipeline_service.ListTrainingPipelinesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = pipeline_service.ListTrainingPipelinesResponse()
+
+ client.list_training_pipelines(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_training_pipeline_rest_bad_request(
+ request_type=pipeline_service.DeleteTrainingPipelineRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_training_pipeline(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.DeleteTrainingPipelineRequest,
+ dict,
+ ],
+)
+def test_delete_training_pipeline_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_training_pipeline(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_training_pipeline_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_delete_training_pipeline"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_delete_training_pipeline"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.DeleteTrainingPipelineRequest.pb(
+ pipeline_service.DeleteTrainingPipelineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = pipeline_service.DeleteTrainingPipelineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_training_pipeline(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_cancel_training_pipeline_rest_bad_request(
+ request_type=pipeline_service.CancelTrainingPipelineRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_training_pipeline(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CancelTrainingPipelineRequest,
+ dict,
+ ],
+)
+def test_cancel_training_pipeline_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.cancel_training_pipeline(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_cancel_training_pipeline_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_cancel_training_pipeline"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = pipeline_service.CancelTrainingPipelineRequest.pb(
+ pipeline_service.CancelTrainingPipelineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = pipeline_service.CancelTrainingPipelineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.cancel_training_pipeline(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_create_pipeline_job_rest_bad_request(
+ request_type=pipeline_service.CreatePipelineJobRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_pipeline_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CreatePipelineJobRequest,
+ dict,
+ ],
+)
+def test_create_pipeline_job_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["pipeline_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "pipeline_spec": {"fields": {}},
+ "state": 1,
+ "job_detail": {
+ "pipeline_context": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "parent_contexts": ["parent_contexts_value1", "parent_contexts_value2"],
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "pipeline_run_context": {},
+ "task_details": [
+ {
+ "task_id": 735,
+ "parent_task_id": 1480,
+ "task_name": "task_name_value",
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "executor_detail": {
+ "container_detail": {
+ "main_job": "main_job_value",
+ "pre_caching_check_job": "pre_caching_check_job_value",
+ "failed_main_jobs": [
+ "failed_main_jobs_value1",
+ "failed_main_jobs_value2",
+ ],
+ "failed_pre_caching_check_jobs": [
+ "failed_pre_caching_check_jobs_value1",
+ "failed_pre_caching_check_jobs_value2",
+ ],
+ },
+ "custom_job_detail": {
+ "job": "job_value",
+ "failed_jobs": ["failed_jobs_value1", "failed_jobs_value2"],
+ },
+ },
+ "state": 1,
+ "execution": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "state": 1,
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "pipeline_task_status": [
+ {"update_time": {}, "state": 1, "error": {}}
+ ],
+ "inputs": {},
+ "outputs": {},
+ }
+ ],
+ },
+ "error": {},
+ "labels": {},
+ "runtime_config": {
+ "parameters": {},
+ "gcs_output_directory": "gcs_output_directory_value",
+ "parameter_values": {},
+ "failure_policy": 1,
+ "input_artifacts": {},
+ "default_runtime": {
+ "persistent_resource_runtime_detail": {
+ "persistent_resource_name": "persistent_resource_name_value",
+ "task_resource_unavailable_wait_time_ms": 4030,
+ "task_resource_unavailable_timeout_behavior": 1,
+ }
+ },
+ },
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "psc_interface_config": {"network_attachment": "network_attachment_value"},
+ "template_uri": "template_uri_value",
+ "template_metadata": {"version": "version_value"},
+ "schedule_name": "schedule_name_value",
+ "preflight_validations": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ "original_pipeline_job_id": 2512,
+ "pipeline_task_rerun_configs": [
+ {
+ "task_id": 735,
+ "task_name": "task_name_value",
+ "inputs": {"artifacts": {}, "parameter_values": {}},
+ "skip_task": True,
+ "skip_downstream_tasks": True,
+ }
+ ],
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = pipeline_service.CreatePipelineJobRequest.meta.fields["pipeline_job"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["pipeline_job"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["pipeline_job"][field])):
+ del request_init["pipeline_job"][field][i][subfield]
+ else:
+ del request_init["pipeline_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_pipeline_job.PipelineJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_pipeline_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_pipeline_job.PipelineJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == "service_account_value"
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.template_uri == "template_uri_value"
+ assert response.schedule_name == "schedule_name_value"
+ assert response.preflight_validations is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+ assert response.original_pipeline_job_id == 2512
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_pipeline_job_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_create_pipeline_job"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_create_pipeline_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.CreatePipelineJobRequest.pb(
+ pipeline_service.CreatePipelineJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_pipeline_job.PipelineJob.to_json(
+ gca_pipeline_job.PipelineJob()
+ )
+ req.return_value.content = return_value
+
+ request = pipeline_service.CreatePipelineJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_pipeline_job.PipelineJob()
+
+ client.create_pipeline_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_pipeline_job_rest_bad_request(
+ request_type=pipeline_service.GetPipelineJobRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_pipeline_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.GetPipelineJobRequest,
+ dict,
+ ],
+)
+def test_get_pipeline_job_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = pipeline_job.PipelineJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_pipeline_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pipeline_job.PipelineJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == "service_account_value"
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.template_uri == "template_uri_value"
+ assert response.schedule_name == "schedule_name_value"
+ assert response.preflight_validations is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+ assert response.original_pipeline_job_id == 2512
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_pipeline_job_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_get_pipeline_job"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_get_pipeline_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.GetPipelineJobRequest.pb(
+ pipeline_service.GetPipelineJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = pipeline_job.PipelineJob.to_json(pipeline_job.PipelineJob())
+ req.return_value.content = return_value
+
+ request = pipeline_service.GetPipelineJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = pipeline_job.PipelineJob()
+
+ client.get_pipeline_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_pipeline_jobs_rest_bad_request(
+ request_type=pipeline_service.ListPipelineJobsRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_pipeline_jobs(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.ListPipelineJobsRequest,
+ dict,
+ ],
+)
+def test_list_pipeline_jobs_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_service.ListPipelineJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = pipeline_service.ListPipelineJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_pipeline_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListPipelineJobsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_pipeline_jobs_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_list_pipeline_jobs"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_list_pipeline_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.ListPipelineJobsRequest.pb(
+ pipeline_service.ListPipelineJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = pipeline_service.ListPipelineJobsResponse.to_json(
+ pipeline_service.ListPipelineJobsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = pipeline_service.ListPipelineJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = pipeline_service.ListPipelineJobsResponse()
+
+ client.list_pipeline_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_pipeline_job_rest_bad_request(
+ request_type=pipeline_service.DeletePipelineJobRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_pipeline_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.DeletePipelineJobRequest,
+ dict,
+ ],
+)
+def test_delete_pipeline_job_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_pipeline_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_pipeline_job_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_delete_pipeline_job"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_delete_pipeline_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.DeletePipelineJobRequest.pb(
+ pipeline_service.DeletePipelineJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = pipeline_service.DeletePipelineJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_pipeline_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_batch_delete_pipeline_jobs_rest_bad_request(
+ request_type=pipeline_service.BatchDeletePipelineJobsRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.batch_delete_pipeline_jobs(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.BatchDeletePipelineJobsRequest,
+ dict,
+ ],
+)
+def test_batch_delete_pipeline_jobs_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.batch_delete_pipeline_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_batch_delete_pipeline_jobs_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_batch_delete_pipeline_jobs"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_batch_delete_pipeline_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.BatchDeletePipelineJobsRequest.pb(
+ pipeline_service.BatchDeletePipelineJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = pipeline_service.BatchDeletePipelineJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.batch_delete_pipeline_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_cancel_pipeline_job_rest_bad_request(
+ request_type=pipeline_service.CancelPipelineJobRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_pipeline_job(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CancelPipelineJobRequest,
+ dict,
+ ],
+)
+def test_cancel_pipeline_job_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.cancel_pipeline_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_cancel_pipeline_job_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_cancel_pipeline_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = pipeline_service.CancelPipelineJobRequest.pb(
+ pipeline_service.CancelPipelineJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = pipeline_service.CancelPipelineJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.cancel_pipeline_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_batch_cancel_pipeline_jobs_rest_bad_request(
+ request_type=pipeline_service.BatchCancelPipelineJobsRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.batch_cancel_pipeline_jobs(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.BatchCancelPipelineJobsRequest,
+ dict,
+ ],
+)
+def test_batch_cancel_pipeline_jobs_rest_call_success(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_batch_cancel_pipeline_jobs_rest_interceptors(null_interceptor):
+ transport = transports.PipelineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "post_batch_cancel_pipeline_jobs"
+ ) as post, mock.patch.object(
+ transports.PipelineServiceRestInterceptor, "pre_batch_cancel_pipeline_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.BatchCancelPipelineJobsRequest.pb(
+ pipeline_service.BatchCancelPipelineJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = pipeline_service.BatchCancelPipelineJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.batch_cancel_pipeline_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_training_pipeline_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ client.create_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CreateTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_training_pipeline_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ client.get_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.GetTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_training_pipelines_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ client.list_training_pipelines(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.ListTrainingPipelinesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_training_pipeline_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ client.delete_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.DeleteTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_training_pipeline_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ client.cancel_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CancelTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_pipeline_job_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ client.create_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CreatePipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_pipeline_job_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ client.get_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.GetPipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_pipeline_jobs_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ client.list_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.ListPipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_pipeline_job_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ client.delete_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.DeletePipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_batch_delete_pipeline_jobs_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ client.batch_delete_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.BatchDeletePipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_cancel_pipeline_job_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ client.cancel_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CancelPipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_batch_cancel_pipeline_jobs_empty_call_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ client.batch_cancel_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.BatchCancelPipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_pipeline_service_rest_lro_client():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AbstractOperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = PipelineServiceAsyncClient.get_transport_class("rest_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_rest_asyncio_bad_request(
+ request_type=pipeline_service.CreateTrainingPipelineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_training_pipeline(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CreateTrainingPipelineRequest,
+ dict,
+ ],
+)
+async def test_create_training_pipeline_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["training_pipeline"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "input_data_config": {
+ "fraction_split": {
+ "training_fraction": 0.1809,
+ "validation_fraction": 0.2016,
+ "test_fraction": 0.13970000000000002,
+ },
+ "filter_split": {
+ "training_filter": "training_filter_value",
+ "validation_filter": "validation_filter_value",
+ "test_filter": "test_filter_value",
+ },
+ "predefined_split": {"key": "key_value"},
+ "timestamp_split": {
+ "training_fraction": 0.1809,
+ "validation_fraction": 0.2016,
+ "test_fraction": 0.13970000000000002,
+ "key": "key_value",
+ },
+ "stratified_split": {
+ "training_fraction": 0.1809,
+ "validation_fraction": 0.2016,
+ "test_fraction": 0.13970000000000002,
+ "key": "key_value",
+ },
+ "gcs_destination": {"output_uri_prefix": "output_uri_prefix_value"},
+ "bigquery_destination": {"output_uri": "output_uri_value"},
+ "dataset_id": "dataset_id_value",
+ "annotations_filter": "annotations_filter_value",
+ "annotation_schema_uri": "annotation_schema_uri_value",
+ "saved_query_id": "saved_query_id_value",
+ "persist_ml_use_assignment": True,
+ },
+ "training_task_definition": "training_task_definition_value",
+ "training_task_inputs": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {"fields": {}},
+ "list_value": {"values": {}},
+ },
+ "training_task_metadata": {},
+ "model_to_upload": {
+ "name": "name_value",
+ "version_id": "version_id_value",
+ "version_aliases": ["version_aliases_value1", "version_aliases_value2"],
+ "version_create_time": {"seconds": 751, "nanos": 543},
+ "version_update_time": {},
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "version_description": "version_description_value",
+ "predict_schemata": {
+ "instance_schema_uri": "instance_schema_uri_value",
+ "parameters_schema_uri": "parameters_schema_uri_value",
+ "prediction_schema_uri": "prediction_schema_uri_value",
+ },
+ "metadata_schema_uri": "metadata_schema_uri_value",
+ "metadata": {},
+ "supported_export_formats": [
+ {"id": "id_value", "exportable_contents": [1]}
+ ],
+ "training_pipeline": "training_pipeline_value",
+ "container_spec": {
+ "image_uri": "image_uri_value",
+ "command": ["command_value1", "command_value2"],
+ "args": ["args_value1", "args_value2"],
+ "env": [{"name": "name_value", "value": "value_value"}],
+ "ports": [{"container_port": 1511}],
+ "predict_route": "predict_route_value",
+ "health_route": "health_route_value",
+ "grpc_ports": {},
+ "deployment_timeout": {"seconds": 751, "nanos": 543},
+ "shared_memory_size_mb": 2231,
+ "startup_probe": {
+ "exec_": {"command": ["command_value1", "command_value2"]},
+ "period_seconds": 1489,
+ "timeout_seconds": 1621,
+ },
+ "health_probe": {},
+ },
+ "artifact_uri": "artifact_uri_value",
+ "supported_deployment_resources_types": [1],
+ "supported_input_storage_formats": [
+ "supported_input_storage_formats_value1",
+ "supported_input_storage_formats_value2",
+ ],
+ "supported_output_storage_formats": [
+ "supported_output_storage_formats_value1",
+ "supported_output_storage_formats_value2",
+ ],
+ "create_time": {},
+ "update_time": {},
+ "deployed_models": [
+ {
+ "endpoint": "endpoint_value",
+ "deployed_model_id": "deployed_model_id_value",
+ }
+ ],
+ "explanation_spec": {
+ "parameters": {
+ "sampled_shapley_attribution": {"path_count": 1077},
+ "integrated_gradients_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {
+ "noise_sigma": 0.11660000000000001,
+ "feature_noise_sigma": {
+ "noise_sigma": [{"name": "name_value", "sigma": 0.529}]
+ },
+ "noisy_sample_count": 1947,
+ },
+ "blur_baseline_config": {"max_blur_sigma": 0.1482},
+ },
+ "xrai_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {},
+ "blur_baseline_config": {},
+ },
+ "examples": {
+ "example_gcs_source": {
+ "data_format": 1,
+ "gcs_source": {"uris": ["uris_value1", "uris_value2"]},
+ },
+ "nearest_neighbor_search_config": {},
+ "presets": {"query": 1, "modality": 1},
+ "gcs_source": {},
+ "neighbor_count": 1494,
+ },
+ "top_k": 541,
+ "output_indices": {},
+ },
+ "metadata": {
+ "inputs": {},
+ "outputs": {},
+ "feature_attributions_schema_uri": "feature_attributions_schema_uri_value",
+ "latent_space_source": "latent_space_source_value",
+ },
+ },
+ "etag": "etag_value",
+ "labels": {},
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "model_source_info": {"source_type": 1, "copy": True},
+ "original_model_info": {"model": "model_value"},
+ "metadata_artifact": "metadata_artifact_value",
+ "base_model_source": {
+ "model_garden_source": {"public_model_name": "public_model_name_value"},
+ "genie_source": {"base_model_uri": "base_model_uri_value"},
+ },
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ },
+ "model_id": "model_id_value",
+ "parent_model": "parent_model_value",
+ "state": 1,
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "labels": {},
+ "encryption_spec": {},
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = pipeline_service.CreateTrainingPipelineRequest.meta.fields[
+ "training_pipeline"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["training_pipeline"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["training_pipeline"][field])):
+ del request_init["training_pipeline"][field][i][subfield]
+ else:
+ del request_init["training_pipeline"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_training_pipeline.TrainingPipeline.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_training_pipeline(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_training_pipeline.TrainingPipeline)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.training_task_definition == "training_task_definition_value"
+ assert response.model_id == "model_id_value"
+ assert response.parent_model == "parent_model_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_training_pipeline_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "post_create_training_pipeline"
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_create_training_pipeline"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.CreateTrainingPipelineRequest.pb(
+ pipeline_service.CreateTrainingPipelineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_training_pipeline.TrainingPipeline.to_json(
+ gca_training_pipeline.TrainingPipeline()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.CreateTrainingPipelineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_training_pipeline.TrainingPipeline()
+
+ await client.create_training_pipeline(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_rest_asyncio_bad_request(
+ request_type=pipeline_service.GetTrainingPipelineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_training_pipeline(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.GetTrainingPipelineRequest,
+ dict,
+ ],
+)
+async def test_get_training_pipeline_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = training_pipeline.TrainingPipeline(
+ name="name_value",
+ display_name="display_name_value",
+ training_task_definition="training_task_definition_value",
+ model_id="model_id_value",
+ parent_model="parent_model_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = training_pipeline.TrainingPipeline.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_training_pipeline(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, training_pipeline.TrainingPipeline)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.training_task_definition == "training_task_definition_value"
+ assert response.model_id == "model_id_value"
+ assert response.parent_model == "parent_model_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_training_pipeline_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "post_get_training_pipeline"
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_get_training_pipeline"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.GetTrainingPipelineRequest.pb(
+ pipeline_service.GetTrainingPipelineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = training_pipeline.TrainingPipeline.to_json(
+ training_pipeline.TrainingPipeline()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.GetTrainingPipelineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = training_pipeline.TrainingPipeline()
+
+ await client.get_training_pipeline(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_rest_asyncio_bad_request(
+ request_type=pipeline_service.ListTrainingPipelinesRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_training_pipelines(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.ListTrainingPipelinesRequest,
+ dict,
+ ],
+)
+async def test_list_training_pipelines_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_service.ListTrainingPipelinesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = pipeline_service.ListTrainingPipelinesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_training_pipelines(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_training_pipelines_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "post_list_training_pipelines"
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_list_training_pipelines"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.ListTrainingPipelinesRequest.pb(
+ pipeline_service.ListTrainingPipelinesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = pipeline_service.ListTrainingPipelinesResponse.to_json(
+ pipeline_service.ListTrainingPipelinesResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.ListTrainingPipelinesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = pipeline_service.ListTrainingPipelinesResponse()
+
+ await client.list_training_pipelines(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_rest_asyncio_bad_request(
+ request_type=pipeline_service.DeleteTrainingPipelineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_training_pipeline(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.DeleteTrainingPipelineRequest,
+ dict,
+ ],
+)
+async def test_delete_training_pipeline_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_training_pipeline(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_training_pipeline_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "post_delete_training_pipeline"
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_delete_training_pipeline"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.DeleteTrainingPipelineRequest.pb(
+ pipeline_service.DeleteTrainingPipelineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.DeleteTrainingPipelineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_training_pipeline(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_rest_asyncio_bad_request(
+ request_type=pipeline_service.CancelTrainingPipelineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_training_pipeline(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CancelTrainingPipelineRequest,
+ dict,
+ ],
+)
+async def test_cancel_training_pipeline_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/trainingPipelines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.cancel_training_pipeline(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_cancel_training_pipeline_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_cancel_training_pipeline"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = pipeline_service.CancelTrainingPipelineRequest.pb(
+ pipeline_service.CancelTrainingPipelineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = pipeline_service.CancelTrainingPipelineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.cancel_training_pipeline(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_rest_asyncio_bad_request(
+ request_type=pipeline_service.CreatePipelineJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_pipeline_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CreatePipelineJobRequest,
+ dict,
+ ],
+)
+async def test_create_pipeline_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["pipeline_job"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "pipeline_spec": {"fields": {}},
+ "state": 1,
+ "job_detail": {
+ "pipeline_context": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "parent_contexts": ["parent_contexts_value1", "parent_contexts_value2"],
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "pipeline_run_context": {},
+ "task_details": [
+ {
+ "task_id": 735,
+ "parent_task_id": 1480,
+ "task_name": "task_name_value",
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "executor_detail": {
+ "container_detail": {
+ "main_job": "main_job_value",
+ "pre_caching_check_job": "pre_caching_check_job_value",
+ "failed_main_jobs": [
+ "failed_main_jobs_value1",
+ "failed_main_jobs_value2",
+ ],
+ "failed_pre_caching_check_jobs": [
+ "failed_pre_caching_check_jobs_value1",
+ "failed_pre_caching_check_jobs_value2",
+ ],
+ },
+ "custom_job_detail": {
+ "job": "job_value",
+ "failed_jobs": ["failed_jobs_value1", "failed_jobs_value2"],
+ },
+ },
+ "state": 1,
+ "execution": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "state": 1,
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "pipeline_task_status": [
+ {"update_time": {}, "state": 1, "error": {}}
+ ],
+ "inputs": {},
+ "outputs": {},
+ }
+ ],
+ },
+ "error": {},
+ "labels": {},
+ "runtime_config": {
+ "parameters": {},
+ "gcs_output_directory": "gcs_output_directory_value",
+ "parameter_values": {},
+ "failure_policy": 1,
+ "input_artifacts": {},
+ "default_runtime": {
+ "persistent_resource_runtime_detail": {
+ "persistent_resource_name": "persistent_resource_name_value",
+ "task_resource_unavailable_wait_time_ms": 4030,
+ "task_resource_unavailable_timeout_behavior": 1,
+ }
+ },
+ },
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "psc_interface_config": {"network_attachment": "network_attachment_value"},
+ "template_uri": "template_uri_value",
+ "template_metadata": {"version": "version_value"},
+ "schedule_name": "schedule_name_value",
+ "preflight_validations": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ "original_pipeline_job_id": 2512,
+ "pipeline_task_rerun_configs": [
+ {
+ "task_id": 735,
+ "task_name": "task_name_value",
+ "inputs": {"artifacts": {}, "parameter_values": {}},
+ "skip_task": True,
+ "skip_downstream_tasks": True,
+ }
+ ],
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = pipeline_service.CreatePipelineJobRequest.meta.fields["pipeline_job"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["pipeline_job"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["pipeline_job"][field])):
+ del request_init["pipeline_job"][field][i][subfield]
+ else:
+ del request_init["pipeline_job"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_pipeline_job.PipelineJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_pipeline_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_pipeline_job.PipelineJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == "service_account_value"
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.template_uri == "template_uri_value"
+ assert response.schedule_name == "schedule_name_value"
+ assert response.preflight_validations is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+ assert response.original_pipeline_job_id == 2512
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_pipeline_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "post_create_pipeline_job"
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_create_pipeline_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.CreatePipelineJobRequest.pb(
+ pipeline_service.CreatePipelineJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_pipeline_job.PipelineJob.to_json(
+ gca_pipeline_job.PipelineJob()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.CreatePipelineJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_pipeline_job.PipelineJob()
+
+ await client.create_pipeline_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_rest_asyncio_bad_request(
+ request_type=pipeline_service.GetPipelineJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_pipeline_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.GetPipelineJobRequest,
+ dict,
+ ],
+)
+async def test_get_pipeline_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ reserved_ip_ranges=["reserved_ip_ranges_value"],
+ template_uri="template_uri_value",
+ schedule_name="schedule_name_value",
+ preflight_validations=True,
+ satisfies_pzs=True,
+ satisfies_pzi=True,
+ original_pipeline_job_id=2512,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = pipeline_job.PipelineJob.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_pipeline_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pipeline_job.PipelineJob)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == "service_account_value"
+ assert response.network == "network_value"
+ assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"]
+ assert response.template_uri == "template_uri_value"
+ assert response.schedule_name == "schedule_name_value"
+ assert response.preflight_validations is True
+ assert response.satisfies_pzs is True
+ assert response.satisfies_pzi is True
+ assert response.original_pipeline_job_id == 2512
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_pipeline_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "post_get_pipeline_job"
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_get_pipeline_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.GetPipelineJobRequest.pb(
+ pipeline_service.GetPipelineJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = pipeline_job.PipelineJob.to_json(pipeline_job.PipelineJob())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.GetPipelineJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = pipeline_job.PipelineJob()
+
+ await client.get_pipeline_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_rest_asyncio_bad_request(
+ request_type=pipeline_service.ListPipelineJobsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_pipeline_jobs(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.ListPipelineJobsRequest,
+ dict,
+ ],
+)
+async def test_list_pipeline_jobs_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = pipeline_service.ListPipelineJobsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = pipeline_service.ListPipelineJobsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_pipeline_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListPipelineJobsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_pipeline_jobs_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "post_list_pipeline_jobs"
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_list_pipeline_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.ListPipelineJobsRequest.pb(
+ pipeline_service.ListPipelineJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = pipeline_service.ListPipelineJobsResponse.to_json(
+ pipeline_service.ListPipelineJobsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.ListPipelineJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = pipeline_service.ListPipelineJobsResponse()
+
+ await client.list_pipeline_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_rest_asyncio_bad_request(
+ request_type=pipeline_service.DeletePipelineJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_pipeline_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.DeletePipelineJobRequest,
+ dict,
+ ],
+)
+async def test_delete_pipeline_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_pipeline_job(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_pipeline_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "post_delete_pipeline_job"
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_delete_pipeline_job"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.DeletePipelineJobRequest.pb(
+ pipeline_service.DeletePipelineJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.DeletePipelineJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_pipeline_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_pipeline_jobs_rest_asyncio_bad_request(
+ request_type=pipeline_service.BatchDeletePipelineJobsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.batch_delete_pipeline_jobs(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.BatchDeletePipelineJobsRequest,
+ dict,
+ ],
+)
+async def test_batch_delete_pipeline_jobs_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.batch_delete_pipeline_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_batch_delete_pipeline_jobs_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor,
+ "post_batch_delete_pipeline_jobs",
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_batch_delete_pipeline_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.BatchDeletePipelineJobsRequest.pb(
+ pipeline_service.BatchDeletePipelineJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.BatchDeletePipelineJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.batch_delete_pipeline_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_rest_asyncio_bad_request(
+ request_type=pipeline_service.CancelPipelineJobRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_pipeline_job(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.CancelPipelineJobRequest,
+ dict,
+ ],
+)
+async def test_cancel_pipeline_job_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/pipelineJobs/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.cancel_pipeline_job(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_cancel_pipeline_job_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_cancel_pipeline_job"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = pipeline_service.CancelPipelineJobRequest.pb(
+ pipeline_service.CancelPipelineJobRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = pipeline_service.CancelPipelineJobRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.cancel_pipeline_job(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_batch_cancel_pipeline_jobs_rest_asyncio_bad_request(
+ request_type=pipeline_service.BatchCancelPipelineJobsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.batch_cancel_pipeline_jobs(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ pipeline_service.BatchCancelPipelineJobsRequest,
+ dict,
+ ],
+)
+async def test_batch_cancel_pipeline_jobs_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.batch_cancel_pipeline_jobs(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_batch_cancel_pipeline_jobs_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPipelineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPipelineServiceRestInterceptor(),
+ )
+ client = PipelineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor,
+ "post_batch_cancel_pipeline_jobs",
+ ) as post, mock.patch.object(
+ transports.AsyncPipelineServiceRestInterceptor, "pre_batch_cancel_pipeline_jobs"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = pipeline_service.BatchCancelPipelineJobsRequest.pb(
+ pipeline_service.BatchCancelPipelineJobsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = pipeline_service.BatchCancelPipelineJobsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.batch_cancel_pipeline_jobs(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_training_pipeline_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_training_pipeline), "__call__"
+ ) as call:
+ await client.create_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CreateTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_training_pipeline_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_training_pipeline), "__call__"
+ ) as call:
+ await client.get_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.GetTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_training_pipelines_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_training_pipelines), "__call__"
+ ) as call:
+ await client.list_training_pipelines(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.ListTrainingPipelinesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_training_pipeline), "__call__"
+ ) as call:
+ await client.delete_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.DeleteTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_training_pipeline), "__call__"
+ ) as call:
+ await client.cancel_training_pipeline(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CancelTrainingPipelineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_pipeline_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ await client.create_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CreatePipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_pipeline_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ await client.get_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.GetPipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ await client.list_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.ListPipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ await client.delete_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.DeletePipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_batch_delete_pipeline_jobs_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_pipeline_jobs), "__call__"
+ ) as call:
+ await client.batch_delete_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.BatchDeletePipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ await client.cancel_pipeline_job(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.CancelPipelineJobRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_batch_cancel_pipeline_jobs_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_cancel_pipeline_jobs), "__call__"
+ ) as call:
+ await client.batch_cancel_pipeline_jobs(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = pipeline_service.BatchCancelPipelineJobsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_pipeline_service_rest_asyncio_lro_client():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AsyncOperationsRestClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.PipelineServiceGrpcTransport,
+ )
+
+
+def test_pipeline_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.PipelineServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_pipeline_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.PipelineServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "create_training_pipeline",
+ "get_training_pipeline",
+ "list_training_pipelines",
+ "delete_training_pipeline",
+ "cancel_training_pipeline",
+ "create_pipeline_job",
+ "get_pipeline_job",
+ "list_pipeline_jobs",
+ "delete_pipeline_job",
+ "batch_delete_pipeline_jobs",
+ "cancel_pipeline_job",
+ "batch_cancel_pipeline_jobs",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_pipeline_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PipelineServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_pipeline_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PipelineServiceTransport()
+ adc.assert_called_once()
+
+
+def test_pipeline_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ PipelineServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PipelineServiceGrpcTransport,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_pipeline_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PipelineServiceGrpcTransport,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ transports.PipelineServiceRestTransport,
+ ],
+)
+def test_pipeline_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.PipelineServiceGrpcTransport, grpc_helpers),
+ (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PipelineServiceGrpcTransport,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_pipeline_service_grpc_transport_client_cert_source_for_mtls(transport_class):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_pipeline_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.PipelineServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_pipeline_service_host_no_port(transport_name):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_pipeline_service_host_with_port(transport_name):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_pipeline_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = PipelineServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = PipelineServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.create_training_pipeline._session
+ session2 = client2.transport.create_training_pipeline._session
+ assert session1 != session2
+ session1 = client1.transport.get_training_pipeline._session
+ session2 = client2.transport.get_training_pipeline._session
+ assert session1 != session2
+ session1 = client1.transport.list_training_pipelines._session
+ session2 = client2.transport.list_training_pipelines._session
+ assert session1 != session2
+ session1 = client1.transport.delete_training_pipeline._session
+ session2 = client2.transport.delete_training_pipeline._session
+ assert session1 != session2
+ session1 = client1.transport.cancel_training_pipeline._session
+ session2 = client2.transport.cancel_training_pipeline._session
+ assert session1 != session2
+ session1 = client1.transport.create_pipeline_job._session
+ session2 = client2.transport.create_pipeline_job._session
+ assert session1 != session2
+ session1 = client1.transport.get_pipeline_job._session
+ session2 = client2.transport.get_pipeline_job._session
+ assert session1 != session2
+ session1 = client1.transport.list_pipeline_jobs._session
+ session2 = client2.transport.list_pipeline_jobs._session
+ assert session1 != session2
+ session1 = client1.transport.delete_pipeline_job._session
+ session2 = client2.transport.delete_pipeline_job._session
+ assert session1 != session2
+ session1 = client1.transport.batch_delete_pipeline_jobs._session
+ session2 = client2.transport.batch_delete_pipeline_jobs._session
+ assert session1 != session2
+ session1 = client1.transport.cancel_pipeline_job._session
+ session2 = client2.transport.cancel_pipeline_job._session
+ assert session1 != session2
+ session1 = client1.transport.batch_cancel_pipeline_jobs._session
+ session2 = client2.transport.batch_cancel_pipeline_jobs._session
+ assert session1 != session2
+
+
+def test_pipeline_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PipelineServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_pipeline_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PipelineServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PipelineServiceGrpcTransport,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_pipeline_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PipelineServiceGrpcTransport,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_pipeline_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_pipeline_service_grpc_lro_client():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_pipeline_service_grpc_lro_async_client():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_artifact_path():
+ project = "squid"
+ location = "clam"
+ metadata_store = "whelk"
+ artifact = "octopus"
+ expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(
+ project=project,
+ location=location,
+ metadata_store=metadata_store,
+ artifact=artifact,
+ )
+ actual = PipelineServiceClient.artifact_path(
+ project, location, metadata_store, artifact
+ )
+ assert expected == actual
+
+
+def test_parse_artifact_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ "metadata_store": "cuttlefish",
+ "artifact": "mussel",
+ }
+ path = PipelineServiceClient.artifact_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_artifact_path(path)
+ assert expected == actual
+
+
+def test_context_path():
+ project = "winkle"
+ location = "nautilus"
+ metadata_store = "scallop"
+ context = "abalone"
+ expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(
+ project=project,
+ location=location,
+ metadata_store=metadata_store,
+ context=context,
+ )
+ actual = PipelineServiceClient.context_path(
+ project, location, metadata_store, context
+ )
+ assert expected == actual
+
+
+def test_parse_context_path():
+ expected = {
+ "project": "squid",
+ "location": "clam",
+ "metadata_store": "whelk",
+ "context": "octopus",
+ }
+ path = PipelineServiceClient.context_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_context_path(path)
+ assert expected == actual
+
+
+def test_custom_job_path():
+ project = "oyster"
+ location = "nudibranch"
+ custom_job = "cuttlefish"
+ expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(
+ project=project,
+ location=location,
+ custom_job=custom_job,
+ )
+ actual = PipelineServiceClient.custom_job_path(project, location, custom_job)
+ assert expected == actual
+
+
+def test_parse_custom_job_path():
+ expected = {
+ "project": "mussel",
+ "location": "winkle",
+ "custom_job": "nautilus",
+ }
+ path = PipelineServiceClient.custom_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_custom_job_path(path)
+ assert expected == actual
+
+
+def test_endpoint_path():
+ project = "scallop"
+ location = "abalone"
+ endpoint = "squid"
+ expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+ project=project,
+ location=location,
+ endpoint=endpoint,
+ )
+ actual = PipelineServiceClient.endpoint_path(project, location, endpoint)
+ assert expected == actual
+
+
+def test_parse_endpoint_path():
+ expected = {
+ "project": "clam",
+ "location": "whelk",
+ "endpoint": "octopus",
+ }
+ path = PipelineServiceClient.endpoint_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_endpoint_path(path)
+ assert expected == actual
+
+
+def test_execution_path():
+ project = "oyster"
+ location = "nudibranch"
+ metadata_store = "cuttlefish"
+ execution = "mussel"
+ expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(
+ project=project,
+ location=location,
+ metadata_store=metadata_store,
+ execution=execution,
+ )
+ actual = PipelineServiceClient.execution_path(
+ project, location, metadata_store, execution
+ )
+ assert expected == actual
+
+
+def test_parse_execution_path():
+ expected = {
+ "project": "winkle",
+ "location": "nautilus",
+ "metadata_store": "scallop",
+ "execution": "abalone",
+ }
+ path = PipelineServiceClient.execution_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_execution_path(path)
+ assert expected == actual
+
+
+def test_model_path():
+ project = "squid"
+ location = "clam"
+ model = "whelk"
+ expected = "projects/{project}/locations/{location}/models/{model}".format(
+ project=project,
+ location=location,
+ model=model,
+ )
+ actual = PipelineServiceClient.model_path(project, location, model)
+ assert expected == actual
+
+
+def test_parse_model_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "model": "nudibranch",
+ }
+ path = PipelineServiceClient.model_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_model_path(path)
+ assert expected == actual
+
+
+def test_network_path():
+ project = "cuttlefish"
+ network = "mussel"
+ expected = "projects/{project}/global/networks/{network}".format(
+ project=project,
+ network=network,
+ )
+ actual = PipelineServiceClient.network_path(project, network)
+ assert expected == actual
+
+
+def test_parse_network_path():
+ expected = {
+ "project": "winkle",
+ "network": "nautilus",
+ }
+ path = PipelineServiceClient.network_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_network_path(path)
+ assert expected == actual
+
+
+def test_network_attachment_path():
+ project = "scallop"
+ region = "abalone"
+ networkattachment = "squid"
+ expected = "projects/{project}/regions/{region}/networkAttachments/{networkattachment}".format(
+ project=project,
+ region=region,
+ networkattachment=networkattachment,
+ )
+ actual = PipelineServiceClient.network_attachment_path(
+ project, region, networkattachment
+ )
+ assert expected == actual
+
+
+def test_parse_network_attachment_path():
+ expected = {
+ "project": "clam",
+ "region": "whelk",
+ "networkattachment": "octopus",
+ }
+ path = PipelineServiceClient.network_attachment_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_network_attachment_path(path)
+ assert expected == actual
+
+
+def test_pipeline_job_path():
+ project = "oyster"
+ location = "nudibranch"
+ pipeline_job = "cuttlefish"
+ expected = (
+ "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(
+ project=project,
+ location=location,
+ pipeline_job=pipeline_job,
+ )
+ )
+ actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job)
+ assert expected == actual
+
+
+def test_parse_pipeline_job_path():
+ expected = {
+ "project": "mussel",
+ "location": "winkle",
+ "pipeline_job": "nautilus",
+ }
+ path = PipelineServiceClient.pipeline_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_pipeline_job_path(path)
+ assert expected == actual
+
+
+def test_training_pipeline_path():
+ project = "scallop"
+ location = "abalone"
+ training_pipeline = "squid"
+ expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(
+ project=project,
+ location=location,
+ training_pipeline=training_pipeline,
+ )
+ actual = PipelineServiceClient.training_pipeline_path(
+ project, location, training_pipeline
+ )
+ assert expected == actual
+
+
+def test_parse_training_pipeline_path():
+ expected = {
+ "project": "clam",
+ "location": "whelk",
+ "training_pipeline": "octopus",
+ }
+ path = PipelineServiceClient.training_pipeline_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_training_pipeline_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "oyster"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = PipelineServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "nudibranch",
+ }
+ path = PipelineServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "cuttlefish"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = PipelineServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "mussel",
+ }
+ path = PipelineServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "winkle"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = PipelineServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "nautilus",
+ }
+ path = PipelineServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "scallop"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = PipelineServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "abalone",
+ }
+ path = PipelineServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "squid"
+ location = "clam"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = PipelineServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "whelk",
+ "location": "octopus",
+ }
+ path = PipelineServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.PipelineServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.PipelineServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = PipelineServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = PipelineServiceAsyncClient(credentials=async_anonymous_credentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PipelineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (PipelineServiceClient, transports.PipelineServiceGrpcTransport),
+ (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..c48e4305a4a5b534784b66554e6d04dd2f59dc46
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py
@@ -0,0 +1,15214 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api import httpbody_pb2 # type: ignore
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.prediction_service import (
+ PredictionServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.prediction_service import (
+ PredictionServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.prediction_service import transports
+from google.cloud.aiplatform_v1beta1.types import content
+from google.cloud.aiplatform_v1beta1.types import explanation
+from google.cloud.aiplatform_v1beta1.types import io
+from google.cloud.aiplatform_v1beta1.types import openapi
+from google.cloud.aiplatform_v1beta1.types import prediction_service
+from google.cloud.aiplatform_v1beta1.types import tool
+from google.cloud.aiplatform_v1beta1.types import types
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import any_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert PredictionServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ PredictionServiceClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ PredictionServiceClient._get_default_mtls_endpoint(non_googleapi)
+ == non_googleapi
+ )
+
+
+def test__read_environment_variables():
+ assert PredictionServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert PredictionServiceClient._read_environment_variables() == (
+ True,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert PredictionServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ PredictionServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert PredictionServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert PredictionServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert PredictionServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ PredictionServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert PredictionServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert PredictionServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ PredictionServiceClient._get_client_cert_source(
+ mock_provided_cert_source, False
+ )
+ is None
+ )
+ assert (
+ PredictionServiceClient._get_client_cert_source(mock_provided_cert_source, True)
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ PredictionServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ PredictionServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ PredictionServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PredictionServiceClient),
+)
+@mock.patch.object(
+ PredictionServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PredictionServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = PredictionServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = PredictionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = PredictionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ PredictionServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ PredictionServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == PredictionServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ PredictionServiceClient._get_api_endpoint(None, None, default_universe, "auto")
+ == default_endpoint
+ )
+ assert (
+ PredictionServiceClient._get_api_endpoint(
+ None, None, default_universe, "always"
+ )
+ == PredictionServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ PredictionServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == PredictionServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ PredictionServiceClient._get_api_endpoint(None, None, mock_universe, "never")
+ == mock_endpoint
+ )
+ assert (
+ PredictionServiceClient._get_api_endpoint(None, None, default_universe, "never")
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ PredictionServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ PredictionServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ PredictionServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ PredictionServiceClient._get_universe_domain(None, None)
+ == PredictionServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ PredictionServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (PredictionServiceClient, "grpc"),
+ (PredictionServiceAsyncClient, "grpc_asyncio"),
+ (PredictionServiceClient, "rest"),
+ ],
+)
+def test_prediction_service_client_from_service_account_info(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.PredictionServiceGrpcTransport, "grpc"),
+ (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.PredictionServiceRestTransport, "rest"),
+ ],
+)
+def test_prediction_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (PredictionServiceClient, "grpc"),
+ (PredictionServiceAsyncClient, "grpc_asyncio"),
+ (PredictionServiceClient, "rest"),
+ ],
+)
+def test_prediction_service_client_from_service_account_file(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_prediction_service_client_get_transport_class():
+ transport = PredictionServiceClient.get_transport_class()
+ available_transports = [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = PredictionServiceClient.get_transport_class("grpc")
+ assert transport == transports.PredictionServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"),
+ (
+ PredictionServiceAsyncClient,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"),
+ ],
+)
+@mock.patch.object(
+ PredictionServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PredictionServiceClient),
+)
+@mock.patch.object(
+ PredictionServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PredictionServiceAsyncClient),
+)
+def test_prediction_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ PredictionServiceClient,
+ transports.PredictionServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ PredictionServiceAsyncClient,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ PredictionServiceClient,
+ transports.PredictionServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ PredictionServiceAsyncClient,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (
+ PredictionServiceClient,
+ transports.PredictionServiceRestTransport,
+ "rest",
+ "true",
+ ),
+ (
+ PredictionServiceClient,
+ transports.PredictionServiceRestTransport,
+ "rest",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ PredictionServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PredictionServiceClient),
+)
+@mock.patch.object(
+ PredictionServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PredictionServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_prediction_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [PredictionServiceClient, PredictionServiceAsyncClient]
+)
+@mock.patch.object(
+ PredictionServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(PredictionServiceClient),
+)
+@mock.patch.object(
+ PredictionServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(PredictionServiceAsyncClient),
+)
+def test_prediction_service_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [PredictionServiceClient, PredictionServiceAsyncClient]
+)
+@mock.patch.object(
+ PredictionServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PredictionServiceClient),
+)
+@mock.patch.object(
+ PredictionServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(PredictionServiceAsyncClient),
+)
+def test_prediction_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = PredictionServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = PredictionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = PredictionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"),
+ (
+ PredictionServiceAsyncClient,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"),
+ ],
+)
+def test_prediction_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PredictionServiceClient,
+ transports.PredictionServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PredictionServiceAsyncClient,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (
+ PredictionServiceClient,
+ transports.PredictionServiceRestTransport,
+ "rest",
+ None,
+ ),
+ ],
+)
+def test_prediction_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_prediction_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = PredictionServiceClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PredictionServiceClient,
+ transports.PredictionServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PredictionServiceAsyncClient,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_prediction_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.PredictRequest,
+ dict,
+ ],
+)
+def test_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.PredictResponse(
+ deployed_model_id="deployed_model_id_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ model_display_name="model_display_name_value",
+ )
+ response = client.predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.PredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.PredictResponse)
+ assert response.deployed_model_id == "deployed_model_id_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.model_display_name == "model_display_name_value"
+
+
+def test_predict_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.PredictRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.predict(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.PredictRequest(
+ endpoint="endpoint_value",
+ )
+
+
+def test_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.predict in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.predict] = mock_rpc
+ request = {}
+ client.predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_predict_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.predict
+ ] = mock_rpc
+
+ request = {}
+ await client.predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_predict_async(
+ transport: str = "grpc_asyncio", request_type=prediction_service.PredictRequest
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.PredictResponse(
+ deployed_model_id="deployed_model_id_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ model_display_name="model_display_name_value",
+ )
+ )
+ response = await client.predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.PredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.PredictResponse)
+ assert response.deployed_model_id == "deployed_model_id_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.model_display_name == "model_display_name_value"
+
+
+@pytest.mark.asyncio
+async def test_predict_async_from_dict():
+ await test_predict_async(request_type=dict)
+
+
+def test_predict_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.PredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
+ call.return_value = prediction_service.PredictResponse()
+ client.predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_predict_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.PredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.PredictResponse()
+ )
+ await client.predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+def test_predict_flattened_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.predict(
+ prediction_service.PredictRequest(),
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+ )
+
+
+@pytest.mark.asyncio
+async def test_predict_flattened_error_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.predict(
+ prediction_service.PredictRequest(),
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.RawPredictRequest,
+ dict,
+ ],
+)
+def test_raw_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+ response = client.raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.RawPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+def test_raw_predict_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.RawPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.raw_predict(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.RawPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+
+def test_raw_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.raw_predict in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.raw_predict] = mock_rpc
+ request = {}
+ client.raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_raw_predict_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.raw_predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.raw_predict
+ ] = mock_rpc
+
+ request = {}
+ await client.raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_raw_predict_async(
+ transport: str = "grpc_asyncio", request_type=prediction_service.RawPredictRequest
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+ )
+ response = await client.raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.RawPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+@pytest.mark.asyncio
+async def test_raw_predict_async_from_dict():
+ await test_raw_predict_async(request_type=dict)
+
+
+def test_raw_predict_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.RawPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ call.return_value = httpbody_pb2.HttpBody()
+ client.raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_raw_predict_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.RawPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ httpbody_pb2.HttpBody()
+ )
+ await client.raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+def test_raw_predict_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = httpbody_pb2.HttpBody()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.raw_predict(
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].endpoint
+ mock_val = "endpoint_value"
+ assert arg == mock_val
+ arg = args[0].http_body
+ mock_val = httpbody_pb2.HttpBody(content_type="content_type_value")
+ assert arg == mock_val
+
+
+def test_raw_predict_flattened_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.raw_predict(
+ prediction_service.RawPredictRequest(),
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_raw_predict_flattened_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = httpbody_pb2.HttpBody()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ httpbody_pb2.HttpBody()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.raw_predict(
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].endpoint
+ mock_val = "endpoint_value"
+ assert arg == mock_val
+ arg = args[0].http_body
+ mock_val = httpbody_pb2.HttpBody(content_type="content_type_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_raw_predict_flattened_error_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.raw_predict(
+ prediction_service.RawPredictRequest(),
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamRawPredictRequest,
+ dict,
+ ],
+)
+def test_stream_raw_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ response = client.stream_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.StreamRawPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ for message in response:
+ assert isinstance(message, httpbody_pb2.HttpBody)
+
+
+def test_stream_raw_predict_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.StreamRawPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.stream_raw_predict(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.StreamRawPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+
+def test_stream_raw_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.stream_raw_predict in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.stream_raw_predict
+ ] = mock_rpc
+ request = {}
+ client.stream_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.stream_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_raw_predict_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.stream_raw_predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.stream_raw_predict
+ ] = mock_rpc
+
+ request = {}
+ await client.stream_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.stream_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_raw_predict_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.StreamRawPredictRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()])
+ response = await client.stream_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.StreamRawPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ message = await response.read()
+ assert isinstance(message, httpbody_pb2.HttpBody)
+
+
+@pytest.mark.asyncio
+async def test_stream_raw_predict_async_from_dict():
+ await test_stream_raw_predict_async(request_type=dict)
+
+
+def test_stream_raw_predict_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.StreamRawPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ client.stream_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_stream_raw_predict_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.StreamRawPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()])
+ await client.stream_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+def test_stream_raw_predict_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.stream_raw_predict(
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].endpoint
+ mock_val = "endpoint_value"
+ assert arg == mock_val
+ arg = args[0].http_body
+ mock_val = httpbody_pb2.HttpBody(content_type="content_type_value")
+ assert arg == mock_val
+
+
+def test_stream_raw_predict_flattened_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.stream_raw_predict(
+ prediction_service.StreamRawPredictRequest(),
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_stream_raw_predict_flattened_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.stream_raw_predict(
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].endpoint
+ mock_val = "endpoint_value"
+ assert arg == mock_val
+ arg = args[0].http_body
+ mock_val = httpbody_pb2.HttpBody(content_type="content_type_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_stream_raw_predict_flattened_error_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.stream_raw_predict(
+ prediction_service.StreamRawPredictRequest(),
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.DirectPredictRequest,
+ dict,
+ ],
+)
+def test_direct_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.direct_predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.DirectPredictResponse()
+ response = client.direct_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.DirectPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.DirectPredictResponse)
+
+
+def test_direct_predict_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.DirectPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.direct_predict), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.direct_predict(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.DirectPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+
+def test_direct_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.direct_predict in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.direct_predict] = mock_rpc
+ request = {}
+ client.direct_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.direct_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_direct_predict_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.direct_predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.direct_predict
+ ] = mock_rpc
+
+ request = {}
+ await client.direct_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.direct_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_direct_predict_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.DirectPredictRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.direct_predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.DirectPredictResponse()
+ )
+ response = await client.direct_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.DirectPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.DirectPredictResponse)
+
+
+@pytest.mark.asyncio
+async def test_direct_predict_async_from_dict():
+ await test_direct_predict_async(request_type=dict)
+
+
+def test_direct_predict_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.DirectPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.direct_predict), "__call__") as call:
+ call.return_value = prediction_service.DirectPredictResponse()
+ client.direct_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_direct_predict_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.DirectPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.direct_predict), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.DirectPredictResponse()
+ )
+ await client.direct_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.DirectRawPredictRequest,
+ dict,
+ ],
+)
+def test_direct_raw_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.direct_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.DirectRawPredictResponse(
+ output=b"output_blob",
+ )
+ response = client.direct_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.DirectRawPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.DirectRawPredictResponse)
+ assert response.output == b"output_blob"
+
+
+def test_direct_raw_predict_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.DirectRawPredictRequest(
+ endpoint="endpoint_value",
+ method_name="method_name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.direct_raw_predict), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.direct_raw_predict(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.DirectRawPredictRequest(
+ endpoint="endpoint_value",
+ method_name="method_name_value",
+ )
+
+
+def test_direct_raw_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.direct_raw_predict in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.direct_raw_predict
+ ] = mock_rpc
+ request = {}
+ client.direct_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.direct_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_direct_raw_predict_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.direct_raw_predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.direct_raw_predict
+ ] = mock_rpc
+
+ request = {}
+ await client.direct_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.direct_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_direct_raw_predict_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.DirectRawPredictRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.direct_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.DirectRawPredictResponse(
+ output=b"output_blob",
+ )
+ )
+ response = await client.direct_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.DirectRawPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.DirectRawPredictResponse)
+ assert response.output == b"output_blob"
+
+
+@pytest.mark.asyncio
+async def test_direct_raw_predict_async_from_dict():
+ await test_direct_raw_predict_async(request_type=dict)
+
+
+def test_direct_raw_predict_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.DirectRawPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.direct_raw_predict), "__call__"
+ ) as call:
+ call.return_value = prediction_service.DirectRawPredictResponse()
+ client.direct_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_direct_raw_predict_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.DirectRawPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.direct_raw_predict), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.DirectRawPredictResponse()
+ )
+ await client.direct_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamDirectPredictRequest,
+ dict,
+ ],
+)
+def test_stream_direct_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+ requests = [request]
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_direct_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([prediction_service.StreamDirectPredictResponse()])
+ response = client.stream_direct_predict(iter(requests))
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert next(args[0]) == request
+
+ # Establish that the response is the type that we expect.
+ for message in response:
+ assert isinstance(message, prediction_service.StreamDirectPredictResponse)
+
+
+def test_stream_direct_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.stream_direct_predict
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.stream_direct_predict
+ ] = mock_rpc
+ request = [{}]
+ client.stream_direct_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.stream_direct_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_direct_predict_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.stream_direct_predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.stream_direct_predict
+ ] = mock_rpc
+
+ request = [{}]
+ await client.stream_direct_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.stream_direct_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_direct_predict_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.StreamDirectPredictRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+ requests = [request]
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_direct_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.StreamDirectPredictResponse()]
+ )
+ response = await client.stream_direct_predict(iter(requests))
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert next(args[0]) == request
+
+ # Establish that the response is the type that we expect.
+ message = await response.read()
+ assert isinstance(message, prediction_service.StreamDirectPredictResponse)
+
+
+@pytest.mark.asyncio
+async def test_stream_direct_predict_async_from_dict():
+ await test_stream_direct_predict_async(request_type=dict)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamDirectRawPredictRequest,
+ dict,
+ ],
+)
+def test_stream_direct_raw_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+ requests = [request]
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_direct_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([prediction_service.StreamDirectRawPredictResponse()])
+ response = client.stream_direct_raw_predict(iter(requests))
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert next(args[0]) == request
+
+ # Establish that the response is the type that we expect.
+ for message in response:
+ assert isinstance(message, prediction_service.StreamDirectRawPredictResponse)
+
+
+def test_stream_direct_raw_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.stream_direct_raw_predict
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.stream_direct_raw_predict
+ ] = mock_rpc
+ request = [{}]
+ client.stream_direct_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.stream_direct_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_direct_raw_predict_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.stream_direct_raw_predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.stream_direct_raw_predict
+ ] = mock_rpc
+
+ request = [{}]
+ await client.stream_direct_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.stream_direct_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_direct_raw_predict_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.StreamDirectRawPredictRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+ requests = [request]
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_direct_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.StreamDirectRawPredictResponse()]
+ )
+ response = await client.stream_direct_raw_predict(iter(requests))
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert next(args[0]) == request
+
+ # Establish that the response is the type that we expect.
+ message = await response.read()
+ assert isinstance(message, prediction_service.StreamDirectRawPredictResponse)
+
+
+@pytest.mark.asyncio
+async def test_stream_direct_raw_predict_async_from_dict():
+ await test_stream_direct_raw_predict_async(request_type=dict)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamingPredictRequest,
+ dict,
+ ],
+)
+def test_streaming_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+ requests = [request]
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.streaming_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([prediction_service.StreamingPredictResponse()])
+ response = client.streaming_predict(iter(requests))
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert next(args[0]) == request
+
+ # Establish that the response is the type that we expect.
+ for message in response:
+ assert isinstance(message, prediction_service.StreamingPredictResponse)
+
+
+def test_streaming_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.streaming_predict in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.streaming_predict
+ ] = mock_rpc
+ request = [{}]
+ client.streaming_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.streaming_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_streaming_predict_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.streaming_predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.streaming_predict
+ ] = mock_rpc
+
+ request = [{}]
+ await client.streaming_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.streaming_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_streaming_predict_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.StreamingPredictRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+ requests = [request]
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.streaming_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.StreamingPredictResponse()]
+ )
+ response = await client.streaming_predict(iter(requests))
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert next(args[0]) == request
+
+ # Establish that the response is the type that we expect.
+ message = await response.read()
+ assert isinstance(message, prediction_service.StreamingPredictResponse)
+
+
+@pytest.mark.asyncio
+async def test_streaming_predict_async_from_dict():
+ await test_streaming_predict_async(request_type=dict)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamingPredictRequest,
+ dict,
+ ],
+)
+def test_server_streaming_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.server_streaming_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([prediction_service.StreamingPredictResponse()])
+ response = client.server_streaming_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.StreamingPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ for message in response:
+ assert isinstance(message, prediction_service.StreamingPredictResponse)
+
+
+def test_server_streaming_predict_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.StreamingPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.server_streaming_predict), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.server_streaming_predict(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.StreamingPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+
+def test_server_streaming_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.server_streaming_predict
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.server_streaming_predict
+ ] = mock_rpc
+ request = {}
+ client.server_streaming_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.server_streaming_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_server_streaming_predict_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.server_streaming_predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.server_streaming_predict
+ ] = mock_rpc
+
+ request = {}
+ await client.server_streaming_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.server_streaming_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_server_streaming_predict_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.StreamingPredictRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.server_streaming_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.StreamingPredictResponse()]
+ )
+ response = await client.server_streaming_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.StreamingPredictRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ message = await response.read()
+ assert isinstance(message, prediction_service.StreamingPredictResponse)
+
+
+@pytest.mark.asyncio
+async def test_server_streaming_predict_async_from_dict():
+ await test_server_streaming_predict_async(request_type=dict)
+
+
+def test_server_streaming_predict_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.StreamingPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.server_streaming_predict), "__call__"
+ ) as call:
+ call.return_value = iter([prediction_service.StreamingPredictResponse()])
+ client.server_streaming_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_server_streaming_predict_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.StreamingPredictRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.server_streaming_predict), "__call__"
+ ) as call:
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.StreamingPredictResponse()]
+ )
+ await client.server_streaming_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamingRawPredictRequest,
+ dict,
+ ],
+)
+def test_streaming_raw_predict(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+ requests = [request]
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.streaming_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([prediction_service.StreamingRawPredictResponse()])
+ response = client.streaming_raw_predict(iter(requests))
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert next(args[0]) == request
+
+ # Establish that the response is the type that we expect.
+ for message in response:
+ assert isinstance(message, prediction_service.StreamingRawPredictResponse)
+
+
+def test_streaming_raw_predict_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.streaming_raw_predict
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.streaming_raw_predict
+ ] = mock_rpc
+ request = [{}]
+ client.streaming_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.streaming_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_streaming_raw_predict_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.streaming_raw_predict
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.streaming_raw_predict
+ ] = mock_rpc
+
+ request = [{}]
+ await client.streaming_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.streaming_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_streaming_raw_predict_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.StreamingRawPredictRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+ requests = [request]
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.streaming_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.StreamingRawPredictResponse()]
+ )
+ response = await client.streaming_raw_predict(iter(requests))
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert next(args[0]) == request
+
+ # Establish that the response is the type that we expect.
+ message = await response.read()
+ assert isinstance(message, prediction_service.StreamingRawPredictResponse)
+
+
+@pytest.mark.asyncio
+async def test_streaming_raw_predict_async_from_dict():
+ await test_streaming_raw_predict_async(request_type=dict)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.ExplainRequest,
+ dict,
+ ],
+)
+def test_explain(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.explain), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.ExplainResponse(
+ deployed_model_id="deployed_model_id_value",
+ )
+ response = client.explain(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.ExplainRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.ExplainResponse)
+ assert response.deployed_model_id == "deployed_model_id_value"
+
+
+def test_explain_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.ExplainRequest(
+ endpoint="endpoint_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.explain), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.explain(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.ExplainRequest(
+ endpoint="endpoint_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+
+def test_explain_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.explain in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.explain] = mock_rpc
+ request = {}
+ client.explain(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.explain(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_explain_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.explain
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.explain
+ ] = mock_rpc
+
+ request = {}
+ await client.explain(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.explain(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_explain_async(
+ transport: str = "grpc_asyncio", request_type=prediction_service.ExplainRequest
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.explain), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.ExplainResponse(
+ deployed_model_id="deployed_model_id_value",
+ )
+ )
+ response = await client.explain(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.ExplainRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.ExplainResponse)
+ assert response.deployed_model_id == "deployed_model_id_value"
+
+
+@pytest.mark.asyncio
+async def test_explain_async_from_dict():
+ await test_explain_async(request_type=dict)
+
+
+def test_explain_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.ExplainRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.explain), "__call__") as call:
+ call.return_value = prediction_service.ExplainResponse()
+ client.explain(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_explain_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.ExplainRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.explain), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.ExplainResponse()
+ )
+ await client.explain(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+def test_explain_flattened_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.explain(
+ prediction_service.ExplainRequest(),
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+ deployed_model_id="deployed_model_id_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_explain_flattened_error_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.explain(
+ prediction_service.ExplainRequest(),
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+ deployed_model_id="deployed_model_id_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.CountTokensRequest,
+ dict,
+ ],
+)
+def test_count_tokens(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.CountTokensResponse(
+ total_tokens=1303,
+ total_billable_characters=2617,
+ )
+ response = client.count_tokens(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.CountTokensRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.CountTokensResponse)
+ assert response.total_tokens == 1303
+ assert response.total_billable_characters == 2617
+
+
+def test_count_tokens_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.CountTokensRequest(
+ endpoint="endpoint_value",
+ model="model_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.count_tokens(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.CountTokensRequest(
+ endpoint="endpoint_value",
+ model="model_value",
+ )
+
+
+def test_count_tokens_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.count_tokens in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.count_tokens] = mock_rpc
+ request = {}
+ client.count_tokens(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.count_tokens(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_count_tokens_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.count_tokens
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.count_tokens
+ ] = mock_rpc
+
+ request = {}
+ await client.count_tokens(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.count_tokens(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_count_tokens_async(
+ transport: str = "grpc_asyncio", request_type=prediction_service.CountTokensRequest
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.CountTokensResponse(
+ total_tokens=1303,
+ total_billable_characters=2617,
+ )
+ )
+ response = await client.count_tokens(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.CountTokensRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.CountTokensResponse)
+ assert response.total_tokens == 1303
+ assert response.total_billable_characters == 2617
+
+
+@pytest.mark.asyncio
+async def test_count_tokens_async_from_dict():
+ await test_count_tokens_async(request_type=dict)
+
+
+def test_count_tokens_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.CountTokensRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ call.return_value = prediction_service.CountTokensResponse()
+ client.count_tokens(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_count_tokens_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.CountTokensRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.CountTokensResponse()
+ )
+ await client.count_tokens(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+def test_count_tokens_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.CountTokensResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.count_tokens(
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].endpoint
+ mock_val = "endpoint_value"
+ assert arg == mock_val
+ arg = args[0].instances
+ mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
+ assert arg == mock_val
+
+
+def test_count_tokens_flattened_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.count_tokens(
+ prediction_service.CountTokensRequest(),
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ )
+
+
+@pytest.mark.asyncio
+async def test_count_tokens_flattened_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.CountTokensResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.CountTokensResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.count_tokens(
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].endpoint
+ mock_val = "endpoint_value"
+ assert arg == mock_val
+ arg = args[0].instances
+ mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_count_tokens_flattened_error_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.count_tokens(
+ prediction_service.CountTokensRequest(),
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.GenerateContentRequest,
+ dict,
+ ],
+)
+def test_generate_content(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.GenerateContentResponse(
+ model_version="model_version_value",
+ )
+ response = client.generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.GenerateContentRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.GenerateContentResponse)
+ assert response.model_version == "model_version_value"
+
+
+def test_generate_content_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.GenerateContentRequest(
+ model="model_value",
+ cached_content="cached_content_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.generate_content(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.GenerateContentRequest(
+ model="model_value",
+ cached_content="cached_content_value",
+ )
+
+
+def test_generate_content_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.generate_content in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.generate_content
+ ] = mock_rpc
+ request = {}
+ client.generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.generate_content(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_generate_content_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.generate_content
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.generate_content
+ ] = mock_rpc
+
+ request = {}
+ await client.generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.generate_content(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_generate_content_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.GenerateContentRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.GenerateContentResponse(
+ model_version="model_version_value",
+ )
+ )
+ response = await client.generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.GenerateContentRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.GenerateContentResponse)
+ assert response.model_version == "model_version_value"
+
+
+@pytest.mark.asyncio
+async def test_generate_content_async_from_dict():
+ await test_generate_content_async(request_type=dict)
+
+
+def test_generate_content_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.GenerateContentRequest()
+
+ request.model = "model_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ call.return_value = prediction_service.GenerateContentResponse()
+ client.generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "model=model_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_generate_content_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.GenerateContentRequest()
+
+ request.model = "model_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.GenerateContentResponse()
+ )
+ await client.generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "model=model_value",
+ ) in kw["metadata"]
+
+
+def test_generate_content_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.GenerateContentResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.generate_content(
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].model
+ mock_val = "model_value"
+ assert arg == mock_val
+ arg = args[0].contents
+ mock_val = [content.Content(role="role_value")]
+ assert arg == mock_val
+
+
+def test_generate_content_flattened_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.generate_content(
+ prediction_service.GenerateContentRequest(),
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+
+@pytest.mark.asyncio
+async def test_generate_content_flattened_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = prediction_service.GenerateContentResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.GenerateContentResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.generate_content(
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].model
+ mock_val = "model_value"
+ assert arg == mock_val
+ arg = args[0].contents
+ mock_val = [content.Content(role="role_value")]
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_generate_content_flattened_error_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.generate_content(
+ prediction_service.GenerateContentRequest(),
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.GenerateContentRequest,
+ dict,
+ ],
+)
+def test_stream_generate_content(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([prediction_service.GenerateContentResponse()])
+ response = client.stream_generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.GenerateContentRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ for message in response:
+ assert isinstance(message, prediction_service.GenerateContentResponse)
+
+
+def test_stream_generate_content_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.GenerateContentRequest(
+ model="model_value",
+ cached_content="cached_content_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.stream_generate_content(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.GenerateContentRequest(
+ model="model_value",
+ cached_content="cached_content_value",
+ )
+
+
+def test_stream_generate_content_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.stream_generate_content
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.stream_generate_content
+ ] = mock_rpc
+ request = {}
+ client.stream_generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.stream_generate_content(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_generate_content_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.stream_generate_content
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.stream_generate_content
+ ] = mock_rpc
+
+ request = {}
+ await client.stream_generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.stream_generate_content(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_generate_content_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.GenerateContentRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.GenerateContentResponse()]
+ )
+ response = await client.stream_generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.GenerateContentRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ message = await response.read()
+ assert isinstance(message, prediction_service.GenerateContentResponse)
+
+
+@pytest.mark.asyncio
+async def test_stream_generate_content_async_from_dict():
+ await test_stream_generate_content_async(request_type=dict)
+
+
+def test_stream_generate_content_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.GenerateContentRequest()
+
+ request.model = "model_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ call.return_value = iter([prediction_service.GenerateContentResponse()])
+ client.stream_generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "model=model_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_stream_generate_content_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.GenerateContentRequest()
+
+ request.model = "model_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.GenerateContentResponse()]
+ )
+ await client.stream_generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "model=model_value",
+ ) in kw["metadata"]
+
+
+def test_stream_generate_content_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([prediction_service.GenerateContentResponse()])
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.stream_generate_content(
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].model
+ mock_val = "model_value"
+ assert arg == mock_val
+ arg = args[0].contents
+ mock_val = [content.Content(role="role_value")]
+ assert arg == mock_val
+
+
+def test_stream_generate_content_flattened_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.stream_generate_content(
+ prediction_service.GenerateContentRequest(),
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+
+@pytest.mark.asyncio
+async def test_stream_generate_content_flattened_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([prediction_service.GenerateContentResponse()])
+
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.stream_generate_content(
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].model
+ mock_val = "model_value"
+ assert arg == mock_val
+ arg = args[0].contents
+ mock_val = [content.Content(role="role_value")]
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_stream_generate_content_flattened_error_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.stream_generate_content(
+ prediction_service.GenerateContentRequest(),
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.ChatCompletionsRequest,
+ dict,
+ ],
+)
+def test_chat_completions(request_type, transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ response = client.chat_completions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.ChatCompletionsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ for message in response:
+ assert isinstance(message, httpbody_pb2.HttpBody)
+
+
+def test_chat_completions_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = prediction_service.ChatCompletionsRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.chat_completions(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == prediction_service.ChatCompletionsRequest(
+ endpoint="endpoint_value",
+ )
+
+
+def test_chat_completions_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.chat_completions in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.chat_completions
+ ] = mock_rpc
+ request = {}
+ client.chat_completions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.chat_completions(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_chat_completions_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.chat_completions
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.chat_completions
+ ] = mock_rpc
+
+ request = {}
+ await client.chat_completions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.chat_completions(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_chat_completions_async(
+ transport: str = "grpc_asyncio",
+ request_type=prediction_service.ChatCompletionsRequest,
+):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()])
+ response = await client.chat_completions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = prediction_service.ChatCompletionsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ message = await response.read()
+ assert isinstance(message, httpbody_pb2.HttpBody)
+
+
+@pytest.mark.asyncio
+async def test_chat_completions_async_from_dict():
+ await test_chat_completions_async(request_type=dict)
+
+
+def test_chat_completions_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.ChatCompletionsRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ client.chat_completions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_chat_completions_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = prediction_service.ChatCompletionsRequest()
+
+ request.endpoint = "endpoint_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()])
+ await client.chat_completions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "endpoint=endpoint_value",
+ ) in kw["metadata"]
+
+
+def test_chat_completions_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.chat_completions(
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].endpoint
+ mock_val = "endpoint_value"
+ assert arg == mock_val
+ arg = args[0].http_body
+ mock_val = httpbody_pb2.HttpBody(content_type="content_type_value")
+ assert arg == mock_val
+
+
+def test_chat_completions_flattened_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.chat_completions(
+ prediction_service.ChatCompletionsRequest(),
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_chat_completions_flattened_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.chat_completions(
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].endpoint
+ mock_val = "endpoint_value"
+ assert arg == mock_val
+ arg = args[0].http_body
+ mock_val = httpbody_pb2.HttpBody(content_type="content_type_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_chat_completions_flattened_error_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.chat_completions(
+ prediction_service.ChatCompletionsRequest(),
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+
+def test_predict_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.predict in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.predict] = mock_rpc
+
+ request = {}
+ client.predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_predict_rest_required_fields(request_type=prediction_service.PredictRequest):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["endpoint"] = "endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "endpoint" in jsonified_request
+ assert jsonified_request["endpoint"] == "endpoint_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.PredictResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.PredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.predict(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_predict_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.predict._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "endpoint",
+ "instances",
+ )
+ )
+ )
+
+
+def test_predict_rest_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.PredictResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "endpoint": "projects/sample1/locations/sample2/endpoints/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = prediction_service.PredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.predict(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:predict"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_predict_rest_flattened_error(transport: str = "rest"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.predict(
+ prediction_service.PredictRequest(),
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+ )
+
+
+def test_raw_predict_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.raw_predict in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.raw_predict] = mock_rpc
+
+ request = {}
+ client.raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_raw_predict_rest_required_fields(
+ request_type=prediction_service.RawPredictRequest,
+):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).raw_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["endpoint"] = "endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).raw_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "endpoint" in jsonified_request
+ assert jsonified_request["endpoint"] == "endpoint_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.raw_predict(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_raw_predict_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.raw_predict._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("endpoint",)))
+
+
+def test_raw_predict_rest_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "endpoint": "projects/sample1/locations/sample2/endpoints/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.raw_predict(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:rawPredict"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_raw_predict_rest_flattened_error(transport: str = "rest"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.raw_predict(
+ prediction_service.RawPredictRequest(),
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+
+def test_stream_raw_predict_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.stream_raw_predict in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.stream_raw_predict
+ ] = mock_rpc
+
+ request = {}
+ client.stream_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.stream_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_stream_raw_predict_rest_required_fields(
+ request_type=prediction_service.StreamRawPredictRequest,
+):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).stream_raw_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["endpoint"] = "endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).stream_raw_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "endpoint" in jsonified_request
+ assert jsonified_request["endpoint"] == "endpoint_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ with mock.patch.object(response_value, "iter_content") as iter_content:
+ iter_content.return_value = iter(json_return_value)
+ response = client.stream_raw_predict(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_stream_raw_predict_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.stream_raw_predict._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("endpoint",)))
+
+
+def test_stream_raw_predict_rest_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "endpoint": "projects/sample1/locations/sample2/endpoints/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ with mock.patch.object(response_value, "iter_content") as iter_content:
+ iter_content.return_value = iter(json_return_value)
+ client.stream_raw_predict(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:streamRawPredict"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_stream_raw_predict_rest_flattened_error(transport: str = "rest"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.stream_raw_predict(
+ prediction_service.StreamRawPredictRequest(),
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+
+def test_direct_predict_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.direct_predict in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.direct_predict] = mock_rpc
+
+ request = {}
+ client.direct_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.direct_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_direct_predict_rest_required_fields(
+ request_type=prediction_service.DirectPredictRequest,
+):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).direct_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["endpoint"] = "endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).direct_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "endpoint" in jsonified_request
+ assert jsonified_request["endpoint"] == "endpoint_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.DirectPredictResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.DirectPredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.direct_predict(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_direct_predict_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.direct_predict._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("endpoint",)))
+
+
+def test_direct_raw_predict_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.direct_raw_predict in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.direct_raw_predict
+ ] = mock_rpc
+
+ request = {}
+ client.direct_raw_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.direct_raw_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_direct_raw_predict_rest_required_fields(
+ request_type=prediction_service.DirectRawPredictRequest,
+):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).direct_raw_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["endpoint"] = "endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).direct_raw_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "endpoint" in jsonified_request
+ assert jsonified_request["endpoint"] == "endpoint_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.DirectRawPredictResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.DirectRawPredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.direct_raw_predict(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_direct_raw_predict_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.direct_raw_predict._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("endpoint",)))
+
+
+def test_stream_direct_predict_rest_no_http_options():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = prediction_service.StreamDirectPredictRequest()
+ requests = [request]
+ with pytest.raises(RuntimeError):
+ client.stream_direct_predict(requests)
+
+
+def test_stream_direct_raw_predict_rest_no_http_options():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = prediction_service.StreamDirectRawPredictRequest()
+ requests = [request]
+ with pytest.raises(RuntimeError):
+ client.stream_direct_raw_predict(requests)
+
+
+def test_streaming_predict_rest_no_http_options():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = prediction_service.StreamingPredictRequest()
+ requests = [request]
+ with pytest.raises(RuntimeError):
+ client.streaming_predict(requests)
+
+
+def test_server_streaming_predict_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.server_streaming_predict
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.server_streaming_predict
+ ] = mock_rpc
+
+ request = {}
+ client.server_streaming_predict(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.server_streaming_predict(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_server_streaming_predict_rest_required_fields(
+ request_type=prediction_service.StreamingPredictRequest,
+):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).server_streaming_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["endpoint"] = "endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).server_streaming_predict._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "endpoint" in jsonified_request
+ assert jsonified_request["endpoint"] == "endpoint_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.StreamingPredictResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.StreamingPredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ with mock.patch.object(response_value, "iter_content") as iter_content:
+ iter_content.return_value = iter(json_return_value)
+ response = client.server_streaming_predict(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_server_streaming_predict_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.server_streaming_predict._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("endpoint",)))
+
+
+def test_streaming_raw_predict_rest_no_http_options():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = prediction_service.StreamingRawPredictRequest()
+ requests = [request]
+ with pytest.raises(RuntimeError):
+ client.streaming_raw_predict(requests)
+
+
+def test_explain_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.explain in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.explain] = mock_rpc
+
+ request = {}
+ client.explain(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.explain(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_explain_rest_required_fields(request_type=prediction_service.ExplainRequest):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).explain._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["endpoint"] = "endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).explain._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "endpoint" in jsonified_request
+ assert jsonified_request["endpoint"] == "endpoint_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.ExplainResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.ExplainResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.explain(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_explain_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.explain._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "endpoint",
+ "instances",
+ )
+ )
+ )
+
+
+def test_explain_rest_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.ExplainResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "endpoint": "projects/sample1/locations/sample2/endpoints/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+ deployed_model_id="deployed_model_id_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = prediction_service.ExplainResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.explain(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:explain"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_explain_rest_flattened_error(transport: str = "rest"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.explain(
+ prediction_service.ExplainRequest(),
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+ deployed_model_id="deployed_model_id_value",
+ )
+
+
+def test_count_tokens_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.count_tokens in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.count_tokens] = mock_rpc
+
+ request = {}
+ client.count_tokens(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.count_tokens(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_count_tokens_rest_required_fields(
+ request_type=prediction_service.CountTokensRequest,
+):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).count_tokens._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["endpoint"] = "endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).count_tokens._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "endpoint" in jsonified_request
+ assert jsonified_request["endpoint"] == "endpoint_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.CountTokensResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.CountTokensResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.count_tokens(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_count_tokens_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.count_tokens._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("endpoint",)))
+
+
+def test_count_tokens_rest_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.CountTokensResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "endpoint": "projects/sample1/locations/sample2/endpoints/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = prediction_service.CountTokensResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.count_tokens(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:countTokens"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_count_tokens_rest_flattened_error(transport: str = "rest"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.count_tokens(
+ prediction_service.CountTokensRequest(),
+ endpoint="endpoint_value",
+ instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+ )
+
+
+def test_generate_content_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.generate_content in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.generate_content
+ ] = mock_rpc
+
+ request = {}
+ client.generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.generate_content(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_generate_content_rest_required_fields(
+ request_type=prediction_service.GenerateContentRequest,
+):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["model"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).generate_content._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["model"] = "model_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).generate_content._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "model" in jsonified_request
+ assert jsonified_request["model"] == "model_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.GenerateContentResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.GenerateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.generate_content(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_generate_content_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.generate_content._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "model",
+ "contents",
+ )
+ )
+ )
+
+
+def test_generate_content_rest_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.GenerateContentResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "model": "projects/sample1/locations/sample2/endpoints/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = prediction_service.GenerateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.generate_content(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{model=projects/*/locations/*/endpoints/*}:generateContent"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_generate_content_rest_flattened_error(transport: str = "rest"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.generate_content(
+ prediction_service.GenerateContentRequest(),
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+
+def test_stream_generate_content_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.stream_generate_content
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.stream_generate_content
+ ] = mock_rpc
+
+ request = {}
+ client.stream_generate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.stream_generate_content(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_stream_generate_content_rest_required_fields(
+ request_type=prediction_service.GenerateContentRequest,
+):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["model"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).stream_generate_content._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["model"] = "model_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).stream_generate_content._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "model" in jsonified_request
+ assert jsonified_request["model"] == "model_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.GenerateContentResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.GenerateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ with mock.patch.object(response_value, "iter_content") as iter_content:
+ iter_content.return_value = iter(json_return_value)
+ response = client.stream_generate_content(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_stream_generate_content_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.stream_generate_content._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "model",
+ "contents",
+ )
+ )
+ )
+
+
+def test_stream_generate_content_rest_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.GenerateContentResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "model": "projects/sample1/locations/sample2/endpoints/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = prediction_service.GenerateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ with mock.patch.object(response_value, "iter_content") as iter_content:
+ iter_content.return_value = iter(json_return_value)
+ client.stream_generate_content(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{model=projects/*/locations/*/endpoints/*}:streamGenerateContent"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_stream_generate_content_rest_flattened_error(transport: str = "rest"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.stream_generate_content(
+ prediction_service.GenerateContentRequest(),
+ model="model_value",
+ contents=[content.Content(role="role_value")],
+ )
+
+
+def test_chat_completions_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.chat_completions in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.chat_completions
+ ] = mock_rpc
+
+ request = {}
+ client.chat_completions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.chat_completions(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_chat_completions_rest_required_fields(
+ request_type=prediction_service.ChatCompletionsRequest,
+):
+ transport_class = transports.PredictionServiceRestTransport
+
+ request_init = {}
+ request_init["endpoint"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).chat_completions._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["endpoint"] = "endpoint_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).chat_completions._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "endpoint" in jsonified_request
+ assert jsonified_request["endpoint"] == "endpoint_value"
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ with mock.patch.object(response_value, "iter_content") as iter_content:
+ iter_content.return_value = iter(json_return_value)
+ response = client.chat_completions(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_chat_completions_rest_unset_required_fields():
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.chat_completions._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("endpoint",)))
+
+
+def test_chat_completions_rest_flattened():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "endpoint": "projects/sample1/locations/sample2/endpoints/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ with mock.patch.object(response_value, "iter_content") as iter_content:
+ iter_content.return_value = iter(json_return_value)
+ client.chat_completions(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}/chat/completions"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_chat_completions_rest_flattened_error(transport: str = "rest"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.chat_completions(
+ prediction_service.ChatCompletionsRequest(),
+ endpoint="endpoint_value",
+ http_body=httpbody_pb2.HttpBody(content_type="content_type_value"),
+ )
+
+
+def test_stream_direct_predict_rest_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # Since a `google.api.http` annotation is required for using a rest transport
+ # method, this should error.
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ client.stream_direct_predict({})
+ assert "Method StreamDirectPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+def test_stream_direct_raw_predict_rest_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # Since a `google.api.http` annotation is required for using a rest transport
+ # method, this should error.
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ client.stream_direct_raw_predict({})
+ assert "Method StreamDirectRawPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+def test_streaming_predict_rest_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # Since a `google.api.http` annotation is required for using a rest transport
+ # method, this should error.
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ client.streaming_predict({})
+ assert "Method StreamingPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+def test_streaming_raw_predict_rest_error():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # Since a `google.api.http` annotation is required for using a rest transport
+ # method, this should error.
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ client.streaming_raw_predict({})
+ assert "Method StreamingRawPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.PredictionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.PredictionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PredictionServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.PredictionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = PredictionServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = PredictionServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.PredictionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PredictionServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.PredictionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = PredictionServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.PredictionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.PredictionServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ transports.PredictionServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = PredictionServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_predict_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
+ call.return_value = prediction_service.PredictResponse()
+ client.predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.PredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_raw_predict_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ call.return_value = httpbody_pb2.HttpBody()
+ client.raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.RawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_stream_raw_predict_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ client.stream_raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.StreamRawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_direct_predict_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.direct_predict), "__call__") as call:
+ call.return_value = prediction_service.DirectPredictResponse()
+ client.direct_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.DirectPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_direct_raw_predict_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.direct_raw_predict), "__call__"
+ ) as call:
+ call.return_value = prediction_service.DirectRawPredictResponse()
+ client.direct_raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.DirectRawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_server_streaming_predict_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.server_streaming_predict), "__call__"
+ ) as call:
+ call.return_value = iter([prediction_service.StreamingPredictResponse()])
+ client.server_streaming_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.StreamingPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_explain_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.explain), "__call__") as call:
+ call.return_value = prediction_service.ExplainResponse()
+ client.explain(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.ExplainRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_count_tokens_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ call.return_value = prediction_service.CountTokensResponse()
+ client.count_tokens(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.CountTokensRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_generate_content_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ call.return_value = prediction_service.GenerateContentResponse()
+ client.generate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.GenerateContentRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_stream_generate_content_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ call.return_value = iter([prediction_service.GenerateContentResponse()])
+ client.stream_generate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.GenerateContentRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_chat_completions_empty_call_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ client.chat_completions(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.ChatCompletionsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = PredictionServiceAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_predict_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.PredictResponse(
+ deployed_model_id="deployed_model_id_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ model_display_name="model_display_name_value",
+ )
+ )
+ await client.predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.PredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_raw_predict_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+ )
+ await client.raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.RawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_stream_raw_predict_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()])
+ await client.stream_raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.StreamRawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_direct_predict_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.direct_predict), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.DirectPredictResponse()
+ )
+ await client.direct_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.DirectPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_direct_raw_predict_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.direct_raw_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.DirectRawPredictResponse(
+ output=b"output_blob",
+ )
+ )
+ await client.direct_raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.DirectRawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_server_streaming_predict_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.server_streaming_predict), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.StreamingPredictResponse()]
+ )
+ await client.server_streaming_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.StreamingPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_explain_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.explain), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.ExplainResponse(
+ deployed_model_id="deployed_model_id_value",
+ )
+ )
+ await client.explain(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.ExplainRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_count_tokens_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.CountTokensResponse(
+ total_tokens=1303,
+ total_billable_characters=2617,
+ )
+ )
+ await client.count_tokens(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.CountTokensRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_generate_content_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ prediction_service.GenerateContentResponse(
+ model_version="model_version_value",
+ )
+ )
+ await client.generate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.GenerateContentRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_stream_generate_content_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[prediction_service.GenerateContentResponse()]
+ )
+ await client.stream_generate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.GenerateContentRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_chat_completions_empty_call_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()])
+ await client.chat_completions(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.ChatCompletionsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = PredictionServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_predict_rest_bad_request(request_type=prediction_service.PredictRequest):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.predict(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.PredictRequest,
+ dict,
+ ],
+)
+def test_predict_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.PredictResponse(
+ deployed_model_id="deployed_model_id_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ model_display_name="model_display_name_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.PredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.predict(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.PredictResponse)
+ assert response.deployed_model_id == "deployed_model_id_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.model_display_name == "model_display_name_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_predict_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_predict"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.PredictRequest.pb(
+ prediction_service.PredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.PredictResponse.to_json(
+ prediction_service.PredictResponse()
+ )
+ req.return_value.content = return_value
+
+ request = prediction_service.PredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.PredictResponse()
+
+ client.predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_raw_predict_rest_bad_request(
+ request_type=prediction_service.RawPredictRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.raw_predict(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.RawPredictRequest,
+ dict,
+ ],
+)
+def test_raw_predict_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.raw_predict(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_raw_predict_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_raw_predict"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_raw_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.RawPredictRequest.pb(
+ prediction_service.RawPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(httpbody_pb2.HttpBody())
+ req.return_value.content = return_value
+
+ request = prediction_service.RawPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = httpbody_pb2.HttpBody()
+
+ client.raw_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_stream_raw_predict_rest_bad_request(
+ request_type=prediction_service.StreamRawPredictRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.stream_raw_predict(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamRawPredictRequest,
+ dict,
+ ],
+)
+def test_stream_raw_predict_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.stream_raw_predict(request)
+
+ assert isinstance(response, Iterable)
+ response = next(response)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_stream_raw_predict_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_stream_raw_predict"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_stream_raw_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.StreamRawPredictRequest.pb(
+ prediction_service.StreamRawPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(httpbody_pb2.HttpBody())
+ req.return_value.iter_content = mock.Mock(return_value=iter(return_value))
+
+ request = prediction_service.StreamRawPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = httpbody_pb2.HttpBody()
+
+ client.stream_raw_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_direct_predict_rest_bad_request(
+ request_type=prediction_service.DirectPredictRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.direct_predict(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.DirectPredictRequest,
+ dict,
+ ],
+)
+def test_direct_predict_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.DirectPredictResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.DirectPredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.direct_predict(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.DirectPredictResponse)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_direct_predict_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_direct_predict"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_direct_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.DirectPredictRequest.pb(
+ prediction_service.DirectPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.DirectPredictResponse.to_json(
+ prediction_service.DirectPredictResponse()
+ )
+ req.return_value.content = return_value
+
+ request = prediction_service.DirectPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.DirectPredictResponse()
+
+ client.direct_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_direct_raw_predict_rest_bad_request(
+ request_type=prediction_service.DirectRawPredictRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.direct_raw_predict(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.DirectRawPredictRequest,
+ dict,
+ ],
+)
+def test_direct_raw_predict_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.DirectRawPredictResponse(
+ output=b"output_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.DirectRawPredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.direct_raw_predict(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.DirectRawPredictResponse)
+ assert response.output == b"output_blob"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_direct_raw_predict_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_direct_raw_predict"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_direct_raw_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.DirectRawPredictRequest.pb(
+ prediction_service.DirectRawPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.DirectRawPredictResponse.to_json(
+ prediction_service.DirectRawPredictResponse()
+ )
+ req.return_value.content = return_value
+
+ request = prediction_service.DirectRawPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.DirectRawPredictResponse()
+
+ client.direct_raw_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_stream_direct_predict_rest_error():
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ client.stream_direct_predict({})
+ assert "Method StreamDirectPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+def test_stream_direct_raw_predict_rest_error():
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ client.stream_direct_raw_predict({})
+ assert "Method StreamDirectRawPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+def test_streaming_predict_rest_error():
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ client.streaming_predict({})
+ assert "Method StreamingPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+def test_server_streaming_predict_rest_bad_request(
+ request_type=prediction_service.StreamingPredictRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.server_streaming_predict(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamingPredictRequest,
+ dict,
+ ],
+)
+def test_server_streaming_predict_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.StreamingPredictResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.StreamingPredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.server_streaming_predict(request)
+
+ assert isinstance(response, Iterable)
+ response = next(response)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.StreamingPredictResponse)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_server_streaming_predict_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_server_streaming_predict"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_server_streaming_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.StreamingPredictRequest.pb(
+ prediction_service.StreamingPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.StreamingPredictResponse.to_json(
+ prediction_service.StreamingPredictResponse()
+ )
+ req.return_value.iter_content = mock.Mock(return_value=iter(return_value))
+
+ request = prediction_service.StreamingPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.StreamingPredictResponse()
+
+ client.server_streaming_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_streaming_raw_predict_rest_error():
+
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ client.streaming_raw_predict({})
+ assert "Method StreamingRawPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+def test_explain_rest_bad_request(request_type=prediction_service.ExplainRequest):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.explain(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.ExplainRequest,
+ dict,
+ ],
+)
+def test_explain_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.ExplainResponse(
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.ExplainResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.explain(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.ExplainResponse)
+ assert response.deployed_model_id == "deployed_model_id_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_explain_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_explain"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_explain"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.ExplainRequest.pb(
+ prediction_service.ExplainRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.ExplainResponse.to_json(
+ prediction_service.ExplainResponse()
+ )
+ req.return_value.content = return_value
+
+ request = prediction_service.ExplainRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.ExplainResponse()
+
+ client.explain(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_count_tokens_rest_bad_request(
+ request_type=prediction_service.CountTokensRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.count_tokens(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.CountTokensRequest,
+ dict,
+ ],
+)
+def test_count_tokens_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.CountTokensResponse(
+ total_tokens=1303,
+ total_billable_characters=2617,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.CountTokensResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.count_tokens(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.CountTokensResponse)
+ assert response.total_tokens == 1303
+ assert response.total_billable_characters == 2617
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_count_tokens_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_count_tokens"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_count_tokens"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.CountTokensRequest.pb(
+ prediction_service.CountTokensRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.CountTokensResponse.to_json(
+ prediction_service.CountTokensResponse()
+ )
+ req.return_value.content = return_value
+
+ request = prediction_service.CountTokensRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.CountTokensResponse()
+
+ client.count_tokens(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_generate_content_rest_bad_request(
+ request_type=prediction_service.GenerateContentRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"model": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.generate_content(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.GenerateContentRequest,
+ dict,
+ ],
+)
+def test_generate_content_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"model": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.GenerateContentResponse(
+ model_version="model_version_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.GenerateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.generate_content(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.GenerateContentResponse)
+ assert response.model_version == "model_version_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_generate_content_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_generate_content"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_generate_content"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.GenerateContentRequest.pb(
+ prediction_service.GenerateContentRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.GenerateContentResponse.to_json(
+ prediction_service.GenerateContentResponse()
+ )
+ req.return_value.content = return_value
+
+ request = prediction_service.GenerateContentRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.GenerateContentResponse()
+
+ client.generate_content(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_stream_generate_content_rest_bad_request(
+ request_type=prediction_service.GenerateContentRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"model": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.stream_generate_content(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.GenerateContentRequest,
+ dict,
+ ],
+)
+def test_stream_generate_content_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"model": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.GenerateContentResponse(
+ model_version="model_version_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.GenerateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.stream_generate_content(request)
+
+ assert isinstance(response, Iterable)
+ response = next(response)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.GenerateContentResponse)
+ assert response.model_version == "model_version_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_stream_generate_content_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_stream_generate_content"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_stream_generate_content"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.GenerateContentRequest.pb(
+ prediction_service.GenerateContentRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.GenerateContentResponse.to_json(
+ prediction_service.GenerateContentResponse()
+ )
+ req.return_value.iter_content = mock.Mock(return_value=iter(return_value))
+
+ request = prediction_service.GenerateContentRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.GenerateContentResponse()
+
+ client.stream_generate_content(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_chat_completions_rest_bad_request(
+ request_type=prediction_service.ChatCompletionsRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.chat_completions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.ChatCompletionsRequest,
+ dict,
+ ],
+)
+def test_chat_completions_rest_call_success(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request_init["http_body"] = {
+ "content_type": "content_type_value",
+ "data": b"data_blob",
+ "extensions": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = prediction_service.ChatCompletionsRequest.meta.fields["http_body"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["http_body"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["http_body"][field])):
+ del request_init["http_body"][field][i][subfield]
+ else:
+ del request_init["http_body"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.chat_completions(request)
+
+ assert isinstance(response, Iterable)
+ response = next(response)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_chat_completions_rest_interceptors(null_interceptor):
+ transport = transports.PredictionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.PredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "post_chat_completions"
+ ) as post, mock.patch.object(
+ transports.PredictionServiceRestInterceptor, "pre_chat_completions"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.ChatCompletionsRequest.pb(
+ prediction_service.ChatCompletionsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(httpbody_pb2.HttpBody())
+ req.return_value.iter_content = mock.Mock(return_value=iter(return_value))
+
+ request = prediction_service.ChatCompletionsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = httpbody_pb2.HttpBody()
+
+ client.chat_completions(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_predict_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
+ client.predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.PredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_raw_predict_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ client.raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.RawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_stream_raw_predict_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ client.stream_raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.StreamRawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_direct_predict_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.direct_predict), "__call__") as call:
+ client.direct_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.DirectPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_direct_raw_predict_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.direct_raw_predict), "__call__"
+ ) as call:
+ client.direct_raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.DirectRawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_server_streaming_predict_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.server_streaming_predict), "__call__"
+ ) as call:
+ client.server_streaming_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.StreamingPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_explain_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.explain), "__call__") as call:
+ client.explain(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.ExplainRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_count_tokens_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ client.count_tokens(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.CountTokensRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_generate_content_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ client.generate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.GenerateContentRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_stream_generate_content_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ client.stream_generate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.GenerateContentRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_chat_completions_empty_call_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ client.chat_completions(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.ChatCompletionsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = PredictionServiceAsyncClient.get_transport_class("rest_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_predict_rest_asyncio_bad_request(
+ request_type=prediction_service.PredictRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.predict(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.PredictRequest,
+ dict,
+ ],
+)
+async def test_predict_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.PredictResponse(
+ deployed_model_id="deployed_model_id_value",
+ model="model_value",
+ model_version_id="model_version_id_value",
+ model_display_name="model_display_name_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.PredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.predict(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.PredictResponse)
+ assert response.deployed_model_id == "deployed_model_id_value"
+ assert response.model == "model_value"
+ assert response.model_version_id == "model_version_id_value"
+ assert response.model_display_name == "model_display_name_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_predict_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_predict"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.PredictRequest.pb(
+ prediction_service.PredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.PredictResponse.to_json(
+ prediction_service.PredictResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = prediction_service.PredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.PredictResponse()
+
+ await client.predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_raw_predict_rest_asyncio_bad_request(
+ request_type=prediction_service.RawPredictRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.raw_predict(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.RawPredictRequest,
+ dict,
+ ],
+)
+async def test_raw_predict_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.raw_predict(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_raw_predict_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_raw_predict"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_raw_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.RawPredictRequest.pb(
+ prediction_service.RawPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(httpbody_pb2.HttpBody())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = prediction_service.RawPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = httpbody_pb2.HttpBody()
+
+ await client.raw_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_stream_raw_predict_rest_asyncio_bad_request(
+ request_type=prediction_service.StreamRawPredictRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.stream_raw_predict(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamRawPredictRequest,
+ dict,
+ ],
+)
+async def test_stream_raw_predict_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.content.return_value = mock_async_gen(json_return_value)
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.stream_raw_predict(request)
+
+ assert isinstance(response, AsyncIterable)
+ response = await response.__anext__()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_stream_raw_predict_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_stream_raw_predict"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_stream_raw_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.StreamRawPredictRequest.pb(
+ prediction_service.StreamRawPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(httpbody_pb2.HttpBody())
+ req.return_value.content.return_value = mock_async_gen(return_value)
+
+ request = prediction_service.StreamRawPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = httpbody_pb2.HttpBody()
+
+ await client.stream_raw_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_direct_predict_rest_asyncio_bad_request(
+ request_type=prediction_service.DirectPredictRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.direct_predict(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.DirectPredictRequest,
+ dict,
+ ],
+)
+async def test_direct_predict_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.DirectPredictResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.DirectPredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.direct_predict(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.DirectPredictResponse)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_direct_predict_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_direct_predict"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_direct_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.DirectPredictRequest.pb(
+ prediction_service.DirectPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.DirectPredictResponse.to_json(
+ prediction_service.DirectPredictResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = prediction_service.DirectPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.DirectPredictResponse()
+
+ await client.direct_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_direct_raw_predict_rest_asyncio_bad_request(
+ request_type=prediction_service.DirectRawPredictRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.direct_raw_predict(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.DirectRawPredictRequest,
+ dict,
+ ],
+)
+async def test_direct_raw_predict_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.DirectRawPredictResponse(
+ output=b"output_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.DirectRawPredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.direct_raw_predict(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.DirectRawPredictResponse)
+ assert response.output == b"output_blob"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_direct_raw_predict_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_direct_raw_predict"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_direct_raw_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.DirectRawPredictRequest.pb(
+ prediction_service.DirectRawPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.DirectRawPredictResponse.to_json(
+ prediction_service.DirectRawPredictResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = prediction_service.DirectRawPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.DirectRawPredictResponse()
+
+ await client.direct_raw_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_stream_direct_predict_rest_asyncio_error():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ await client.stream_direct_predict({})
+ assert "Method StreamDirectPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+@pytest.mark.asyncio
+async def test_stream_direct_raw_predict_rest_asyncio_error():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ await client.stream_direct_raw_predict({})
+ assert "Method StreamDirectRawPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+@pytest.mark.asyncio
+async def test_streaming_predict_rest_asyncio_error():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ await client.streaming_predict({})
+ assert "Method StreamingPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+@pytest.mark.asyncio
+async def test_server_streaming_predict_rest_asyncio_bad_request(
+ request_type=prediction_service.StreamingPredictRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.server_streaming_predict(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.StreamingPredictRequest,
+ dict,
+ ],
+)
+async def test_server_streaming_predict_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.StreamingPredictResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.StreamingPredictResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.content.return_value = mock_async_gen(json_return_value)
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.server_streaming_predict(request)
+
+ assert isinstance(response, AsyncIterable)
+ response = await response.__anext__()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.StreamingPredictResponse)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_server_streaming_predict_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor,
+ "post_server_streaming_predict",
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_server_streaming_predict"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.StreamingPredictRequest.pb(
+ prediction_service.StreamingPredictRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.StreamingPredictResponse.to_json(
+ prediction_service.StreamingPredictResponse()
+ )
+ req.return_value.content.return_value = mock_async_gen(return_value)
+
+ request = prediction_service.StreamingPredictRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.StreamingPredictResponse()
+
+ await client.server_streaming_predict(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_streaming_raw_predict_rest_asyncio_error():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ with pytest.raises(NotImplementedError) as not_implemented_error:
+ await client.streaming_raw_predict({})
+ assert "Method StreamingRawPredict is not available over REST transport" in str(
+ not_implemented_error.value
+ )
+
+
+@pytest.mark.asyncio
+async def test_explain_rest_asyncio_bad_request(
+ request_type=prediction_service.ExplainRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.explain(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.ExplainRequest,
+ dict,
+ ],
+)
+async def test_explain_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.ExplainResponse(
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.ExplainResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.explain(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.ExplainResponse)
+ assert response.deployed_model_id == "deployed_model_id_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_explain_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_explain"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_explain"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.ExplainRequest.pb(
+ prediction_service.ExplainRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.ExplainResponse.to_json(
+ prediction_service.ExplainResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = prediction_service.ExplainRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.ExplainResponse()
+
+ await client.explain(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_count_tokens_rest_asyncio_bad_request(
+ request_type=prediction_service.CountTokensRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.count_tokens(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.CountTokensRequest,
+ dict,
+ ],
+)
+async def test_count_tokens_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.CountTokensResponse(
+ total_tokens=1303,
+ total_billable_characters=2617,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.CountTokensResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.count_tokens(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.CountTokensResponse)
+ assert response.total_tokens == 1303
+ assert response.total_billable_characters == 2617
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_count_tokens_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_count_tokens"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_count_tokens"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.CountTokensRequest.pb(
+ prediction_service.CountTokensRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.CountTokensResponse.to_json(
+ prediction_service.CountTokensResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = prediction_service.CountTokensRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.CountTokensResponse()
+
+ await client.count_tokens(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_generate_content_rest_asyncio_bad_request(
+ request_type=prediction_service.GenerateContentRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"model": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.generate_content(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.GenerateContentRequest,
+ dict,
+ ],
+)
+async def test_generate_content_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"model": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.GenerateContentResponse(
+ model_version="model_version_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.GenerateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.generate_content(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.GenerateContentResponse)
+ assert response.model_version == "model_version_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_generate_content_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_generate_content"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_generate_content"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.GenerateContentRequest.pb(
+ prediction_service.GenerateContentRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.GenerateContentResponse.to_json(
+ prediction_service.GenerateContentResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = prediction_service.GenerateContentRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.GenerateContentResponse()
+
+ await client.generate_content(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_stream_generate_content_rest_asyncio_bad_request(
+ request_type=prediction_service.GenerateContentRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"model": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.stream_generate_content(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.GenerateContentRequest,
+ dict,
+ ],
+)
+async def test_stream_generate_content_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"model": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = prediction_service.GenerateContentResponse(
+ model_version="model_version_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = prediction_service.GenerateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.content.return_value = mock_async_gen(json_return_value)
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.stream_generate_content(request)
+
+ assert isinstance(response, AsyncIterable)
+ response = await response.__anext__()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.GenerateContentResponse)
+ assert response.model_version == "model_version_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_stream_generate_content_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_stream_generate_content"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_stream_generate_content"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.GenerateContentRequest.pb(
+ prediction_service.GenerateContentRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = prediction_service.GenerateContentResponse.to_json(
+ prediction_service.GenerateContentResponse()
+ )
+ req.return_value.content.return_value = mock_async_gen(return_value)
+
+ request = prediction_service.GenerateContentRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = prediction_service.GenerateContentResponse()
+
+ await client.stream_generate_content(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_chat_completions_rest_asyncio_bad_request(
+ request_type=prediction_service.ChatCompletionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.chat_completions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ prediction_service.ChatCompletionsRequest,
+ dict,
+ ],
+)
+async def test_chat_completions_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"}
+ request_init["http_body"] = {
+ "content_type": "content_type_value",
+ "data": b"data_blob",
+ "extensions": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = prediction_service.ChatCompletionsRequest.meta.fields["http_body"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["http_body"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["http_body"][field])):
+ del request_init["http_body"][field][i][subfield]
+ else:
+ del request_init["http_body"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.content.return_value = mock_async_gen(json_return_value)
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.chat_completions(request)
+
+ assert isinstance(response, AsyncIterable)
+ response = await response.__anext__()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_chat_completions_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncPredictionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncPredictionServiceRestInterceptor(),
+ )
+ client = PredictionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "post_chat_completions"
+ ) as post, mock.patch.object(
+ transports.AsyncPredictionServiceRestInterceptor, "pre_chat_completions"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = prediction_service.ChatCompletionsRequest.pb(
+ prediction_service.ChatCompletionsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(httpbody_pb2.HttpBody())
+ req.return_value.content.return_value = mock_async_gen(return_value)
+
+ request = prediction_service.ChatCompletionsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = httpbody_pb2.HttpBody()
+
+ await client.chat_completions(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_predict_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
+ await client.predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.PredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_raw_predict_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call:
+ await client.raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.RawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_stream_raw_predict_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_raw_predict), "__call__"
+ ) as call:
+ await client.stream_raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.StreamRawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_direct_predict_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.direct_predict), "__call__") as call:
+ await client.direct_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.DirectPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_direct_raw_predict_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.direct_raw_predict), "__call__"
+ ) as call:
+ await client.direct_raw_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.DirectRawPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_server_streaming_predict_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.server_streaming_predict), "__call__"
+ ) as call:
+ await client.server_streaming_predict(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.StreamingPredictRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_explain_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.explain), "__call__") as call:
+ await client.explain(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.ExplainRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_count_tokens_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.count_tokens), "__call__") as call:
+ await client.count_tokens(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.CountTokensRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_generate_content_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.generate_content), "__call__") as call:
+ await client.generate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.GenerateContentRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_stream_generate_content_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_generate_content), "__call__"
+ ) as call:
+ await client.stream_generate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.GenerateContentRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_chat_completions_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call:
+ await client.chat_completions(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = prediction_service.ChatCompletionsRequest()
+
+ assert args[0] == request_msg
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.PredictionServiceGrpcTransport,
+ )
+
+
+def test_prediction_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.PredictionServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_prediction_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.PredictionServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "predict",
+ "raw_predict",
+ "stream_raw_predict",
+ "direct_predict",
+ "direct_raw_predict",
+ "stream_direct_predict",
+ "stream_direct_raw_predict",
+ "streaming_predict",
+ "server_streaming_predict",
+ "streaming_raw_predict",
+ "explain",
+ "count_tokens",
+ "generate_content",
+ "stream_generate_content",
+ "chat_completions",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_prediction_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PredictionServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ quota_project_id="octopus",
+ )
+
+
+def test_prediction_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PredictionServiceTransport()
+ adc.assert_called_once()
+
+
+def test_prediction_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ PredictionServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_prediction_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ transports.PredictionServiceRestTransport,
+ ],
+)
+def test_prediction_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.PredictionServiceGrpcTransport, grpc_helpers),
+ (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_prediction_service_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport_class):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_prediction_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.PredictionServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_prediction_service_host_no_port(transport_name):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_prediction_service_host_with_port(transport_name):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_prediction_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = PredictionServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = PredictionServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.predict._session
+ session2 = client2.transport.predict._session
+ assert session1 != session2
+ session1 = client1.transport.raw_predict._session
+ session2 = client2.transport.raw_predict._session
+ assert session1 != session2
+ session1 = client1.transport.stream_raw_predict._session
+ session2 = client2.transport.stream_raw_predict._session
+ assert session1 != session2
+ session1 = client1.transport.direct_predict._session
+ session2 = client2.transport.direct_predict._session
+ assert session1 != session2
+ session1 = client1.transport.direct_raw_predict._session
+ session2 = client2.transport.direct_raw_predict._session
+ assert session1 != session2
+ session1 = client1.transport.stream_direct_predict._session
+ session2 = client2.transport.stream_direct_predict._session
+ assert session1 != session2
+ session1 = client1.transport.stream_direct_raw_predict._session
+ session2 = client2.transport.stream_direct_raw_predict._session
+ assert session1 != session2
+ session1 = client1.transport.streaming_predict._session
+ session2 = client2.transport.streaming_predict._session
+ assert session1 != session2
+ session1 = client1.transport.server_streaming_predict._session
+ session2 = client2.transport.server_streaming_predict._session
+ assert session1 != session2
+ session1 = client1.transport.streaming_raw_predict._session
+ session2 = client2.transport.streaming_raw_predict._session
+ assert session1 != session2
+ session1 = client1.transport.explain._session
+ session2 = client2.transport.explain._session
+ assert session1 != session2
+ session1 = client1.transport.count_tokens._session
+ session2 = client2.transport.count_tokens._session
+ assert session1 != session2
+ session1 = client1.transport.generate_content._session
+ session2 = client2.transport.generate_content._session
+ assert session1 != session2
+ session1 = client1.transport.stream_generate_content._session
+ session2 = client2.transport.stream_generate_content._session
+ assert session1 != session2
+ session1 = client1.transport.chat_completions._session
+ session2 = client2.transport.chat_completions._session
+ assert session1 != session2
+
+
+def test_prediction_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PredictionServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_prediction_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PredictionServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_prediction_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PredictionServiceGrpcTransport,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_prediction_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_cached_content_path():
+ project = "squid"
+ location = "clam"
+ cached_content = "whelk"
+ expected = "projects/{project}/locations/{location}/cachedContents/{cached_content}".format(
+ project=project,
+ location=location,
+ cached_content=cached_content,
+ )
+ actual = PredictionServiceClient.cached_content_path(
+ project, location, cached_content
+ )
+ assert expected == actual
+
+
+def test_parse_cached_content_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "cached_content": "nudibranch",
+ }
+ path = PredictionServiceClient.cached_content_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_cached_content_path(path)
+ assert expected == actual
+
+
+def test_endpoint_path():
+ project = "cuttlefish"
+ location = "mussel"
+ endpoint = "winkle"
+ expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+ project=project,
+ location=location,
+ endpoint=endpoint,
+ )
+ actual = PredictionServiceClient.endpoint_path(project, location, endpoint)
+ assert expected == actual
+
+
+def test_parse_endpoint_path():
+ expected = {
+ "project": "nautilus",
+ "location": "scallop",
+ "endpoint": "abalone",
+ }
+ path = PredictionServiceClient.endpoint_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_endpoint_path(path)
+ assert expected == actual
+
+
+def test_model_path():
+ project = "squid"
+ location = "clam"
+ model = "whelk"
+ expected = "projects/{project}/locations/{location}/models/{model}".format(
+ project=project,
+ location=location,
+ model=model,
+ )
+ actual = PredictionServiceClient.model_path(project, location, model)
+ assert expected == actual
+
+
+def test_parse_model_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "model": "nudibranch",
+ }
+ path = PredictionServiceClient.model_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_model_path(path)
+ assert expected == actual
+
+
+def test_rag_corpus_path():
+ project = "cuttlefish"
+ location = "mussel"
+ rag_corpus = "winkle"
+ expected = "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}".format(
+ project=project,
+ location=location,
+ rag_corpus=rag_corpus,
+ )
+ actual = PredictionServiceClient.rag_corpus_path(project, location, rag_corpus)
+ assert expected == actual
+
+
+def test_parse_rag_corpus_path():
+ expected = {
+ "project": "nautilus",
+ "location": "scallop",
+ "rag_corpus": "abalone",
+ }
+ path = PredictionServiceClient.rag_corpus_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_rag_corpus_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "squid"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = PredictionServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "clam",
+ }
+ path = PredictionServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "whelk"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = PredictionServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "octopus",
+ }
+ path = PredictionServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "oyster"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = PredictionServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "nudibranch",
+ }
+ path = PredictionServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "cuttlefish"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = PredictionServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "mussel",
+ }
+ path = PredictionServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "winkle"
+ location = "nautilus"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = PredictionServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ }
+ path = PredictionServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.PredictionServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.PredictionServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = PredictionServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = PredictionServiceAsyncClient(credentials=async_anonymous_credentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = PredictionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (PredictionServiceClient, transports.PredictionServiceGrpcTransport),
+ (
+ PredictionServiceAsyncClient,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ ),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..855b569109e572d78df49d86fb62b48add7ae6d7
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py
@@ -0,0 +1,6500 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api import httpbody_pb2 # type: ignore
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.reasoning_engine_execution_service import (
+ ReasoningEngineExecutionServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.reasoning_engine_execution_service import (
+ ReasoningEngineExecutionServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.reasoning_engine_execution_service import (
+ transports,
+)
+from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import any_pb2 # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert (
+ ReasoningEngineExecutionServiceClient._get_default_mtls_endpoint(None) is None
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_default_mtls_endpoint(
+ api_mtls_endpoint
+ )
+ == api_mtls_endpoint
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_default_mtls_endpoint(
+ sandbox_endpoint
+ )
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_default_mtls_endpoint(
+ sandbox_mtls_endpoint
+ )
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_default_mtls_endpoint(non_googleapi)
+ == non_googleapi
+ )
+
+
+def test__read_environment_variables():
+ assert ReasoningEngineExecutionServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert ReasoningEngineExecutionServiceClient._read_environment_variables() == (
+ True,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert ReasoningEngineExecutionServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ ReasoningEngineExecutionServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert ReasoningEngineExecutionServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert ReasoningEngineExecutionServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert ReasoningEngineExecutionServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ ReasoningEngineExecutionServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert ReasoningEngineExecutionServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert (
+ ReasoningEngineExecutionServiceClient._get_client_cert_source(None, False)
+ is None
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_client_cert_source(
+ mock_provided_cert_source, False
+ )
+ is None
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_client_cert_source(
+ mock_provided_cert_source, True
+ )
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ ReasoningEngineExecutionServiceClient._get_client_cert_source(
+ None, True
+ )
+ is mock_default_cert_source
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ ReasoningEngineExecutionServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineExecutionServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineExecutionServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineExecutionServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = ReasoningEngineExecutionServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = (
+ ReasoningEngineExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = (
+ ReasoningEngineExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+ )
+
+ assert (
+ ReasoningEngineExecutionServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == ReasoningEngineExecutionServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_api_endpoint(
+ None, None, default_universe, "auto"
+ )
+ == default_endpoint
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_api_endpoint(
+ None, None, default_universe, "always"
+ )
+ == ReasoningEngineExecutionServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == ReasoningEngineExecutionServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_api_endpoint(
+ None, None, mock_universe, "never"
+ )
+ == mock_endpoint
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_api_endpoint(
+ None, None, default_universe, "never"
+ )
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ ReasoningEngineExecutionServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ ReasoningEngineExecutionServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_universe_domain(
+ None, universe_domain_env
+ )
+ == universe_domain_env
+ )
+ assert (
+ ReasoningEngineExecutionServiceClient._get_universe_domain(None, None)
+ == ReasoningEngineExecutionServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ ReasoningEngineExecutionServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (ReasoningEngineExecutionServiceClient, "grpc"),
+ (ReasoningEngineExecutionServiceAsyncClient, "grpc_asyncio"),
+ (ReasoningEngineExecutionServiceClient, "rest"),
+ ],
+)
+def test_reasoning_engine_execution_service_client_from_service_account_info(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.ReasoningEngineExecutionServiceGrpcTransport, "grpc"),
+ (
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (transports.ReasoningEngineExecutionServiceRestTransport, "rest"),
+ ],
+)
+def test_reasoning_engine_execution_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (ReasoningEngineExecutionServiceClient, "grpc"),
+ (ReasoningEngineExecutionServiceAsyncClient, "grpc_asyncio"),
+ (ReasoningEngineExecutionServiceClient, "rest"),
+ ],
+)
+def test_reasoning_engine_execution_service_client_from_service_account_file(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_reasoning_engine_execution_service_client_get_transport_class():
+ transport = ReasoningEngineExecutionServiceClient.get_transport_class()
+ available_transports = [
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ transports.ReasoningEngineExecutionServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = ReasoningEngineExecutionServiceClient.get_transport_class("grpc")
+ assert transport == transports.ReasoningEngineExecutionServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ ReasoningEngineExecutionServiceAsyncClient,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+@mock.patch.object(
+ ReasoningEngineExecutionServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineExecutionServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineExecutionServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineExecutionServiceAsyncClient),
+)
+def test_reasoning_engine_execution_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(
+ ReasoningEngineExecutionServiceClient, "get_transport_class"
+ ) as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(
+ ReasoningEngineExecutionServiceClient, "get_transport_class"
+ ) as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ ReasoningEngineExecutionServiceAsyncClient,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ ReasoningEngineExecutionServiceAsyncClient,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceRestTransport,
+ "rest",
+ "true",
+ ),
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceRestTransport,
+ "rest",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ ReasoningEngineExecutionServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineExecutionServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineExecutionServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineExecutionServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_reasoning_engine_execution_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class",
+ [ReasoningEngineExecutionServiceClient, ReasoningEngineExecutionServiceAsyncClient],
+)
+@mock.patch.object(
+ ReasoningEngineExecutionServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(ReasoningEngineExecutionServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineExecutionServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(ReasoningEngineExecutionServiceAsyncClient),
+)
+def test_reasoning_engine_execution_service_client_get_mtls_endpoint_and_cert_source(
+ client_class,
+):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class",
+ [ReasoningEngineExecutionServiceClient, ReasoningEngineExecutionServiceAsyncClient],
+)
+@mock.patch.object(
+ ReasoningEngineExecutionServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineExecutionServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineExecutionServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineExecutionServiceAsyncClient),
+)
+def test_reasoning_engine_execution_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = ReasoningEngineExecutionServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = (
+ ReasoningEngineExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = (
+ ReasoningEngineExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ ReasoningEngineExecutionServiceAsyncClient,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+def test_reasoning_engine_execution_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ ReasoningEngineExecutionServiceAsyncClient,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceRestTransport,
+ "rest",
+ None,
+ ),
+ ],
+)
+def test_reasoning_engine_execution_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_reasoning_engine_execution_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.reasoning_engine_execution_service.transports.ReasoningEngineExecutionServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = ReasoningEngineExecutionServiceClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ ReasoningEngineExecutionServiceAsyncClient,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_reasoning_engine_execution_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_execution_service.QueryReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_query_reasoning_engine(request_type, transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+ response = client.query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, reasoning_engine_execution_service.QueryReasoningEngineResponse
+ )
+
+
+def test_query_reasoning_engine_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = reasoning_engine_execution_service.QueryReasoningEngineRequest(
+ name="name_value",
+ class_method="class_method_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.query_reasoning_engine(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[
+ 0
+ ] == reasoning_engine_execution_service.QueryReasoningEngineRequest(
+ name="name_value",
+ class_method="class_method_value",
+ )
+
+
+def test_query_reasoning_engine_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.query_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.query_reasoning_engine
+ ] = mock_rpc
+ request = {}
+ client.query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.query_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_query_reasoning_engine_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.query_reasoning_engine
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.query_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ await client.query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.query_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_query_reasoning_engine_async(
+ transport: str = "grpc_asyncio",
+ request_type=reasoning_engine_execution_service.QueryReasoningEngineRequest,
+):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+ response = await client.query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, reasoning_engine_execution_service.QueryReasoningEngineResponse
+ )
+
+
+@pytest.mark.asyncio
+async def test_query_reasoning_engine_async_from_dict():
+ await test_query_reasoning_engine_async(request_type=dict)
+
+
+def test_query_reasoning_engine_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+ client.query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_query_reasoning_engine_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+ await client.query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_stream_query_reasoning_engine(request_type, transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_query_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ response = client.stream_query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ for message in response:
+ assert isinstance(message, httpbody_pb2.HttpBody)
+
+
+def test_stream_query_reasoning_engine_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = reasoning_engine_execution_service.StreamQueryReasoningEngineRequest(
+ name="name_value",
+ class_method="class_method_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_query_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.stream_query_reasoning_engine(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[
+ 0
+ ] == reasoning_engine_execution_service.StreamQueryReasoningEngineRequest(
+ name="name_value",
+ class_method="class_method_value",
+ )
+
+
+def test_stream_query_reasoning_engine_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.stream_query_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.stream_query_reasoning_engine
+ ] = mock_rpc
+ request = {}
+ client.stream_query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.stream_query_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_query_reasoning_engine_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.stream_query_reasoning_engine
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.stream_query_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ await client.stream_query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.stream_query_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_stream_query_reasoning_engine_async(
+ transport: str = "grpc_asyncio",
+ request_type=reasoning_engine_execution_service.StreamQueryReasoningEngineRequest,
+):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_query_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()])
+ response = await client.stream_query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ message = await response.read()
+ assert isinstance(message, httpbody_pb2.HttpBody)
+
+
+@pytest.mark.asyncio
+async def test_stream_query_reasoning_engine_async_from_dict():
+ await test_stream_query_reasoning_engine_async(request_type=dict)
+
+
+def test_stream_query_reasoning_engine_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_query_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ client.stream_query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_stream_query_reasoning_engine_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_query_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()])
+ await client.stream_query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_query_reasoning_engine_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.query_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.query_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ client.query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.query_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_query_reasoning_engine_rest_required_fields(
+ request_type=reasoning_engine_execution_service.QueryReasoningEngineRequest,
+):
+ transport_class = transports.ReasoningEngineExecutionServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).query_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).query_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.query_reasoning_engine(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_query_reasoning_engine_rest_unset_required_fields():
+ transport = transports.ReasoningEngineExecutionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.query_reasoning_engine._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_stream_query_reasoning_engine_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.stream_query_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.stream_query_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ client.stream_query_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.stream_query_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_stream_query_reasoning_engine_rest_required_fields(
+ request_type=reasoning_engine_execution_service.StreamQueryReasoningEngineRequest,
+):
+ transport_class = transports.ReasoningEngineExecutionServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).stream_query_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).stream_query_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ with mock.patch.object(response_value, "iter_content") as iter_content:
+ iter_content.return_value = iter(json_return_value)
+ response = client.stream_query_reasoning_engine(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_stream_query_reasoning_engine_rest_unset_required_fields():
+ transport = transports.ReasoningEngineExecutionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.stream_query_reasoning_engine._get_unset_required_fields(
+ {}
+ )
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.ReasoningEngineExecutionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.ReasoningEngineExecutionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = ReasoningEngineExecutionServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.ReasoningEngineExecutionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = ReasoningEngineExecutionServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = ReasoningEngineExecutionServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.ReasoningEngineExecutionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = ReasoningEngineExecutionServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.ReasoningEngineExecutionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = ReasoningEngineExecutionServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.ReasoningEngineExecutionServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ transports.ReasoningEngineExecutionServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = ReasoningEngineExecutionServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_query_reasoning_engine_empty_call_grpc():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+ client.query_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_stream_query_reasoning_engine_empty_call_grpc():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_query_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = iter([httpbody_pb2.HttpBody()])
+ client.stream_query_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = (
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ )
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = ReasoningEngineExecutionServiceAsyncClient.get_transport_class(
+ "grpc_asyncio"
+ )(credentials=async_anonymous_credentials())
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_query_reasoning_engine_empty_call_grpc_asyncio():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+ await client.query_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_stream_query_reasoning_engine_empty_call_grpc_asyncio():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_query_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()])
+ await client.stream_query_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = (
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ )
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = ReasoningEngineExecutionServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_query_reasoning_engine_rest_bad_request(
+ request_type=reasoning_engine_execution_service.QueryReasoningEngineRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.query_reasoning_engine(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_execution_service.QueryReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_query_reasoning_engine_rest_call_success(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine_execution_service.QueryReasoningEngineResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.query_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, reasoning_engine_execution_service.QueryReasoningEngineResponse
+ )
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_query_reasoning_engine_rest_interceptors(null_interceptor):
+ transport = transports.ReasoningEngineExecutionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ReasoningEngineExecutionServiceRestInterceptor(),
+ )
+ client = ReasoningEngineExecutionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ReasoningEngineExecutionServiceRestInterceptor,
+ "post_query_reasoning_engine",
+ ) as post, mock.patch.object(
+ transports.ReasoningEngineExecutionServiceRestInterceptor,
+ "pre_query_reasoning_engine",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_execution_service.QueryReasoningEngineRequest.pb(
+ reasoning_engine_execution_service.QueryReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse.to_json(
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+ )
+ req.return_value.content = return_value
+
+ request = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+
+ client.query_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_stream_query_reasoning_engine_rest_bad_request(
+ request_type=reasoning_engine_execution_service.StreamQueryReasoningEngineRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.stream_query_reasoning_engine(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_stream_query_reasoning_engine_rest_call_success(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.stream_query_reasoning_engine(request)
+
+ assert isinstance(response, Iterable)
+ response = next(response)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_stream_query_reasoning_engine_rest_interceptors(null_interceptor):
+ transport = transports.ReasoningEngineExecutionServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ReasoningEngineExecutionServiceRestInterceptor(),
+ )
+ client = ReasoningEngineExecutionServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ReasoningEngineExecutionServiceRestInterceptor,
+ "post_stream_query_reasoning_engine",
+ ) as post, mock.patch.object(
+ transports.ReasoningEngineExecutionServiceRestInterceptor,
+ "pre_stream_query_reasoning_engine",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = (
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest.pb(
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ )
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(httpbody_pb2.HttpBody())
+ req.return_value.iter_content = mock.Mock(return_value=iter(return_value))
+
+ request = reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = httpbody_pb2.HttpBody()
+
+ client.stream_query_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_query_reasoning_engine_empty_call_rest():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_reasoning_engine), "__call__"
+ ) as call:
+ client.query_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_stream_query_reasoning_engine_empty_call_rest():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_query_reasoning_engine), "__call__"
+ ) as call:
+ client.stream_query_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = (
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ )
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = ReasoningEngineExecutionServiceAsyncClient.get_transport_class(
+ "rest_asyncio"
+ )(credentials=async_anonymous_credentials())
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_query_reasoning_engine_rest_asyncio_bad_request(
+ request_type=reasoning_engine_execution_service.QueryReasoningEngineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.query_reasoning_engine(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_execution_service.QueryReasoningEngineRequest,
+ dict,
+ ],
+)
+async def test_query_reasoning_engine_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine_execution_service.QueryReasoningEngineResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse.pb(
+ return_value
+ )
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.query_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(
+ response, reasoning_engine_execution_service.QueryReasoningEngineResponse
+ )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_query_reasoning_engine_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncReasoningEngineExecutionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncReasoningEngineExecutionServiceRestInterceptor(),
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncReasoningEngineExecutionServiceRestInterceptor,
+ "post_query_reasoning_engine",
+ ) as post, mock.patch.object(
+ transports.AsyncReasoningEngineExecutionServiceRestInterceptor,
+ "pre_query_reasoning_engine",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_execution_service.QueryReasoningEngineRequest.pb(
+ reasoning_engine_execution_service.QueryReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse.to_json(
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = (
+ reasoning_engine_execution_service.QueryReasoningEngineResponse()
+ )
+
+ await client.query_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_stream_query_reasoning_engine_rest_asyncio_bad_request(
+ request_type=reasoning_engine_execution_service.StreamQueryReasoningEngineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.stream_query_reasoning_engine(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest,
+ dict,
+ ],
+)
+async def test_stream_query_reasoning_engine_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = httpbody_pb2.HttpBody(
+ content_type="content_type_value",
+ data=b"data_blob",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = "[{}]".format(json_return_value)
+ response_value.content.return_value = mock_async_gen(json_return_value)
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.stream_query_reasoning_engine(request)
+
+ assert isinstance(response, AsyncIterable)
+ response = await response.__anext__()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, httpbody_pb2.HttpBody)
+ assert response.content_type == "content_type_value"
+ assert response.data == b"data_blob"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_stream_query_reasoning_engine_rest_asyncio_interceptors(
+ null_interceptor,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncReasoningEngineExecutionServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncReasoningEngineExecutionServiceRestInterceptor(),
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncReasoningEngineExecutionServiceRestInterceptor,
+ "post_stream_query_reasoning_engine",
+ ) as post, mock.patch.object(
+ transports.AsyncReasoningEngineExecutionServiceRestInterceptor,
+ "pre_stream_query_reasoning_engine",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = (
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest.pb(
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ )
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(httpbody_pb2.HttpBody())
+ req.return_value.content.return_value = mock_async_gen(return_value)
+
+ request = reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = httpbody_pb2.HttpBody()
+
+ await client.stream_query_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_query_reasoning_engine_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_reasoning_engine), "__call__"
+ ) as call:
+ await client.query_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_execution_service.QueryReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_stream_query_reasoning_engine_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stream_query_reasoning_engine), "__call__"
+ ) as call:
+ await client.stream_query_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = (
+ reasoning_engine_execution_service.StreamQueryReasoningEngineRequest()
+ )
+
+ assert args[0] == request_msg
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ )
+
+
+def test_reasoning_engine_execution_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.ReasoningEngineExecutionServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_reasoning_engine_execution_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.reasoning_engine_execution_service.transports.ReasoningEngineExecutionServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.ReasoningEngineExecutionServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "query_reasoning_engine",
+ "stream_query_reasoning_engine",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_reasoning_engine_execution_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.reasoning_engine_execution_service.transports.ReasoningEngineExecutionServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.ReasoningEngineExecutionServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_reasoning_engine_execution_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.reasoning_engine_execution_service.transports.ReasoningEngineExecutionServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.ReasoningEngineExecutionServiceTransport()
+ adc.assert_called_once()
+
+
+def test_reasoning_engine_execution_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ ReasoningEngineExecutionServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_reasoning_engine_execution_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ transports.ReasoningEngineExecutionServiceRestTransport,
+ ],
+)
+def test_reasoning_engine_execution_service_transport_auth_gdch_credentials(
+ transport_class,
+):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.ReasoningEngineExecutionServiceGrpcTransport, grpc_helpers),
+ (
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_reasoning_engine_execution_service_transport_create_channel(
+ transport_class, grpc_helpers
+):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_reasoning_engine_execution_service_grpc_transport_client_cert_source_for_mtls(
+ transport_class,
+):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_reasoning_engine_execution_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.ReasoningEngineExecutionServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_reasoning_engine_execution_service_host_no_port(transport_name):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_reasoning_engine_execution_service_host_with_port(transport_name):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_reasoning_engine_execution_service_client_transport_session_collision(
+ transport_name,
+):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = ReasoningEngineExecutionServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = ReasoningEngineExecutionServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.query_reasoning_engine._session
+ session2 = client2.transport.query_reasoning_engine._session
+ assert session1 != session2
+ session1 = client1.transport.stream_query_reasoning_engine._session
+ session2 = client2.transport.stream_query_reasoning_engine._session
+ assert session1 != session2
+
+
+def test_reasoning_engine_execution_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ReasoningEngineExecutionServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_reasoning_engine_execution_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_reasoning_engine_execution_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_reasoning_engine_execution_service_transport_channel_mtls_with_adc(
+ transport_class,
+):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_reasoning_engine_path():
+ project = "squid"
+ location = "clam"
+ reasoning_engine = "whelk"
+ expected = "projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}".format(
+ project=project,
+ location=location,
+ reasoning_engine=reasoning_engine,
+ )
+ actual = ReasoningEngineExecutionServiceClient.reasoning_engine_path(
+ project, location, reasoning_engine
+ )
+ assert expected == actual
+
+
+def test_parse_reasoning_engine_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "reasoning_engine": "nudibranch",
+ }
+ path = ReasoningEngineExecutionServiceClient.reasoning_engine_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineExecutionServiceClient.parse_reasoning_engine_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = ReasoningEngineExecutionServiceClient.common_billing_account_path(
+ billing_account
+ )
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = ReasoningEngineExecutionServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineExecutionServiceClient.parse_common_billing_account_path(
+ path
+ )
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "winkle"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = ReasoningEngineExecutionServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = ReasoningEngineExecutionServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineExecutionServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "scallop"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = ReasoningEngineExecutionServiceClient.common_organization_path(
+ organization
+ )
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = ReasoningEngineExecutionServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineExecutionServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "squid"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = ReasoningEngineExecutionServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = ReasoningEngineExecutionServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineExecutionServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = ReasoningEngineExecutionServiceClient.common_location_path(
+ project, location
+ )
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = ReasoningEngineExecutionServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineExecutionServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.ReasoningEngineExecutionServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.ReasoningEngineExecutionServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = ReasoningEngineExecutionServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineExecutionServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = ReasoningEngineExecutionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (
+ ReasoningEngineExecutionServiceClient,
+ transports.ReasoningEngineExecutionServiceGrpcTransport,
+ ),
+ (
+ ReasoningEngineExecutionServiceAsyncClient,
+ transports.ReasoningEngineExecutionServiceGrpcAsyncIOTransport,
+ ),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd7a704224e93d9b79f4f4eed0174b4db93cce25
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py
@@ -0,0 +1,10153 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation
+from google.api_core import operation_async # type: ignore
+from google.api_core import operations_v1
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import (
+ ReasoningEngineServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import (
+ ReasoningEngineServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import pagers
+from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import transports
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.cloud.aiplatform_v1beta1.types import reasoning_engine
+from google.cloud.aiplatform_v1beta1.types import (
+ reasoning_engine as gca_reasoning_engine,
+)
+from google.cloud.aiplatform_v1beta1.types import reasoning_engine_service
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert ReasoningEngineServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ ReasoningEngineServiceClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ ReasoningEngineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ ReasoningEngineServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ ReasoningEngineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ ReasoningEngineServiceClient._get_default_mtls_endpoint(non_googleapi)
+ == non_googleapi
+ )
+
+
+def test__read_environment_variables():
+ assert ReasoningEngineServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert ReasoningEngineServiceClient._read_environment_variables() == (
+ True,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert ReasoningEngineServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ ReasoningEngineServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert ReasoningEngineServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert ReasoningEngineServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert ReasoningEngineServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ ReasoningEngineServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert ReasoningEngineServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert ReasoningEngineServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ ReasoningEngineServiceClient._get_client_cert_source(
+ mock_provided_cert_source, False
+ )
+ is None
+ )
+ assert (
+ ReasoningEngineServiceClient._get_client_cert_source(
+ mock_provided_cert_source, True
+ )
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ ReasoningEngineServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ ReasoningEngineServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ ReasoningEngineServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = ReasoningEngineServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = ReasoningEngineServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = ReasoningEngineServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ ReasoningEngineServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ ReasoningEngineServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == ReasoningEngineServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ ReasoningEngineServiceClient._get_api_endpoint(
+ None, None, default_universe, "auto"
+ )
+ == default_endpoint
+ )
+ assert (
+ ReasoningEngineServiceClient._get_api_endpoint(
+ None, None, default_universe, "always"
+ )
+ == ReasoningEngineServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ ReasoningEngineServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == ReasoningEngineServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ ReasoningEngineServiceClient._get_api_endpoint(
+ None, None, mock_universe, "never"
+ )
+ == mock_endpoint
+ )
+ assert (
+ ReasoningEngineServiceClient._get_api_endpoint(
+ None, None, default_universe, "never"
+ )
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ ReasoningEngineServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ ReasoningEngineServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ ReasoningEngineServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ ReasoningEngineServiceClient._get_universe_domain(None, None)
+ == ReasoningEngineServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ ReasoningEngineServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (ReasoningEngineServiceClient, "grpc"),
+ (ReasoningEngineServiceAsyncClient, "grpc_asyncio"),
+ (ReasoningEngineServiceClient, "rest"),
+ ],
+)
+def test_reasoning_engine_service_client_from_service_account_info(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.ReasoningEngineServiceGrpcTransport, "grpc"),
+ (transports.ReasoningEngineServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.ReasoningEngineServiceRestTransport, "rest"),
+ ],
+)
+def test_reasoning_engine_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (ReasoningEngineServiceClient, "grpc"),
+ (ReasoningEngineServiceAsyncClient, "grpc_asyncio"),
+ (ReasoningEngineServiceClient, "rest"),
+ ],
+)
+def test_reasoning_engine_service_client_from_service_account_file(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_reasoning_engine_service_client_get_transport_class():
+ transport = ReasoningEngineServiceClient.get_transport_class()
+ available_transports = [
+ transports.ReasoningEngineServiceGrpcTransport,
+ transports.ReasoningEngineServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = ReasoningEngineServiceClient.get_transport_class("grpc")
+ assert transport == transports.ReasoningEngineServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ ReasoningEngineServiceAsyncClient,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+@mock.patch.object(
+ ReasoningEngineServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineServiceAsyncClient),
+)
+def test_reasoning_engine_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(ReasoningEngineServiceClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(ReasoningEngineServiceClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ ReasoningEngineServiceAsyncClient,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ ReasoningEngineServiceAsyncClient,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceRestTransport,
+ "rest",
+ "true",
+ ),
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceRestTransport,
+ "rest",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ ReasoningEngineServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_reasoning_engine_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [ReasoningEngineServiceClient, ReasoningEngineServiceAsyncClient]
+)
+@mock.patch.object(
+ ReasoningEngineServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(ReasoningEngineServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(ReasoningEngineServiceAsyncClient),
+)
+def test_reasoning_engine_service_client_get_mtls_endpoint_and_cert_source(
+ client_class,
+):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [ReasoningEngineServiceClient, ReasoningEngineServiceAsyncClient]
+)
+@mock.patch.object(
+ ReasoningEngineServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineServiceClient),
+)
+@mock.patch.object(
+ ReasoningEngineServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ReasoningEngineServiceAsyncClient),
+)
+def test_reasoning_engine_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = ReasoningEngineServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = ReasoningEngineServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = ReasoningEngineServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ ReasoningEngineServiceAsyncClient,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+def test_reasoning_engine_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ ReasoningEngineServiceAsyncClient,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceRestTransport,
+ "rest",
+ None,
+ ),
+ ],
+)
+def test_reasoning_engine_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_reasoning_engine_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.reasoning_engine_service.transports.ReasoningEngineServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = ReasoningEngineServiceClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ ReasoningEngineServiceClient,
+ transports.ReasoningEngineServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ ReasoningEngineServiceAsyncClient,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_reasoning_engine_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.CreateReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_create_reasoning_engine(request_type, transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.create_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.CreateReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_create_reasoning_engine_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = reasoning_engine_service.CreateReasoningEngineRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_reasoning_engine(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == reasoning_engine_service.CreateReasoningEngineRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_reasoning_engine_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_reasoning_engine
+ ] = mock_rpc
+ request = {}
+ client.create_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_reasoning_engine_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_reasoning_engine
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ await client.create_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.create_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_reasoning_engine_async(
+ transport: str = "grpc_asyncio",
+ request_type=reasoning_engine_service.CreateReasoningEngineRequest,
+):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.create_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.CreateReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_create_reasoning_engine_async_from_dict():
+ await test_create_reasoning_engine_async(request_type=dict)
+
+
+def test_create_reasoning_engine_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.CreateReasoningEngineRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_reasoning_engine_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.CreateReasoningEngineRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.create_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_reasoning_engine_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_reasoning_engine(
+ parent="parent_value",
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].reasoning_engine
+ mock_val = gca_reasoning_engine.ReasoningEngine(name="name_value")
+ assert arg == mock_val
+
+
+def test_create_reasoning_engine_flattened_error():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_reasoning_engine(
+ reasoning_engine_service.CreateReasoningEngineRequest(),
+ parent="parent_value",
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_reasoning_engine_flattened_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_reasoning_engine(
+ parent="parent_value",
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].reasoning_engine
+ mock_val = gca_reasoning_engine.ReasoningEngine(name="name_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_reasoning_engine_flattened_error_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_reasoning_engine(
+ reasoning_engine_service.CreateReasoningEngineRequest(),
+ parent="parent_value",
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.GetReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_get_reasoning_engine(request_type, transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = reasoning_engine.ReasoningEngine(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ etag="etag_value",
+ )
+ response = client.get_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.GetReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, reasoning_engine.ReasoningEngine)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+ assert response.etag == "etag_value"
+
+
+def test_get_reasoning_engine_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = reasoning_engine_service.GetReasoningEngineRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_reasoning_engine(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == reasoning_engine_service.GetReasoningEngineRequest(
+ name="name_value",
+ )
+
+
+def test_get_reasoning_engine_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_reasoning_engine in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_reasoning_engine
+ ] = mock_rpc
+ request = {}
+ client.get_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_reasoning_engine_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_reasoning_engine
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ await client.get_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_reasoning_engine_async(
+ transport: str = "grpc_asyncio",
+ request_type=reasoning_engine_service.GetReasoningEngineRequest,
+):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine.ReasoningEngine(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ etag="etag_value",
+ )
+ )
+ response = await client.get_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.GetReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, reasoning_engine.ReasoningEngine)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+ assert response.etag == "etag_value"
+
+
+@pytest.mark.asyncio
+async def test_get_reasoning_engine_async_from_dict():
+ await test_get_reasoning_engine_async(request_type=dict)
+
+
+def test_get_reasoning_engine_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.GetReasoningEngineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = reasoning_engine.ReasoningEngine()
+ client.get_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_reasoning_engine_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.GetReasoningEngineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine.ReasoningEngine()
+ )
+ await client.get_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_reasoning_engine_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = reasoning_engine.ReasoningEngine()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_reasoning_engine(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_reasoning_engine_flattened_error():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_reasoning_engine(
+ reasoning_engine_service.GetReasoningEngineRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_reasoning_engine_flattened_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = reasoning_engine.ReasoningEngine()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine.ReasoningEngine()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_reasoning_engine(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_reasoning_engine_flattened_error_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_reasoning_engine(
+ reasoning_engine_service.GetReasoningEngineRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.ListReasoningEnginesRequest,
+ dict,
+ ],
+)
+def test_list_reasoning_engines(request_type, transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = reasoning_engine_service.ListReasoningEnginesResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_reasoning_engines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.ListReasoningEnginesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListReasoningEnginesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_reasoning_engines_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = reasoning_engine_service.ListReasoningEnginesRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_reasoning_engines(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == reasoning_engine_service.ListReasoningEnginesRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_reasoning_engines_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_reasoning_engines
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_reasoning_engines
+ ] = mock_rpc
+ request = {}
+ client.list_reasoning_engines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_reasoning_engines(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_reasoning_engines
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_reasoning_engines
+ ] = mock_rpc
+
+ request = {}
+ await client.list_reasoning_engines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_reasoning_engines(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_async(
+ transport: str = "grpc_asyncio",
+ request_type=reasoning_engine_service.ListReasoningEnginesRequest,
+):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_reasoning_engines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.ListReasoningEnginesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListReasoningEnginesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_async_from_dict():
+ await test_list_reasoning_engines_async(request_type=dict)
+
+
+def test_list_reasoning_engines_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.ListReasoningEnginesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ call.return_value = reasoning_engine_service.ListReasoningEnginesResponse()
+ client.list_reasoning_engines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.ListReasoningEnginesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine_service.ListReasoningEnginesResponse()
+ )
+ await client.list_reasoning_engines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_reasoning_engines_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = reasoning_engine_service.ListReasoningEnginesResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_reasoning_engines(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_reasoning_engines_flattened_error():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_reasoning_engines(
+ reasoning_engine_service.ListReasoningEnginesRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_flattened_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = reasoning_engine_service.ListReasoningEnginesResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine_service.ListReasoningEnginesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_reasoning_engines(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_flattened_error_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_reasoning_engines(
+ reasoning_engine_service.ListReasoningEnginesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_reasoning_engines_pager(transport_name: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="abc",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[],
+ next_page_token="def",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="ghi",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_reasoning_engines(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, reasoning_engine.ReasoningEngine) for i in results)
+
+
+def test_list_reasoning_engines_pages(transport_name: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="abc",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[],
+ next_page_token="def",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="ghi",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_reasoning_engines(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_async_pager():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="abc",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[],
+ next_page_token="def",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="ghi",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_reasoning_engines(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, reasoning_engine.ReasoningEngine) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_async_pages():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="abc",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[],
+ next_page_token="def",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="ghi",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_reasoning_engines(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.UpdateReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_update_reasoning_engine(request_type, transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.update_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.UpdateReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_update_reasoning_engine_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = reasoning_engine_service.UpdateReasoningEngineRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.update_reasoning_engine(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == reasoning_engine_service.UpdateReasoningEngineRequest()
+
+
+def test_update_reasoning_engine_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.update_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_reasoning_engine
+ ] = mock_rpc
+ request = {}
+ client.update_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_reasoning_engine_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.update_reasoning_engine
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.update_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ await client.update_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.update_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_reasoning_engine_async(
+ transport: str = "grpc_asyncio",
+ request_type=reasoning_engine_service.UpdateReasoningEngineRequest,
+):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.update_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.UpdateReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_update_reasoning_engine_async_from_dict():
+ await test_update_reasoning_engine_async(request_type=dict)
+
+
+def test_update_reasoning_engine_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.UpdateReasoningEngineRequest()
+
+ request.reasoning_engine.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "reasoning_engine.name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_update_reasoning_engine_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.UpdateReasoningEngineRequest()
+
+ request.reasoning_engine.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.update_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "reasoning_engine.name=name_value",
+ ) in kw["metadata"]
+
+
+def test_update_reasoning_engine_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_reasoning_engine(
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].reasoning_engine
+ mock_val = gca_reasoning_engine.ReasoningEngine(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+def test_update_reasoning_engine_flattened_error():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_reasoning_engine(
+ reasoning_engine_service.UpdateReasoningEngineRequest(),
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_reasoning_engine_flattened_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_reasoning_engine(
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].reasoning_engine
+ mock_val = gca_reasoning_engine.ReasoningEngine(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_update_reasoning_engine_flattened_error_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_reasoning_engine(
+ reasoning_engine_service.UpdateReasoningEngineRequest(),
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.DeleteReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_delete_reasoning_engine(request_type, transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.DeleteReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_reasoning_engine_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = reasoning_engine_service.DeleteReasoningEngineRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_reasoning_engine(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == reasoning_engine_service.DeleteReasoningEngineRequest(
+ name="name_value",
+ )
+
+
+def test_delete_reasoning_engine_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_reasoning_engine
+ ] = mock_rpc
+ request = {}
+ client.delete_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_reasoning_engine_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_reasoning_engine
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_reasoning_engine_async(
+ transport: str = "grpc_asyncio",
+ request_type=reasoning_engine_service.DeleteReasoningEngineRequest,
+):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = reasoning_engine_service.DeleteReasoningEngineRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_reasoning_engine_async_from_dict():
+ await test_delete_reasoning_engine_async(request_type=dict)
+
+
+def test_delete_reasoning_engine_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.DeleteReasoningEngineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_reasoning_engine_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = reasoning_engine_service.DeleteReasoningEngineRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_reasoning_engine_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_reasoning_engine(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_reasoning_engine_flattened_error():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_reasoning_engine(
+ reasoning_engine_service.DeleteReasoningEngineRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_reasoning_engine_flattened_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_reasoning_engine(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_reasoning_engine_flattened_error_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_reasoning_engine(
+ reasoning_engine_service.DeleteReasoningEngineRequest(),
+ name="name_value",
+ )
+
+
+def test_create_reasoning_engine_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ client.create_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_reasoning_engine_rest_required_fields(
+ request_type=reasoning_engine_service.CreateReasoningEngineRequest,
+):
+ transport_class = transports.ReasoningEngineServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_reasoning_engine(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_reasoning_engine_rest_unset_required_fields():
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_reasoning_engine._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "reasoningEngine",
+ )
+ )
+ )
+
+
+def test_create_reasoning_engine_rest_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_reasoning_engine(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/reasoningEngines"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_reasoning_engine_rest_flattened_error(transport: str = "rest"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_reasoning_engine(
+ reasoning_engine_service.CreateReasoningEngineRequest(),
+ parent="parent_value",
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ )
+
+
+def test_get_reasoning_engine_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_reasoning_engine in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ client.get_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_reasoning_engine_rest_required_fields(
+ request_type=reasoning_engine_service.GetReasoningEngineRequest,
+):
+ transport_class = transports.ReasoningEngineServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine.ReasoningEngine()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = reasoning_engine.ReasoningEngine.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_reasoning_engine(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_reasoning_engine_rest_unset_required_fields():
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_reasoning_engine._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_reasoning_engine_rest_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine.ReasoningEngine()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = reasoning_engine.ReasoningEngine.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_reasoning_engine(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_reasoning_engine_rest_flattened_error(transport: str = "rest"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_reasoning_engine(
+ reasoning_engine_service.GetReasoningEngineRequest(),
+ name="name_value",
+ )
+
+
+def test_list_reasoning_engines_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_reasoning_engines
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_reasoning_engines
+ ] = mock_rpc
+
+ request = {}
+ client.list_reasoning_engines(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_reasoning_engines(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_reasoning_engines_rest_required_fields(
+ request_type=reasoning_engine_service.ListReasoningEnginesRequest,
+):
+ transport_class = transports.ReasoningEngineServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_reasoning_engines._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_reasoning_engines._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "page_size",
+ "page_token",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_reasoning_engines(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_reasoning_engines_rest_unset_required_fields():
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_reasoning_engines._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "pageSize",
+ "pageToken",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_reasoning_engines_rest_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_reasoning_engines(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/reasoningEngines"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_reasoning_engines_rest_flattened_error(transport: str = "rest"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_reasoning_engines(
+ reasoning_engine_service.ListReasoningEnginesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_reasoning_engines_rest_pager(transport: str = "rest"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="abc",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[],
+ next_page_token="def",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ ],
+ next_page_token="ghi",
+ ),
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ reasoning_engines=[
+ reasoning_engine.ReasoningEngine(),
+ reasoning_engine.ReasoningEngine(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ reasoning_engine_service.ListReasoningEnginesResponse.to_json(x)
+ for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_reasoning_engines(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, reasoning_engine.ReasoningEngine) for i in results)
+
+ pages = list(client.list_reasoning_engines(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_update_reasoning_engine_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.update_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ client.update_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_update_reasoning_engine_rest_required_fields(
+ request_type=reasoning_engine_service.UpdateReasoningEngineRequest,
+):
+ transport_class = transports.ReasoningEngineServiceRestTransport
+
+ request_init = {}
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_reasoning_engine._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("update_mask",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "patch",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.update_reasoning_engine(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_update_reasoning_engine_rest_unset_required_fields():
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.update_reasoning_engine._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("updateMask",)) & set(("reasoningEngine",)))
+
+
+def test_update_reasoning_engine_rest_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "reasoning_engine": {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.update_reasoning_engine(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{reasoning_engine.name=projects/*/locations/*/reasoningEngines/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_update_reasoning_engine_rest_flattened_error(transport: str = "rest"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_reasoning_engine(
+ reasoning_engine_service.UpdateReasoningEngineRequest(),
+ reasoning_engine=gca_reasoning_engine.ReasoningEngine(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+def test_delete_reasoning_engine_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_reasoning_engine
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_reasoning_engine
+ ] = mock_rpc
+
+ request = {}
+ client.delete_reasoning_engine(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_reasoning_engine(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_reasoning_engine_rest_required_fields(
+ request_type=reasoning_engine_service.DeleteReasoningEngineRequest,
+):
+ transport_class = transports.ReasoningEngineServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_reasoning_engine._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_reasoning_engine(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_reasoning_engine_rest_unset_required_fields():
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_reasoning_engine._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_reasoning_engine_rest_flattened():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_reasoning_engine(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_reasoning_engine_rest_flattened_error(transport: str = "rest"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_reasoning_engine(
+ reasoning_engine_service.DeleteReasoningEngineRequest(),
+ name="name_value",
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.ReasoningEngineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.ReasoningEngineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = ReasoningEngineServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.ReasoningEngineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = ReasoningEngineServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = ReasoningEngineServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.ReasoningEngineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = ReasoningEngineServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.ReasoningEngineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = ReasoningEngineServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.ReasoningEngineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.ReasoningEngineServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineServiceGrpcTransport,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ transports.ReasoningEngineServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = ReasoningEngineServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_reasoning_engine_empty_call_grpc():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.CreateReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_reasoning_engine_empty_call_grpc():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = reasoning_engine.ReasoningEngine()
+ client.get_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.GetReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_reasoning_engines_empty_call_grpc():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ call.return_value = reasoning_engine_service.ListReasoningEnginesResponse()
+ client.list_reasoning_engines(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.ListReasoningEnginesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_reasoning_engine_empty_call_grpc():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.UpdateReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_reasoning_engine_empty_call_grpc():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.DeleteReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = ReasoningEngineServiceAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_reasoning_engine_empty_call_grpc_asyncio():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.create_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.CreateReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_reasoning_engine_empty_call_grpc_asyncio():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine.ReasoningEngine(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ etag="etag_value",
+ )
+ )
+ await client.get_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.GetReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_empty_call_grpc_asyncio():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ reasoning_engine_service.ListReasoningEnginesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_reasoning_engines(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.ListReasoningEnginesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_reasoning_engine_empty_call_grpc_asyncio():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.update_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.UpdateReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_reasoning_engine_empty_call_grpc_asyncio():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.DeleteReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = ReasoningEngineServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_create_reasoning_engine_rest_bad_request(
+ request_type=reasoning_engine_service.CreateReasoningEngineRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_reasoning_engine(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.CreateReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_create_reasoning_engine_rest_call_success(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["reasoning_engine"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "spec": {
+ "package_spec": {
+ "pickle_object_gcs_uri": "pickle_object_gcs_uri_value",
+ "dependency_files_gcs_uri": "dependency_files_gcs_uri_value",
+ "requirements_gcs_uri": "requirements_gcs_uri_value",
+ "python_version": "python_version_value",
+ },
+ "class_methods": [{"fields": {}}],
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "etag": "etag_value",
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = reasoning_engine_service.CreateReasoningEngineRequest.meta.fields[
+ "reasoning_engine"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["reasoning_engine"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["reasoning_engine"][field])):
+ del request_init["reasoning_engine"][field][i][subfield]
+ else:
+ del request_init["reasoning_engine"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_reasoning_engine_rest_interceptors(null_interceptor):
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "post_create_reasoning_engine"
+ ) as post, mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "pre_create_reasoning_engine"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.CreateReasoningEngineRequest.pb(
+ reasoning_engine_service.CreateReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = reasoning_engine_service.CreateReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.create_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_reasoning_engine_rest_bad_request(
+ request_type=reasoning_engine_service.GetReasoningEngineRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_reasoning_engine(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.GetReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_get_reasoning_engine_rest_call_success(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine.ReasoningEngine(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ etag="etag_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = reasoning_engine.ReasoningEngine.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, reasoning_engine.ReasoningEngine)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+ assert response.etag == "etag_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_reasoning_engine_rest_interceptors(null_interceptor):
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "post_get_reasoning_engine"
+ ) as post, mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "pre_get_reasoning_engine"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.GetReasoningEngineRequest.pb(
+ reasoning_engine_service.GetReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = reasoning_engine.ReasoningEngine.to_json(
+ reasoning_engine.ReasoningEngine()
+ )
+ req.return_value.content = return_value
+
+ request = reasoning_engine_service.GetReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = reasoning_engine.ReasoningEngine()
+
+ client.get_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_reasoning_engines_rest_bad_request(
+ request_type=reasoning_engine_service.ListReasoningEnginesRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_reasoning_engines(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.ListReasoningEnginesRequest,
+ dict,
+ ],
+)
+def test_list_reasoning_engines_rest_call_success(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_reasoning_engines(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListReasoningEnginesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_reasoning_engines_rest_interceptors(null_interceptor):
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "post_list_reasoning_engines"
+ ) as post, mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "pre_list_reasoning_engines"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.ListReasoningEnginesRequest.pb(
+ reasoning_engine_service.ListReasoningEnginesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse.to_json(
+ reasoning_engine_service.ListReasoningEnginesResponse()
+ )
+ req.return_value.content = return_value
+
+ request = reasoning_engine_service.ListReasoningEnginesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = reasoning_engine_service.ListReasoningEnginesResponse()
+
+ client.list_reasoning_engines(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_update_reasoning_engine_rest_bad_request(
+ request_type=reasoning_engine_service.UpdateReasoningEngineRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "reasoning_engine": {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.update_reasoning_engine(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.UpdateReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_update_reasoning_engine_rest_call_success(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "reasoning_engine": {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ }
+ request_init["reasoning_engine"] = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3",
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "spec": {
+ "package_spec": {
+ "pickle_object_gcs_uri": "pickle_object_gcs_uri_value",
+ "dependency_files_gcs_uri": "dependency_files_gcs_uri_value",
+ "requirements_gcs_uri": "requirements_gcs_uri_value",
+ "python_version": "python_version_value",
+ },
+ "class_methods": [{"fields": {}}],
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "etag": "etag_value",
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = reasoning_engine_service.UpdateReasoningEngineRequest.meta.fields[
+ "reasoning_engine"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["reasoning_engine"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["reasoning_engine"][field])):
+ del request_init["reasoning_engine"][field][i][subfield]
+ else:
+ del request_init["reasoning_engine"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.update_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_reasoning_engine_rest_interceptors(null_interceptor):
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "post_update_reasoning_engine"
+ ) as post, mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "pre_update_reasoning_engine"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.UpdateReasoningEngineRequest.pb(
+ reasoning_engine_service.UpdateReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = reasoning_engine_service.UpdateReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.update_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_reasoning_engine_rest_bad_request(
+ request_type=reasoning_engine_service.DeleteReasoningEngineRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_reasoning_engine(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.DeleteReasoningEngineRequest,
+ dict,
+ ],
+)
+def test_delete_reasoning_engine_rest_call_success(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_reasoning_engine_rest_interceptors(null_interceptor):
+ transport = transports.ReasoningEngineServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "post_delete_reasoning_engine"
+ ) as post, mock.patch.object(
+ transports.ReasoningEngineServiceRestInterceptor, "pre_delete_reasoning_engine"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.DeleteReasoningEngineRequest.pb(
+ reasoning_engine_service.DeleteReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = reasoning_engine_service.DeleteReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_reasoning_engine_empty_call_rest():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ client.create_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.CreateReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_reasoning_engine_empty_call_rest():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ client.get_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.GetReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_reasoning_engines_empty_call_rest():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ client.list_reasoning_engines(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.ListReasoningEnginesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_reasoning_engine_empty_call_rest():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ client.update_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.UpdateReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_reasoning_engine_empty_call_rest():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ client.delete_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.DeleteReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+def test_reasoning_engine_service_rest_lro_client():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AbstractOperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = ReasoningEngineServiceAsyncClient.get_transport_class("rest_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_create_reasoning_engine_rest_asyncio_bad_request(
+ request_type=reasoning_engine_service.CreateReasoningEngineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_reasoning_engine(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.CreateReasoningEngineRequest,
+ dict,
+ ],
+)
+async def test_create_reasoning_engine_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["reasoning_engine"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "spec": {
+ "package_spec": {
+ "pickle_object_gcs_uri": "pickle_object_gcs_uri_value",
+ "dependency_files_gcs_uri": "dependency_files_gcs_uri_value",
+ "requirements_gcs_uri": "requirements_gcs_uri_value",
+ "python_version": "python_version_value",
+ },
+ "class_methods": [{"fields": {}}],
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "etag": "etag_value",
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = reasoning_engine_service.CreateReasoningEngineRequest.meta.fields[
+ "reasoning_engine"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["reasoning_engine"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["reasoning_engine"][field])):
+ del request_init["reasoning_engine"][field][i][subfield]
+ else:
+ del request_init["reasoning_engine"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_reasoning_engine_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncReasoningEngineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "post_create_reasoning_engine",
+ ) as post, mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "pre_create_reasoning_engine",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.CreateReasoningEngineRequest.pb(
+ reasoning_engine_service.CreateReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = reasoning_engine_service.CreateReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.create_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_reasoning_engine_rest_asyncio_bad_request(
+ request_type=reasoning_engine_service.GetReasoningEngineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_reasoning_engine(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.GetReasoningEngineRequest,
+ dict,
+ ],
+)
+async def test_get_reasoning_engine_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine.ReasoningEngine(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ etag="etag_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = reasoning_engine.ReasoningEngine.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, reasoning_engine.ReasoningEngine)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+ assert response.etag == "etag_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_reasoning_engine_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncReasoningEngineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "post_get_reasoning_engine",
+ ) as post, mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "pre_get_reasoning_engine",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.GetReasoningEngineRequest.pb(
+ reasoning_engine_service.GetReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = reasoning_engine.ReasoningEngine.to_json(
+ reasoning_engine.ReasoningEngine()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = reasoning_engine_service.GetReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = reasoning_engine.ReasoningEngine()
+
+ await client.get_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_rest_asyncio_bad_request(
+ request_type=reasoning_engine_service.ListReasoningEnginesRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_reasoning_engines(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.ListReasoningEnginesRequest,
+ dict,
+ ],
+)
+async def test_list_reasoning_engines_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_reasoning_engines(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListReasoningEnginesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_reasoning_engines_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncReasoningEngineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "post_list_reasoning_engines",
+ ) as post, mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "pre_list_reasoning_engines",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.ListReasoningEnginesRequest.pb(
+ reasoning_engine_service.ListReasoningEnginesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = reasoning_engine_service.ListReasoningEnginesResponse.to_json(
+ reasoning_engine_service.ListReasoningEnginesResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = reasoning_engine_service.ListReasoningEnginesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = reasoning_engine_service.ListReasoningEnginesResponse()
+
+ await client.list_reasoning_engines(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_update_reasoning_engine_rest_asyncio_bad_request(
+ request_type=reasoning_engine_service.UpdateReasoningEngineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "reasoning_engine": {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.update_reasoning_engine(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.UpdateReasoningEngineRequest,
+ dict,
+ ],
+)
+async def test_update_reasoning_engine_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "reasoning_engine": {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ }
+ request_init["reasoning_engine"] = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3",
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "spec": {
+ "package_spec": {
+ "pickle_object_gcs_uri": "pickle_object_gcs_uri_value",
+ "dependency_files_gcs_uri": "dependency_files_gcs_uri_value",
+ "requirements_gcs_uri": "requirements_gcs_uri_value",
+ "python_version": "python_version_value",
+ },
+ "class_methods": [{"fields": {}}],
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "etag": "etag_value",
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = reasoning_engine_service.UpdateReasoningEngineRequest.meta.fields[
+ "reasoning_engine"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["reasoning_engine"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["reasoning_engine"][field])):
+ del request_init["reasoning_engine"][field][i][subfield]
+ else:
+ del request_init["reasoning_engine"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.update_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_update_reasoning_engine_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncReasoningEngineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "post_update_reasoning_engine",
+ ) as post, mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "pre_update_reasoning_engine",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.UpdateReasoningEngineRequest.pb(
+ reasoning_engine_service.UpdateReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = reasoning_engine_service.UpdateReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.update_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_reasoning_engine_rest_asyncio_bad_request(
+ request_type=reasoning_engine_service.DeleteReasoningEngineRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_reasoning_engine(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ reasoning_engine_service.DeleteReasoningEngineRequest,
+ dict,
+ ],
+)
+async def test_delete_reasoning_engine_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/reasoningEngines/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_reasoning_engine(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_reasoning_engine_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncReasoningEngineServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncReasoningEngineServiceRestInterceptor(),
+ )
+ client = ReasoningEngineServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "post_delete_reasoning_engine",
+ ) as post, mock.patch.object(
+ transports.AsyncReasoningEngineServiceRestInterceptor,
+ "pre_delete_reasoning_engine",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = reasoning_engine_service.DeleteReasoningEngineRequest.pb(
+ reasoning_engine_service.DeleteReasoningEngineRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = reasoning_engine_service.DeleteReasoningEngineRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_reasoning_engine(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_reasoning_engine_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_reasoning_engine), "__call__"
+ ) as call:
+ await client.create_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.CreateReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_reasoning_engine_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_reasoning_engine), "__call__"
+ ) as call:
+ await client.get_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.GetReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_reasoning_engines_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_reasoning_engines), "__call__"
+ ) as call:
+ await client.list_reasoning_engines(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.ListReasoningEnginesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_reasoning_engine_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_reasoning_engine), "__call__"
+ ) as call:
+ await client.update_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.UpdateReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_reasoning_engine_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_reasoning_engine), "__call__"
+ ) as call:
+ await client.delete_reasoning_engine(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = reasoning_engine_service.DeleteReasoningEngineRequest()
+
+ assert args[0] == request_msg
+
+
+def test_reasoning_engine_service_rest_asyncio_lro_client():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AsyncOperationsRestClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.ReasoningEngineServiceGrpcTransport,
+ )
+
+
+def test_reasoning_engine_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.ReasoningEngineServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_reasoning_engine_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.reasoning_engine_service.transports.ReasoningEngineServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.ReasoningEngineServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "create_reasoning_engine",
+ "get_reasoning_engine",
+ "list_reasoning_engines",
+ "update_reasoning_engine",
+ "delete_reasoning_engine",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_reasoning_engine_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.reasoning_engine_service.transports.ReasoningEngineServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.ReasoningEngineServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_reasoning_engine_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.reasoning_engine_service.transports.ReasoningEngineServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.ReasoningEngineServiceTransport()
+ adc.assert_called_once()
+
+
+def test_reasoning_engine_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ ReasoningEngineServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineServiceGrpcTransport,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_reasoning_engine_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineServiceGrpcTransport,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ transports.ReasoningEngineServiceRestTransport,
+ ],
+)
+def test_reasoning_engine_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.ReasoningEngineServiceGrpcTransport, grpc_helpers),
+ (transports.ReasoningEngineServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_reasoning_engine_service_transport_create_channel(
+ transport_class, grpc_helpers
+):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineServiceGrpcTransport,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_reasoning_engine_service_grpc_transport_client_cert_source_for_mtls(
+ transport_class,
+):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_reasoning_engine_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.ReasoningEngineServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_reasoning_engine_service_host_no_port(transport_name):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_reasoning_engine_service_host_with_port(transport_name):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_reasoning_engine_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = ReasoningEngineServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = ReasoningEngineServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.create_reasoning_engine._session
+ session2 = client2.transport.create_reasoning_engine._session
+ assert session1 != session2
+ session1 = client1.transport.get_reasoning_engine._session
+ session2 = client2.transport.get_reasoning_engine._session
+ assert session1 != session2
+ session1 = client1.transport.list_reasoning_engines._session
+ session2 = client2.transport.list_reasoning_engines._session
+ assert session1 != session2
+ session1 = client1.transport.update_reasoning_engine._session
+ session2 = client2.transport.update_reasoning_engine._session
+ assert session1 != session2
+ session1 = client1.transport.delete_reasoning_engine._session
+ session2 = client2.transport.delete_reasoning_engine._session
+ assert session1 != session2
+
+
+def test_reasoning_engine_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ReasoningEngineServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_reasoning_engine_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ReasoningEngineServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineServiceGrpcTransport,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_reasoning_engine_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ReasoningEngineServiceGrpcTransport,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_reasoning_engine_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_reasoning_engine_service_grpc_lro_client():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_reasoning_engine_service_grpc_lro_async_client():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_reasoning_engine_path():
+ project = "squid"
+ location = "clam"
+ reasoning_engine = "whelk"
+ expected = "projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}".format(
+ project=project,
+ location=location,
+ reasoning_engine=reasoning_engine,
+ )
+ actual = ReasoningEngineServiceClient.reasoning_engine_path(
+ project, location, reasoning_engine
+ )
+ assert expected == actual
+
+
+def test_parse_reasoning_engine_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "reasoning_engine": "nudibranch",
+ }
+ path = ReasoningEngineServiceClient.reasoning_engine_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineServiceClient.parse_reasoning_engine_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = ReasoningEngineServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = ReasoningEngineServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "winkle"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = ReasoningEngineServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = ReasoningEngineServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "scallop"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = ReasoningEngineServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = ReasoningEngineServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "squid"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = ReasoningEngineServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = ReasoningEngineServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = ReasoningEngineServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = ReasoningEngineServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ReasoningEngineServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.ReasoningEngineServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.ReasoningEngineServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = ReasoningEngineServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ReasoningEngineServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = ReasoningEngineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (ReasoningEngineServiceClient, transports.ReasoningEngineServiceGrpcTransport),
+ (
+ ReasoningEngineServiceAsyncClient,
+ transports.ReasoningEngineServiceGrpcAsyncIOTransport,
+ ),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..44dcd2782360155e4e2b393d9f9b1d8581eb4a91
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py
@@ -0,0 +1,13515 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation
+from google.api_core import operation_async # type: ignore
+from google.api_core import operations_v1
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.schedule_service import (
+ ScheduleServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.schedule_service import (
+ ScheduleServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.schedule_service import pagers
+from google.cloud.aiplatform_v1beta1.services.schedule_service import transports
+from google.cloud.aiplatform_v1beta1.types import accelerator_type
+from google.cloud.aiplatform_v1beta1.types import artifact
+from google.cloud.aiplatform_v1beta1.types import context
+from google.cloud.aiplatform_v1beta1.types import encryption_spec
+from google.cloud.aiplatform_v1beta1.types import execution
+from google.cloud.aiplatform_v1beta1.types import explanation
+from google.cloud.aiplatform_v1beta1.types import explanation_metadata
+from google.cloud.aiplatform_v1beta1.types import io
+from google.cloud.aiplatform_v1beta1.types import job_state
+from google.cloud.aiplatform_v1beta1.types import machine_resources
+from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert
+from google.cloud.aiplatform_v1beta1.types import model_monitoring_job
+from google.cloud.aiplatform_v1beta1.types import model_monitoring_service
+from google.cloud.aiplatform_v1beta1.types import model_monitoring_spec
+from google.cloud.aiplatform_v1beta1.types import network_spec
+from google.cloud.aiplatform_v1beta1.types import notebook_execution_job
+from google.cloud.aiplatform_v1beta1.types import notebook_service
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.cloud.aiplatform_v1beta1.types import pipeline_failure_policy
+from google.cloud.aiplatform_v1beta1.types import pipeline_job
+from google.cloud.aiplatform_v1beta1.types import pipeline_service
+from google.cloud.aiplatform_v1beta1.types import pipeline_state
+from google.cloud.aiplatform_v1beta1.types import reservation_affinity
+from google.cloud.aiplatform_v1beta1.types import schedule
+from google.cloud.aiplatform_v1beta1.types import schedule as gca_schedule
+from google.cloud.aiplatform_v1beta1.types import schedule_service
+from google.cloud.aiplatform_v1beta1.types import service_networking
+from google.cloud.aiplatform_v1beta1.types import ui_pipeline_spec
+from google.cloud.aiplatform_v1beta1.types import value
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import any_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
+from google.type import interval_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert ScheduleServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ ScheduleServiceClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ ScheduleServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ ScheduleServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ ScheduleServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ ScheduleServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+ )
+
+
+def test__read_environment_variables():
+ assert ScheduleServiceClient._read_environment_variables() == (False, "auto", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert ScheduleServiceClient._read_environment_variables() == (
+ True,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert ScheduleServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ ScheduleServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert ScheduleServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert ScheduleServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert ScheduleServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ ScheduleServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert ScheduleServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert ScheduleServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ ScheduleServiceClient._get_client_cert_source(mock_provided_cert_source, False)
+ is None
+ )
+ assert (
+ ScheduleServiceClient._get_client_cert_source(mock_provided_cert_source, True)
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ ScheduleServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ ScheduleServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ ScheduleServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ScheduleServiceClient),
+)
+@mock.patch.object(
+ ScheduleServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ScheduleServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = ScheduleServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = ScheduleServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = ScheduleServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ ScheduleServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ ScheduleServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == ScheduleServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ ScheduleServiceClient._get_api_endpoint(None, None, default_universe, "auto")
+ == default_endpoint
+ )
+ assert (
+ ScheduleServiceClient._get_api_endpoint(None, None, default_universe, "always")
+ == ScheduleServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ ScheduleServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == ScheduleServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ ScheduleServiceClient._get_api_endpoint(None, None, mock_universe, "never")
+ == mock_endpoint
+ )
+ assert (
+ ScheduleServiceClient._get_api_endpoint(None, None, default_universe, "never")
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ ScheduleServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ ScheduleServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ ScheduleServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ ScheduleServiceClient._get_universe_domain(None, None)
+ == ScheduleServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ ScheduleServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (ScheduleServiceClient, "grpc"),
+ (ScheduleServiceAsyncClient, "grpc_asyncio"),
+ (ScheduleServiceClient, "rest"),
+ ],
+)
+def test_schedule_service_client_from_service_account_info(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.ScheduleServiceGrpcTransport, "grpc"),
+ (transports.ScheduleServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.ScheduleServiceRestTransport, "rest"),
+ ],
+)
+def test_schedule_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (ScheduleServiceClient, "grpc"),
+ (ScheduleServiceAsyncClient, "grpc_asyncio"),
+ (ScheduleServiceClient, "rest"),
+ ],
+)
+def test_schedule_service_client_from_service_account_file(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_schedule_service_client_get_transport_class():
+ transport = ScheduleServiceClient.get_transport_class()
+ available_transports = [
+ transports.ScheduleServiceGrpcTransport,
+ transports.ScheduleServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = ScheduleServiceClient.get_transport_class("grpc")
+ assert transport == transports.ScheduleServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (ScheduleServiceClient, transports.ScheduleServiceGrpcTransport, "grpc"),
+ (
+ ScheduleServiceAsyncClient,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (ScheduleServiceClient, transports.ScheduleServiceRestTransport, "rest"),
+ ],
+)
+@mock.patch.object(
+ ScheduleServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ScheduleServiceClient),
+)
+@mock.patch.object(
+ ScheduleServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ScheduleServiceAsyncClient),
+)
+def test_schedule_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(ScheduleServiceClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(ScheduleServiceClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ ScheduleServiceClient,
+ transports.ScheduleServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ ScheduleServiceAsyncClient,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ ScheduleServiceClient,
+ transports.ScheduleServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ ScheduleServiceAsyncClient,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (
+ ScheduleServiceClient,
+ transports.ScheduleServiceRestTransport,
+ "rest",
+ "true",
+ ),
+ (
+ ScheduleServiceClient,
+ transports.ScheduleServiceRestTransport,
+ "rest",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ ScheduleServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ScheduleServiceClient),
+)
+@mock.patch.object(
+ ScheduleServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ScheduleServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_schedule_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [ScheduleServiceClient, ScheduleServiceAsyncClient]
+)
+@mock.patch.object(
+ ScheduleServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(ScheduleServiceClient),
+)
+@mock.patch.object(
+ ScheduleServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(ScheduleServiceAsyncClient),
+)
+def test_schedule_service_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [ScheduleServiceClient, ScheduleServiceAsyncClient]
+)
+@mock.patch.object(
+ ScheduleServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ScheduleServiceClient),
+)
+@mock.patch.object(
+ ScheduleServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(ScheduleServiceAsyncClient),
+)
+def test_schedule_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = ScheduleServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = ScheduleServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = ScheduleServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (ScheduleServiceClient, transports.ScheduleServiceGrpcTransport, "grpc"),
+ (
+ ScheduleServiceAsyncClient,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (ScheduleServiceClient, transports.ScheduleServiceRestTransport, "rest"),
+ ],
+)
+def test_schedule_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ ScheduleServiceClient,
+ transports.ScheduleServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ ScheduleServiceAsyncClient,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (ScheduleServiceClient, transports.ScheduleServiceRestTransport, "rest", None),
+ ],
+)
+def test_schedule_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_schedule_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.schedule_service.transports.ScheduleServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = ScheduleServiceClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ ScheduleServiceClient,
+ transports.ScheduleServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ ScheduleServiceAsyncClient,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_schedule_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.CreateScheduleRequest,
+ dict,
+ ],
+)
+def test_create_schedule(request_type, transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ cron="cron_value",
+ )
+ response = client.create_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.CreateScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == gca_schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+def test_create_schedule_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = schedule_service.CreateScheduleRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_schedule(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == schedule_service.CreateScheduleRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_schedule_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.create_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.create_schedule] = mock_rpc
+ request = {}
+ client.create_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_schedule_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_schedule
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_schedule
+ ] = mock_rpc
+
+ request = {}
+ await client.create_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.create_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_schedule_async(
+ transport: str = "grpc_asyncio", request_type=schedule_service.CreateScheduleRequest
+):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ )
+ )
+ response = await client.create_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.CreateScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == gca_schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+@pytest.mark.asyncio
+async def test_create_schedule_async_from_dict():
+ await test_create_schedule_async(request_type=dict)
+
+
+def test_create_schedule_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.CreateScheduleRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ call.return_value = gca_schedule.Schedule()
+ client.create_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_schedule_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.CreateScheduleRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_schedule.Schedule()
+ )
+ await client.create_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_schedule_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_schedule.Schedule()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_schedule(
+ parent="parent_value",
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].schedule
+ mock_val = gca_schedule.Schedule(cron="cron_value")
+ assert arg == mock_val
+
+
+def test_create_schedule_flattened_error():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_schedule(
+ schedule_service.CreateScheduleRequest(),
+ parent="parent_value",
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_schedule_flattened_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_schedule.Schedule()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_schedule.Schedule()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_schedule(
+ parent="parent_value",
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].schedule
+ mock_val = gca_schedule.Schedule(cron="cron_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_schedule_flattened_error_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_schedule(
+ schedule_service.CreateScheduleRequest(),
+ parent="parent_value",
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.DeleteScheduleRequest,
+ dict,
+ ],
+)
+def test_delete_schedule(request_type, transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.DeleteScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_schedule_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = schedule_service.DeleteScheduleRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_schedule(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == schedule_service.DeleteScheduleRequest(
+ name="name_value",
+ )
+
+
+def test_delete_schedule_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.delete_schedule] = mock_rpc
+ request = {}
+ client.delete_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_schedule_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_schedule
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_schedule
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_schedule_async(
+ transport: str = "grpc_asyncio", request_type=schedule_service.DeleteScheduleRequest
+):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.DeleteScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_schedule_async_from_dict():
+ await test_delete_schedule_async(request_type=dict)
+
+
+def test_delete_schedule_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.DeleteScheduleRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_schedule_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.DeleteScheduleRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_schedule_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_schedule(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_schedule_flattened_error():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_schedule(
+ schedule_service.DeleteScheduleRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_schedule_flattened_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_schedule(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_schedule_flattened_error_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_schedule(
+ schedule_service.DeleteScheduleRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.GetScheduleRequest,
+ dict,
+ ],
+)
+def test_get_schedule(request_type, transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ cron="cron_value",
+ )
+ response = client.get_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.GetScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+def test_get_schedule_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = schedule_service.GetScheduleRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_schedule(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == schedule_service.GetScheduleRequest(
+ name="name_value",
+ )
+
+
+def test_get_schedule_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_schedule] = mock_rpc
+ request = {}
+ client.get_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_schedule_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_schedule
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_schedule
+ ] = mock_rpc
+
+ request = {}
+ await client.get_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_schedule_async(
+ transport: str = "grpc_asyncio", request_type=schedule_service.GetScheduleRequest
+):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ )
+ )
+ response = await client.get_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.GetScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+@pytest.mark.asyncio
+async def test_get_schedule_async_from_dict():
+ await test_get_schedule_async(request_type=dict)
+
+
+def test_get_schedule_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.GetScheduleRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ call.return_value = schedule.Schedule()
+ client.get_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_schedule_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.GetScheduleRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(schedule.Schedule())
+ await client.get_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_schedule_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = schedule.Schedule()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_schedule(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_schedule_flattened_error():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_schedule(
+ schedule_service.GetScheduleRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_schedule_flattened_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = schedule.Schedule()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(schedule.Schedule())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_schedule(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_schedule_flattened_error_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_schedule(
+ schedule_service.GetScheduleRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.ListSchedulesRequest,
+ dict,
+ ],
+)
+def test_list_schedules(request_type, transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = schedule_service.ListSchedulesResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_schedules(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.ListSchedulesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSchedulesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_schedules_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = schedule_service.ListSchedulesRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ order_by="order_by_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_schedules(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == schedule_service.ListSchedulesRequest(
+ parent="parent_value",
+ filter="filter_value",
+ page_token="page_token_value",
+ order_by="order_by_value",
+ )
+
+
+def test_list_schedules_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_schedules in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.list_schedules] = mock_rpc
+ request = {}
+ client.list_schedules(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_schedules(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_schedules_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_schedules
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_schedules
+ ] = mock_rpc
+
+ request = {}
+ await client.list_schedules(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_schedules(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_schedules_async(
+ transport: str = "grpc_asyncio", request_type=schedule_service.ListSchedulesRequest
+):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ schedule_service.ListSchedulesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_schedules(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.ListSchedulesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSchedulesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_schedules_async_from_dict():
+ await test_list_schedules_async(request_type=dict)
+
+
+def test_list_schedules_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.ListSchedulesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ call.return_value = schedule_service.ListSchedulesResponse()
+ client.list_schedules(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_schedules_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.ListSchedulesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ schedule_service.ListSchedulesResponse()
+ )
+ await client.list_schedules(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_schedules_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = schedule_service.ListSchedulesResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_schedules(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_schedules_flattened_error():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_schedules(
+ schedule_service.ListSchedulesRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_schedules_flattened_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = schedule_service.ListSchedulesResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ schedule_service.ListSchedulesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_schedules(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_schedules_flattened_error_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_schedules(
+ schedule_service.ListSchedulesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_schedules_pager(transport_name: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ next_page_token="abc",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[],
+ next_page_token="def",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ ],
+ next_page_token="ghi",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_schedules(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, schedule.Schedule) for i in results)
+
+
+def test_list_schedules_pages(transport_name: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ next_page_token="abc",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[],
+ next_page_token="def",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ ],
+ next_page_token="ghi",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_schedules(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_schedules_async_pager():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schedules), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ next_page_token="abc",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[],
+ next_page_token="def",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ ],
+ next_page_token="ghi",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_schedules(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, schedule.Schedule) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_schedules_async_pages():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schedules), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ next_page_token="abc",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[],
+ next_page_token="def",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ ],
+ next_page_token="ghi",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_schedules(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.PauseScheduleRequest,
+ dict,
+ ],
+)
+def test_pause_schedule(request_type, transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.pause_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.PauseScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_pause_schedule_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = schedule_service.PauseScheduleRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.pause_schedule(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == schedule_service.PauseScheduleRequest(
+ name="name_value",
+ )
+
+
+def test_pause_schedule_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.pause_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.pause_schedule] = mock_rpc
+ request = {}
+ client.pause_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.pause_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_pause_schedule_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.pause_schedule
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.pause_schedule
+ ] = mock_rpc
+
+ request = {}
+ await client.pause_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.pause_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_pause_schedule_async(
+ transport: str = "grpc_asyncio", request_type=schedule_service.PauseScheduleRequest
+):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.pause_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.PauseScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_pause_schedule_async_from_dict():
+ await test_pause_schedule_async(request_type=dict)
+
+
+def test_pause_schedule_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.PauseScheduleRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ call.return_value = None
+ client.pause_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_pause_schedule_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.PauseScheduleRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.pause_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_pause_schedule_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.pause_schedule(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_pause_schedule_flattened_error():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.pause_schedule(
+ schedule_service.PauseScheduleRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_pause_schedule_flattened_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.pause_schedule(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_pause_schedule_flattened_error_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.pause_schedule(
+ schedule_service.PauseScheduleRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.ResumeScheduleRequest,
+ dict,
+ ],
+)
+def test_resume_schedule(request_type, transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.resume_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.ResumeScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_resume_schedule_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = schedule_service.ResumeScheduleRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.resume_schedule(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == schedule_service.ResumeScheduleRequest(
+ name="name_value",
+ )
+
+
+def test_resume_schedule_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.resume_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.resume_schedule] = mock_rpc
+ request = {}
+ client.resume_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.resume_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_resume_schedule_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.resume_schedule
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.resume_schedule
+ ] = mock_rpc
+
+ request = {}
+ await client.resume_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.resume_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_resume_schedule_async(
+ transport: str = "grpc_asyncio", request_type=schedule_service.ResumeScheduleRequest
+):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.resume_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.ResumeScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_resume_schedule_async_from_dict():
+ await test_resume_schedule_async(request_type=dict)
+
+
+def test_resume_schedule_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.ResumeScheduleRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ call.return_value = None
+ client.resume_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_resume_schedule_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.ResumeScheduleRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.resume_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_resume_schedule_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.resume_schedule(
+ name="name_value",
+ catch_up=True,
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].catch_up
+ mock_val = True
+ assert arg == mock_val
+
+
+def test_resume_schedule_flattened_error():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.resume_schedule(
+ schedule_service.ResumeScheduleRequest(),
+ name="name_value",
+ catch_up=True,
+ )
+
+
+@pytest.mark.asyncio
+async def test_resume_schedule_flattened_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.resume_schedule(
+ name="name_value",
+ catch_up=True,
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].catch_up
+ mock_val = True
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_resume_schedule_flattened_error_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.resume_schedule(
+ schedule_service.ResumeScheduleRequest(),
+ name="name_value",
+ catch_up=True,
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.UpdateScheduleRequest,
+ dict,
+ ],
+)
+def test_update_schedule(request_type, transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ cron="cron_value",
+ )
+ response = client.update_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.UpdateScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == gca_schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+def test_update_schedule_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = schedule_service.UpdateScheduleRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.update_schedule(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == schedule_service.UpdateScheduleRequest()
+
+
+def test_update_schedule_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.update_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.update_schedule] = mock_rpc
+ request = {}
+ client.update_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.update_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_schedule_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.update_schedule
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.update_schedule
+ ] = mock_rpc
+
+ request = {}
+ await client.update_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.update_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_schedule_async(
+ transport: str = "grpc_asyncio", request_type=schedule_service.UpdateScheduleRequest
+):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ )
+ )
+ response = await client.update_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = schedule_service.UpdateScheduleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == gca_schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+@pytest.mark.asyncio
+async def test_update_schedule_async_from_dict():
+ await test_update_schedule_async(request_type=dict)
+
+
+def test_update_schedule_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.UpdateScheduleRequest()
+
+ request.schedule.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ call.return_value = gca_schedule.Schedule()
+ client.update_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "schedule.name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_update_schedule_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = schedule_service.UpdateScheduleRequest()
+
+ request.schedule.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_schedule.Schedule()
+ )
+ await client.update_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "schedule.name=name_value",
+ ) in kw["metadata"]
+
+
+def test_update_schedule_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_schedule.Schedule()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_schedule(
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].schedule
+ mock_val = gca_schedule.Schedule(cron="cron_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+def test_update_schedule_flattened_error():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_schedule(
+ schedule_service.UpdateScheduleRequest(),
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_schedule_flattened_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_schedule.Schedule()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_schedule.Schedule()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_schedule(
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].schedule
+ mock_val = gca_schedule.Schedule(cron="cron_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_update_schedule_flattened_error_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_schedule(
+ schedule_service.UpdateScheduleRequest(),
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+def test_create_schedule_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.create_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.create_schedule] = mock_rpc
+
+ request = {}
+ client.create_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.create_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_schedule_rest_required_fields(
+ request_type=schedule_service.CreateScheduleRequest,
+):
+ transport_class = transports.ScheduleServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_schedule.Schedule()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_schedule(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_schedule_rest_unset_required_fields():
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_schedule._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "schedule",
+ )
+ )
+ )
+
+
+def test_create_schedule_rest_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_schedule.Schedule()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = gca_schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_schedule(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/schedules"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_schedule_rest_flattened_error(transport: str = "rest"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_schedule(
+ schedule_service.CreateScheduleRequest(),
+ parent="parent_value",
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ )
+
+
+def test_delete_schedule_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.delete_schedule] = mock_rpc
+
+ request = {}
+ client.delete_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_schedule_rest_required_fields(
+ request_type=schedule_service.DeleteScheduleRequest,
+):
+ transport_class = transports.ScheduleServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_schedule(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_schedule_rest_unset_required_fields():
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_schedule._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_schedule_rest_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/schedules/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_schedule(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/schedules/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_schedule_rest_flattened_error(transport: str = "rest"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_schedule(
+ schedule_service.DeleteScheduleRequest(),
+ name="name_value",
+ )
+
+
+def test_get_schedule_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_schedule] = mock_rpc
+
+ request = {}
+ client.get_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_schedule_rest_required_fields(
+ request_type=schedule_service.GetScheduleRequest,
+):
+ transport_class = transports.ScheduleServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = schedule.Schedule()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_schedule(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_schedule_rest_unset_required_fields():
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_schedule._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_schedule_rest_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = schedule.Schedule()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/schedules/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_schedule(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/schedules/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_schedule_rest_flattened_error(transport: str = "rest"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_schedule(
+ schedule_service.GetScheduleRequest(),
+ name="name_value",
+ )
+
+
+def test_list_schedules_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_schedules in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.list_schedules] = mock_rpc
+
+ request = {}
+ client.list_schedules(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_schedules(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_schedules_rest_required_fields(
+ request_type=schedule_service.ListSchedulesRequest,
+):
+ transport_class = transports.ScheduleServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_schedules._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_schedules._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "order_by",
+ "page_size",
+ "page_token",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = schedule_service.ListSchedulesResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = schedule_service.ListSchedulesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_schedules(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_schedules_rest_unset_required_fields():
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_schedules._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "orderBy",
+ "pageSize",
+ "pageToken",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_schedules_rest_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = schedule_service.ListSchedulesResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = schedule_service.ListSchedulesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_schedules(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/schedules"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_schedules_rest_flattened_error(transport: str = "rest"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_schedules(
+ schedule_service.ListSchedulesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_schedules_rest_pager(transport: str = "rest"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ next_page_token="abc",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[],
+ next_page_token="def",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ ],
+ next_page_token="ghi",
+ ),
+ schedule_service.ListSchedulesResponse(
+ schedules=[
+ schedule.Schedule(),
+ schedule.Schedule(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ schedule_service.ListSchedulesResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_schedules(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, schedule.Schedule) for i in results)
+
+ pages = list(client.list_schedules(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_pause_schedule_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.pause_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.pause_schedule] = mock_rpc
+
+ request = {}
+ client.pause_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.pause_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_pause_schedule_rest_required_fields(
+ request_type=schedule_service.PauseScheduleRequest,
+):
+ transport_class = transports.ScheduleServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).pause_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).pause_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.pause_schedule(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_pause_schedule_rest_unset_required_fields():
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.pause_schedule._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_pause_schedule_rest_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/schedules/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.pause_schedule(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/schedules/*}:pause"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_pause_schedule_rest_flattened_error(transport: str = "rest"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.pause_schedule(
+ schedule_service.PauseScheduleRequest(),
+ name="name_value",
+ )
+
+
+def test_resume_schedule_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.resume_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.resume_schedule] = mock_rpc
+
+ request = {}
+ client.resume_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.resume_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_resume_schedule_rest_required_fields(
+ request_type=schedule_service.ResumeScheduleRequest,
+):
+ transport_class = transports.ScheduleServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).resume_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).resume_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.resume_schedule(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_resume_schedule_rest_unset_required_fields():
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.resume_schedule._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_resume_schedule_rest_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/schedules/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ catch_up=True,
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.resume_schedule(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/schedules/*}:resume"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_resume_schedule_rest_flattened_error(transport: str = "rest"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.resume_schedule(
+ schedule_service.ResumeScheduleRequest(),
+ name="name_value",
+ catch_up=True,
+ )
+
+
+def test_update_schedule_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.update_schedule in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.update_schedule] = mock_rpc
+
+ request = {}
+ client.update_schedule(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.update_schedule(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_update_schedule_rest_required_fields(
+ request_type=schedule_service.UpdateScheduleRequest,
+):
+ transport_class = transports.ScheduleServiceRestTransport
+
+ request_init = {}
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_schedule._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_schedule._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("update_mask",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = gca_schedule.Schedule()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "patch",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.update_schedule(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_update_schedule_rest_unset_required_fields():
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.update_schedule._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(("updateMask",))
+ & set(
+ (
+ "schedule",
+ "updateMask",
+ )
+ )
+ )
+
+
+def test_update_schedule_rest_flattened():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_schedule.Schedule()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "schedule": {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = gca_schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.update_schedule(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{schedule.name=projects/*/locations/*/schedules/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_update_schedule_rest_flattened_error(transport: str = "rest"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_schedule(
+ schedule_service.UpdateScheduleRequest(),
+ schedule=gca_schedule.Schedule(cron="cron_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.ScheduleServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.ScheduleServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = ScheduleServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.ScheduleServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = ScheduleServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = ScheduleServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.ScheduleServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = ScheduleServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.ScheduleServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = ScheduleServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.ScheduleServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.ScheduleServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ScheduleServiceGrpcTransport,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ transports.ScheduleServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = ScheduleServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_schedule_empty_call_grpc():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ call.return_value = gca_schedule.Schedule()
+ client.create_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.CreateScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_schedule_empty_call_grpc():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.DeleteScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_schedule_empty_call_grpc():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ call.return_value = schedule.Schedule()
+ client.get_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.GetScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_schedules_empty_call_grpc():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ call.return_value = schedule_service.ListSchedulesResponse()
+ client.list_schedules(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.ListSchedulesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_pause_schedule_empty_call_grpc():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ call.return_value = None
+ client.pause_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.PauseScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_resume_schedule_empty_call_grpc():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ call.return_value = None
+ client.resume_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.ResumeScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_schedule_empty_call_grpc():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ call.return_value = gca_schedule.Schedule()
+ client.update_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.UpdateScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = ScheduleServiceAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_schedule_empty_call_grpc_asyncio():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ )
+ )
+ await client.create_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.CreateScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_schedule_empty_call_grpc_asyncio():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.DeleteScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_schedule_empty_call_grpc_asyncio():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ )
+ )
+ await client.get_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.GetScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_schedules_empty_call_grpc_asyncio():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ schedule_service.ListSchedulesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_schedules(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.ListSchedulesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_pause_schedule_empty_call_grpc_asyncio():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.pause_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.PauseScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_resume_schedule_empty_call_grpc_asyncio():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.resume_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.ResumeScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_schedule_empty_call_grpc_asyncio():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ )
+ )
+ await client.update_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.UpdateScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = ScheduleServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_create_schedule_rest_bad_request(
+ request_type=schedule_service.CreateScheduleRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_schedule(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.CreateScheduleRequest,
+ dict,
+ ],
+)
+def test_create_schedule_rest_call_success(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["schedule"] = {
+ "cron": "cron_value",
+ "create_pipeline_job_request": {
+ "parent": "parent_value",
+ "pipeline_job": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "pipeline_spec": {"fields": {}},
+ "state": 1,
+ "job_detail": {
+ "pipeline_context": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "parent_contexts": [
+ "parent_contexts_value1",
+ "parent_contexts_value2",
+ ],
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "pipeline_run_context": {},
+ "task_details": [
+ {
+ "task_id": 735,
+ "parent_task_id": 1480,
+ "task_name": "task_name_value",
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "executor_detail": {
+ "container_detail": {
+ "main_job": "main_job_value",
+ "pre_caching_check_job": "pre_caching_check_job_value",
+ "failed_main_jobs": [
+ "failed_main_jobs_value1",
+ "failed_main_jobs_value2",
+ ],
+ "failed_pre_caching_check_jobs": [
+ "failed_pre_caching_check_jobs_value1",
+ "failed_pre_caching_check_jobs_value2",
+ ],
+ },
+ "custom_job_detail": {
+ "job": "job_value",
+ "failed_jobs": [
+ "failed_jobs_value1",
+ "failed_jobs_value2",
+ ],
+ },
+ },
+ "state": 1,
+ "execution": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "state": 1,
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "pipeline_task_status": [
+ {"update_time": {}, "state": 1, "error": {}}
+ ],
+ "inputs": {},
+ "outputs": {},
+ }
+ ],
+ },
+ "error": {},
+ "labels": {},
+ "runtime_config": {
+ "parameters": {},
+ "gcs_output_directory": "gcs_output_directory_value",
+ "parameter_values": {},
+ "failure_policy": 1,
+ "input_artifacts": {},
+ "default_runtime": {
+ "persistent_resource_runtime_detail": {
+ "persistent_resource_name": "persistent_resource_name_value",
+ "task_resource_unavailable_wait_time_ms": 4030,
+ "task_resource_unavailable_timeout_behavior": 1,
+ }
+ },
+ },
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "psc_interface_config": {
+ "network_attachment": "network_attachment_value"
+ },
+ "template_uri": "template_uri_value",
+ "template_metadata": {"version": "version_value"},
+ "schedule_name": "schedule_name_value",
+ "preflight_validations": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ "original_pipeline_job_id": 2512,
+ "pipeline_task_rerun_configs": [
+ {
+ "task_id": 735,
+ "task_name": "task_name_value",
+ "inputs": {"artifacts": {}, "parameter_values": {}},
+ "skip_task": True,
+ "skip_downstream_tasks": True,
+ }
+ ],
+ },
+ "pipeline_job_id": "pipeline_job_id_value",
+ },
+ "create_model_monitoring_job_request": {
+ "parent": "parent_value",
+ "model_monitoring_job": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "model_monitoring_spec": {
+ "objective_spec": {
+ "tabular_objective": {
+ "feature_drift_spec": {
+ "features": ["features_value1", "features_value2"],
+ "categorical_metric_type": "categorical_metric_type_value",
+ "numeric_metric_type": "numeric_metric_type_value",
+ "default_categorical_alert_condition": {
+ "threshold": 0.973
+ },
+ "default_numeric_alert_condition": {},
+ "feature_alert_conditions": {},
+ },
+ "prediction_output_drift_spec": {},
+ "feature_attribution_spec": {
+ "features": ["features_value1", "features_value2"],
+ "default_alert_condition": {},
+ "feature_alert_conditions": {},
+ "batch_explanation_dedicated_resources": {
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": [
+ "values_value1",
+ "values_value2",
+ ],
+ },
+ },
+ "starting_replica_count": 2355,
+ "max_replica_count": 1805,
+ },
+ },
+ },
+ "explanation_spec": {
+ "parameters": {
+ "sampled_shapley_attribution": {"path_count": 1077},
+ "integrated_gradients_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {
+ "noise_sigma": 0.11660000000000001,
+ "feature_noise_sigma": {
+ "noise_sigma": [
+ {"name": "name_value", "sigma": 0.529}
+ ]
+ },
+ "noisy_sample_count": 1947,
+ },
+ "blur_baseline_config": {"max_blur_sigma": 0.1482},
+ },
+ "xrai_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {},
+ "blur_baseline_config": {},
+ },
+ "examples": {
+ "example_gcs_source": {
+ "data_format": 1,
+ "gcs_source": {
+ "uris": ["uris_value1", "uris_value2"]
+ },
+ },
+ "nearest_neighbor_search_config": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {},
+ "list_value": {"values": {}},
+ },
+ "presets": {"query": 1, "modality": 1},
+ "gcs_source": {},
+ "neighbor_count": 1494,
+ },
+ "top_k": 541,
+ "output_indices": {},
+ },
+ "metadata": {
+ "inputs": {},
+ "outputs": {},
+ "feature_attributions_schema_uri": "feature_attributions_schema_uri_value",
+ "latent_space_source": "latent_space_source_value",
+ },
+ },
+ "baseline_dataset": {
+ "columnized_dataset": {
+ "vertex_dataset": "vertex_dataset_value",
+ "gcs_source": {
+ "gcs_uri": "gcs_uri_value",
+ "format_": 1,
+ },
+ "bigquery_source": {
+ "table_uri": "table_uri_value",
+ "query": "query_value",
+ },
+ "timestamp_field": "timestamp_field_value",
+ },
+ "batch_prediction_output": {
+ "batch_prediction_job": "batch_prediction_job_value"
+ },
+ "vertex_endpoint_logs": {
+ "endpoints": ["endpoints_value1", "endpoints_value2"]
+ },
+ "time_interval": {"start_time": {}, "end_time": {}},
+ "time_offset": {
+ "offset": "offset_value",
+ "window": "window_value",
+ },
+ },
+ "target_dataset": {},
+ },
+ "notification_spec": {
+ "email_config": {
+ "user_emails": ["user_emails_value1", "user_emails_value2"]
+ },
+ "enable_cloud_logging": True,
+ "notification_channel_configs": [
+ {"notification_channel": "notification_channel_value"}
+ ],
+ },
+ "output_spec": {
+ "gcs_base_directory": {
+ "output_uri_prefix": "output_uri_prefix_value"
+ }
+ },
+ },
+ "create_time": {},
+ "update_time": {},
+ "state": 1,
+ "schedule": "schedule_value",
+ "job_execution_detail": {
+ "baseline_datasets": [
+ {"location": "location_value", "time_range": {}}
+ ],
+ "target_datasets": {},
+ "objective_status": {},
+ "error": {},
+ },
+ "schedule_time": {},
+ },
+ "model_monitoring_job_id": "model_monitoring_job_id_value",
+ },
+ "create_notebook_execution_job_request": {
+ "parent": "parent_value",
+ "notebook_execution_job": {
+ "dataform_repository_source": {
+ "dataform_repository_resource_name": "dataform_repository_resource_name_value",
+ "commit_sha": "commit_sha_value",
+ },
+ "gcs_notebook_source": {
+ "uri": "uri_value",
+ "generation": "generation_value",
+ },
+ "direct_notebook_source": {"content": b"content_blob"},
+ "notebook_runtime_template_resource_name": "notebook_runtime_template_resource_name_value",
+ "custom_environment_spec": {
+ "machine_spec": {},
+ "persistent_disk_spec": {
+ "disk_type": "disk_type_value",
+ "disk_size_gb": 1261,
+ },
+ "network_spec": {
+ "enable_internet_access": True,
+ "network": "network_value",
+ "subnetwork": "subnetwork_value",
+ },
+ },
+ "gcs_output_uri": "gcs_output_uri_value",
+ "execution_user": "execution_user_value",
+ "service_account": "service_account_value",
+ "workbench_runtime": {},
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "execution_timeout": {"seconds": 751, "nanos": 543},
+ "schedule_resource_name": "schedule_resource_name_value",
+ "job_state": 1,
+ "status": {},
+ "create_time": {},
+ "update_time": {},
+ "labels": {},
+ "kernel_name": "kernel_name_value",
+ "encryption_spec": {},
+ },
+ "notebook_execution_job_id": "notebook_execution_job_id_value",
+ },
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "start_time": {},
+ "end_time": {},
+ "max_run_count": 1410,
+ "started_run_count": 1843,
+ "state": 1,
+ "create_time": {},
+ "update_time": {},
+ "next_run_time": {},
+ "last_pause_time": {},
+ "last_resume_time": {},
+ "max_concurrent_run_count": 2596,
+ "allow_queueing": True,
+ "catch_up": True,
+ "last_scheduled_run_response": {
+ "scheduled_run_time": {},
+ "run_response": "run_response_value",
+ },
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = schedule_service.CreateScheduleRequest.meta.fields["schedule"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["schedule"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["schedule"][field])):
+ del request_init["schedule"][field][i][subfield]
+ else:
+ del request_init["schedule"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ cron="cron_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == gca_schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_schedule_rest_interceptors(null_interceptor):
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "post_create_schedule"
+ ) as post, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "pre_create_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.CreateScheduleRequest.pb(
+ schedule_service.CreateScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_schedule.Schedule.to_json(gca_schedule.Schedule())
+ req.return_value.content = return_value
+
+ request = schedule_service.CreateScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_schedule.Schedule()
+
+ client.create_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_schedule_rest_bad_request(
+ request_type=schedule_service.DeleteScheduleRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_schedule(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.DeleteScheduleRequest,
+ dict,
+ ],
+)
+def test_delete_schedule_rest_call_success(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_schedule_rest_interceptors(null_interceptor):
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "post_delete_schedule"
+ ) as post, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "pre_delete_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.DeleteScheduleRequest.pb(
+ schedule_service.DeleteScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = schedule_service.DeleteScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_schedule_rest_bad_request(
+ request_type=schedule_service.GetScheduleRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_schedule(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.GetScheduleRequest,
+ dict,
+ ],
+)
+def test_get_schedule_rest_call_success(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ cron="cron_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_schedule_rest_interceptors(null_interceptor):
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "post_get_schedule"
+ ) as post, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "pre_get_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.GetScheduleRequest.pb(
+ schedule_service.GetScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = schedule.Schedule.to_json(schedule.Schedule())
+ req.return_value.content = return_value
+
+ request = schedule_service.GetScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = schedule.Schedule()
+
+ client.get_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_schedules_rest_bad_request(
+ request_type=schedule_service.ListSchedulesRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_schedules(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.ListSchedulesRequest,
+ dict,
+ ],
+)
+def test_list_schedules_rest_call_success(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = schedule_service.ListSchedulesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = schedule_service.ListSchedulesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_schedules(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSchedulesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_schedules_rest_interceptors(null_interceptor):
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "post_list_schedules"
+ ) as post, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "pre_list_schedules"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.ListSchedulesRequest.pb(
+ schedule_service.ListSchedulesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = schedule_service.ListSchedulesResponse.to_json(
+ schedule_service.ListSchedulesResponse()
+ )
+ req.return_value.content = return_value
+
+ request = schedule_service.ListSchedulesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = schedule_service.ListSchedulesResponse()
+
+ client.list_schedules(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_pause_schedule_rest_bad_request(
+ request_type=schedule_service.PauseScheduleRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.pause_schedule(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.PauseScheduleRequest,
+ dict,
+ ],
+)
+def test_pause_schedule_rest_call_success(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.pause_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_pause_schedule_rest_interceptors(null_interceptor):
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "pre_pause_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = schedule_service.PauseScheduleRequest.pb(
+ schedule_service.PauseScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = schedule_service.PauseScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.pause_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_resume_schedule_rest_bad_request(
+ request_type=schedule_service.ResumeScheduleRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.resume_schedule(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.ResumeScheduleRequest,
+ dict,
+ ],
+)
+def test_resume_schedule_rest_call_success(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.resume_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_resume_schedule_rest_interceptors(null_interceptor):
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "pre_resume_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = schedule_service.ResumeScheduleRequest.pb(
+ schedule_service.ResumeScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = schedule_service.ResumeScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.resume_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+def test_update_schedule_rest_bad_request(
+ request_type=schedule_service.UpdateScheduleRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "schedule": {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.update_schedule(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.UpdateScheduleRequest,
+ dict,
+ ],
+)
+def test_update_schedule_rest_call_success(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "schedule": {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ }
+ request_init["schedule"] = {
+ "cron": "cron_value",
+ "create_pipeline_job_request": {
+ "parent": "parent_value",
+ "pipeline_job": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "pipeline_spec": {"fields": {}},
+ "state": 1,
+ "job_detail": {
+ "pipeline_context": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "parent_contexts": [
+ "parent_contexts_value1",
+ "parent_contexts_value2",
+ ],
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "pipeline_run_context": {},
+ "task_details": [
+ {
+ "task_id": 735,
+ "parent_task_id": 1480,
+ "task_name": "task_name_value",
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "executor_detail": {
+ "container_detail": {
+ "main_job": "main_job_value",
+ "pre_caching_check_job": "pre_caching_check_job_value",
+ "failed_main_jobs": [
+ "failed_main_jobs_value1",
+ "failed_main_jobs_value2",
+ ],
+ "failed_pre_caching_check_jobs": [
+ "failed_pre_caching_check_jobs_value1",
+ "failed_pre_caching_check_jobs_value2",
+ ],
+ },
+ "custom_job_detail": {
+ "job": "job_value",
+ "failed_jobs": [
+ "failed_jobs_value1",
+ "failed_jobs_value2",
+ ],
+ },
+ },
+ "state": 1,
+ "execution": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "state": 1,
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "pipeline_task_status": [
+ {"update_time": {}, "state": 1, "error": {}}
+ ],
+ "inputs": {},
+ "outputs": {},
+ }
+ ],
+ },
+ "error": {},
+ "labels": {},
+ "runtime_config": {
+ "parameters": {},
+ "gcs_output_directory": "gcs_output_directory_value",
+ "parameter_values": {},
+ "failure_policy": 1,
+ "input_artifacts": {},
+ "default_runtime": {
+ "persistent_resource_runtime_detail": {
+ "persistent_resource_name": "persistent_resource_name_value",
+ "task_resource_unavailable_wait_time_ms": 4030,
+ "task_resource_unavailable_timeout_behavior": 1,
+ }
+ },
+ },
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "psc_interface_config": {
+ "network_attachment": "network_attachment_value"
+ },
+ "template_uri": "template_uri_value",
+ "template_metadata": {"version": "version_value"},
+ "schedule_name": "schedule_name_value",
+ "preflight_validations": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ "original_pipeline_job_id": 2512,
+ "pipeline_task_rerun_configs": [
+ {
+ "task_id": 735,
+ "task_name": "task_name_value",
+ "inputs": {"artifacts": {}, "parameter_values": {}},
+ "skip_task": True,
+ "skip_downstream_tasks": True,
+ }
+ ],
+ },
+ "pipeline_job_id": "pipeline_job_id_value",
+ },
+ "create_model_monitoring_job_request": {
+ "parent": "parent_value",
+ "model_monitoring_job": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "model_monitoring_spec": {
+ "objective_spec": {
+ "tabular_objective": {
+ "feature_drift_spec": {
+ "features": ["features_value1", "features_value2"],
+ "categorical_metric_type": "categorical_metric_type_value",
+ "numeric_metric_type": "numeric_metric_type_value",
+ "default_categorical_alert_condition": {
+ "threshold": 0.973
+ },
+ "default_numeric_alert_condition": {},
+ "feature_alert_conditions": {},
+ },
+ "prediction_output_drift_spec": {},
+ "feature_attribution_spec": {
+ "features": ["features_value1", "features_value2"],
+ "default_alert_condition": {},
+ "feature_alert_conditions": {},
+ "batch_explanation_dedicated_resources": {
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": [
+ "values_value1",
+ "values_value2",
+ ],
+ },
+ },
+ "starting_replica_count": 2355,
+ "max_replica_count": 1805,
+ },
+ },
+ },
+ "explanation_spec": {
+ "parameters": {
+ "sampled_shapley_attribution": {"path_count": 1077},
+ "integrated_gradients_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {
+ "noise_sigma": 0.11660000000000001,
+ "feature_noise_sigma": {
+ "noise_sigma": [
+ {"name": "name_value", "sigma": 0.529}
+ ]
+ },
+ "noisy_sample_count": 1947,
+ },
+ "blur_baseline_config": {"max_blur_sigma": 0.1482},
+ },
+ "xrai_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {},
+ "blur_baseline_config": {},
+ },
+ "examples": {
+ "example_gcs_source": {
+ "data_format": 1,
+ "gcs_source": {
+ "uris": ["uris_value1", "uris_value2"]
+ },
+ },
+ "nearest_neighbor_search_config": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {},
+ "list_value": {"values": {}},
+ },
+ "presets": {"query": 1, "modality": 1},
+ "gcs_source": {},
+ "neighbor_count": 1494,
+ },
+ "top_k": 541,
+ "output_indices": {},
+ },
+ "metadata": {
+ "inputs": {},
+ "outputs": {},
+ "feature_attributions_schema_uri": "feature_attributions_schema_uri_value",
+ "latent_space_source": "latent_space_source_value",
+ },
+ },
+ "baseline_dataset": {
+ "columnized_dataset": {
+ "vertex_dataset": "vertex_dataset_value",
+ "gcs_source": {
+ "gcs_uri": "gcs_uri_value",
+ "format_": 1,
+ },
+ "bigquery_source": {
+ "table_uri": "table_uri_value",
+ "query": "query_value",
+ },
+ "timestamp_field": "timestamp_field_value",
+ },
+ "batch_prediction_output": {
+ "batch_prediction_job": "batch_prediction_job_value"
+ },
+ "vertex_endpoint_logs": {
+ "endpoints": ["endpoints_value1", "endpoints_value2"]
+ },
+ "time_interval": {"start_time": {}, "end_time": {}},
+ "time_offset": {
+ "offset": "offset_value",
+ "window": "window_value",
+ },
+ },
+ "target_dataset": {},
+ },
+ "notification_spec": {
+ "email_config": {
+ "user_emails": ["user_emails_value1", "user_emails_value2"]
+ },
+ "enable_cloud_logging": True,
+ "notification_channel_configs": [
+ {"notification_channel": "notification_channel_value"}
+ ],
+ },
+ "output_spec": {
+ "gcs_base_directory": {
+ "output_uri_prefix": "output_uri_prefix_value"
+ }
+ },
+ },
+ "create_time": {},
+ "update_time": {},
+ "state": 1,
+ "schedule": "schedule_value",
+ "job_execution_detail": {
+ "baseline_datasets": [
+ {"location": "location_value", "time_range": {}}
+ ],
+ "target_datasets": {},
+ "objective_status": {},
+ "error": {},
+ },
+ "schedule_time": {},
+ },
+ "model_monitoring_job_id": "model_monitoring_job_id_value",
+ },
+ "create_notebook_execution_job_request": {
+ "parent": "parent_value",
+ "notebook_execution_job": {
+ "dataform_repository_source": {
+ "dataform_repository_resource_name": "dataform_repository_resource_name_value",
+ "commit_sha": "commit_sha_value",
+ },
+ "gcs_notebook_source": {
+ "uri": "uri_value",
+ "generation": "generation_value",
+ },
+ "direct_notebook_source": {"content": b"content_blob"},
+ "notebook_runtime_template_resource_name": "notebook_runtime_template_resource_name_value",
+ "custom_environment_spec": {
+ "machine_spec": {},
+ "persistent_disk_spec": {
+ "disk_type": "disk_type_value",
+ "disk_size_gb": 1261,
+ },
+ "network_spec": {
+ "enable_internet_access": True,
+ "network": "network_value",
+ "subnetwork": "subnetwork_value",
+ },
+ },
+ "gcs_output_uri": "gcs_output_uri_value",
+ "execution_user": "execution_user_value",
+ "service_account": "service_account_value",
+ "workbench_runtime": {},
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "execution_timeout": {"seconds": 751, "nanos": 543},
+ "schedule_resource_name": "schedule_resource_name_value",
+ "job_state": 1,
+ "status": {},
+ "create_time": {},
+ "update_time": {},
+ "labels": {},
+ "kernel_name": "kernel_name_value",
+ "encryption_spec": {},
+ },
+ "notebook_execution_job_id": "notebook_execution_job_id_value",
+ },
+ "name": "projects/sample1/locations/sample2/schedules/sample3",
+ "display_name": "display_name_value",
+ "start_time": {},
+ "end_time": {},
+ "max_run_count": 1410,
+ "started_run_count": 1843,
+ "state": 1,
+ "create_time": {},
+ "update_time": {},
+ "next_run_time": {},
+ "last_pause_time": {},
+ "last_resume_time": {},
+ "max_concurrent_run_count": 2596,
+ "allow_queueing": True,
+ "catch_up": True,
+ "last_scheduled_run_response": {
+ "scheduled_run_time": {},
+ "run_response": "run_response_value",
+ },
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = schedule_service.UpdateScheduleRequest.meta.fields["schedule"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["schedule"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["schedule"][field])):
+ del request_init["schedule"][field][i][subfield]
+ else:
+ del request_init["schedule"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ cron="cron_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.update_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == gca_schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_schedule_rest_interceptors(null_interceptor):
+ transport = transports.ScheduleServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.ScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "post_update_schedule"
+ ) as post, mock.patch.object(
+ transports.ScheduleServiceRestInterceptor, "pre_update_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.UpdateScheduleRequest.pb(
+ schedule_service.UpdateScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_schedule.Schedule.to_json(gca_schedule.Schedule())
+ req.return_value.content = return_value
+
+ request = schedule_service.UpdateScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_schedule.Schedule()
+
+ client.update_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_schedule_empty_call_rest():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ client.create_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.CreateScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_schedule_empty_call_rest():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ client.delete_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.DeleteScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_schedule_empty_call_rest():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ client.get_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.GetScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_schedules_empty_call_rest():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ client.list_schedules(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.ListSchedulesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_pause_schedule_empty_call_rest():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ client.pause_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.PauseScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_resume_schedule_empty_call_rest():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ client.resume_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.ResumeScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_schedule_empty_call_rest():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ client.update_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.UpdateScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+def test_schedule_service_rest_lro_client():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AbstractOperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = ScheduleServiceAsyncClient.get_transport_class("rest_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_create_schedule_rest_asyncio_bad_request(
+ request_type=schedule_service.CreateScheduleRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_schedule(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.CreateScheduleRequest,
+ dict,
+ ],
+)
+async def test_create_schedule_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["schedule"] = {
+ "cron": "cron_value",
+ "create_pipeline_job_request": {
+ "parent": "parent_value",
+ "pipeline_job": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "pipeline_spec": {"fields": {}},
+ "state": 1,
+ "job_detail": {
+ "pipeline_context": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "parent_contexts": [
+ "parent_contexts_value1",
+ "parent_contexts_value2",
+ ],
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "pipeline_run_context": {},
+ "task_details": [
+ {
+ "task_id": 735,
+ "parent_task_id": 1480,
+ "task_name": "task_name_value",
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "executor_detail": {
+ "container_detail": {
+ "main_job": "main_job_value",
+ "pre_caching_check_job": "pre_caching_check_job_value",
+ "failed_main_jobs": [
+ "failed_main_jobs_value1",
+ "failed_main_jobs_value2",
+ ],
+ "failed_pre_caching_check_jobs": [
+ "failed_pre_caching_check_jobs_value1",
+ "failed_pre_caching_check_jobs_value2",
+ ],
+ },
+ "custom_job_detail": {
+ "job": "job_value",
+ "failed_jobs": [
+ "failed_jobs_value1",
+ "failed_jobs_value2",
+ ],
+ },
+ },
+ "state": 1,
+ "execution": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "state": 1,
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "pipeline_task_status": [
+ {"update_time": {}, "state": 1, "error": {}}
+ ],
+ "inputs": {},
+ "outputs": {},
+ }
+ ],
+ },
+ "error": {},
+ "labels": {},
+ "runtime_config": {
+ "parameters": {},
+ "gcs_output_directory": "gcs_output_directory_value",
+ "parameter_values": {},
+ "failure_policy": 1,
+ "input_artifacts": {},
+ "default_runtime": {
+ "persistent_resource_runtime_detail": {
+ "persistent_resource_name": "persistent_resource_name_value",
+ "task_resource_unavailable_wait_time_ms": 4030,
+ "task_resource_unavailable_timeout_behavior": 1,
+ }
+ },
+ },
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "psc_interface_config": {
+ "network_attachment": "network_attachment_value"
+ },
+ "template_uri": "template_uri_value",
+ "template_metadata": {"version": "version_value"},
+ "schedule_name": "schedule_name_value",
+ "preflight_validations": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ "original_pipeline_job_id": 2512,
+ "pipeline_task_rerun_configs": [
+ {
+ "task_id": 735,
+ "task_name": "task_name_value",
+ "inputs": {"artifacts": {}, "parameter_values": {}},
+ "skip_task": True,
+ "skip_downstream_tasks": True,
+ }
+ ],
+ },
+ "pipeline_job_id": "pipeline_job_id_value",
+ },
+ "create_model_monitoring_job_request": {
+ "parent": "parent_value",
+ "model_monitoring_job": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "model_monitoring_spec": {
+ "objective_spec": {
+ "tabular_objective": {
+ "feature_drift_spec": {
+ "features": ["features_value1", "features_value2"],
+ "categorical_metric_type": "categorical_metric_type_value",
+ "numeric_metric_type": "numeric_metric_type_value",
+ "default_categorical_alert_condition": {
+ "threshold": 0.973
+ },
+ "default_numeric_alert_condition": {},
+ "feature_alert_conditions": {},
+ },
+ "prediction_output_drift_spec": {},
+ "feature_attribution_spec": {
+ "features": ["features_value1", "features_value2"],
+ "default_alert_condition": {},
+ "feature_alert_conditions": {},
+ "batch_explanation_dedicated_resources": {
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": [
+ "values_value1",
+ "values_value2",
+ ],
+ },
+ },
+ "starting_replica_count": 2355,
+ "max_replica_count": 1805,
+ },
+ },
+ },
+ "explanation_spec": {
+ "parameters": {
+ "sampled_shapley_attribution": {"path_count": 1077},
+ "integrated_gradients_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {
+ "noise_sigma": 0.11660000000000001,
+ "feature_noise_sigma": {
+ "noise_sigma": [
+ {"name": "name_value", "sigma": 0.529}
+ ]
+ },
+ "noisy_sample_count": 1947,
+ },
+ "blur_baseline_config": {"max_blur_sigma": 0.1482},
+ },
+ "xrai_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {},
+ "blur_baseline_config": {},
+ },
+ "examples": {
+ "example_gcs_source": {
+ "data_format": 1,
+ "gcs_source": {
+ "uris": ["uris_value1", "uris_value2"]
+ },
+ },
+ "nearest_neighbor_search_config": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {},
+ "list_value": {"values": {}},
+ },
+ "presets": {"query": 1, "modality": 1},
+ "gcs_source": {},
+ "neighbor_count": 1494,
+ },
+ "top_k": 541,
+ "output_indices": {},
+ },
+ "metadata": {
+ "inputs": {},
+ "outputs": {},
+ "feature_attributions_schema_uri": "feature_attributions_schema_uri_value",
+ "latent_space_source": "latent_space_source_value",
+ },
+ },
+ "baseline_dataset": {
+ "columnized_dataset": {
+ "vertex_dataset": "vertex_dataset_value",
+ "gcs_source": {
+ "gcs_uri": "gcs_uri_value",
+ "format_": 1,
+ },
+ "bigquery_source": {
+ "table_uri": "table_uri_value",
+ "query": "query_value",
+ },
+ "timestamp_field": "timestamp_field_value",
+ },
+ "batch_prediction_output": {
+ "batch_prediction_job": "batch_prediction_job_value"
+ },
+ "vertex_endpoint_logs": {
+ "endpoints": ["endpoints_value1", "endpoints_value2"]
+ },
+ "time_interval": {"start_time": {}, "end_time": {}},
+ "time_offset": {
+ "offset": "offset_value",
+ "window": "window_value",
+ },
+ },
+ "target_dataset": {},
+ },
+ "notification_spec": {
+ "email_config": {
+ "user_emails": ["user_emails_value1", "user_emails_value2"]
+ },
+ "enable_cloud_logging": True,
+ "notification_channel_configs": [
+ {"notification_channel": "notification_channel_value"}
+ ],
+ },
+ "output_spec": {
+ "gcs_base_directory": {
+ "output_uri_prefix": "output_uri_prefix_value"
+ }
+ },
+ },
+ "create_time": {},
+ "update_time": {},
+ "state": 1,
+ "schedule": "schedule_value",
+ "job_execution_detail": {
+ "baseline_datasets": [
+ {"location": "location_value", "time_range": {}}
+ ],
+ "target_datasets": {},
+ "objective_status": {},
+ "error": {},
+ },
+ "schedule_time": {},
+ },
+ "model_monitoring_job_id": "model_monitoring_job_id_value",
+ },
+ "create_notebook_execution_job_request": {
+ "parent": "parent_value",
+ "notebook_execution_job": {
+ "dataform_repository_source": {
+ "dataform_repository_resource_name": "dataform_repository_resource_name_value",
+ "commit_sha": "commit_sha_value",
+ },
+ "gcs_notebook_source": {
+ "uri": "uri_value",
+ "generation": "generation_value",
+ },
+ "direct_notebook_source": {"content": b"content_blob"},
+ "notebook_runtime_template_resource_name": "notebook_runtime_template_resource_name_value",
+ "custom_environment_spec": {
+ "machine_spec": {},
+ "persistent_disk_spec": {
+ "disk_type": "disk_type_value",
+ "disk_size_gb": 1261,
+ },
+ "network_spec": {
+ "enable_internet_access": True,
+ "network": "network_value",
+ "subnetwork": "subnetwork_value",
+ },
+ },
+ "gcs_output_uri": "gcs_output_uri_value",
+ "execution_user": "execution_user_value",
+ "service_account": "service_account_value",
+ "workbench_runtime": {},
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "execution_timeout": {"seconds": 751, "nanos": 543},
+ "schedule_resource_name": "schedule_resource_name_value",
+ "job_state": 1,
+ "status": {},
+ "create_time": {},
+ "update_time": {},
+ "labels": {},
+ "kernel_name": "kernel_name_value",
+ "encryption_spec": {},
+ },
+ "notebook_execution_job_id": "notebook_execution_job_id_value",
+ },
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "start_time": {},
+ "end_time": {},
+ "max_run_count": 1410,
+ "started_run_count": 1843,
+ "state": 1,
+ "create_time": {},
+ "update_time": {},
+ "next_run_time": {},
+ "last_pause_time": {},
+ "last_resume_time": {},
+ "max_concurrent_run_count": 2596,
+ "allow_queueing": True,
+ "catch_up": True,
+ "last_scheduled_run_response": {
+ "scheduled_run_time": {},
+ "run_response": "run_response_value",
+ },
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = schedule_service.CreateScheduleRequest.meta.fields["schedule"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["schedule"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["schedule"][field])):
+ del request_init["schedule"][field][i][subfield]
+ else:
+ del request_init["schedule"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ cron="cron_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == gca_schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_schedule_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncScheduleServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "post_create_schedule"
+ ) as post, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "pre_create_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.CreateScheduleRequest.pb(
+ schedule_service.CreateScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_schedule.Schedule.to_json(gca_schedule.Schedule())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = schedule_service.CreateScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_schedule.Schedule()
+
+ await client.create_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_schedule_rest_asyncio_bad_request(
+ request_type=schedule_service.DeleteScheduleRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_schedule(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.DeleteScheduleRequest,
+ dict,
+ ],
+)
+async def test_delete_schedule_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_schedule_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncScheduleServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "post_delete_schedule"
+ ) as post, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "pre_delete_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.DeleteScheduleRequest.pb(
+ schedule_service.DeleteScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = schedule_service.DeleteScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_schedule_rest_asyncio_bad_request(
+ request_type=schedule_service.GetScheduleRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_schedule(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.GetScheduleRequest,
+ dict,
+ ],
+)
+async def test_get_schedule_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ cron="cron_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_schedule_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncScheduleServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "post_get_schedule"
+ ) as post, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "pre_get_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.GetScheduleRequest.pb(
+ schedule_service.GetScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = schedule.Schedule.to_json(schedule.Schedule())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = schedule_service.GetScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = schedule.Schedule()
+
+ await client.get_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_schedules_rest_asyncio_bad_request(
+ request_type=schedule_service.ListSchedulesRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_schedules(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.ListSchedulesRequest,
+ dict,
+ ],
+)
+async def test_list_schedules_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = schedule_service.ListSchedulesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = schedule_service.ListSchedulesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_schedules(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSchedulesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_schedules_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncScheduleServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "post_list_schedules"
+ ) as post, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "pre_list_schedules"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.ListSchedulesRequest.pb(
+ schedule_service.ListSchedulesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = schedule_service.ListSchedulesResponse.to_json(
+ schedule_service.ListSchedulesResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = schedule_service.ListSchedulesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = schedule_service.ListSchedulesResponse()
+
+ await client.list_schedules(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_pause_schedule_rest_asyncio_bad_request(
+ request_type=schedule_service.PauseScheduleRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.pause_schedule(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.PauseScheduleRequest,
+ dict,
+ ],
+)
+async def test_pause_schedule_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.pause_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_pause_schedule_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncScheduleServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "pre_pause_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = schedule_service.PauseScheduleRequest.pb(
+ schedule_service.PauseScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = schedule_service.PauseScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.pause_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_resume_schedule_rest_asyncio_bad_request(
+ request_type=schedule_service.ResumeScheduleRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.resume_schedule(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.ResumeScheduleRequest,
+ dict,
+ ],
+)
+async def test_resume_schedule_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.resume_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_resume_schedule_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncScheduleServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "pre_resume_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = schedule_service.ResumeScheduleRequest.pb(
+ schedule_service.ResumeScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = schedule_service.ResumeScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ await client.resume_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_update_schedule_rest_asyncio_bad_request(
+ request_type=schedule_service.UpdateScheduleRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "schedule": {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.update_schedule(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ schedule_service.UpdateScheduleRequest,
+ dict,
+ ],
+)
+async def test_update_schedule_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "schedule": {"name": "projects/sample1/locations/sample2/schedules/sample3"}
+ }
+ request_init["schedule"] = {
+ "cron": "cron_value",
+ "create_pipeline_job_request": {
+ "parent": "parent_value",
+ "pipeline_job": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "create_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "update_time": {},
+ "pipeline_spec": {"fields": {}},
+ "state": 1,
+ "job_detail": {
+ "pipeline_context": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "parent_contexts": [
+ "parent_contexts_value1",
+ "parent_contexts_value2",
+ ],
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "pipeline_run_context": {},
+ "task_details": [
+ {
+ "task_id": 735,
+ "parent_task_id": 1480,
+ "task_name": "task_name_value",
+ "create_time": {},
+ "start_time": {},
+ "end_time": {},
+ "executor_detail": {
+ "container_detail": {
+ "main_job": "main_job_value",
+ "pre_caching_check_job": "pre_caching_check_job_value",
+ "failed_main_jobs": [
+ "failed_main_jobs_value1",
+ "failed_main_jobs_value2",
+ ],
+ "failed_pre_caching_check_jobs": [
+ "failed_pre_caching_check_jobs_value1",
+ "failed_pre_caching_check_jobs_value2",
+ ],
+ },
+ "custom_job_detail": {
+ "job": "job_value",
+ "failed_jobs": [
+ "failed_jobs_value1",
+ "failed_jobs_value2",
+ ],
+ },
+ },
+ "state": 1,
+ "execution": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "state": 1,
+ "etag": "etag_value",
+ "labels": {},
+ "create_time": {},
+ "update_time": {},
+ "schema_title": "schema_title_value",
+ "schema_version": "schema_version_value",
+ "metadata": {},
+ "description": "description_value",
+ },
+ "error": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "pipeline_task_status": [
+ {"update_time": {}, "state": 1, "error": {}}
+ ],
+ "inputs": {},
+ "outputs": {},
+ }
+ ],
+ },
+ "error": {},
+ "labels": {},
+ "runtime_config": {
+ "parameters": {},
+ "gcs_output_directory": "gcs_output_directory_value",
+ "parameter_values": {},
+ "failure_policy": 1,
+ "input_artifacts": {},
+ "default_runtime": {
+ "persistent_resource_runtime_detail": {
+ "persistent_resource_name": "persistent_resource_name_value",
+ "task_resource_unavailable_wait_time_ms": 4030,
+ "task_resource_unavailable_timeout_behavior": 1,
+ }
+ },
+ },
+ "encryption_spec": {"kms_key_name": "kms_key_name_value"},
+ "service_account": "service_account_value",
+ "network": "network_value",
+ "reserved_ip_ranges": [
+ "reserved_ip_ranges_value1",
+ "reserved_ip_ranges_value2",
+ ],
+ "psc_interface_config": {
+ "network_attachment": "network_attachment_value"
+ },
+ "template_uri": "template_uri_value",
+ "template_metadata": {"version": "version_value"},
+ "schedule_name": "schedule_name_value",
+ "preflight_validations": True,
+ "satisfies_pzs": True,
+ "satisfies_pzi": True,
+ "original_pipeline_job_id": 2512,
+ "pipeline_task_rerun_configs": [
+ {
+ "task_id": 735,
+ "task_name": "task_name_value",
+ "inputs": {"artifacts": {}, "parameter_values": {}},
+ "skip_task": True,
+ "skip_downstream_tasks": True,
+ }
+ ],
+ },
+ "pipeline_job_id": "pipeline_job_id_value",
+ },
+ "create_model_monitoring_job_request": {
+ "parent": "parent_value",
+ "model_monitoring_job": {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "model_monitoring_spec": {
+ "objective_spec": {
+ "tabular_objective": {
+ "feature_drift_spec": {
+ "features": ["features_value1", "features_value2"],
+ "categorical_metric_type": "categorical_metric_type_value",
+ "numeric_metric_type": "numeric_metric_type_value",
+ "default_categorical_alert_condition": {
+ "threshold": 0.973
+ },
+ "default_numeric_alert_condition": {},
+ "feature_alert_conditions": {},
+ },
+ "prediction_output_drift_spec": {},
+ "feature_attribution_spec": {
+ "features": ["features_value1", "features_value2"],
+ "default_alert_condition": {},
+ "feature_alert_conditions": {},
+ "batch_explanation_dedicated_resources": {
+ "machine_spec": {
+ "machine_type": "machine_type_value",
+ "accelerator_type": 1,
+ "accelerator_count": 1805,
+ "tpu_topology": "tpu_topology_value",
+ "reservation_affinity": {
+ "reservation_affinity_type": 1,
+ "key": "key_value",
+ "values": [
+ "values_value1",
+ "values_value2",
+ ],
+ },
+ },
+ "starting_replica_count": 2355,
+ "max_replica_count": 1805,
+ },
+ },
+ },
+ "explanation_spec": {
+ "parameters": {
+ "sampled_shapley_attribution": {"path_count": 1077},
+ "integrated_gradients_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {
+ "noise_sigma": 0.11660000000000001,
+ "feature_noise_sigma": {
+ "noise_sigma": [
+ {"name": "name_value", "sigma": 0.529}
+ ]
+ },
+ "noisy_sample_count": 1947,
+ },
+ "blur_baseline_config": {"max_blur_sigma": 0.1482},
+ },
+ "xrai_attribution": {
+ "step_count": 1092,
+ "smooth_grad_config": {},
+ "blur_baseline_config": {},
+ },
+ "examples": {
+ "example_gcs_source": {
+ "data_format": 1,
+ "gcs_source": {
+ "uris": ["uris_value1", "uris_value2"]
+ },
+ },
+ "nearest_neighbor_search_config": {
+ "null_value": 0,
+ "number_value": 0.1285,
+ "string_value": "string_value_value",
+ "bool_value": True,
+ "struct_value": {},
+ "list_value": {"values": {}},
+ },
+ "presets": {"query": 1, "modality": 1},
+ "gcs_source": {},
+ "neighbor_count": 1494,
+ },
+ "top_k": 541,
+ "output_indices": {},
+ },
+ "metadata": {
+ "inputs": {},
+ "outputs": {},
+ "feature_attributions_schema_uri": "feature_attributions_schema_uri_value",
+ "latent_space_source": "latent_space_source_value",
+ },
+ },
+ "baseline_dataset": {
+ "columnized_dataset": {
+ "vertex_dataset": "vertex_dataset_value",
+ "gcs_source": {
+ "gcs_uri": "gcs_uri_value",
+ "format_": 1,
+ },
+ "bigquery_source": {
+ "table_uri": "table_uri_value",
+ "query": "query_value",
+ },
+ "timestamp_field": "timestamp_field_value",
+ },
+ "batch_prediction_output": {
+ "batch_prediction_job": "batch_prediction_job_value"
+ },
+ "vertex_endpoint_logs": {
+ "endpoints": ["endpoints_value1", "endpoints_value2"]
+ },
+ "time_interval": {"start_time": {}, "end_time": {}},
+ "time_offset": {
+ "offset": "offset_value",
+ "window": "window_value",
+ },
+ },
+ "target_dataset": {},
+ },
+ "notification_spec": {
+ "email_config": {
+ "user_emails": ["user_emails_value1", "user_emails_value2"]
+ },
+ "enable_cloud_logging": True,
+ "notification_channel_configs": [
+ {"notification_channel": "notification_channel_value"}
+ ],
+ },
+ "output_spec": {
+ "gcs_base_directory": {
+ "output_uri_prefix": "output_uri_prefix_value"
+ }
+ },
+ },
+ "create_time": {},
+ "update_time": {},
+ "state": 1,
+ "schedule": "schedule_value",
+ "job_execution_detail": {
+ "baseline_datasets": [
+ {"location": "location_value", "time_range": {}}
+ ],
+ "target_datasets": {},
+ "objective_status": {},
+ "error": {},
+ },
+ "schedule_time": {},
+ },
+ "model_monitoring_job_id": "model_monitoring_job_id_value",
+ },
+ "create_notebook_execution_job_request": {
+ "parent": "parent_value",
+ "notebook_execution_job": {
+ "dataform_repository_source": {
+ "dataform_repository_resource_name": "dataform_repository_resource_name_value",
+ "commit_sha": "commit_sha_value",
+ },
+ "gcs_notebook_source": {
+ "uri": "uri_value",
+ "generation": "generation_value",
+ },
+ "direct_notebook_source": {"content": b"content_blob"},
+ "notebook_runtime_template_resource_name": "notebook_runtime_template_resource_name_value",
+ "custom_environment_spec": {
+ "machine_spec": {},
+ "persistent_disk_spec": {
+ "disk_type": "disk_type_value",
+ "disk_size_gb": 1261,
+ },
+ "network_spec": {
+ "enable_internet_access": True,
+ "network": "network_value",
+ "subnetwork": "subnetwork_value",
+ },
+ },
+ "gcs_output_uri": "gcs_output_uri_value",
+ "execution_user": "execution_user_value",
+ "service_account": "service_account_value",
+ "workbench_runtime": {},
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "execution_timeout": {"seconds": 751, "nanos": 543},
+ "schedule_resource_name": "schedule_resource_name_value",
+ "job_state": 1,
+ "status": {},
+ "create_time": {},
+ "update_time": {},
+ "labels": {},
+ "kernel_name": "kernel_name_value",
+ "encryption_spec": {},
+ },
+ "notebook_execution_job_id": "notebook_execution_job_id_value",
+ },
+ "name": "projects/sample1/locations/sample2/schedules/sample3",
+ "display_name": "display_name_value",
+ "start_time": {},
+ "end_time": {},
+ "max_run_count": 1410,
+ "started_run_count": 1843,
+ "state": 1,
+ "create_time": {},
+ "update_time": {},
+ "next_run_time": {},
+ "last_pause_time": {},
+ "last_resume_time": {},
+ "max_concurrent_run_count": 2596,
+ "allow_queueing": True,
+ "catch_up": True,
+ "last_scheduled_run_response": {
+ "scheduled_run_time": {},
+ "run_response": "run_response_value",
+ },
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = schedule_service.UpdateScheduleRequest.meta.fields["schedule"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["schedule"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["schedule"][field])):
+ del request_init["schedule"][field][i][subfield]
+ else:
+ del request_init["schedule"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gca_schedule.Schedule(
+ name="name_value",
+ display_name="display_name_value",
+ max_run_count=1410,
+ started_run_count=1843,
+ state=gca_schedule.Schedule.State.ACTIVE,
+ max_concurrent_run_count=2596,
+ allow_queueing=True,
+ catch_up=True,
+ cron="cron_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_schedule.Schedule.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.update_schedule(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_schedule.Schedule)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.max_run_count == 1410
+ assert response.started_run_count == 1843
+ assert response.state == gca_schedule.Schedule.State.ACTIVE
+ assert response.max_concurrent_run_count == 2596
+ assert response.allow_queueing is True
+ assert response.catch_up is True
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_update_schedule_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncScheduleServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncScheduleServiceRestInterceptor(),
+ )
+ client = ScheduleServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "post_update_schedule"
+ ) as post, mock.patch.object(
+ transports.AsyncScheduleServiceRestInterceptor, "pre_update_schedule"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = schedule_service.UpdateScheduleRequest.pb(
+ schedule_service.UpdateScheduleRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gca_schedule.Schedule.to_json(gca_schedule.Schedule())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = schedule_service.UpdateScheduleRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_schedule.Schedule()
+
+ await client.update_schedule(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_schedule_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_schedule), "__call__") as call:
+ await client.create_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.CreateScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_schedule_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_schedule), "__call__") as call:
+ await client.delete_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.DeleteScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_schedule_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_schedule), "__call__") as call:
+ await client.get_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.GetScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_schedules_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_schedules), "__call__") as call:
+ await client.list_schedules(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.ListSchedulesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_pause_schedule_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.pause_schedule), "__call__") as call:
+ await client.pause_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.PauseScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_resume_schedule_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.resume_schedule), "__call__") as call:
+ await client.resume_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.ResumeScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_schedule_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.update_schedule), "__call__") as call:
+ await client.update_schedule(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = schedule_service.UpdateScheduleRequest()
+
+ assert args[0] == request_msg
+
+
+def test_schedule_service_rest_asyncio_lro_client():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AsyncOperationsRestClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.ScheduleServiceGrpcTransport,
+ )
+
+
+def test_schedule_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.ScheduleServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_schedule_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.schedule_service.transports.ScheduleServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.ScheduleServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "create_schedule",
+ "delete_schedule",
+ "get_schedule",
+ "list_schedules",
+ "pause_schedule",
+ "resume_schedule",
+ "update_schedule",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_schedule_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.schedule_service.transports.ScheduleServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.ScheduleServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_schedule_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.schedule_service.transports.ScheduleServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.ScheduleServiceTransport()
+ adc.assert_called_once()
+
+
+def test_schedule_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ ScheduleServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ScheduleServiceGrpcTransport,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_schedule_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ScheduleServiceGrpcTransport,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ transports.ScheduleServiceRestTransport,
+ ],
+)
+def test_schedule_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.ScheduleServiceGrpcTransport, grpc_helpers),
+ (transports.ScheduleServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_schedule_service_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ScheduleServiceGrpcTransport,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_schedule_service_grpc_transport_client_cert_source_for_mtls(transport_class):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_schedule_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.ScheduleServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_schedule_service_host_no_port(transport_name):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_schedule_service_host_with_port(transport_name):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_schedule_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = ScheduleServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = ScheduleServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.create_schedule._session
+ session2 = client2.transport.create_schedule._session
+ assert session1 != session2
+ session1 = client1.transport.delete_schedule._session
+ session2 = client2.transport.delete_schedule._session
+ assert session1 != session2
+ session1 = client1.transport.get_schedule._session
+ session2 = client2.transport.get_schedule._session
+ assert session1 != session2
+ session1 = client1.transport.list_schedules._session
+ session2 = client2.transport.list_schedules._session
+ assert session1 != session2
+ session1 = client1.transport.pause_schedule._session
+ session2 = client2.transport.pause_schedule._session
+ assert session1 != session2
+ session1 = client1.transport.resume_schedule._session
+ session2 = client2.transport.resume_schedule._session
+ assert session1 != session2
+ session1 = client1.transport.update_schedule._session
+ session2 = client2.transport.update_schedule._session
+ assert session1 != session2
+
+
+def test_schedule_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ScheduleServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_schedule_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ScheduleServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ScheduleServiceGrpcTransport,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_schedule_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.ScheduleServiceGrpcTransport,
+ transports.ScheduleServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_schedule_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_schedule_service_grpc_lro_client():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_schedule_service_grpc_lro_async_client():
+ client = ScheduleServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_artifact_path():
+ project = "squid"
+ location = "clam"
+ metadata_store = "whelk"
+ artifact = "octopus"
+ expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(
+ project=project,
+ location=location,
+ metadata_store=metadata_store,
+ artifact=artifact,
+ )
+ actual = ScheduleServiceClient.artifact_path(
+ project, location, metadata_store, artifact
+ )
+ assert expected == actual
+
+
+def test_parse_artifact_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ "metadata_store": "cuttlefish",
+ "artifact": "mussel",
+ }
+ path = ScheduleServiceClient.artifact_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_artifact_path(path)
+ assert expected == actual
+
+
+def test_batch_prediction_job_path():
+ project = "winkle"
+ location = "nautilus"
+ batch_prediction_job = "scallop"
+ expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(
+ project=project,
+ location=location,
+ batch_prediction_job=batch_prediction_job,
+ )
+ actual = ScheduleServiceClient.batch_prediction_job_path(
+ project, location, batch_prediction_job
+ )
+ assert expected == actual
+
+
+def test_parse_batch_prediction_job_path():
+ expected = {
+ "project": "abalone",
+ "location": "squid",
+ "batch_prediction_job": "clam",
+ }
+ path = ScheduleServiceClient.batch_prediction_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_batch_prediction_job_path(path)
+ assert expected == actual
+
+
+def test_context_path():
+ project = "whelk"
+ location = "octopus"
+ metadata_store = "oyster"
+ context = "nudibranch"
+ expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(
+ project=project,
+ location=location,
+ metadata_store=metadata_store,
+ context=context,
+ )
+ actual = ScheduleServiceClient.context_path(
+ project, location, metadata_store, context
+ )
+ assert expected == actual
+
+
+def test_parse_context_path():
+ expected = {
+ "project": "cuttlefish",
+ "location": "mussel",
+ "metadata_store": "winkle",
+ "context": "nautilus",
+ }
+ path = ScheduleServiceClient.context_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_context_path(path)
+ assert expected == actual
+
+
+def test_custom_job_path():
+ project = "scallop"
+ location = "abalone"
+ custom_job = "squid"
+ expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(
+ project=project,
+ location=location,
+ custom_job=custom_job,
+ )
+ actual = ScheduleServiceClient.custom_job_path(project, location, custom_job)
+ assert expected == actual
+
+
+def test_parse_custom_job_path():
+ expected = {
+ "project": "clam",
+ "location": "whelk",
+ "custom_job": "octopus",
+ }
+ path = ScheduleServiceClient.custom_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_custom_job_path(path)
+ assert expected == actual
+
+
+def test_dataset_path():
+ project = "oyster"
+ location = "nudibranch"
+ dataset = "cuttlefish"
+ expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
+ project=project,
+ location=location,
+ dataset=dataset,
+ )
+ actual = ScheduleServiceClient.dataset_path(project, location, dataset)
+ assert expected == actual
+
+
+def test_parse_dataset_path():
+ expected = {
+ "project": "mussel",
+ "location": "winkle",
+ "dataset": "nautilus",
+ }
+ path = ScheduleServiceClient.dataset_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_dataset_path(path)
+ assert expected == actual
+
+
+def test_endpoint_path():
+ project = "scallop"
+ location = "abalone"
+ endpoint = "squid"
+ expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+ project=project,
+ location=location,
+ endpoint=endpoint,
+ )
+ actual = ScheduleServiceClient.endpoint_path(project, location, endpoint)
+ assert expected == actual
+
+
+def test_parse_endpoint_path():
+ expected = {
+ "project": "clam",
+ "location": "whelk",
+ "endpoint": "octopus",
+ }
+ path = ScheduleServiceClient.endpoint_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_endpoint_path(path)
+ assert expected == actual
+
+
+def test_execution_path():
+ project = "oyster"
+ location = "nudibranch"
+ metadata_store = "cuttlefish"
+ execution = "mussel"
+ expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(
+ project=project,
+ location=location,
+ metadata_store=metadata_store,
+ execution=execution,
+ )
+ actual = ScheduleServiceClient.execution_path(
+ project, location, metadata_store, execution
+ )
+ assert expected == actual
+
+
+def test_parse_execution_path():
+ expected = {
+ "project": "winkle",
+ "location": "nautilus",
+ "metadata_store": "scallop",
+ "execution": "abalone",
+ }
+ path = ScheduleServiceClient.execution_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_execution_path(path)
+ assert expected == actual
+
+
+def test_model_monitor_path():
+ project = "squid"
+ location = "clam"
+ model_monitor = "whelk"
+ expected = (
+ "projects/{project}/locations/{location}/modelMonitors/{model_monitor}".format(
+ project=project,
+ location=location,
+ model_monitor=model_monitor,
+ )
+ )
+ actual = ScheduleServiceClient.model_monitor_path(project, location, model_monitor)
+ assert expected == actual
+
+
+def test_parse_model_monitor_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "model_monitor": "nudibranch",
+ }
+ path = ScheduleServiceClient.model_monitor_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_model_monitor_path(path)
+ assert expected == actual
+
+
+def test_model_monitoring_job_path():
+ project = "cuttlefish"
+ location = "mussel"
+ model_monitor = "winkle"
+ model_monitoring_job = "nautilus"
+ expected = "projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}".format(
+ project=project,
+ location=location,
+ model_monitor=model_monitor,
+ model_monitoring_job=model_monitoring_job,
+ )
+ actual = ScheduleServiceClient.model_monitoring_job_path(
+ project, location, model_monitor, model_monitoring_job
+ )
+ assert expected == actual
+
+
+def test_parse_model_monitoring_job_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ "model_monitor": "squid",
+ "model_monitoring_job": "clam",
+ }
+ path = ScheduleServiceClient.model_monitoring_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_model_monitoring_job_path(path)
+ assert expected == actual
+
+
+def test_network_path():
+ project = "whelk"
+ network = "octopus"
+ expected = "projects/{project}/global/networks/{network}".format(
+ project=project,
+ network=network,
+ )
+ actual = ScheduleServiceClient.network_path(project, network)
+ assert expected == actual
+
+
+def test_parse_network_path():
+ expected = {
+ "project": "oyster",
+ "network": "nudibranch",
+ }
+ path = ScheduleServiceClient.network_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_network_path(path)
+ assert expected == actual
+
+
+def test_network_attachment_path():
+ project = "cuttlefish"
+ region = "mussel"
+ networkattachment = "winkle"
+ expected = "projects/{project}/regions/{region}/networkAttachments/{networkattachment}".format(
+ project=project,
+ region=region,
+ networkattachment=networkattachment,
+ )
+ actual = ScheduleServiceClient.network_attachment_path(
+ project, region, networkattachment
+ )
+ assert expected == actual
+
+
+def test_parse_network_attachment_path():
+ expected = {
+ "project": "nautilus",
+ "region": "scallop",
+ "networkattachment": "abalone",
+ }
+ path = ScheduleServiceClient.network_attachment_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_network_attachment_path(path)
+ assert expected == actual
+
+
+def test_notebook_execution_job_path():
+ project = "squid"
+ location = "clam"
+ notebook_execution_job = "whelk"
+ expected = "projects/{project}/locations/{location}/notebookExecutionJobs/{notebook_execution_job}".format(
+ project=project,
+ location=location,
+ notebook_execution_job=notebook_execution_job,
+ )
+ actual = ScheduleServiceClient.notebook_execution_job_path(
+ project, location, notebook_execution_job
+ )
+ assert expected == actual
+
+
+def test_parse_notebook_execution_job_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "notebook_execution_job": "nudibranch",
+ }
+ path = ScheduleServiceClient.notebook_execution_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_notebook_execution_job_path(path)
+ assert expected == actual
+
+
+def test_notebook_runtime_template_path():
+ project = "cuttlefish"
+ location = "mussel"
+ notebook_runtime_template = "winkle"
+ expected = "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format(
+ project=project,
+ location=location,
+ notebook_runtime_template=notebook_runtime_template,
+ )
+ actual = ScheduleServiceClient.notebook_runtime_template_path(
+ project, location, notebook_runtime_template
+ )
+ assert expected == actual
+
+
+def test_parse_notebook_runtime_template_path():
+ expected = {
+ "project": "nautilus",
+ "location": "scallop",
+ "notebook_runtime_template": "abalone",
+ }
+ path = ScheduleServiceClient.notebook_runtime_template_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_notebook_runtime_template_path(path)
+ assert expected == actual
+
+
+def test_pipeline_job_path():
+ project = "squid"
+ location = "clam"
+ pipeline_job = "whelk"
+ expected = (
+ "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(
+ project=project,
+ location=location,
+ pipeline_job=pipeline_job,
+ )
+ )
+ actual = ScheduleServiceClient.pipeline_job_path(project, location, pipeline_job)
+ assert expected == actual
+
+
+def test_parse_pipeline_job_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "pipeline_job": "nudibranch",
+ }
+ path = ScheduleServiceClient.pipeline_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_pipeline_job_path(path)
+ assert expected == actual
+
+
+def test_reservation_path():
+ project_id_or_number = "cuttlefish"
+ zone = "mussel"
+ reservation_name = "winkle"
+ expected = "projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}".format(
+ project_id_or_number=project_id_or_number,
+ zone=zone,
+ reservation_name=reservation_name,
+ )
+ actual = ScheduleServiceClient.reservation_path(
+ project_id_or_number, zone, reservation_name
+ )
+ assert expected == actual
+
+
+def test_parse_reservation_path():
+ expected = {
+ "project_id_or_number": "nautilus",
+ "zone": "scallop",
+ "reservation_name": "abalone",
+ }
+ path = ScheduleServiceClient.reservation_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_reservation_path(path)
+ assert expected == actual
+
+
+def test_schedule_path():
+ project = "squid"
+ location = "clam"
+ schedule = "whelk"
+ expected = "projects/{project}/locations/{location}/schedules/{schedule}".format(
+ project=project,
+ location=location,
+ schedule=schedule,
+ )
+ actual = ScheduleServiceClient.schedule_path(project, location, schedule)
+ assert expected == actual
+
+
+def test_parse_schedule_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "schedule": "nudibranch",
+ }
+ path = ScheduleServiceClient.schedule_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_schedule_path(path)
+ assert expected == actual
+
+
+def test_subnetwork_path():
+ project = "cuttlefish"
+ region = "mussel"
+ subnetwork = "winkle"
+ expected = "projects/{project}/regions/{region}/subnetworks/{subnetwork}".format(
+ project=project,
+ region=region,
+ subnetwork=subnetwork,
+ )
+ actual = ScheduleServiceClient.subnetwork_path(project, region, subnetwork)
+ assert expected == actual
+
+
+def test_parse_subnetwork_path():
+ expected = {
+ "project": "nautilus",
+ "region": "scallop",
+ "subnetwork": "abalone",
+ }
+ path = ScheduleServiceClient.subnetwork_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_subnetwork_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "squid"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = ScheduleServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "clam",
+ }
+ path = ScheduleServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "whelk"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = ScheduleServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "octopus",
+ }
+ path = ScheduleServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "oyster"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = ScheduleServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "nudibranch",
+ }
+ path = ScheduleServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "cuttlefish"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = ScheduleServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "mussel",
+ }
+ path = ScheduleServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "winkle"
+ location = "nautilus"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = ScheduleServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ }
+ path = ScheduleServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ScheduleServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.ScheduleServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.ScheduleServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = ScheduleServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = ScheduleServiceClient(credentials=ga_credentials.AnonymousCredentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = ScheduleServiceAsyncClient(credentials=async_anonymous_credentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = ScheduleServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = ScheduleServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (ScheduleServiceClient, transports.ScheduleServiceGrpcTransport),
+ (ScheduleServiceAsyncClient, transports.ScheduleServiceGrpcAsyncIOTransport),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..187cc99ba065de7bdda4d7686721c0fc3c9e6535
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py
@@ -0,0 +1,10169 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation
+from google.api_core import operation_async # type: ignore
+from google.api_core import operations_v1
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import (
+ SpecialistPoolServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import (
+ SpecialistPoolServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers
+from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import transports
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.cloud.aiplatform_v1beta1.types import specialist_pool
+from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool
+from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert SpecialistPoolServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ SpecialistPoolServiceClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ SpecialistPoolServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ SpecialistPoolServiceClient._get_default_mtls_endpoint(non_googleapi)
+ == non_googleapi
+ )
+
+
+def test__read_environment_variables():
+ assert SpecialistPoolServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert SpecialistPoolServiceClient._read_environment_variables() == (
+ True,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert SpecialistPoolServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ SpecialistPoolServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert SpecialistPoolServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert SpecialistPoolServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert SpecialistPoolServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ SpecialistPoolServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert SpecialistPoolServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert SpecialistPoolServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ SpecialistPoolServiceClient._get_client_cert_source(
+ mock_provided_cert_source, False
+ )
+ is None
+ )
+ assert (
+ SpecialistPoolServiceClient._get_client_cert_source(
+ mock_provided_cert_source, True
+ )
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ SpecialistPoolServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ SpecialistPoolServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ SpecialistPoolServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(SpecialistPoolServiceClient),
+)
+@mock.patch.object(
+ SpecialistPoolServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(SpecialistPoolServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = SpecialistPoolServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = SpecialistPoolServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = SpecialistPoolServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ SpecialistPoolServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ SpecialistPoolServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ SpecialistPoolServiceClient._get_api_endpoint(
+ None, None, default_universe, "auto"
+ )
+ == default_endpoint
+ )
+ assert (
+ SpecialistPoolServiceClient._get_api_endpoint(
+ None, None, default_universe, "always"
+ )
+ == SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ SpecialistPoolServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ SpecialistPoolServiceClient._get_api_endpoint(
+ None, None, mock_universe, "never"
+ )
+ == mock_endpoint
+ )
+ assert (
+ SpecialistPoolServiceClient._get_api_endpoint(
+ None, None, default_universe, "never"
+ )
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ SpecialistPoolServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ SpecialistPoolServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ SpecialistPoolServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ SpecialistPoolServiceClient._get_universe_domain(None, None)
+ == SpecialistPoolServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ SpecialistPoolServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (SpecialistPoolServiceClient, "grpc"),
+ (SpecialistPoolServiceAsyncClient, "grpc_asyncio"),
+ (SpecialistPoolServiceClient, "rest"),
+ ],
+)
+def test_specialist_pool_service_client_from_service_account_info(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.SpecialistPoolServiceGrpcTransport, "grpc"),
+ (transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.SpecialistPoolServiceRestTransport, "rest"),
+ ],
+)
+def test_specialist_pool_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (SpecialistPoolServiceClient, "grpc"),
+ (SpecialistPoolServiceAsyncClient, "grpc_asyncio"),
+ (SpecialistPoolServiceClient, "rest"),
+ ],
+)
+def test_specialist_pool_service_client_from_service_account_file(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_specialist_pool_service_client_get_transport_class():
+ transport = SpecialistPoolServiceClient.get_transport_class()
+ available_transports = [
+ transports.SpecialistPoolServiceGrpcTransport,
+ transports.SpecialistPoolServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = SpecialistPoolServiceClient.get_transport_class("grpc")
+ assert transport == transports.SpecialistPoolServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ SpecialistPoolServiceAsyncClient,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+@mock.patch.object(
+ SpecialistPoolServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(SpecialistPoolServiceClient),
+)
+@mock.patch.object(
+ SpecialistPoolServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(SpecialistPoolServiceAsyncClient),
+)
+def test_specialist_pool_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(SpecialistPoolServiceClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(SpecialistPoolServiceClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ SpecialistPoolServiceAsyncClient,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ SpecialistPoolServiceAsyncClient,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceRestTransport,
+ "rest",
+ "true",
+ ),
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceRestTransport,
+ "rest",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ SpecialistPoolServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(SpecialistPoolServiceClient),
+)
+@mock.patch.object(
+ SpecialistPoolServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(SpecialistPoolServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_specialist_pool_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient]
+)
+@mock.patch.object(
+ SpecialistPoolServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(SpecialistPoolServiceClient),
+)
+@mock.patch.object(
+ SpecialistPoolServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(SpecialistPoolServiceAsyncClient),
+)
+def test_specialist_pool_service_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient]
+)
+@mock.patch.object(
+ SpecialistPoolServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(SpecialistPoolServiceClient),
+)
+@mock.patch.object(
+ SpecialistPoolServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(SpecialistPoolServiceAsyncClient),
+)
+def test_specialist_pool_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = SpecialistPoolServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = SpecialistPoolServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = SpecialistPoolServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ SpecialistPoolServiceAsyncClient,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+def test_specialist_pool_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ SpecialistPoolServiceAsyncClient,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceRestTransport,
+ "rest",
+ None,
+ ),
+ ],
+)
+def test_specialist_pool_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_specialist_pool_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = SpecialistPoolServiceClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ SpecialistPoolServiceAsyncClient,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_specialist_pool_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.CreateSpecialistPoolRequest,
+ dict,
+ ],
+)
+def test_create_specialist_pool(request_type, transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.create_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.CreateSpecialistPoolRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_create_specialist_pool_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = specialist_pool_service.CreateSpecialistPoolRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_specialist_pool(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_specialist_pool_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_specialist_pool
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_specialist_pool
+ ] = mock_rpc
+ request = {}
+ client.create_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_specialist_pool_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_specialist_pool
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_specialist_pool
+ ] = mock_rpc
+
+ request = {}
+ await client.create_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.create_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_specialist_pool_async(
+ transport: str = "grpc_asyncio",
+ request_type=specialist_pool_service.CreateSpecialistPoolRequest,
+):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.create_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.CreateSpecialistPoolRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_create_specialist_pool_async_from_dict():
+ await test_create_specialist_pool_async(request_type=dict)
+
+
+def test_create_specialist_pool_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.CreateSpecialistPoolRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_specialist_pool_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.CreateSpecialistPoolRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.create_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_specialist_pool_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_specialist_pool(
+ parent="parent_value",
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].specialist_pool
+ mock_val = gca_specialist_pool.SpecialistPool(name="name_value")
+ assert arg == mock_val
+
+
+def test_create_specialist_pool_flattened_error():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_specialist_pool(
+ specialist_pool_service.CreateSpecialistPoolRequest(),
+ parent="parent_value",
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_specialist_pool_flattened_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_specialist_pool(
+ parent="parent_value",
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].specialist_pool
+ mock_val = gca_specialist_pool.SpecialistPool(name="name_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_specialist_pool_flattened_error_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_specialist_pool(
+ specialist_pool_service.CreateSpecialistPoolRequest(),
+ parent="parent_value",
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.GetSpecialistPoolRequest,
+ dict,
+ ],
+)
+def test_get_specialist_pool(request_type, transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = specialist_pool.SpecialistPool(
+ name="name_value",
+ display_name="display_name_value",
+ specialist_managers_count=2662,
+ specialist_manager_emails=["specialist_manager_emails_value"],
+ pending_data_labeling_jobs=["pending_data_labeling_jobs_value"],
+ specialist_worker_emails=["specialist_worker_emails_value"],
+ )
+ response = client.get_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.GetSpecialistPoolRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, specialist_pool.SpecialistPool)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.specialist_managers_count == 2662
+ assert response.specialist_manager_emails == ["specialist_manager_emails_value"]
+ assert response.pending_data_labeling_jobs == ["pending_data_labeling_jobs_value"]
+ assert response.specialist_worker_emails == ["specialist_worker_emails_value"]
+
+
+def test_get_specialist_pool_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = specialist_pool_service.GetSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_specialist_pool(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == specialist_pool_service.GetSpecialistPoolRequest(
+ name="name_value",
+ )
+
+
+def test_get_specialist_pool_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_specialist_pool in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_specialist_pool
+ ] = mock_rpc
+ request = {}
+ client.get_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_specialist_pool
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_specialist_pool
+ ] = mock_rpc
+
+ request = {}
+ await client.get_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_async(
+ transport: str = "grpc_asyncio",
+ request_type=specialist_pool_service.GetSpecialistPoolRequest,
+):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ specialist_pool.SpecialistPool(
+ name="name_value",
+ display_name="display_name_value",
+ specialist_managers_count=2662,
+ specialist_manager_emails=["specialist_manager_emails_value"],
+ pending_data_labeling_jobs=["pending_data_labeling_jobs_value"],
+ specialist_worker_emails=["specialist_worker_emails_value"],
+ )
+ )
+ response = await client.get_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.GetSpecialistPoolRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, specialist_pool.SpecialistPool)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.specialist_managers_count == 2662
+ assert response.specialist_manager_emails == ["specialist_manager_emails_value"]
+ assert response.pending_data_labeling_jobs == ["pending_data_labeling_jobs_value"]
+ assert response.specialist_worker_emails == ["specialist_worker_emails_value"]
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_async_from_dict():
+ await test_get_specialist_pool_async(request_type=dict)
+
+
+def test_get_specialist_pool_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.GetSpecialistPoolRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = specialist_pool.SpecialistPool()
+ client.get_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.GetSpecialistPoolRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ specialist_pool.SpecialistPool()
+ )
+ await client.get_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_specialist_pool_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = specialist_pool.SpecialistPool()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_specialist_pool(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_specialist_pool_flattened_error():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_specialist_pool(
+ specialist_pool_service.GetSpecialistPoolRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_flattened_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = specialist_pool.SpecialistPool()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ specialist_pool.SpecialistPool()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_specialist_pool(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_flattened_error_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_specialist_pool(
+ specialist_pool_service.GetSpecialistPoolRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.ListSpecialistPoolsRequest,
+ dict,
+ ],
+)
+def test_list_specialist_pools(request_type, transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = specialist_pool_service.ListSpecialistPoolsResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_specialist_pools(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.ListSpecialistPoolsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSpecialistPoolsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_specialist_pools_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = specialist_pool_service.ListSpecialistPoolsRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_specialist_pools(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_specialist_pools_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_specialist_pools
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_specialist_pools
+ ] = mock_rpc
+ request = {}
+ client.list_specialist_pools(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_specialist_pools(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_specialist_pools
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_specialist_pools
+ ] = mock_rpc
+
+ request = {}
+ await client.list_specialist_pools(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_specialist_pools(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async(
+ transport: str = "grpc_asyncio",
+ request_type=specialist_pool_service.ListSpecialistPoolsRequest,
+):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_specialist_pools(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.ListSpecialistPoolsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_from_dict():
+ await test_list_specialist_pools_async(request_type=dict)
+
+
+def test_list_specialist_pools_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.ListSpecialistPoolsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ call.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+ client.list_specialist_pools(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.ListSpecialistPoolsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ specialist_pool_service.ListSpecialistPoolsResponse()
+ )
+ await client.list_specialist_pools(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_specialist_pools_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_specialist_pools(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_specialist_pools_flattened_error():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_specialist_pools(
+ specialist_pool_service.ListSpecialistPoolsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_flattened_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ specialist_pool_service.ListSpecialistPoolsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_specialist_pools(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_flattened_error_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_specialist_pools(
+ specialist_pool_service.ListSpecialistPoolsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_specialist_pools_pager(transport_name: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="abc",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[],
+ next_page_token="def",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="ghi",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_specialist_pools(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, specialist_pool.SpecialistPool) for i in results)
+
+
+def test_list_specialist_pools_pages(transport_name: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="abc",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[],
+ next_page_token="def",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="ghi",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_specialist_pools(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_pager():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="abc",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[],
+ next_page_token="def",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="ghi",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_specialist_pools(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, specialist_pool.SpecialistPool) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_pages():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="abc",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[],
+ next_page_token="def",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="ghi",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_specialist_pools(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.DeleteSpecialistPoolRequest,
+ dict,
+ ],
+)
+def test_delete_specialist_pool(request_type, transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.DeleteSpecialistPoolRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_specialist_pool_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = specialist_pool_service.DeleteSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_specialist_pool(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest(
+ name="name_value",
+ )
+
+
+def test_delete_specialist_pool_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_specialist_pool
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_specialist_pool
+ ] = mock_rpc
+ request = {}
+ client.delete_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_specialist_pool
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_specialist_pool
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_async(
+ transport: str = "grpc_asyncio",
+ request_type=specialist_pool_service.DeleteSpecialistPoolRequest,
+):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.DeleteSpecialistPoolRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_async_from_dict():
+ await test_delete_specialist_pool_async(request_type=dict)
+
+
+def test_delete_specialist_pool_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.DeleteSpecialistPoolRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.DeleteSpecialistPoolRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_specialist_pool_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_specialist_pool(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_specialist_pool_flattened_error():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_specialist_pool(
+ specialist_pool_service.DeleteSpecialistPoolRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_flattened_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_specialist_pool(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_flattened_error_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_specialist_pool(
+ specialist_pool_service.DeleteSpecialistPoolRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.UpdateSpecialistPoolRequest,
+ dict,
+ ],
+)
+def test_update_specialist_pool(request_type, transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.update_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.UpdateSpecialistPoolRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_update_specialist_pool_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.update_specialist_pool(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest()
+
+
+def test_update_specialist_pool_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.update_specialist_pool
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_specialist_pool
+ ] = mock_rpc
+ request = {}
+ client.update_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.update_specialist_pool
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.update_specialist_pool
+ ] = mock_rpc
+
+ request = {}
+ await client.update_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.update_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_async(
+ transport: str = "grpc_asyncio",
+ request_type=specialist_pool_service.UpdateSpecialistPoolRequest,
+):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.update_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = specialist_pool_service.UpdateSpecialistPoolRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_async_from_dict():
+ await test_update_specialist_pool_async(request_type=dict)
+
+
+def test_update_specialist_pool_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+ request.specialist_pool.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "specialist_pool.name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+ request.specialist_pool.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.update_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "specialist_pool.name=name_value",
+ ) in kw["metadata"]
+
+
+def test_update_specialist_pool_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_specialist_pool(
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].specialist_pool
+ mock_val = gca_specialist_pool.SpecialistPool(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+def test_update_specialist_pool_flattened_error():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_specialist_pool(
+ specialist_pool_service.UpdateSpecialistPoolRequest(),
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_flattened_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_specialist_pool(
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].specialist_pool
+ mock_val = gca_specialist_pool.SpecialistPool(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_flattened_error_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_specialist_pool(
+ specialist_pool_service.UpdateSpecialistPoolRequest(),
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+def test_create_specialist_pool_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.create_specialist_pool
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_specialist_pool
+ ] = mock_rpc
+
+ request = {}
+ client.create_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_specialist_pool_rest_required_fields(
+ request_type=specialist_pool_service.CreateSpecialistPoolRequest,
+):
+ transport_class = transports.SpecialistPoolServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_specialist_pool._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_specialist_pool._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_specialist_pool(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_specialist_pool_rest_unset_required_fields():
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_specialist_pool._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "specialistPool",
+ )
+ )
+ )
+
+
+def test_create_specialist_pool_rest_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_specialist_pool(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/specialistPools"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_specialist_pool_rest_flattened_error(transport: str = "rest"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_specialist_pool(
+ specialist_pool_service.CreateSpecialistPoolRequest(),
+ parent="parent_value",
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ )
+
+
+def test_get_specialist_pool_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.get_specialist_pool in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_specialist_pool
+ ] = mock_rpc
+
+ request = {}
+ client.get_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_specialist_pool_rest_required_fields(
+ request_type=specialist_pool_service.GetSpecialistPoolRequest,
+):
+ transport_class = transports.SpecialistPoolServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_specialist_pool._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_specialist_pool._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = specialist_pool.SpecialistPool()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = specialist_pool.SpecialistPool.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_specialist_pool(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_specialist_pool_rest_unset_required_fields():
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_specialist_pool._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_specialist_pool_rest_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = specialist_pool.SpecialistPool()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = specialist_pool.SpecialistPool.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_specialist_pool(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/specialistPools/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_specialist_pool_rest_flattened_error(transport: str = "rest"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_specialist_pool(
+ specialist_pool_service.GetSpecialistPoolRequest(),
+ name="name_value",
+ )
+
+
+def test_list_specialist_pools_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_specialist_pools
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_specialist_pools
+ ] = mock_rpc
+
+ request = {}
+ client.list_specialist_pools(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_specialist_pools(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_specialist_pools_rest_required_fields(
+ request_type=specialist_pool_service.ListSpecialistPoolsRequest,
+):
+ transport_class = transports.SpecialistPoolServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_specialist_pools._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_specialist_pools._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "page_size",
+ "page_token",
+ "read_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_specialist_pools(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_specialist_pools_rest_unset_required_fields():
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_specialist_pools._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "pageSize",
+ "pageToken",
+ "readMask",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_specialist_pools_rest_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_specialist_pools(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/specialistPools"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_specialist_pools_rest_flattened_error(transport: str = "rest"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_specialist_pools(
+ specialist_pool_service.ListSpecialistPoolsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_specialist_pools_rest_pager(transport: str = "rest"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="abc",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[],
+ next_page_token="def",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ ],
+ next_page_token="ghi",
+ ),
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ specialist_pools=[
+ specialist_pool.SpecialistPool(),
+ specialist_pool.SpecialistPool(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ specialist_pool_service.ListSpecialistPoolsResponse.to_json(x)
+ for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_specialist_pools(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, specialist_pool.SpecialistPool) for i in results)
+
+ pages = list(client.list_specialist_pools(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_specialist_pool_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_specialist_pool
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_specialist_pool
+ ] = mock_rpc
+
+ request = {}
+ client.delete_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_specialist_pool_rest_required_fields(
+ request_type=specialist_pool_service.DeleteSpecialistPoolRequest,
+):
+ transport_class = transports.SpecialistPoolServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_specialist_pool._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_specialist_pool._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("force",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_specialist_pool(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_specialist_pool_rest_unset_required_fields():
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_specialist_pool._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("force",)) & set(("name",)))
+
+
+def test_delete_specialist_pool_rest_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_specialist_pool(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/specialistPools/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_specialist_pool_rest_flattened_error(transport: str = "rest"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_specialist_pool(
+ specialist_pool_service.DeleteSpecialistPoolRequest(),
+ name="name_value",
+ )
+
+
+def test_update_specialist_pool_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.update_specialist_pool
+ in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_specialist_pool
+ ] = mock_rpc
+
+ request = {}
+ client.update_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_specialist_pool(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_update_specialist_pool_rest_required_fields(
+ request_type=specialist_pool_service.UpdateSpecialistPoolRequest,
+):
+ transport_class = transports.SpecialistPoolServiceRestTransport
+
+ request_init = {}
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_specialist_pool._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_specialist_pool._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("update_mask",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "patch",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.update_specialist_pool(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_update_specialist_pool_rest_unset_required_fields():
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.update_specialist_pool._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(("updateMask",))
+ & set(
+ (
+ "specialistPool",
+ "updateMask",
+ )
+ )
+ )
+
+
+def test_update_specialist_pool_rest_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "specialist_pool": {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.update_specialist_pool(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{specialist_pool.name=projects/*/locations/*/specialistPools/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_update_specialist_pool_rest_flattened_error(transport: str = "rest"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_specialist_pool(
+ specialist_pool_service.UpdateSpecialistPoolRequest(),
+ specialist_pool=gca_specialist_pool.SpecialistPool(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = SpecialistPoolServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = SpecialistPoolServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = SpecialistPoolServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = SpecialistPoolServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = SpecialistPoolServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.SpecialistPoolServiceGrpcTransport,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ transports.SpecialistPoolServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = SpecialistPoolServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_specialist_pool_empty_call_grpc():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.CreateSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_specialist_pool_empty_call_grpc():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = specialist_pool.SpecialistPool()
+ client.get_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.GetSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_specialist_pools_empty_call_grpc():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ call.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+ client.list_specialist_pools(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.ListSpecialistPoolsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_specialist_pool_empty_call_grpc():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.DeleteSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_specialist_pool_empty_call_grpc():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = SpecialistPoolServiceAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_specialist_pool_empty_call_grpc_asyncio():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.create_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.CreateSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_specialist_pool_empty_call_grpc_asyncio():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ specialist_pool.SpecialistPool(
+ name="name_value",
+ display_name="display_name_value",
+ specialist_managers_count=2662,
+ specialist_manager_emails=["specialist_manager_emails_value"],
+ pending_data_labeling_jobs=["pending_data_labeling_jobs_value"],
+ specialist_worker_emails=["specialist_worker_emails_value"],
+ )
+ )
+ await client.get_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.GetSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_specialist_pools_empty_call_grpc_asyncio():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ specialist_pool_service.ListSpecialistPoolsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_specialist_pools(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.ListSpecialistPoolsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_empty_call_grpc_asyncio():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.DeleteSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_specialist_pool_empty_call_grpc_asyncio():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.update_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = SpecialistPoolServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_create_specialist_pool_rest_bad_request(
+ request_type=specialist_pool_service.CreateSpecialistPoolRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_specialist_pool(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.CreateSpecialistPoolRequest,
+ dict,
+ ],
+)
+def test_create_specialist_pool_rest_call_success(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["specialist_pool"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "specialist_managers_count": 2662,
+ "specialist_manager_emails": [
+ "specialist_manager_emails_value1",
+ "specialist_manager_emails_value2",
+ ],
+ "pending_data_labeling_jobs": [
+ "pending_data_labeling_jobs_value1",
+ "pending_data_labeling_jobs_value2",
+ ],
+ "specialist_worker_emails": [
+ "specialist_worker_emails_value1",
+ "specialist_worker_emails_value2",
+ ],
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = specialist_pool_service.CreateSpecialistPoolRequest.meta.fields[
+ "specialist_pool"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["specialist_pool"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["specialist_pool"][field])):
+ del request_init["specialist_pool"][field][i][subfield]
+ else:
+ del request_init["specialist_pool"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_specialist_pool(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_specialist_pool_rest_interceptors(null_interceptor):
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.SpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "post_create_specialist_pool"
+ ) as post, mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "pre_create_specialist_pool"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.CreateSpecialistPoolRequest.pb(
+ specialist_pool_service.CreateSpecialistPoolRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = specialist_pool_service.CreateSpecialistPoolRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.create_specialist_pool(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_specialist_pool_rest_bad_request(
+ request_type=specialist_pool_service.GetSpecialistPoolRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_specialist_pool(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.GetSpecialistPoolRequest,
+ dict,
+ ],
+)
+def test_get_specialist_pool_rest_call_success(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = specialist_pool.SpecialistPool(
+ name="name_value",
+ display_name="display_name_value",
+ specialist_managers_count=2662,
+ specialist_manager_emails=["specialist_manager_emails_value"],
+ pending_data_labeling_jobs=["pending_data_labeling_jobs_value"],
+ specialist_worker_emails=["specialist_worker_emails_value"],
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = specialist_pool.SpecialistPool.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_specialist_pool(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, specialist_pool.SpecialistPool)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.specialist_managers_count == 2662
+ assert response.specialist_manager_emails == ["specialist_manager_emails_value"]
+ assert response.pending_data_labeling_jobs == ["pending_data_labeling_jobs_value"]
+ assert response.specialist_worker_emails == ["specialist_worker_emails_value"]
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_specialist_pool_rest_interceptors(null_interceptor):
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.SpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "post_get_specialist_pool"
+ ) as post, mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "pre_get_specialist_pool"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.GetSpecialistPoolRequest.pb(
+ specialist_pool_service.GetSpecialistPoolRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = specialist_pool.SpecialistPool.to_json(
+ specialist_pool.SpecialistPool()
+ )
+ req.return_value.content = return_value
+
+ request = specialist_pool_service.GetSpecialistPoolRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = specialist_pool.SpecialistPool()
+
+ client.get_specialist_pool(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_specialist_pools_rest_bad_request(
+ request_type=specialist_pool_service.ListSpecialistPoolsRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_specialist_pools(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.ListSpecialistPoolsRequest,
+ dict,
+ ],
+)
+def test_list_specialist_pools_rest_call_success(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_specialist_pools(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSpecialistPoolsPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_specialist_pools_rest_interceptors(null_interceptor):
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.SpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "post_list_specialist_pools"
+ ) as post, mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "pre_list_specialist_pools"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.ListSpecialistPoolsRequest.pb(
+ specialist_pool_service.ListSpecialistPoolsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse.to_json(
+ specialist_pool_service.ListSpecialistPoolsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = specialist_pool_service.ListSpecialistPoolsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+
+ client.list_specialist_pools(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_specialist_pool_rest_bad_request(
+ request_type=specialist_pool_service.DeleteSpecialistPoolRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_specialist_pool(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.DeleteSpecialistPoolRequest,
+ dict,
+ ],
+)
+def test_delete_specialist_pool_rest_call_success(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_specialist_pool(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_specialist_pool_rest_interceptors(null_interceptor):
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.SpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "post_delete_specialist_pool"
+ ) as post, mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "pre_delete_specialist_pool"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.DeleteSpecialistPoolRequest.pb(
+ specialist_pool_service.DeleteSpecialistPoolRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = specialist_pool_service.DeleteSpecialistPoolRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_specialist_pool(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_update_specialist_pool_rest_bad_request(
+ request_type=specialist_pool_service.UpdateSpecialistPoolRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "specialist_pool": {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.update_specialist_pool(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.UpdateSpecialistPoolRequest,
+ dict,
+ ],
+)
+def test_update_specialist_pool_rest_call_success(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "specialist_pool": {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ }
+ request_init["specialist_pool"] = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3",
+ "display_name": "display_name_value",
+ "specialist_managers_count": 2662,
+ "specialist_manager_emails": [
+ "specialist_manager_emails_value1",
+ "specialist_manager_emails_value2",
+ ],
+ "pending_data_labeling_jobs": [
+ "pending_data_labeling_jobs_value1",
+ "pending_data_labeling_jobs_value2",
+ ],
+ "specialist_worker_emails": [
+ "specialist_worker_emails_value1",
+ "specialist_worker_emails_value2",
+ ],
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = specialist_pool_service.UpdateSpecialistPoolRequest.meta.fields[
+ "specialist_pool"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["specialist_pool"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["specialist_pool"][field])):
+ del request_init["specialist_pool"][field][i][subfield]
+ else:
+ del request_init["specialist_pool"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.update_specialist_pool(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_specialist_pool_rest_interceptors(null_interceptor):
+ transport = transports.SpecialistPoolServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.SpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "post_update_specialist_pool"
+ ) as post, mock.patch.object(
+ transports.SpecialistPoolServiceRestInterceptor, "pre_update_specialist_pool"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.UpdateSpecialistPoolRequest.pb(
+ specialist_pool_service.UpdateSpecialistPoolRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = specialist_pool_service.UpdateSpecialistPoolRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.update_specialist_pool(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_specialist_pool_empty_call_rest():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ client.create_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.CreateSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_specialist_pool_empty_call_rest():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ client.get_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.GetSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_specialist_pools_empty_call_rest():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ client.list_specialist_pools(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.ListSpecialistPoolsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_specialist_pool_empty_call_rest():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ client.delete_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.DeleteSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_specialist_pool_empty_call_rest():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ client.update_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+def test_specialist_pool_service_rest_lro_client():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AbstractOperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = SpecialistPoolServiceAsyncClient.get_transport_class("rest_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_create_specialist_pool_rest_asyncio_bad_request(
+ request_type=specialist_pool_service.CreateSpecialistPoolRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_specialist_pool(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.CreateSpecialistPoolRequest,
+ dict,
+ ],
+)
+async def test_create_specialist_pool_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["specialist_pool"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "specialist_managers_count": 2662,
+ "specialist_manager_emails": [
+ "specialist_manager_emails_value1",
+ "specialist_manager_emails_value2",
+ ],
+ "pending_data_labeling_jobs": [
+ "pending_data_labeling_jobs_value1",
+ "pending_data_labeling_jobs_value2",
+ ],
+ "specialist_worker_emails": [
+ "specialist_worker_emails_value1",
+ "specialist_worker_emails_value2",
+ ],
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = specialist_pool_service.CreateSpecialistPoolRequest.meta.fields[
+ "specialist_pool"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["specialist_pool"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["specialist_pool"][field])):
+ del request_init["specialist_pool"][field][i][subfield]
+ else:
+ del request_init["specialist_pool"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_specialist_pool(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_specialist_pool_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncSpecialistPoolServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncSpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor,
+ "post_create_specialist_pool",
+ ) as post, mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor,
+ "pre_create_specialist_pool",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.CreateSpecialistPoolRequest.pb(
+ specialist_pool_service.CreateSpecialistPoolRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = specialist_pool_service.CreateSpecialistPoolRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.create_specialist_pool(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_rest_asyncio_bad_request(
+ request_type=specialist_pool_service.GetSpecialistPoolRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_specialist_pool(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.GetSpecialistPoolRequest,
+ dict,
+ ],
+)
+async def test_get_specialist_pool_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = specialist_pool.SpecialistPool(
+ name="name_value",
+ display_name="display_name_value",
+ specialist_managers_count=2662,
+ specialist_manager_emails=["specialist_manager_emails_value"],
+ pending_data_labeling_jobs=["pending_data_labeling_jobs_value"],
+ specialist_worker_emails=["specialist_worker_emails_value"],
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = specialist_pool.SpecialistPool.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_specialist_pool(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, specialist_pool.SpecialistPool)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.specialist_managers_count == 2662
+ assert response.specialist_manager_emails == ["specialist_manager_emails_value"]
+ assert response.pending_data_labeling_jobs == ["pending_data_labeling_jobs_value"]
+ assert response.specialist_worker_emails == ["specialist_worker_emails_value"]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_specialist_pool_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncSpecialistPoolServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncSpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor, "post_get_specialist_pool"
+ ) as post, mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor, "pre_get_specialist_pool"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.GetSpecialistPoolRequest.pb(
+ specialist_pool_service.GetSpecialistPoolRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = specialist_pool.SpecialistPool.to_json(
+ specialist_pool.SpecialistPool()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = specialist_pool_service.GetSpecialistPoolRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = specialist_pool.SpecialistPool()
+
+ await client.get_specialist_pool(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_rest_asyncio_bad_request(
+ request_type=specialist_pool_service.ListSpecialistPoolsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_specialist_pools(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.ListSpecialistPoolsRequest,
+ dict,
+ ],
+)
+async def test_list_specialist_pools_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_specialist_pools(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_specialist_pools_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncSpecialistPoolServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncSpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor,
+ "post_list_specialist_pools",
+ ) as post, mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor,
+ "pre_list_specialist_pools",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.ListSpecialistPoolsRequest.pb(
+ specialist_pool_service.ListSpecialistPoolsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = specialist_pool_service.ListSpecialistPoolsResponse.to_json(
+ specialist_pool_service.ListSpecialistPoolsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = specialist_pool_service.ListSpecialistPoolsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+
+ await client.list_specialist_pools(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_rest_asyncio_bad_request(
+ request_type=specialist_pool_service.DeleteSpecialistPoolRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_specialist_pool(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.DeleteSpecialistPoolRequest,
+ dict,
+ ],
+)
+async def test_delete_specialist_pool_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_specialist_pool(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_specialist_pool_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncSpecialistPoolServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncSpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor,
+ "post_delete_specialist_pool",
+ ) as post, mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor,
+ "pre_delete_specialist_pool",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.DeleteSpecialistPoolRequest.pb(
+ specialist_pool_service.DeleteSpecialistPoolRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = specialist_pool_service.DeleteSpecialistPoolRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_specialist_pool(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_rest_asyncio_bad_request(
+ request_type=specialist_pool_service.UpdateSpecialistPoolRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "specialist_pool": {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.update_specialist_pool(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ specialist_pool_service.UpdateSpecialistPoolRequest,
+ dict,
+ ],
+)
+async def test_update_specialist_pool_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "specialist_pool": {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3"
+ }
+ }
+ request_init["specialist_pool"] = {
+ "name": "projects/sample1/locations/sample2/specialistPools/sample3",
+ "display_name": "display_name_value",
+ "specialist_managers_count": 2662,
+ "specialist_manager_emails": [
+ "specialist_manager_emails_value1",
+ "specialist_manager_emails_value2",
+ ],
+ "pending_data_labeling_jobs": [
+ "pending_data_labeling_jobs_value1",
+ "pending_data_labeling_jobs_value2",
+ ],
+ "specialist_worker_emails": [
+ "specialist_worker_emails_value1",
+ "specialist_worker_emails_value2",
+ ],
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = specialist_pool_service.UpdateSpecialistPoolRequest.meta.fields[
+ "specialist_pool"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["specialist_pool"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["specialist_pool"][field])):
+ del request_init["specialist_pool"][field][i][subfield]
+ else:
+ del request_init["specialist_pool"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.update_specialist_pool(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_update_specialist_pool_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncSpecialistPoolServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncSpecialistPoolServiceRestInterceptor(),
+ )
+ client = SpecialistPoolServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor,
+ "post_update_specialist_pool",
+ ) as post, mock.patch.object(
+ transports.AsyncSpecialistPoolServiceRestInterceptor,
+ "pre_update_specialist_pool",
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = specialist_pool_service.UpdateSpecialistPoolRequest.pb(
+ specialist_pool_service.UpdateSpecialistPoolRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = specialist_pool_service.UpdateSpecialistPoolRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.update_specialist_pool(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_specialist_pool_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_specialist_pool), "__call__"
+ ) as call:
+ await client.create_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.CreateSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_specialist_pool_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_specialist_pool), "__call__"
+ ) as call:
+ await client.get_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.GetSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_specialist_pools_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_specialist_pools), "__call__"
+ ) as call:
+ await client.list_specialist_pools(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.ListSpecialistPoolsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_specialist_pool), "__call__"
+ ) as call:
+ await client.delete_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.DeleteSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_specialist_pool_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool), "__call__"
+ ) as call:
+ await client.update_specialist_pool(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+ assert args[0] == request_msg
+
+
+def test_specialist_pool_service_rest_asyncio_lro_client():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AsyncOperationsRestClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.SpecialistPoolServiceGrpcTransport,
+ )
+
+
+def test_specialist_pool_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.SpecialistPoolServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_specialist_pool_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.SpecialistPoolServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "create_specialist_pool",
+ "get_specialist_pool",
+ "list_specialist_pools",
+ "delete_specialist_pool",
+ "update_specialist_pool",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_specialist_pool_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.SpecialistPoolServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_specialist_pool_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.SpecialistPoolServiceTransport()
+ adc.assert_called_once()
+
+
+def test_specialist_pool_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ SpecialistPoolServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.SpecialistPoolServiceGrpcTransport,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_specialist_pool_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.SpecialistPoolServiceGrpcTransport,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ transports.SpecialistPoolServiceRestTransport,
+ ],
+)
+def test_specialist_pool_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.SpecialistPoolServiceGrpcTransport, grpc_helpers),
+ (transports.SpecialistPoolServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_specialist_pool_service_transport_create_channel(
+ transport_class, grpc_helpers
+):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.SpecialistPoolServiceGrpcTransport,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls(
+ transport_class,
+):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_specialist_pool_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.SpecialistPoolServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_specialist_pool_service_host_no_port(transport_name):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_specialist_pool_service_host_with_port(transport_name):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_specialist_pool_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = SpecialistPoolServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = SpecialistPoolServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.create_specialist_pool._session
+ session2 = client2.transport.create_specialist_pool._session
+ assert session1 != session2
+ session1 = client1.transport.get_specialist_pool._session
+ session2 = client2.transport.get_specialist_pool._session
+ assert session1 != session2
+ session1 = client1.transport.list_specialist_pools._session
+ session2 = client2.transport.list_specialist_pools._session
+ assert session1 != session2
+ session1 = client1.transport.delete_specialist_pool._session
+ session2 = client2.transport.delete_specialist_pool._session
+ assert session1 != session2
+ session1 = client1.transport.update_specialist_pool._session
+ session2 = client2.transport.update_specialist_pool._session
+ assert session1 != session2
+
+
+def test_specialist_pool_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_specialist_pool_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.SpecialistPoolServiceGrpcTransport,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_specialist_pool_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.SpecialistPoolServiceGrpcTransport,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_specialist_pool_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_specialist_pool_service_grpc_lro_client():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_specialist_pool_service_grpc_lro_async_client():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_specialist_pool_path():
+ project = "squid"
+ location = "clam"
+ specialist_pool = "whelk"
+ expected = "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(
+ project=project,
+ location=location,
+ specialist_pool=specialist_pool,
+ )
+ actual = SpecialistPoolServiceClient.specialist_pool_path(
+ project, location, specialist_pool
+ )
+ assert expected == actual
+
+
+def test_parse_specialist_pool_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "specialist_pool": "nudibranch",
+ }
+ path = SpecialistPoolServiceClient.specialist_pool_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_specialist_pool_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = SpecialistPoolServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = SpecialistPoolServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "winkle"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = SpecialistPoolServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = SpecialistPoolServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "scallop"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = SpecialistPoolServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = SpecialistPoolServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "squid"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = SpecialistPoolServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = SpecialistPoolServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = SpecialistPoolServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = SpecialistPoolServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.SpecialistPoolServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.SpecialistPoolServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = SpecialistPoolServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(credentials=async_anonymous_credentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport),
+ (
+ SpecialistPoolServiceAsyncClient,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ ),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4e40803c264e8b28944bf87c44405b9a9c1c819
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py
@@ -0,0 +1,14966 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation
+from google.api_core import operation_async # type: ignore
+from google.api_core import operations_v1
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import (
+ VertexRagDataServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import (
+ VertexRagDataServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import pagers
+from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import transports
+from google.cloud.aiplatform_v1beta1.types import api_auth
+from google.cloud.aiplatform_v1beta1.types import io
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.cloud.aiplatform_v1beta1.types import vertex_rag_data
+from google.cloud.aiplatform_v1beta1.types import vertex_rag_data_service
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert VertexRagDataServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ VertexRagDataServiceClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ VertexRagDataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ VertexRagDataServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ VertexRagDataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ VertexRagDataServiceClient._get_default_mtls_endpoint(non_googleapi)
+ == non_googleapi
+ )
+
+
+def test__read_environment_variables():
+ assert VertexRagDataServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert VertexRagDataServiceClient._read_environment_variables() == (
+ True,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert VertexRagDataServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ VertexRagDataServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert VertexRagDataServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert VertexRagDataServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert VertexRagDataServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ VertexRagDataServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert VertexRagDataServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert VertexRagDataServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ VertexRagDataServiceClient._get_client_cert_source(
+ mock_provided_cert_source, False
+ )
+ is None
+ )
+ assert (
+ VertexRagDataServiceClient._get_client_cert_source(
+ mock_provided_cert_source, True
+ )
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ VertexRagDataServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ VertexRagDataServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ VertexRagDataServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagDataServiceClient),
+)
+@mock.patch.object(
+ VertexRagDataServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagDataServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = VertexRagDataServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ VertexRagDataServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ VertexRagDataServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == VertexRagDataServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ VertexRagDataServiceClient._get_api_endpoint(
+ None, None, default_universe, "auto"
+ )
+ == default_endpoint
+ )
+ assert (
+ VertexRagDataServiceClient._get_api_endpoint(
+ None, None, default_universe, "always"
+ )
+ == VertexRagDataServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ VertexRagDataServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == VertexRagDataServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ VertexRagDataServiceClient._get_api_endpoint(None, None, mock_universe, "never")
+ == mock_endpoint
+ )
+ assert (
+ VertexRagDataServiceClient._get_api_endpoint(
+ None, None, default_universe, "never"
+ )
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ VertexRagDataServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ VertexRagDataServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ VertexRagDataServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ VertexRagDataServiceClient._get_universe_domain(None, None)
+ == VertexRagDataServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ VertexRagDataServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (VertexRagDataServiceClient, "grpc"),
+ (VertexRagDataServiceAsyncClient, "grpc_asyncio"),
+ (VertexRagDataServiceClient, "rest"),
+ ],
+)
+def test_vertex_rag_data_service_client_from_service_account_info(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.VertexRagDataServiceGrpcTransport, "grpc"),
+ (transports.VertexRagDataServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.VertexRagDataServiceRestTransport, "rest"),
+ ],
+)
+def test_vertex_rag_data_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (VertexRagDataServiceClient, "grpc"),
+ (VertexRagDataServiceAsyncClient, "grpc_asyncio"),
+ (VertexRagDataServiceClient, "rest"),
+ ],
+)
+def test_vertex_rag_data_service_client_from_service_account_file(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_vertex_rag_data_service_client_get_transport_class():
+ transport = VertexRagDataServiceClient.get_transport_class()
+ available_transports = [
+ transports.VertexRagDataServiceGrpcTransport,
+ transports.VertexRagDataServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = VertexRagDataServiceClient.get_transport_class("grpc")
+ assert transport == transports.VertexRagDataServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ VertexRagDataServiceAsyncClient,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+@mock.patch.object(
+ VertexRagDataServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagDataServiceClient),
+)
+@mock.patch.object(
+ VertexRagDataServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagDataServiceAsyncClient),
+)
+def test_vertex_rag_data_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(VertexRagDataServiceClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(VertexRagDataServiceClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ VertexRagDataServiceAsyncClient,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ VertexRagDataServiceAsyncClient,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceRestTransport,
+ "rest",
+ "true",
+ ),
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceRestTransport,
+ "rest",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ VertexRagDataServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagDataServiceClient),
+)
+@mock.patch.object(
+ VertexRagDataServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagDataServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_vertex_rag_data_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [VertexRagDataServiceClient, VertexRagDataServiceAsyncClient]
+)
+@mock.patch.object(
+ VertexRagDataServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(VertexRagDataServiceClient),
+)
+@mock.patch.object(
+ VertexRagDataServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(VertexRagDataServiceAsyncClient),
+)
+def test_vertex_rag_data_service_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [VertexRagDataServiceClient, VertexRagDataServiceAsyncClient]
+)
+@mock.patch.object(
+ VertexRagDataServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagDataServiceClient),
+)
+@mock.patch.object(
+ VertexRagDataServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagDataServiceAsyncClient),
+)
+def test_vertex_rag_data_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = VertexRagDataServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceGrpcTransport,
+ "grpc",
+ ),
+ (
+ VertexRagDataServiceAsyncClient,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceRestTransport,
+ "rest",
+ ),
+ ],
+)
+def test_vertex_rag_data_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ VertexRagDataServiceAsyncClient,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceRestTransport,
+ "rest",
+ None,
+ ),
+ ],
+)
+def test_vertex_rag_data_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_vertex_rag_data_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.transports.VertexRagDataServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = VertexRagDataServiceClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ VertexRagDataServiceClient,
+ transports.VertexRagDataServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ VertexRagDataServiceAsyncClient,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_vertex_rag_data_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.CreateRagCorpusRequest,
+ dict,
+ ],
+)
+def test_create_rag_corpus(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.create_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.CreateRagCorpusRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_create_rag_corpus_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.CreateRagCorpusRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_rag_corpus(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.CreateRagCorpusRequest(
+ parent="parent_value",
+ )
+
+
+def test_create_rag_corpus_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.create_rag_corpus in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_rag_corpus
+ ] = mock_rpc
+ request = {}
+ client.create_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_rag_corpus_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_rag_corpus
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_rag_corpus
+ ] = mock_rpc
+
+ request = {}
+ await client.create_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.create_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_create_rag_corpus_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.CreateRagCorpusRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.create_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.CreateRagCorpusRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_create_rag_corpus_async_from_dict():
+ await test_create_rag_corpus_async(request_type=dict)
+
+
+def test_create_rag_corpus_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.CreateRagCorpusRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_rag_corpus_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.CreateRagCorpusRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.create_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_rag_corpus_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_rag_corpus(
+ parent="parent_value",
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].rag_corpus
+ mock_val = vertex_rag_data.RagCorpus(name="name_value")
+ assert arg == mock_val
+
+
+def test_create_rag_corpus_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_rag_corpus(
+ vertex_rag_data_service.CreateRagCorpusRequest(),
+ parent="parent_value",
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_rag_corpus_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_rag_corpus(
+ parent="parent_value",
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].rag_corpus
+ mock_val = vertex_rag_data.RagCorpus(name="name_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_rag_corpus_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_rag_corpus(
+ vertex_rag_data_service.CreateRagCorpusRequest(),
+ parent="parent_value",
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.UpdateRagCorpusRequest,
+ dict,
+ ],
+)
+def test_update_rag_corpus(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.update_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.UpdateRagCorpusRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_update_rag_corpus_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.UpdateRagCorpusRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.update_rag_corpus(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.UpdateRagCorpusRequest()
+
+
+def test_update_rag_corpus_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.update_rag_corpus in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_rag_corpus
+ ] = mock_rpc
+ request = {}
+ client.update_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_rag_corpus_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.update_rag_corpus
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.update_rag_corpus
+ ] = mock_rpc
+
+ request = {}
+ await client.update_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.update_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_rag_corpus_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.UpdateRagCorpusRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.update_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.UpdateRagCorpusRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_update_rag_corpus_async_from_dict():
+ await test_update_rag_corpus_async(request_type=dict)
+
+
+def test_update_rag_corpus_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.UpdateRagCorpusRequest()
+
+ request.rag_corpus.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "rag_corpus.name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_update_rag_corpus_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.UpdateRagCorpusRequest()
+
+ request.rag_corpus.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.update_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "rag_corpus.name=name_value",
+ ) in kw["metadata"]
+
+
+def test_update_rag_corpus_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_rag_corpus(
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].rag_corpus
+ mock_val = vertex_rag_data.RagCorpus(name="name_value")
+ assert arg == mock_val
+
+
+def test_update_rag_corpus_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_rag_corpus(
+ vertex_rag_data_service.UpdateRagCorpusRequest(),
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_rag_corpus_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_rag_corpus(
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].rag_corpus
+ mock_val = vertex_rag_data.RagCorpus(name="name_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_update_rag_corpus_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_rag_corpus(
+ vertex_rag_data_service.UpdateRagCorpusRequest(),
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.GetRagCorpusRequest,
+ dict,
+ ],
+)
+def test_get_rag_corpus(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data.RagCorpus(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ )
+ response = client.get_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.GetRagCorpusRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data.RagCorpus)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+
+
+def test_get_rag_corpus_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.GetRagCorpusRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_rag_corpus(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.GetRagCorpusRequest(
+ name="name_value",
+ )
+
+
+def test_get_rag_corpus_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_rag_corpus in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_rag_corpus] = mock_rpc
+ request = {}
+ client.get_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_rag_corpus_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_rag_corpus
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_rag_corpus
+ ] = mock_rpc
+
+ request = {}
+ await client.get_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_rag_corpus_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.GetRagCorpusRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data.RagCorpus(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ )
+ )
+ response = await client.get_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.GetRagCorpusRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data.RagCorpus)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+
+
+@pytest.mark.asyncio
+async def test_get_rag_corpus_async_from_dict():
+ await test_get_rag_corpus_async(request_type=dict)
+
+
+def test_get_rag_corpus_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.GetRagCorpusRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ call.return_value = vertex_rag_data.RagCorpus()
+ client.get_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_rag_corpus_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.GetRagCorpusRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data.RagCorpus()
+ )
+ await client.get_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_rag_corpus_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data.RagCorpus()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_rag_corpus(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_rag_corpus_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_rag_corpus(
+ vertex_rag_data_service.GetRagCorpusRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_rag_corpus_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data.RagCorpus()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data.RagCorpus()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_rag_corpus(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_rag_corpus_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_rag_corpus(
+ vertex_rag_data_service.GetRagCorpusRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.ListRagCorporaRequest,
+ dict,
+ ],
+)
+def test_list_rag_corpora(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data_service.ListRagCorporaResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_rag_corpora(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.ListRagCorporaRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListRagCorporaPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_rag_corpora_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.ListRagCorporaRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_rag_corpora(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.ListRagCorporaRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_rag_corpora_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_rag_corpora in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_rag_corpora
+ ] = mock_rpc
+ request = {}
+ client.list_rag_corpora(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_rag_corpora(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_rag_corpora_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_rag_corpora
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_rag_corpora
+ ] = mock_rpc
+
+ request = {}
+ await client.list_rag_corpora(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_rag_corpora(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_rag_corpora_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.ListRagCorporaRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.ListRagCorporaResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_rag_corpora(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.ListRagCorporaRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListRagCorporaAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_rag_corpora_async_from_dict():
+ await test_list_rag_corpora_async(request_type=dict)
+
+
+def test_list_rag_corpora_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.ListRagCorporaRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ call.return_value = vertex_rag_data_service.ListRagCorporaResponse()
+ client.list_rag_corpora(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_rag_corpora_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.ListRagCorporaRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.ListRagCorporaResponse()
+ )
+ await client.list_rag_corpora(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_rag_corpora_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data_service.ListRagCorporaResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_rag_corpora(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_rag_corpora_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_rag_corpora(
+ vertex_rag_data_service.ListRagCorporaRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_rag_corpora_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data_service.ListRagCorporaResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.ListRagCorporaResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_rag_corpora(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_rag_corpora_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_rag_corpora(
+ vertex_rag_data_service.ListRagCorporaRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_rag_corpora_pager(transport_name: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_rag_corpora(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, vertex_rag_data.RagCorpus) for i in results)
+
+
+def test_list_rag_corpora_pages(transport_name: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_rag_corpora(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_rag_corpora_async_pager():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_rag_corpora), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_rag_corpora(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, vertex_rag_data.RagCorpus) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_rag_corpora_async_pages():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_rag_corpora), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_rag_corpora(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.DeleteRagCorpusRequest,
+ dict,
+ ],
+)
+def test_delete_rag_corpus(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.DeleteRagCorpusRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_rag_corpus_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.DeleteRagCorpusRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_rag_corpus(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.DeleteRagCorpusRequest(
+ name="name_value",
+ )
+
+
+def test_delete_rag_corpus_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_rag_corpus in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_rag_corpus
+ ] = mock_rpc
+ request = {}
+ client.delete_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_corpus_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_rag_corpus
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_rag_corpus
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_corpus_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.DeleteRagCorpusRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.DeleteRagCorpusRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_corpus_async_from_dict():
+ await test_delete_rag_corpus_async(request_type=dict)
+
+
+def test_delete_rag_corpus_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.DeleteRagCorpusRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_corpus_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.DeleteRagCorpusRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_rag_corpus_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_rag_corpus(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_rag_corpus_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_rag_corpus(
+ vertex_rag_data_service.DeleteRagCorpusRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_corpus_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_rag_corpus(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_corpus_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_rag_corpus(
+ vertex_rag_data_service.DeleteRagCorpusRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.UploadRagFileRequest,
+ dict,
+ ],
+)
+def test_upload_rag_file(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data_service.UploadRagFileResponse()
+ response = client.upload_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.UploadRagFileRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data_service.UploadRagFileResponse)
+
+
+def test_upload_rag_file_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.UploadRagFileRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.upload_rag_file(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.UploadRagFileRequest(
+ parent="parent_value",
+ )
+
+
+def test_upload_rag_file_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.upload_rag_file in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.upload_rag_file] = mock_rpc
+ request = {}
+ client.upload_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.upload_rag_file(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_upload_rag_file_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.upload_rag_file
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.upload_rag_file
+ ] = mock_rpc
+
+ request = {}
+ await client.upload_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.upload_rag_file(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_upload_rag_file_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.UploadRagFileRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.UploadRagFileResponse()
+ )
+ response = await client.upload_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.UploadRagFileRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data_service.UploadRagFileResponse)
+
+
+@pytest.mark.asyncio
+async def test_upload_rag_file_async_from_dict():
+ await test_upload_rag_file_async(request_type=dict)
+
+
+def test_upload_rag_file_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.UploadRagFileRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ call.return_value = vertex_rag_data_service.UploadRagFileResponse()
+ client.upload_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_upload_rag_file_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.UploadRagFileRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.UploadRagFileResponse()
+ )
+ await client.upload_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_upload_rag_file_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data_service.UploadRagFileResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.upload_rag_file(
+ parent="parent_value",
+ rag_file=vertex_rag_data.RagFile(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ upload_rag_file_config=vertex_rag_data.UploadRagFileConfig(
+ rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig(
+ fixed_length_chunking=vertex_rag_data.RagFileChunkingConfig.FixedLengthChunking(
+ chunk_size=1075
+ )
+ )
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].rag_file
+ mock_val = vertex_rag_data.RagFile(gcs_source=io.GcsSource(uris=["uris_value"]))
+ assert arg == mock_val
+ arg = args[0].upload_rag_file_config
+ mock_val = vertex_rag_data.UploadRagFileConfig(
+ rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig(
+ fixed_length_chunking=vertex_rag_data.RagFileChunkingConfig.FixedLengthChunking(
+ chunk_size=1075
+ )
+ )
+ )
+ assert arg == mock_val
+
+
+def test_upload_rag_file_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.upload_rag_file(
+ vertex_rag_data_service.UploadRagFileRequest(),
+ parent="parent_value",
+ rag_file=vertex_rag_data.RagFile(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ upload_rag_file_config=vertex_rag_data.UploadRagFileConfig(
+ rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig(
+ fixed_length_chunking=vertex_rag_data.RagFileChunkingConfig.FixedLengthChunking(
+ chunk_size=1075
+ )
+ )
+ ),
+ )
+
+
+@pytest.mark.asyncio
+async def test_upload_rag_file_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data_service.UploadRagFileResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.UploadRagFileResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.upload_rag_file(
+ parent="parent_value",
+ rag_file=vertex_rag_data.RagFile(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ upload_rag_file_config=vertex_rag_data.UploadRagFileConfig(
+ rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig(
+ fixed_length_chunking=vertex_rag_data.RagFileChunkingConfig.FixedLengthChunking(
+ chunk_size=1075
+ )
+ )
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].rag_file
+ mock_val = vertex_rag_data.RagFile(gcs_source=io.GcsSource(uris=["uris_value"]))
+ assert arg == mock_val
+ arg = args[0].upload_rag_file_config
+ mock_val = vertex_rag_data.UploadRagFileConfig(
+ rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig(
+ fixed_length_chunking=vertex_rag_data.RagFileChunkingConfig.FixedLengthChunking(
+ chunk_size=1075
+ )
+ )
+ )
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_upload_rag_file_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.upload_rag_file(
+ vertex_rag_data_service.UploadRagFileRequest(),
+ parent="parent_value",
+ rag_file=vertex_rag_data.RagFile(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ upload_rag_file_config=vertex_rag_data.UploadRagFileConfig(
+ rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig(
+ fixed_length_chunking=vertex_rag_data.RagFileChunkingConfig.FixedLengthChunking(
+ chunk_size=1075
+ )
+ )
+ ),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.ImportRagFilesRequest,
+ dict,
+ ],
+)
+def test_import_rag_files(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.import_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.ImportRagFilesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_import_rag_files_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.ImportRagFilesRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.import_rag_files(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.ImportRagFilesRequest(
+ parent="parent_value",
+ )
+
+
+def test_import_rag_files_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.import_rag_files in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.import_rag_files
+ ] = mock_rpc
+ request = {}
+ client.import_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.import_rag_files(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_import_rag_files_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.import_rag_files
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.import_rag_files
+ ] = mock_rpc
+
+ request = {}
+ await client.import_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.import_rag_files(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_import_rag_files_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.ImportRagFilesRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.import_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.ImportRagFilesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_import_rag_files_async_from_dict():
+ await test_import_rag_files_async(request_type=dict)
+
+
+def test_import_rag_files_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.ImportRagFilesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.import_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_import_rag_files_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.ImportRagFilesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.import_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_import_rag_files_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.import_rag_files(
+ parent="parent_value",
+ import_rag_files_config=vertex_rag_data.ImportRagFilesConfig(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].import_rag_files_config
+ mock_val = vertex_rag_data.ImportRagFilesConfig(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ )
+ assert arg == mock_val
+
+
+def test_import_rag_files_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.import_rag_files(
+ vertex_rag_data_service.ImportRagFilesRequest(),
+ parent="parent_value",
+ import_rag_files_config=vertex_rag_data.ImportRagFilesConfig(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ )
+
+
+@pytest.mark.asyncio
+async def test_import_rag_files_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.import_rag_files(
+ parent="parent_value",
+ import_rag_files_config=vertex_rag_data.ImportRagFilesConfig(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].import_rag_files_config
+ mock_val = vertex_rag_data.ImportRagFilesConfig(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ )
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_import_rag_files_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.import_rag_files(
+ vertex_rag_data_service.ImportRagFilesRequest(),
+ parent="parent_value",
+ import_rag_files_config=vertex_rag_data.ImportRagFilesConfig(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.GetRagFileRequest,
+ dict,
+ ],
+)
+def test_get_rag_file(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data.RagFile(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ size_bytes=1089,
+ rag_file_type=vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT,
+ )
+ response = client.get_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.GetRagFileRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data.RagFile)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+ assert response.size_bytes == 1089
+ assert (
+ response.rag_file_type == vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT
+ )
+
+
+def test_get_rag_file_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.GetRagFileRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.get_rag_file(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.GetRagFileRequest(
+ name="name_value",
+ )
+
+
+def test_get_rag_file_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_rag_file in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_rag_file] = mock_rpc
+ request = {}
+ client.get_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_rag_file(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_rag_file_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_rag_file
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_rag_file
+ ] = mock_rpc
+
+ request = {}
+ await client.get_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.get_rag_file(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_rag_file_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.GetRagFileRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data.RagFile(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ size_bytes=1089,
+ rag_file_type=vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT,
+ )
+ )
+ response = await client.get_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.GetRagFileRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data.RagFile)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+ assert response.size_bytes == 1089
+ assert (
+ response.rag_file_type == vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_rag_file_async_from_dict():
+ await test_get_rag_file_async(request_type=dict)
+
+
+def test_get_rag_file_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.GetRagFileRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ call.return_value = vertex_rag_data.RagFile()
+ client.get_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_rag_file_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.GetRagFileRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data.RagFile()
+ )
+ await client.get_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_rag_file_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data.RagFile()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_rag_file(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_rag_file_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_rag_file(
+ vertex_rag_data_service.GetRagFileRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_rag_file_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data.RagFile()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data.RagFile()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_rag_file(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_rag_file_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_rag_file(
+ vertex_rag_data_service.GetRagFileRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.ListRagFilesRequest,
+ dict,
+ ],
+)
+def test_list_rag_files(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data_service.ListRagFilesResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.ListRagFilesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListRagFilesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_rag_files_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.ListRagFilesRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.list_rag_files(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.ListRagFilesRequest(
+ parent="parent_value",
+ page_token="page_token_value",
+ )
+
+
+def test_list_rag_files_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_rag_files in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.list_rag_files] = mock_rpc
+ request = {}
+ client.list_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_rag_files(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_rag_files_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.list_rag_files
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_rag_files
+ ] = mock_rpc
+
+ request = {}
+ await client.list_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.list_rag_files(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_list_rag_files_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.ListRagFilesRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.ListRagFilesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.ListRagFilesRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListRagFilesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_rag_files_async_from_dict():
+ await test_list_rag_files_async(request_type=dict)
+
+
+def test_list_rag_files_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.ListRagFilesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ call.return_value = vertex_rag_data_service.ListRagFilesResponse()
+ client.list_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_rag_files_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.ListRagFilesRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.ListRagFilesResponse()
+ )
+ await client.list_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_list_rag_files_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data_service.ListRagFilesResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_rag_files(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_rag_files_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_rag_files(
+ vertex_rag_data_service.ListRagFilesRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_rag_files_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_data_service.ListRagFilesResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.ListRagFilesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_rag_files(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_rag_files_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_rag_files(
+ vertex_rag_data_service.ListRagFilesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_rag_files_pager(transport_name: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_rag_files(request={}, retry=retry, timeout=timeout)
+
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, vertex_rag_data.RagFile) for i in results)
+
+
+def test_list_rag_files_pages(transport_name: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_rag_files(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_rag_files_async_pager():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_rag_files), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_rag_files(
+ request={},
+ )
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, vertex_rag_data.RagFile) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_rag_files_async_pages():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_rag_files), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_rag_files(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.DeleteRagFileRequest,
+ dict,
+ ],
+)
+def test_delete_rag_file(request_type, transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.delete_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.DeleteRagFileRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_rag_file_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_data_service.DeleteRagFileRequest(
+ name="name_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_rag_file(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_data_service.DeleteRagFileRequest(
+ name="name_value",
+ )
+
+
+def test_delete_rag_file_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_rag_file in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.delete_rag_file] = mock_rpc
+ request = {}
+ client.delete_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_rag_file(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_file_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_rag_file
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_rag_file
+ ] = mock_rpc
+
+ request = {}
+ await client.delete_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ await client.delete_rag_file(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_file_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_data_service.DeleteRagFileRequest,
+):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.delete_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_data_service.DeleteRagFileRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_file_async_from_dict():
+ await test_delete_rag_file_async(request_type=dict)
+
+
+def test_delete_rag_file_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.DeleteRagFileRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_file_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_data_service.DeleteRagFileRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.delete_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_rag_file_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_rag_file(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_rag_file_flattened_error():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_rag_file(
+ vertex_rag_data_service.DeleteRagFileRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_file_flattened_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_rag_file(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_file_flattened_error_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_rag_file(
+ vertex_rag_data_service.DeleteRagFileRequest(),
+ name="name_value",
+ )
+
+
+def test_create_rag_corpus_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.create_rag_corpus in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.create_rag_corpus
+ ] = mock_rpc
+
+ request = {}
+ client.create_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_rag_corpus_rest_required_fields(
+ request_type=vertex_rag_data_service.CreateRagCorpusRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_rag_corpus._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_rag_corpus._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.create_rag_corpus(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_rag_corpus_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.create_rag_corpus._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "ragCorpus",
+ )
+ )
+ )
+
+
+def test_create_rag_corpus_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_rag_corpus(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/ragCorpora"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_rag_corpus_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_rag_corpus(
+ vertex_rag_data_service.CreateRagCorpusRequest(),
+ parent="parent_value",
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+
+def test_update_rag_corpus_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.update_rag_corpus in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_rag_corpus
+ ] = mock_rpc
+
+ request = {}
+ client.update_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_update_rag_corpus_rest_required_fields(
+ request_type=vertex_rag_data_service.UpdateRagCorpusRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_rag_corpus._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_rag_corpus._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "patch",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.update_rag_corpus(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_update_rag_corpus_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.update_rag_corpus._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("ragCorpus",)))
+
+
+def test_update_rag_corpus_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "rag_corpus": {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3"
+ }
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.update_rag_corpus(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{rag_corpus.name=projects/*/locations/*/ragCorpora/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_update_rag_corpus_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_rag_corpus(
+ vertex_rag_data_service.UpdateRagCorpusRequest(),
+ rag_corpus=vertex_rag_data.RagCorpus(name="name_value"),
+ )
+
+
+def test_get_rag_corpus_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_rag_corpus in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_rag_corpus] = mock_rpc
+
+ request = {}
+ client.get_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_rag_corpus_rest_required_fields(
+ request_type=vertex_rag_data_service.GetRagCorpusRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_rag_corpus._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_rag_corpus._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data.RagCorpus()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data.RagCorpus.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_rag_corpus(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_rag_corpus_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_rag_corpus._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_rag_corpus_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data.RagCorpus()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data.RagCorpus.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_rag_corpus(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/ragCorpora/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_rag_corpus_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_rag_corpus(
+ vertex_rag_data_service.GetRagCorpusRequest(),
+ name="name_value",
+ )
+
+
+def test_list_rag_corpora_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_rag_corpora in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_rag_corpora
+ ] = mock_rpc
+
+ request = {}
+ client.list_rag_corpora(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_rag_corpora(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_rag_corpora_rest_required_fields(
+ request_type=vertex_rag_data_service.ListRagCorporaRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_rag_corpora._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_rag_corpora._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "page_size",
+ "page_token",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.ListRagCorporaResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.ListRagCorporaResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_rag_corpora(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_rag_corpora_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_rag_corpora._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "pageSize",
+ "pageToken",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_rag_corpora_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.ListRagCorporaResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.ListRagCorporaResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_rag_corpora(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}/ragCorpora"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_rag_corpora_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_rag_corpora(
+ vertex_rag_data_service.ListRagCorporaRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_rag_corpora_rest_pager(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagCorporaResponse(
+ rag_corpora=[
+ vertex_rag_data.RagCorpus(),
+ vertex_rag_data.RagCorpus(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ vertex_rag_data_service.ListRagCorporaResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ pager = client.list_rag_corpora(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, vertex_rag_data.RagCorpus) for i in results)
+
+ pages = list(client.list_rag_corpora(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_rag_corpus_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_rag_corpus in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_rag_corpus
+ ] = mock_rpc
+
+ request = {}
+ client.delete_rag_corpus(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_rag_corpus(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_rag_corpus_rest_required_fields(
+ request_type=vertex_rag_data_service.DeleteRagCorpusRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_rag_corpus._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_rag_corpus._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("force",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_rag_corpus(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_rag_corpus_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_rag_corpus._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("force",)) & set(("name",)))
+
+
+def test_delete_rag_corpus_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_rag_corpus(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/ragCorpora/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_rag_corpus_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_rag_corpus(
+ vertex_rag_data_service.DeleteRagCorpusRequest(),
+ name="name_value",
+ )
+
+
+def test_upload_rag_file_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.upload_rag_file in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.upload_rag_file] = mock_rpc
+
+ request = {}
+ client.upload_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.upload_rag_file(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_upload_rag_file_rest_required_fields(
+ request_type=vertex_rag_data_service.UploadRagFileRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).upload_rag_file._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).upload_rag_file._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.UploadRagFileResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.UploadRagFileResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.upload_rag_file(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_upload_rag_file_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.upload_rag_file._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "ragFile",
+ "uploadRagFileConfig",
+ )
+ )
+ )
+
+
+def test_upload_rag_file_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.UploadRagFileResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "parent": "projects/sample1/locations/sample2/ragCorpora/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ rag_file=vertex_rag_data.RagFile(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ upload_rag_file_config=vertex_rag_data.UploadRagFileConfig(
+ rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig(
+ fixed_length_chunking=vertex_rag_data.RagFileChunkingConfig.FixedLengthChunking(
+ chunk_size=1075
+ )
+ )
+ ),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.UploadRagFileResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.upload_rag_file(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*/ragCorpora/*}/ragFiles:upload"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_upload_rag_file_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.upload_rag_file(
+ vertex_rag_data_service.UploadRagFileRequest(),
+ parent="parent_value",
+ rag_file=vertex_rag_data.RagFile(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ upload_rag_file_config=vertex_rag_data.UploadRagFileConfig(
+ rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig(
+ fixed_length_chunking=vertex_rag_data.RagFileChunkingConfig.FixedLengthChunking(
+ chunk_size=1075
+ )
+ )
+ ),
+ )
+
+
+def test_import_rag_files_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.import_rag_files in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.import_rag_files
+ ] = mock_rpc
+
+ request = {}
+ client.import_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.import_rag_files(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_import_rag_files_rest_required_fields(
+ request_type=vertex_rag_data_service.ImportRagFilesRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).import_rag_files._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).import_rag_files._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.import_rag_files(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_import_rag_files_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.import_rag_files._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "importRagFilesConfig",
+ )
+ )
+ )
+
+
+def test_import_rag_files_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "parent": "projects/sample1/locations/sample2/ragCorpora/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ import_rag_files_config=vertex_rag_data.ImportRagFilesConfig(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.import_rag_files(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*/ragCorpora/*}/ragFiles:import"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_import_rag_files_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.import_rag_files(
+ vertex_rag_data_service.ImportRagFilesRequest(),
+ parent="parent_value",
+ import_rag_files_config=vertex_rag_data.ImportRagFilesConfig(
+ gcs_source=io.GcsSource(uris=["uris_value"])
+ ),
+ )
+
+
+def test_get_rag_file_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_rag_file in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.get_rag_file] = mock_rpc
+
+ request = {}
+ client.get_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_rag_file(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_rag_file_rest_required_fields(
+ request_type=vertex_rag_data_service.GetRagFileRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_rag_file._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_rag_file._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data.RagFile()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data.RagFile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_rag_file(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_rag_file_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.get_rag_file._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_rag_file_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data.RagFile()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data.RagFile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_rag_file(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_rag_file_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_rag_file(
+ vertex_rag_data_service.GetRagFileRequest(),
+ name="name_value",
+ )
+
+
+def test_list_rag_files_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.list_rag_files in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.list_rag_files] = mock_rpc
+
+ request = {}
+ client.list_rag_files(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_rag_files(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_rag_files_rest_required_fields(
+ request_type=vertex_rag_data_service.ListRagFilesRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_rag_files._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_rag_files._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "page_size",
+ "page_token",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.ListRagFilesResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.ListRagFilesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_rag_files(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_rag_files_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.list_rag_files._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "pageSize",
+ "pageToken",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_rag_files_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.ListRagFilesResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "parent": "projects/sample1/locations/sample2/ragCorpora/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.ListRagFilesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_rag_files(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*/ragCorpora/*}/ragFiles"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_rag_files_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_rag_files(
+ vertex_rag_data_service.ListRagFilesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_rag_files_rest_pager(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="abc",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[],
+ next_page_token="def",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ ],
+ next_page_token="ghi",
+ ),
+ vertex_rag_data_service.ListRagFilesResponse(
+ rag_files=[
+ vertex_rag_data.RagFile(),
+ vertex_rag_data.RagFile(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ vertex_rag_data_service.ListRagFilesResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {
+ "parent": "projects/sample1/locations/sample2/ragCorpora/sample3"
+ }
+
+ pager = client.list_rag_files(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, vertex_rag_data.RagFile) for i in results)
+
+ pages = list(client.list_rag_files(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_rag_file_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.delete_rag_file in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.delete_rag_file] = mock_rpc
+
+ request = {}
+ client.delete_rag_file(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.delete_rag_file(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_rag_file_rest_required_fields(
+ request_type=vertex_rag_data_service.DeleteRagFileRequest,
+):
+ transport_class = transports.VertexRagDataServiceRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_rag_file._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_rag_file._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_rag_file(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_rag_file_rest_unset_required_fields():
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.delete_rag_file._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_delete_rag_file_rest_flattened():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_rag_file(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_rag_file_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_rag_file(
+ vertex_rag_data_service.DeleteRagFileRequest(),
+ name="name_value",
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.VertexRagDataServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.VertexRagDataServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = VertexRagDataServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.VertexRagDataServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = VertexRagDataServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = VertexRagDataServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.VertexRagDataServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = VertexRagDataServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.VertexRagDataServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.VertexRagDataServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.VertexRagDataServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagDataServiceGrpcTransport,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ transports.VertexRagDataServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = VertexRagDataServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_rag_corpus_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.CreateRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_rag_corpus_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.UpdateRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_rag_corpus_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ call.return_value = vertex_rag_data.RagCorpus()
+ client.get_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.GetRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_rag_corpora_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ call.return_value = vertex_rag_data_service.ListRagCorporaResponse()
+ client.list_rag_corpora(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ListRagCorporaRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_rag_corpus_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.DeleteRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_upload_rag_file_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ call.return_value = vertex_rag_data_service.UploadRagFileResponse()
+ client.upload_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.UploadRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_import_rag_files_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.import_rag_files(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ImportRagFilesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_rag_file_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ call.return_value = vertex_rag_data.RagFile()
+ client.get_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.GetRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_rag_files_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ call.return_value = vertex_rag_data_service.ListRagFilesResponse()
+ client.list_rag_files(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ListRagFilesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_rag_file_empty_call_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.delete_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.DeleteRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = VertexRagDataServiceAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_rag_corpus_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.create_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.CreateRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_rag_corpus_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.update_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.UpdateRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_rag_corpus_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data.RagCorpus(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ )
+ )
+ await client.get_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.GetRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_rag_corpora_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.ListRagCorporaResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_rag_corpora(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ListRagCorporaRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_rag_corpus_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.DeleteRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_upload_rag_file_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.UploadRagFileResponse()
+ )
+ await client.upload_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.UploadRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_import_rag_files_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.import_rag_files(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ImportRagFilesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_rag_file_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data.RagFile(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ size_bytes=1089,
+ rag_file_type=vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT,
+ )
+ )
+ await client.get_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.GetRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_rag_files_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_data_service.ListRagFilesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_rag_files(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ListRagFilesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_rag_file_empty_call_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.delete_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.DeleteRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = VertexRagDataServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_create_rag_corpus_rest_bad_request(
+ request_type=vertex_rag_data_service.CreateRagCorpusRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_rag_corpus(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.CreateRagCorpusRequest,
+ dict,
+ ],
+)
+def test_create_rag_corpus_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["rag_corpus"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "rag_embedding_model_config": {
+ "vertex_prediction_endpoint": {
+ "endpoint": "endpoint_value",
+ "model": "model_value",
+ "model_version_id": "model_version_id_value",
+ },
+ "hybrid_search_config": {
+ "sparse_embedding_config": {
+ "bm25": {"multilingual": True, "k1": 0.156, "b": 0.98}
+ },
+ "dense_embedding_model_prediction_endpoint": {},
+ },
+ },
+ "rag_vector_db_config": {
+ "rag_managed_db": {},
+ "weaviate": {
+ "http_endpoint": "http_endpoint_value",
+ "collection_name": "collection_name_value",
+ },
+ "pinecone": {"index_name": "index_name_value"},
+ "vertex_feature_store": {
+ "feature_view_resource_name": "feature_view_resource_name_value"
+ },
+ "vertex_vector_search": {
+ "index_endpoint": "index_endpoint_value",
+ "index": "index_value",
+ },
+ "api_auth": {
+ "api_key_config": {
+ "api_key_secret_version": "api_key_secret_version_value"
+ }
+ },
+ "rag_embedding_model_config": {},
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "corpus_status": {"state": 1, "error_status": "error_status_value"},
+ "vector_db_config": {},
+ "vertex_ai_search_config": {"serving_config": "serving_config_value"},
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = vertex_rag_data_service.CreateRagCorpusRequest.meta.fields[
+ "rag_corpus"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["rag_corpus"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["rag_corpus"][field])):
+ del request_init["rag_corpus"][field][i][subfield]
+ else:
+ del request_init["rag_corpus"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_rag_corpus(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_rag_corpus_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_create_rag_corpus"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_create_rag_corpus"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.CreateRagCorpusRequest.pb(
+ vertex_rag_data_service.CreateRagCorpusRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.CreateRagCorpusRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.create_rag_corpus(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_update_rag_corpus_rest_bad_request(
+ request_type=vertex_rag_data_service.UpdateRagCorpusRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "rag_corpus": {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.update_rag_corpus(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.UpdateRagCorpusRequest,
+ dict,
+ ],
+)
+def test_update_rag_corpus_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "rag_corpus": {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ }
+ request_init["rag_corpus"] = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3",
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "rag_embedding_model_config": {
+ "vertex_prediction_endpoint": {
+ "endpoint": "endpoint_value",
+ "model": "model_value",
+ "model_version_id": "model_version_id_value",
+ },
+ "hybrid_search_config": {
+ "sparse_embedding_config": {
+ "bm25": {"multilingual": True, "k1": 0.156, "b": 0.98}
+ },
+ "dense_embedding_model_prediction_endpoint": {},
+ },
+ },
+ "rag_vector_db_config": {
+ "rag_managed_db": {},
+ "weaviate": {
+ "http_endpoint": "http_endpoint_value",
+ "collection_name": "collection_name_value",
+ },
+ "pinecone": {"index_name": "index_name_value"},
+ "vertex_feature_store": {
+ "feature_view_resource_name": "feature_view_resource_name_value"
+ },
+ "vertex_vector_search": {
+ "index_endpoint": "index_endpoint_value",
+ "index": "index_value",
+ },
+ "api_auth": {
+ "api_key_config": {
+ "api_key_secret_version": "api_key_secret_version_value"
+ }
+ },
+ "rag_embedding_model_config": {},
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "corpus_status": {"state": 1, "error_status": "error_status_value"},
+ "vector_db_config": {},
+ "vertex_ai_search_config": {"serving_config": "serving_config_value"},
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = vertex_rag_data_service.UpdateRagCorpusRequest.meta.fields[
+ "rag_corpus"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["rag_corpus"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["rag_corpus"][field])):
+ del request_init["rag_corpus"][field][i][subfield]
+ else:
+ del request_init["rag_corpus"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.update_rag_corpus(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_rag_corpus_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_update_rag_corpus"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_update_rag_corpus"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.UpdateRagCorpusRequest.pb(
+ vertex_rag_data_service.UpdateRagCorpusRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.UpdateRagCorpusRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.update_rag_corpus(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_rag_corpus_rest_bad_request(
+ request_type=vertex_rag_data_service.GetRagCorpusRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_rag_corpus(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.GetRagCorpusRequest,
+ dict,
+ ],
+)
+def test_get_rag_corpus_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data.RagCorpus(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data.RagCorpus.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_rag_corpus(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data.RagCorpus)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_rag_corpus_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_get_rag_corpus"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_get_rag_corpus"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.GetRagCorpusRequest.pb(
+ vertex_rag_data_service.GetRagCorpusRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data.RagCorpus.to_json(vertex_rag_data.RagCorpus())
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.GetRagCorpusRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data.RagCorpus()
+
+ client.get_rag_corpus(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_rag_corpora_rest_bad_request(
+ request_type=vertex_rag_data_service.ListRagCorporaRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_rag_corpora(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.ListRagCorporaRequest,
+ dict,
+ ],
+)
+def test_list_rag_corpora_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.ListRagCorporaResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.ListRagCorporaResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_rag_corpora(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListRagCorporaPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_rag_corpora_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_list_rag_corpora"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_list_rag_corpora"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.ListRagCorporaRequest.pb(
+ vertex_rag_data_service.ListRagCorporaRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data_service.ListRagCorporaResponse.to_json(
+ vertex_rag_data_service.ListRagCorporaResponse()
+ )
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.ListRagCorporaRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data_service.ListRagCorporaResponse()
+
+ client.list_rag_corpora(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_rag_corpus_rest_bad_request(
+ request_type=vertex_rag_data_service.DeleteRagCorpusRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_rag_corpus(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.DeleteRagCorpusRequest,
+ dict,
+ ],
+)
+def test_delete_rag_corpus_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_rag_corpus(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_rag_corpus_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_delete_rag_corpus"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_delete_rag_corpus"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.DeleteRagCorpusRequest.pb(
+ vertex_rag_data_service.DeleteRagCorpusRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.DeleteRagCorpusRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_rag_corpus(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_upload_rag_file_rest_bad_request(
+ request_type=vertex_rag_data_service.UploadRagFileRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.upload_rag_file(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.UploadRagFileRequest,
+ dict,
+ ],
+)
+def test_upload_rag_file_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.UploadRagFileResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.UploadRagFileResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.upload_rag_file(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data_service.UploadRagFileResponse)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_upload_rag_file_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_upload_rag_file"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_upload_rag_file"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.UploadRagFileRequest.pb(
+ vertex_rag_data_service.UploadRagFileRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data_service.UploadRagFileResponse.to_json(
+ vertex_rag_data_service.UploadRagFileResponse()
+ )
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.UploadRagFileRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data_service.UploadRagFileResponse()
+
+ client.upload_rag_file(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_import_rag_files_rest_bad_request(
+ request_type=vertex_rag_data_service.ImportRagFilesRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.import_rag_files(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.ImportRagFilesRequest,
+ dict,
+ ],
+)
+def test_import_rag_files_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.import_rag_files(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_import_rag_files_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_import_rag_files"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_import_rag_files"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.ImportRagFilesRequest.pb(
+ vertex_rag_data_service.ImportRagFilesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.ImportRagFilesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.import_rag_files(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_rag_file_rest_bad_request(
+ request_type=vertex_rag_data_service.GetRagFileRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_rag_file(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.GetRagFileRequest,
+ dict,
+ ],
+)
+def test_get_rag_file_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data.RagFile(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ size_bytes=1089,
+ rag_file_type=vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data.RagFile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_rag_file(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data.RagFile)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+ assert response.size_bytes == 1089
+ assert (
+ response.rag_file_type == vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT
+ )
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_rag_file_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_get_rag_file"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_get_rag_file"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.GetRagFileRequest.pb(
+ vertex_rag_data_service.GetRagFileRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data.RagFile.to_json(vertex_rag_data.RagFile())
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.GetRagFileRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data.RagFile()
+
+ client.get_rag_file(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_rag_files_rest_bad_request(
+ request_type=vertex_rag_data_service.ListRagFilesRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_rag_files(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.ListRagFilesRequest,
+ dict,
+ ],
+)
+def test_list_rag_files_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.ListRagFilesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.ListRagFilesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_rag_files(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListRagFilesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_rag_files_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_list_rag_files"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_list_rag_files"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.ListRagFilesRequest.pb(
+ vertex_rag_data_service.ListRagFilesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data_service.ListRagFilesResponse.to_json(
+ vertex_rag_data_service.ListRagFilesResponse()
+ )
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.ListRagFilesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data_service.ListRagFilesResponse()
+
+ client.list_rag_files(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_delete_rag_file_rest_bad_request(
+ request_type=vertex_rag_data_service.DeleteRagFileRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_rag_file(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.DeleteRagFileRequest,
+ dict,
+ ],
+)
+def test_delete_rag_file_rest_call_success(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_rag_file(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_rag_file_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagDataServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "post_delete_rag_file"
+ ) as post, mock.patch.object(
+ transports.VertexRagDataServiceRestInterceptor, "pre_delete_rag_file"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.DeleteRagFileRequest.pb(
+ vertex_rag_data_service.DeleteRagFileRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = vertex_rag_data_service.DeleteRagFileRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.delete_rag_file(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_rag_corpus_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ client.create_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.CreateRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_rag_corpus_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ client.update_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.UpdateRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_rag_corpus_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ client.get_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.GetRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_rag_corpora_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ client.list_rag_corpora(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ListRagCorporaRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_rag_corpus_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ client.delete_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.DeleteRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_upload_rag_file_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ client.upload_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.UploadRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_import_rag_files_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ client.import_rag_files(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ImportRagFilesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_rag_file_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ client.get_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.GetRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_rag_files_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ client.list_rag_files(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ListRagFilesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_rag_file_empty_call_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ client.delete_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.DeleteRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+def test_vertex_rag_data_service_rest_lro_client():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AbstractOperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = VertexRagDataServiceAsyncClient.get_transport_class("rest_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_create_rag_corpus_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.CreateRagCorpusRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.create_rag_corpus(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.CreateRagCorpusRequest,
+ dict,
+ ],
+)
+async def test_create_rag_corpus_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request_init["rag_corpus"] = {
+ "name": "name_value",
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "rag_embedding_model_config": {
+ "vertex_prediction_endpoint": {
+ "endpoint": "endpoint_value",
+ "model": "model_value",
+ "model_version_id": "model_version_id_value",
+ },
+ "hybrid_search_config": {
+ "sparse_embedding_config": {
+ "bm25": {"multilingual": True, "k1": 0.156, "b": 0.98}
+ },
+ "dense_embedding_model_prediction_endpoint": {},
+ },
+ },
+ "rag_vector_db_config": {
+ "rag_managed_db": {},
+ "weaviate": {
+ "http_endpoint": "http_endpoint_value",
+ "collection_name": "collection_name_value",
+ },
+ "pinecone": {"index_name": "index_name_value"},
+ "vertex_feature_store": {
+ "feature_view_resource_name": "feature_view_resource_name_value"
+ },
+ "vertex_vector_search": {
+ "index_endpoint": "index_endpoint_value",
+ "index": "index_value",
+ },
+ "api_auth": {
+ "api_key_config": {
+ "api_key_secret_version": "api_key_secret_version_value"
+ }
+ },
+ "rag_embedding_model_config": {},
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "corpus_status": {"state": 1, "error_status": "error_status_value"},
+ "vector_db_config": {},
+ "vertex_ai_search_config": {"serving_config": "serving_config_value"},
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = vertex_rag_data_service.CreateRagCorpusRequest.meta.fields[
+ "rag_corpus"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["rag_corpus"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["rag_corpus"][field])):
+ del request_init["rag_corpus"][field][i][subfield]
+ else:
+ del request_init["rag_corpus"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.create_rag_corpus(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_create_rag_corpus_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_create_rag_corpus"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_create_rag_corpus"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.CreateRagCorpusRequest.pb(
+ vertex_rag_data_service.CreateRagCorpusRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.CreateRagCorpusRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.create_rag_corpus(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_update_rag_corpus_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.UpdateRagCorpusRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "rag_corpus": {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.update_rag_corpus(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.UpdateRagCorpusRequest,
+ dict,
+ ],
+)
+async def test_update_rag_corpus_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "rag_corpus": {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ }
+ request_init["rag_corpus"] = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3",
+ "display_name": "display_name_value",
+ "description": "description_value",
+ "rag_embedding_model_config": {
+ "vertex_prediction_endpoint": {
+ "endpoint": "endpoint_value",
+ "model": "model_value",
+ "model_version_id": "model_version_id_value",
+ },
+ "hybrid_search_config": {
+ "sparse_embedding_config": {
+ "bm25": {"multilingual": True, "k1": 0.156, "b": 0.98}
+ },
+ "dense_embedding_model_prediction_endpoint": {},
+ },
+ },
+ "rag_vector_db_config": {
+ "rag_managed_db": {},
+ "weaviate": {
+ "http_endpoint": "http_endpoint_value",
+ "collection_name": "collection_name_value",
+ },
+ "pinecone": {"index_name": "index_name_value"},
+ "vertex_feature_store": {
+ "feature_view_resource_name": "feature_view_resource_name_value"
+ },
+ "vertex_vector_search": {
+ "index_endpoint": "index_endpoint_value",
+ "index": "index_value",
+ },
+ "api_auth": {
+ "api_key_config": {
+ "api_key_secret_version": "api_key_secret_version_value"
+ }
+ },
+ "rag_embedding_model_config": {},
+ },
+ "create_time": {"seconds": 751, "nanos": 543},
+ "update_time": {},
+ "corpus_status": {"state": 1, "error_status": "error_status_value"},
+ "vector_db_config": {},
+ "vertex_ai_search_config": {"serving_config": "serving_config_value"},
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = vertex_rag_data_service.UpdateRagCorpusRequest.meta.fields[
+ "rag_corpus"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["rag_corpus"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["rag_corpus"][field])):
+ del request_init["rag_corpus"][field][i][subfield]
+ else:
+ del request_init["rag_corpus"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.update_rag_corpus(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_update_rag_corpus_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_update_rag_corpus"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_update_rag_corpus"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.UpdateRagCorpusRequest.pb(
+ vertex_rag_data_service.UpdateRagCorpusRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.UpdateRagCorpusRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.update_rag_corpus(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_rag_corpus_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.GetRagCorpusRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_rag_corpus(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.GetRagCorpusRequest,
+ dict,
+ ],
+)
+async def test_get_rag_corpus_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data.RagCorpus(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data.RagCorpus.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_rag_corpus(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data.RagCorpus)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_rag_corpus_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_get_rag_corpus"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_get_rag_corpus"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.GetRagCorpusRequest.pb(
+ vertex_rag_data_service.GetRagCorpusRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data.RagCorpus.to_json(vertex_rag_data.RagCorpus())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.GetRagCorpusRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data.RagCorpus()
+
+ await client.get_rag_corpus(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_rag_corpora_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.ListRagCorporaRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_rag_corpora(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.ListRagCorporaRequest,
+ dict,
+ ],
+)
+async def test_list_rag_corpora_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.ListRagCorporaResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.ListRagCorporaResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_rag_corpora(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListRagCorporaAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_rag_corpora_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_list_rag_corpora"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_list_rag_corpora"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.ListRagCorporaRequest.pb(
+ vertex_rag_data_service.ListRagCorporaRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data_service.ListRagCorporaResponse.to_json(
+ vertex_rag_data_service.ListRagCorporaResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.ListRagCorporaRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data_service.ListRagCorporaResponse()
+
+ await client.list_rag_corpora(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_corpus_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.DeleteRagCorpusRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_rag_corpus(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.DeleteRagCorpusRequest,
+ dict,
+ ],
+)
+async def test_delete_rag_corpus_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_rag_corpus(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_rag_corpus_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_delete_rag_corpus"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_delete_rag_corpus"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.DeleteRagCorpusRequest.pb(
+ vertex_rag_data_service.DeleteRagCorpusRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.DeleteRagCorpusRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_rag_corpus(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_upload_rag_file_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.UploadRagFileRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.upload_rag_file(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.UploadRagFileRequest,
+ dict,
+ ],
+)
+async def test_upload_rag_file_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.UploadRagFileResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.UploadRagFileResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.upload_rag_file(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data_service.UploadRagFileResponse)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_upload_rag_file_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_upload_rag_file"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_upload_rag_file"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.UploadRagFileRequest.pb(
+ vertex_rag_data_service.UploadRagFileRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data_service.UploadRagFileResponse.to_json(
+ vertex_rag_data_service.UploadRagFileResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.UploadRagFileRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data_service.UploadRagFileResponse()
+
+ await client.upload_rag_file(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_import_rag_files_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.ImportRagFilesRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.import_rag_files(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.ImportRagFilesRequest,
+ dict,
+ ],
+)
+async def test_import_rag_files_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.import_rag_files(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_import_rag_files_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_import_rag_files"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_import_rag_files"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.ImportRagFilesRequest.pb(
+ vertex_rag_data_service.ImportRagFilesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.ImportRagFilesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.import_rag_files(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_rag_file_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.GetRagFileRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_rag_file(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.GetRagFileRequest,
+ dict,
+ ],
+)
+async def test_get_rag_file_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data.RagFile(
+ name="name_value",
+ display_name="display_name_value",
+ description="description_value",
+ size_bytes=1089,
+ rag_file_type=vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data.RagFile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.get_rag_file(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_data.RagFile)
+ assert response.name == "name_value"
+ assert response.display_name == "display_name_value"
+ assert response.description == "description_value"
+ assert response.size_bytes == 1089
+ assert (
+ response.rag_file_type == vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT
+ )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_get_rag_file_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_get_rag_file"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_get_rag_file"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.GetRagFileRequest.pb(
+ vertex_rag_data_service.GetRagFileRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data.RagFile.to_json(vertex_rag_data.RagFile())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.GetRagFileRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data.RagFile()
+
+ await client.get_rag_file(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_list_rag_files_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.ListRagFilesRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_rag_files(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.ListRagFilesRequest,
+ dict,
+ ],
+)
+async def test_list_rag_files_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_data_service.ListRagFilesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_data_service.ListRagFilesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.list_rag_files(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListRagFilesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_list_rag_files_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_list_rag_files"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_list_rag_files"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.ListRagFilesRequest.pb(
+ vertex_rag_data_service.ListRagFilesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_data_service.ListRagFilesResponse.to_json(
+ vertex_rag_data_service.ListRagFilesResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.ListRagFilesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_data_service.ListRagFilesResponse()
+
+ await client.list_rag_files(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_rag_file_rest_asyncio_bad_request(
+ request_type=vertex_rag_data_service.DeleteRagFileRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_rag_file(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_data_service.DeleteRagFileRequest,
+ dict,
+ ],
+)
+async def test_delete_rag_file_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4"
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.delete_rag_file(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_delete_rag_file_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagDataServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagDataServiceRestInterceptor(),
+ )
+ client = VertexRagDataServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "post_delete_rag_file"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagDataServiceRestInterceptor, "pre_delete_rag_file"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_data_service.DeleteRagFileRequest.pb(
+ vertex_rag_data_service.DeleteRagFileRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_data_service.DeleteRagFileRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ await client.delete_rag_file(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_rag_corpus_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_rag_corpus), "__call__"
+ ) as call:
+ await client.create_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.CreateRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_rag_corpus_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_rag_corpus), "__call__"
+ ) as call:
+ await client.update_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.UpdateRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_rag_corpus_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call:
+ await client.get_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.GetRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_rag_corpora_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call:
+ await client.list_rag_corpora(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ListRagCorporaRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_rag_corpus_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_rag_corpus), "__call__"
+ ) as call:
+ await client.delete_rag_corpus(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.DeleteRagCorpusRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_upload_rag_file_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call:
+ await client.upload_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.UploadRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_import_rag_files_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call:
+ await client.import_rag_files(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ImportRagFilesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_rag_file_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call:
+ await client.get_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.GetRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_rag_files_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call:
+ await client.list_rag_files(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.ListRagFilesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_rag_file_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call:
+ await client.delete_rag_file(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_data_service.DeleteRagFileRequest()
+
+ assert args[0] == request_msg
+
+
+def test_vertex_rag_data_service_rest_asyncio_lro_client():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AsyncOperationsRestClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.VertexRagDataServiceGrpcTransport,
+ )
+
+
+def test_vertex_rag_data_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.VertexRagDataServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_vertex_rag_data_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.transports.VertexRagDataServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.VertexRagDataServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "create_rag_corpus",
+ "update_rag_corpus",
+ "get_rag_corpus",
+ "list_rag_corpora",
+ "delete_rag_corpus",
+ "upload_rag_file",
+ "import_rag_files",
+ "get_rag_file",
+ "list_rag_files",
+ "delete_rag_file",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_vertex_rag_data_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.transports.VertexRagDataServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.VertexRagDataServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_vertex_rag_data_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.transports.VertexRagDataServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.VertexRagDataServiceTransport()
+ adc.assert_called_once()
+
+
+def test_vertex_rag_data_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ VertexRagDataServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagDataServiceGrpcTransport,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_vertex_rag_data_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagDataServiceGrpcTransport,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ transports.VertexRagDataServiceRestTransport,
+ ],
+)
+def test_vertex_rag_data_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.VertexRagDataServiceGrpcTransport, grpc_helpers),
+ (transports.VertexRagDataServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_vertex_rag_data_service_transport_create_channel(
+ transport_class, grpc_helpers
+):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagDataServiceGrpcTransport,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_vertex_rag_data_service_grpc_transport_client_cert_source_for_mtls(
+ transport_class,
+):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_vertex_rag_data_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.VertexRagDataServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_vertex_rag_data_service_host_no_port(transport_name):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_vertex_rag_data_service_host_with_port(transport_name):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_vertex_rag_data_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = VertexRagDataServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = VertexRagDataServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.create_rag_corpus._session
+ session2 = client2.transport.create_rag_corpus._session
+ assert session1 != session2
+ session1 = client1.transport.update_rag_corpus._session
+ session2 = client2.transport.update_rag_corpus._session
+ assert session1 != session2
+ session1 = client1.transport.get_rag_corpus._session
+ session2 = client2.transport.get_rag_corpus._session
+ assert session1 != session2
+ session1 = client1.transport.list_rag_corpora._session
+ session2 = client2.transport.list_rag_corpora._session
+ assert session1 != session2
+ session1 = client1.transport.delete_rag_corpus._session
+ session2 = client2.transport.delete_rag_corpus._session
+ assert session1 != session2
+ session1 = client1.transport.upload_rag_file._session
+ session2 = client2.transport.upload_rag_file._session
+ assert session1 != session2
+ session1 = client1.transport.import_rag_files._session
+ session2 = client2.transport.import_rag_files._session
+ assert session1 != session2
+ session1 = client1.transport.get_rag_file._session
+ session2 = client2.transport.get_rag_file._session
+ assert session1 != session2
+ session1 = client1.transport.list_rag_files._session
+ session2 = client2.transport.list_rag_files._session
+ assert session1 != session2
+ session1 = client1.transport.delete_rag_file._session
+ session2 = client2.transport.delete_rag_file._session
+ assert session1 != session2
+
+
+def test_vertex_rag_data_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.VertexRagDataServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_vertex_rag_data_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.VertexRagDataServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagDataServiceGrpcTransport,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_vertex_rag_data_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagDataServiceGrpcTransport,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_vertex_rag_data_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_vertex_rag_data_service_grpc_lro_client():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_vertex_rag_data_service_grpc_lro_async_client():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc_asyncio",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_endpoint_path():
+ project = "squid"
+ location = "clam"
+ endpoint = "whelk"
+ expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+ project=project,
+ location=location,
+ endpoint=endpoint,
+ )
+ actual = VertexRagDataServiceClient.endpoint_path(project, location, endpoint)
+ assert expected == actual
+
+
+def test_parse_endpoint_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "endpoint": "nudibranch",
+ }
+ path = VertexRagDataServiceClient.endpoint_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_endpoint_path(path)
+ assert expected == actual
+
+
+def test_model_path():
+ project = "cuttlefish"
+ location = "mussel"
+ model = "winkle"
+ expected = "projects/{project}/locations/{location}/models/{model}".format(
+ project=project,
+ location=location,
+ model=model,
+ )
+ actual = VertexRagDataServiceClient.model_path(project, location, model)
+ assert expected == actual
+
+
+def test_parse_model_path():
+ expected = {
+ "project": "nautilus",
+ "location": "scallop",
+ "model": "abalone",
+ }
+ path = VertexRagDataServiceClient.model_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_model_path(path)
+ assert expected == actual
+
+
+def test_rag_corpus_path():
+ project = "squid"
+ location = "clam"
+ rag_corpus = "whelk"
+ expected = "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}".format(
+ project=project,
+ location=location,
+ rag_corpus=rag_corpus,
+ )
+ actual = VertexRagDataServiceClient.rag_corpus_path(project, location, rag_corpus)
+ assert expected == actual
+
+
+def test_parse_rag_corpus_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "rag_corpus": "nudibranch",
+ }
+ path = VertexRagDataServiceClient.rag_corpus_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_rag_corpus_path(path)
+ assert expected == actual
+
+
+def test_rag_file_path():
+ project = "cuttlefish"
+ location = "mussel"
+ rag_corpus = "winkle"
+ rag_file = "nautilus"
+ expected = "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}".format(
+ project=project,
+ location=location,
+ rag_corpus=rag_corpus,
+ rag_file=rag_file,
+ )
+ actual = VertexRagDataServiceClient.rag_file_path(
+ project, location, rag_corpus, rag_file
+ )
+ assert expected == actual
+
+
+def test_parse_rag_file_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ "rag_corpus": "squid",
+ "rag_file": "clam",
+ }
+ path = VertexRagDataServiceClient.rag_file_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_rag_file_path(path)
+ assert expected == actual
+
+
+def test_secret_version_path():
+ project = "whelk"
+ secret = "octopus"
+ secret_version = "oyster"
+ expected = "projects/{project}/secrets/{secret}/versions/{secret_version}".format(
+ project=project,
+ secret=secret,
+ secret_version=secret_version,
+ )
+ actual = VertexRagDataServiceClient.secret_version_path(
+ project, secret, secret_version
+ )
+ assert expected == actual
+
+
+def test_parse_secret_version_path():
+ expected = {
+ "project": "nudibranch",
+ "secret": "cuttlefish",
+ "secret_version": "mussel",
+ }
+ path = VertexRagDataServiceClient.secret_version_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_secret_version_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "winkle"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = VertexRagDataServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "nautilus",
+ }
+ path = VertexRagDataServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "scallop"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = VertexRagDataServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "abalone",
+ }
+ path = VertexRagDataServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "squid"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = VertexRagDataServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "clam",
+ }
+ path = VertexRagDataServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "whelk"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = VertexRagDataServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "octopus",
+ }
+ path = VertexRagDataServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "oyster"
+ location = "nudibranch"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = VertexRagDataServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "cuttlefish",
+ "location": "mussel",
+ }
+ path = VertexRagDataServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagDataServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.VertexRagDataServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.VertexRagDataServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = VertexRagDataServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(credentials=async_anonymous_credentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagDataServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = VertexRagDataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (VertexRagDataServiceClient, transports.VertexRagDataServiceGrpcTransport),
+ (
+ VertexRagDataServiceAsyncClient,
+ transports.VertexRagDataServiceGrpcAsyncIOTransport,
+ ),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..57e83323f5b0b5dcf5e0fbbb94aa2b16f7c88a39
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py
@@ -0,0 +1,7526 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable, AsyncIterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from google.api_core import api_core_version
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+
+try:
+ import aiohttp # type: ignore
+ from google.auth.aio.transport.sessions import AsyncAuthorizedSession
+ from google.api_core.operations_v1 import AsyncOperationsRestClient
+
+ HAS_ASYNC_REST_EXTRA = True
+except ImportError: # pragma: NO COVER
+ HAS_ASYNC_REST_EXTRA = False
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
+
+try:
+ from google.auth.aio import credentials as ga_credentials_async
+
+ HAS_GOOGLE_AUTH_AIO = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH_AIO = False
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import (
+ VertexRagServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import (
+ VertexRagServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import transports
+from google.cloud.aiplatform_v1beta1.types import content
+from google.cloud.aiplatform_v1beta1.types import content as gca_content
+from google.cloud.aiplatform_v1beta1.types import tool
+from google.cloud.aiplatform_v1beta1.types import vertex_rag_service
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import options_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
+import google.auth
+
+
+async def mock_async_gen(data, chunk_size=1):
+ for i in range(0, len(data)): # pragma: NO COVER
+ chunk = data[i : i + chunk_size]
+ yield chunk.encode("utf-8")
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+ if HAS_GOOGLE_AUTH_AIO:
+ return ga_credentials_async.AnonymousCredentials()
+ return ga_credentials.AnonymousCredentials()
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint_template(client):
+ return (
+ "test.{UNIVERSE_DOMAIN}"
+ if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+ else client._DEFAULT_ENDPOINT_TEMPLATE
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert VertexRagServiceClient._get_default_mtls_endpoint(None) is None
+ assert (
+ VertexRagServiceClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ VertexRagServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ VertexRagServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ VertexRagServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ VertexRagServiceClient._get_default_mtls_endpoint(non_googleapi)
+ == non_googleapi
+ )
+
+
+def test__read_environment_variables():
+ assert VertexRagServiceClient._read_environment_variables() == (False, "auto", None)
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert VertexRagServiceClient._read_environment_variables() == (
+ True,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ assert VertexRagServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ VertexRagServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ assert VertexRagServiceClient._read_environment_variables() == (
+ False,
+ "never",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ assert VertexRagServiceClient._read_environment_variables() == (
+ False,
+ "always",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+ assert VertexRagServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ VertexRagServiceClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+ assert VertexRagServiceClient._read_environment_variables() == (
+ False,
+ "auto",
+ "foo.com",
+ )
+
+
+def test__get_client_cert_source():
+ mock_provided_cert_source = mock.Mock()
+ mock_default_cert_source = mock.Mock()
+
+ assert VertexRagServiceClient._get_client_cert_source(None, False) is None
+ assert (
+ VertexRagServiceClient._get_client_cert_source(mock_provided_cert_source, False)
+ is None
+ )
+ assert (
+ VertexRagServiceClient._get_client_cert_source(mock_provided_cert_source, True)
+ == mock_provided_cert_source
+ )
+
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_default_cert_source,
+ ):
+ assert (
+ VertexRagServiceClient._get_client_cert_source(None, True)
+ is mock_default_cert_source
+ )
+ assert (
+ VertexRagServiceClient._get_client_cert_source(
+ mock_provided_cert_source, "true"
+ )
+ is mock_provided_cert_source
+ )
+
+
+@mock.patch.object(
+ VertexRagServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagServiceClient),
+)
+@mock.patch.object(
+ VertexRagServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagServiceAsyncClient),
+)
+def test__get_api_endpoint():
+ api_override = "foo.com"
+ mock_client_cert_source = mock.Mock()
+ default_universe = VertexRagServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ assert (
+ VertexRagServiceClient._get_api_endpoint(
+ api_override, mock_client_cert_source, default_universe, "always"
+ )
+ == api_override
+ )
+ assert (
+ VertexRagServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "auto"
+ )
+ == VertexRagServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ VertexRagServiceClient._get_api_endpoint(None, None, default_universe, "auto")
+ == default_endpoint
+ )
+ assert (
+ VertexRagServiceClient._get_api_endpoint(None, None, default_universe, "always")
+ == VertexRagServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ VertexRagServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, default_universe, "always"
+ )
+ == VertexRagServiceClient.DEFAULT_MTLS_ENDPOINT
+ )
+ assert (
+ VertexRagServiceClient._get_api_endpoint(None, None, mock_universe, "never")
+ == mock_endpoint
+ )
+ assert (
+ VertexRagServiceClient._get_api_endpoint(None, None, default_universe, "never")
+ == default_endpoint
+ )
+
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ VertexRagServiceClient._get_api_endpoint(
+ None, mock_client_cert_source, mock_universe, "auto"
+ )
+ assert (
+ str(excinfo.value)
+ == "mTLS is not supported in any universe other than googleapis.com."
+ )
+
+
+def test__get_universe_domain():
+ client_universe_domain = "foo.com"
+ universe_domain_env = "bar.com"
+
+ assert (
+ VertexRagServiceClient._get_universe_domain(
+ client_universe_domain, universe_domain_env
+ )
+ == client_universe_domain
+ )
+ assert (
+ VertexRagServiceClient._get_universe_domain(None, universe_domain_env)
+ == universe_domain_env
+ )
+ assert (
+ VertexRagServiceClient._get_universe_domain(None, None)
+ == VertexRagServiceClient._DEFAULT_UNIVERSE
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ VertexRagServiceClient._get_universe_domain("", None)
+ assert str(excinfo.value) == "Universe Domain cannot be an empty string."
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (VertexRagServiceClient, "grpc"),
+ (VertexRagServiceAsyncClient, "grpc_asyncio"),
+ (VertexRagServiceClient, "rest"),
+ ],
+)
+def test_vertex_rag_service_client_from_service_account_info(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info, transport=transport_name)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.VertexRagServiceGrpcTransport, "grpc"),
+ (transports.VertexRagServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.VertexRagServiceRestTransport, "rest"),
+ ],
+)
+def test_vertex_rag_service_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_name",
+ [
+ (VertexRagServiceClient, "grpc"),
+ (VertexRagServiceAsyncClient, "grpc_asyncio"),
+ (VertexRagServiceClient, "rest"),
+ ],
+)
+def test_vertex_rag_service_client_from_service_account_file(
+ client_class, transport_name
+):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json(
+ "dummy/file/path.json", transport=transport_name
+ )
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+def test_vertex_rag_service_client_get_transport_class():
+ transport = VertexRagServiceClient.get_transport_class()
+ available_transports = [
+ transports.VertexRagServiceGrpcTransport,
+ transports.VertexRagServiceRestTransport,
+ ]
+ assert transport in available_transports
+
+ transport = VertexRagServiceClient.get_transport_class("grpc")
+ assert transport == transports.VertexRagServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (VertexRagServiceClient, transports.VertexRagServiceGrpcTransport, "grpc"),
+ (
+ VertexRagServiceAsyncClient,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (VertexRagServiceClient, transports.VertexRagServiceRestTransport, "rest"),
+ ],
+)
+@mock.patch.object(
+ VertexRagServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagServiceClient),
+)
+@mock.patch.object(
+ VertexRagServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagServiceAsyncClient),
+)
+def test_vertex_rag_service_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(VertexRagServiceClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(VertexRagServiceClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name, client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_endpoint is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience="https://language.googleapis.com",
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ VertexRagServiceClient,
+ transports.VertexRagServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ VertexRagServiceAsyncClient,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ VertexRagServiceClient,
+ transports.VertexRagServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ VertexRagServiceAsyncClient,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ (
+ VertexRagServiceClient,
+ transports.VertexRagServiceRestTransport,
+ "rest",
+ "true",
+ ),
+ (
+ VertexRagServiceClient,
+ transports.VertexRagServiceRestTransport,
+ "rest",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ VertexRagServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagServiceClient),
+)
+@mock.patch.object(
+ VertexRagServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_vertex_rag_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ )
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class(transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [VertexRagServiceClient, VertexRagServiceAsyncClient]
+)
+@mock.patch.object(
+ VertexRagServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(VertexRagServiceClient),
+)
+@mock.patch.object(
+ VertexRagServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(VertexRagServiceAsyncClient),
+)
+def test_vertex_rag_service_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client_class.get_mtls_endpoint_and_cert_source()
+
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [VertexRagServiceClient, VertexRagServiceAsyncClient]
+)
+@mock.patch.object(
+ VertexRagServiceClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagServiceClient),
+)
+@mock.patch.object(
+ VertexRagServiceAsyncClient,
+ "_DEFAULT_ENDPOINT_TEMPLATE",
+ modify_default_endpoint_template(VertexRagServiceAsyncClient),
+)
+def test_vertex_rag_service_client_client_api_endpoint(client_class):
+ mock_client_cert_source = client_cert_source_callback
+ api_override = "foo.com"
+ default_universe = VertexRagServiceClient._DEFAULT_UNIVERSE
+ default_endpoint = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=default_universe
+ )
+ mock_universe = "bar.com"
+ mock_endpoint = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=mock_universe
+ )
+
+ # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true",
+ # use ClientOptions.api_endpoint as the api endpoint regardless.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=api_override
+ )
+ client = client_class(
+ client_options=options,
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert client.api_endpoint == api_override
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == default_endpoint
+
+ # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always",
+ # use the DEFAULT_MTLS_ENDPOINT as the api endpoint.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ client = client_class(credentials=ga_credentials.AnonymousCredentials())
+ assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+
+ # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default),
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist,
+ # and ClientOptions.universe_domain="bar.com",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint.
+ options = client_options.ClientOptions()
+ universe_exists = hasattr(options, "universe_domain")
+ if universe_exists:
+ options = client_options.ClientOptions(universe_domain=mock_universe)
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ else:
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == (
+ mock_endpoint if universe_exists else default_endpoint
+ )
+ assert client.universe_domain == (
+ mock_universe if universe_exists else default_universe
+ )
+
+ # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never",
+ # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint.
+ options = client_options.ClientOptions()
+ if hasattr(options, "universe_domain"):
+ delattr(options, "universe_domain")
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ client = client_class(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert client.api_endpoint == default_endpoint
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (VertexRagServiceClient, transports.VertexRagServiceGrpcTransport, "grpc"),
+ (
+ VertexRagServiceAsyncClient,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ (VertexRagServiceClient, transports.VertexRagServiceRestTransport, "rest"),
+ ],
+)
+def test_vertex_rag_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(
+ scopes=["1", "2"],
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ VertexRagServiceClient,
+ transports.VertexRagServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ VertexRagServiceAsyncClient,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ (
+ VertexRagServiceClient,
+ transports.VertexRagServiceRestTransport,
+ "rest",
+ None,
+ ),
+ ],
+)
+def test_vertex_rag_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+def test_vertex_rag_service_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.vertex_rag_service.transports.VertexRagServiceGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = VertexRagServiceClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ VertexRagServiceClient,
+ transports.VertexRagServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ VertexRagServiceAsyncClient,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_vertex_rag_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_service.RetrieveContextsRequest,
+ dict,
+ ],
+)
+def test_retrieve_contexts(request_type, transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_service.RetrieveContextsResponse()
+ response = client.retrieve_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_service.RetrieveContextsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.RetrieveContextsResponse)
+
+
+def test_retrieve_contexts_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_service.RetrieveContextsRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.retrieve_contexts(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_service.RetrieveContextsRequest(
+ parent="parent_value",
+ )
+
+
+def test_retrieve_contexts_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.retrieve_contexts in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.retrieve_contexts
+ ] = mock_rpc
+ request = {}
+ client.retrieve_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.retrieve_contexts(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_retrieve_contexts_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.retrieve_contexts
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.retrieve_contexts
+ ] = mock_rpc
+
+ request = {}
+ await client.retrieve_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.retrieve_contexts(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_retrieve_contexts_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_service.RetrieveContextsRequest,
+):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.RetrieveContextsResponse()
+ )
+ response = await client.retrieve_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_service.RetrieveContextsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.RetrieveContextsResponse)
+
+
+@pytest.mark.asyncio
+async def test_retrieve_contexts_async_from_dict():
+ await test_retrieve_contexts_async(request_type=dict)
+
+
+def test_retrieve_contexts_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_service.RetrieveContextsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ call.return_value = vertex_rag_service.RetrieveContextsResponse()
+ client.retrieve_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_retrieve_contexts_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_service.RetrieveContextsRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.RetrieveContextsResponse()
+ )
+ await client.retrieve_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_retrieve_contexts_flattened():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_service.RetrieveContextsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.retrieve_contexts(
+ parent="parent_value",
+ query=vertex_rag_service.RagQuery(text="text_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].query
+ mock_val = vertex_rag_service.RagQuery(text="text_value")
+ assert arg == mock_val
+
+
+def test_retrieve_contexts_flattened_error():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.retrieve_contexts(
+ vertex_rag_service.RetrieveContextsRequest(),
+ parent="parent_value",
+ query=vertex_rag_service.RagQuery(text="text_value"),
+ )
+
+
+@pytest.mark.asyncio
+async def test_retrieve_contexts_flattened_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_service.RetrieveContextsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.RetrieveContextsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.retrieve_contexts(
+ parent="parent_value",
+ query=vertex_rag_service.RagQuery(text="text_value"),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].query
+ mock_val = vertex_rag_service.RagQuery(text="text_value")
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_retrieve_contexts_flattened_error_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.retrieve_contexts(
+ vertex_rag_service.RetrieveContextsRequest(),
+ parent="parent_value",
+ query=vertex_rag_service.RagQuery(text="text_value"),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_service.AugmentPromptRequest,
+ dict,
+ ],
+)
+def test_augment_prompt(request_type, transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_service.AugmentPromptResponse()
+ response = client.augment_prompt(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_service.AugmentPromptRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.AugmentPromptResponse)
+
+
+def test_augment_prompt_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_service.AugmentPromptRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.augment_prompt(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_service.AugmentPromptRequest(
+ parent="parent_value",
+ )
+
+
+def test_augment_prompt_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.augment_prompt in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.augment_prompt] = mock_rpc
+ request = {}
+ client.augment_prompt(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.augment_prompt(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_augment_prompt_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.augment_prompt
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.augment_prompt
+ ] = mock_rpc
+
+ request = {}
+ await client.augment_prompt(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.augment_prompt(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_augment_prompt_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_service.AugmentPromptRequest,
+):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.AugmentPromptResponse()
+ )
+ response = await client.augment_prompt(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_service.AugmentPromptRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.AugmentPromptResponse)
+
+
+@pytest.mark.asyncio
+async def test_augment_prompt_async_from_dict():
+ await test_augment_prompt_async(request_type=dict)
+
+
+def test_augment_prompt_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_service.AugmentPromptRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ call.return_value = vertex_rag_service.AugmentPromptResponse()
+ client.augment_prompt(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_augment_prompt_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_service.AugmentPromptRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.AugmentPromptResponse()
+ )
+ await client.augment_prompt(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_augment_prompt_flattened():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_service.AugmentPromptResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.augment_prompt(
+ parent="parent_value",
+ model=vertex_rag_service.AugmentPromptRequest.Model(model="model_value"),
+ vertex_rag_store=tool.VertexRagStore(rag_corpora=["rag_corpora_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].model
+ mock_val = vertex_rag_service.AugmentPromptRequest.Model(model="model_value")
+ assert arg == mock_val
+ assert args[0].vertex_rag_store == tool.VertexRagStore(
+ rag_corpora=["rag_corpora_value"]
+ )
+
+
+def test_augment_prompt_flattened_error():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.augment_prompt(
+ vertex_rag_service.AugmentPromptRequest(),
+ parent="parent_value",
+ model=vertex_rag_service.AugmentPromptRequest.Model(model="model_value"),
+ vertex_rag_store=tool.VertexRagStore(rag_corpora=["rag_corpora_value"]),
+ )
+
+
+@pytest.mark.asyncio
+async def test_augment_prompt_flattened_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_service.AugmentPromptResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.AugmentPromptResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.augment_prompt(
+ parent="parent_value",
+ model=vertex_rag_service.AugmentPromptRequest.Model(model="model_value"),
+ vertex_rag_store=tool.VertexRagStore(rag_corpora=["rag_corpora_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].model
+ mock_val = vertex_rag_service.AugmentPromptRequest.Model(model="model_value")
+ assert arg == mock_val
+ assert args[0].vertex_rag_store == tool.VertexRagStore(
+ rag_corpora=["rag_corpora_value"]
+ )
+
+
+@pytest.mark.asyncio
+async def test_augment_prompt_flattened_error_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.augment_prompt(
+ vertex_rag_service.AugmentPromptRequest(),
+ parent="parent_value",
+ model=vertex_rag_service.AugmentPromptRequest.Model(model="model_value"),
+ vertex_rag_store=tool.VertexRagStore(rag_corpora=["rag_corpora_value"]),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_service.CorroborateContentRequest,
+ dict,
+ ],
+)
+def test_corroborate_content(request_type, transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_service.CorroborateContentResponse(
+ corroboration_score=0.2046,
+ )
+ response = client.corroborate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_service.CorroborateContentRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.CorroborateContentResponse)
+ assert math.isclose(response.corroboration_score, 0.2046, rel_tol=1e-6)
+
+
+def test_corroborate_content_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = vertex_rag_service.CorroborateContentRequest(
+ parent="parent_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.corroborate_content(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vertex_rag_service.CorroborateContentRequest(
+ parent="parent_value",
+ )
+
+
+def test_corroborate_content_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.corroborate_content in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.corroborate_content
+ ] = mock_rpc
+ request = {}
+ client.corroborate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.corroborate_content(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_corroborate_content_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._client._transport.corroborate_content
+ in client._client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.corroborate_content
+ ] = mock_rpc
+
+ request = {}
+ await client.corroborate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ await client.corroborate_content(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_corroborate_content_async(
+ transport: str = "grpc_asyncio",
+ request_type=vertex_rag_service.CorroborateContentRequest,
+):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.CorroborateContentResponse(
+ corroboration_score=0.2046,
+ )
+ )
+ response = await client.corroborate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = vertex_rag_service.CorroborateContentRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.CorroborateContentResponse)
+ assert math.isclose(response.corroboration_score, 0.2046, rel_tol=1e-6)
+
+
+@pytest.mark.asyncio
+async def test_corroborate_content_async_from_dict():
+ await test_corroborate_content_async(request_type=dict)
+
+
+def test_corroborate_content_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_service.CorroborateContentRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ call.return_value = vertex_rag_service.CorroborateContentResponse()
+ client.corroborate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_corroborate_content_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vertex_rag_service.CorroborateContentRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.CorroborateContentResponse()
+ )
+ await client.corroborate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_corroborate_content_flattened():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_service.CorroborateContentResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.corroborate_content(
+ parent="parent_value",
+ content=gca_content.Content(role="role_value"),
+ facts=[vertex_rag_service.Fact(query="query_value")],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].content
+ mock_val = gca_content.Content(role="role_value")
+ assert arg == mock_val
+ arg = args[0].facts
+ mock_val = [vertex_rag_service.Fact(query="query_value")]
+ assert arg == mock_val
+
+
+def test_corroborate_content_flattened_error():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.corroborate_content(
+ vertex_rag_service.CorroborateContentRequest(),
+ parent="parent_value",
+ content=gca_content.Content(role="role_value"),
+ facts=[vertex_rag_service.Fact(query="query_value")],
+ )
+
+
+@pytest.mark.asyncio
+async def test_corroborate_content_flattened_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vertex_rag_service.CorroborateContentResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.CorroborateContentResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.corroborate_content(
+ parent="parent_value",
+ content=gca_content.Content(role="role_value"),
+ facts=[vertex_rag_service.Fact(query="query_value")],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].content
+ mock_val = gca_content.Content(role="role_value")
+ assert arg == mock_val
+ arg = args[0].facts
+ mock_val = [vertex_rag_service.Fact(query="query_value")]
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_corroborate_content_flattened_error_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.corroborate_content(
+ vertex_rag_service.CorroborateContentRequest(),
+ parent="parent_value",
+ content=gca_content.Content(role="role_value"),
+ facts=[vertex_rag_service.Fact(query="query_value")],
+ )
+
+
+def test_retrieve_contexts_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.retrieve_contexts in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.retrieve_contexts
+ ] = mock_rpc
+
+ request = {}
+ client.retrieve_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.retrieve_contexts(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_retrieve_contexts_rest_required_fields(
+ request_type=vertex_rag_service.RetrieveContextsRequest,
+):
+ transport_class = transports.VertexRagServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).retrieve_contexts._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).retrieve_contexts._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.RetrieveContextsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.RetrieveContextsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.retrieve_contexts(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_retrieve_contexts_rest_unset_required_fields():
+ transport = transports.VertexRagServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.retrieve_contexts._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "query",
+ )
+ )
+ )
+
+
+def test_retrieve_contexts_rest_flattened():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.RetrieveContextsResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ query=vertex_rag_service.RagQuery(text="text_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.RetrieveContextsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.retrieve_contexts(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}:retrieveContexts"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_retrieve_contexts_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.retrieve_contexts(
+ vertex_rag_service.RetrieveContextsRequest(),
+ parent="parent_value",
+ query=vertex_rag_service.RagQuery(text="text_value"),
+ )
+
+
+def test_augment_prompt_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.augment_prompt in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.augment_prompt] = mock_rpc
+
+ request = {}
+ client.augment_prompt(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.augment_prompt(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_augment_prompt_rest_required_fields(
+ request_type=vertex_rag_service.AugmentPromptRequest,
+):
+ transport_class = transports.VertexRagServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).augment_prompt._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).augment_prompt._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.AugmentPromptResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.AugmentPromptResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.augment_prompt(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_augment_prompt_rest_unset_required_fields():
+ transport = transports.VertexRagServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.augment_prompt._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("parent",)))
+
+
+def test_augment_prompt_rest_flattened():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.AugmentPromptResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ model=vertex_rag_service.AugmentPromptRequest.Model(model="model_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.AugmentPromptResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.augment_prompt(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}:augmentPrompt"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_augment_prompt_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.augment_prompt(
+ vertex_rag_service.AugmentPromptRequest(),
+ parent="parent_value",
+ model=vertex_rag_service.AugmentPromptRequest.Model(model="model_value"),
+ vertex_rag_store=tool.VertexRagStore(rag_corpora=["rag_corpora_value"]),
+ )
+
+
+def test_corroborate_content_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.corroborate_content in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.corroborate_content
+ ] = mock_rpc
+
+ request = {}
+ client.corroborate_content(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+
+ client.corroborate_content(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_corroborate_content_rest_required_fields(
+ request_type=vertex_rag_service.CorroborateContentRequest,
+):
+ transport_class = transports.VertexRagServiceRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).corroborate_content._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).corroborate_content._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.CorroborateContentResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.CorroborateContentResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.corroborate_content(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_corroborate_content_rest_unset_required_fields():
+ transport = transports.VertexRagServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
+
+ unset_fields = transport.corroborate_content._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("parent",)))
+
+
+def test_corroborate_content_rest_flattened():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.CorroborateContentResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/locations/sample2"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ content=gca_content.Content(role="role_value"),
+ facts=[vertex_rag_service.Fact(query="query_value")],
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.CorroborateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.corroborate_content(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v1beta1/{parent=projects/*/locations/*}:corroborateContent"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_corroborate_content_rest_flattened_error(transport: str = "rest"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.corroborate_content(
+ vertex_rag_service.CorroborateContentRequest(),
+ parent="parent_value",
+ content=gca_content.Content(role="role_value"),
+ facts=[vertex_rag_service.Fact(query="query_value")],
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.VertexRagServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.VertexRagServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = VertexRagServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.VertexRagServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = VertexRagServiceClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = VertexRagServiceClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.VertexRagServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = VertexRagServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.VertexRagServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = VertexRagServiceClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.VertexRagServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.VertexRagServiceGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagServiceGrpcTransport,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ transports.VertexRagServiceRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = VertexRagServiceClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_retrieve_contexts_empty_call_grpc():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ call.return_value = vertex_rag_service.RetrieveContextsResponse()
+ client.retrieve_contexts(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.RetrieveContextsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_augment_prompt_empty_call_grpc():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ call.return_value = vertex_rag_service.AugmentPromptResponse()
+ client.augment_prompt(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.AugmentPromptRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_corroborate_content_empty_call_grpc():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ call.return_value = vertex_rag_service.CorroborateContentResponse()
+ client.corroborate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.CorroborateContentRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = VertexRagServiceAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_retrieve_contexts_empty_call_grpc_asyncio():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.RetrieveContextsResponse()
+ )
+ await client.retrieve_contexts(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.RetrieveContextsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_augment_prompt_empty_call_grpc_asyncio():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.AugmentPromptResponse()
+ )
+ await client.augment_prompt(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.AugmentPromptRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_corroborate_content_empty_call_grpc_asyncio():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ vertex_rag_service.CorroborateContentResponse(
+ corroboration_score=0.2046,
+ )
+ )
+ await client.corroborate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.CorroborateContentRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = VertexRagServiceClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_retrieve_contexts_rest_bad_request(
+ request_type=vertex_rag_service.RetrieveContextsRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.retrieve_contexts(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_service.RetrieveContextsRequest,
+ dict,
+ ],
+)
+def test_retrieve_contexts_rest_call_success(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.RetrieveContextsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.RetrieveContextsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.retrieve_contexts(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.RetrieveContextsResponse)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_retrieve_contexts_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagServiceRestInterceptor(),
+ )
+ client = VertexRagServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.VertexRagServiceRestInterceptor, "post_retrieve_contexts"
+ ) as post, mock.patch.object(
+ transports.VertexRagServiceRestInterceptor, "pre_retrieve_contexts"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_service.RetrieveContextsRequest.pb(
+ vertex_rag_service.RetrieveContextsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_service.RetrieveContextsResponse.to_json(
+ vertex_rag_service.RetrieveContextsResponse()
+ )
+ req.return_value.content = return_value
+
+ request = vertex_rag_service.RetrieveContextsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_service.RetrieveContextsResponse()
+
+ client.retrieve_contexts(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_augment_prompt_rest_bad_request(
+ request_type=vertex_rag_service.AugmentPromptRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.augment_prompt(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_service.AugmentPromptRequest,
+ dict,
+ ],
+)
+def test_augment_prompt_rest_call_success(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.AugmentPromptResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.AugmentPromptResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.augment_prompt(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.AugmentPromptResponse)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_augment_prompt_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagServiceRestInterceptor(),
+ )
+ client = VertexRagServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.VertexRagServiceRestInterceptor, "post_augment_prompt"
+ ) as post, mock.patch.object(
+ transports.VertexRagServiceRestInterceptor, "pre_augment_prompt"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_service.AugmentPromptRequest.pb(
+ vertex_rag_service.AugmentPromptRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_service.AugmentPromptResponse.to_json(
+ vertex_rag_service.AugmentPromptResponse()
+ )
+ req.return_value.content = return_value
+
+ request = vertex_rag_service.AugmentPromptRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_service.AugmentPromptResponse()
+
+ client.augment_prompt(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_corroborate_content_rest_bad_request(
+ request_type=vertex_rag_service.CorroborateContentRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.corroborate_content(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_service.CorroborateContentRequest,
+ dict,
+ ],
+)
+def test_corroborate_content_rest_call_success(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.CorroborateContentResponse(
+ corroboration_score=0.2046,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.CorroborateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.corroborate_content(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.CorroborateContentResponse)
+ assert math.isclose(response.corroboration_score, 0.2046, rel_tol=1e-6)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_corroborate_content_rest_interceptors(null_interceptor):
+ transport = transports.VertexRagServiceRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.VertexRagServiceRestInterceptor(),
+ )
+ client = VertexRagServiceClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.VertexRagServiceRestInterceptor, "post_corroborate_content"
+ ) as post, mock.patch.object(
+ transports.VertexRagServiceRestInterceptor, "pre_corroborate_content"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_service.CorroborateContentRequest.pb(
+ vertex_rag_service.CorroborateContentRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_service.CorroborateContentResponse.to_json(
+ vertex_rag_service.CorroborateContentResponse()
+ )
+ req.return_value.content = return_value
+
+ request = vertex_rag_service.CorroborateContentRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_service.CorroborateContentResponse()
+
+ client.corroborate_content(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_location_rest_bad_request(request_type=locations_pb2.GetLocationRequest):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_location(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+def test_get_location_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_list_locations_rest_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_locations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+def test_list_locations_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_get_iam_policy_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.set_iam_policy(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+def test_set_iam_policy_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.test_iam_permissions(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+def test_test_iam_permissions_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+def test_cancel_operation_rest_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.cancel_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+def test_cancel_operation_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_rest_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+def test_delete_operation_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_get_operation_rest_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+def test_get_operation_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_list_operations_rest_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_operations(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+def test_list_operations_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_wait_operation_rest_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.wait_operation(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+def test_wait_operation_rest(request_type):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_retrieve_contexts_empty_call_rest():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ client.retrieve_contexts(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.RetrieveContextsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_augment_prompt_empty_call_rest():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ client.augment_prompt(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.AugmentPromptRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_corroborate_content_empty_call_rest():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ client.corroborate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.CorroborateContentRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = VertexRagServiceAsyncClient.get_transport_class("rest_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "rest_asyncio"
+
+
+@pytest.mark.asyncio
+async def test_retrieve_contexts_rest_asyncio_bad_request(
+ request_type=vertex_rag_service.RetrieveContextsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.retrieve_contexts(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_service.RetrieveContextsRequest,
+ dict,
+ ],
+)
+async def test_retrieve_contexts_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.RetrieveContextsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.RetrieveContextsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.retrieve_contexts(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.RetrieveContextsResponse)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_retrieve_contexts_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagServiceRestInterceptor(),
+ )
+ client = VertexRagServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncVertexRagServiceRestInterceptor, "post_retrieve_contexts"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagServiceRestInterceptor, "pre_retrieve_contexts"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_service.RetrieveContextsRequest.pb(
+ vertex_rag_service.RetrieveContextsRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_service.RetrieveContextsResponse.to_json(
+ vertex_rag_service.RetrieveContextsResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_service.RetrieveContextsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_service.RetrieveContextsResponse()
+
+ await client.retrieve_contexts(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_augment_prompt_rest_asyncio_bad_request(
+ request_type=vertex_rag_service.AugmentPromptRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.augment_prompt(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_service.AugmentPromptRequest,
+ dict,
+ ],
+)
+async def test_augment_prompt_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.AugmentPromptResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.AugmentPromptResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.augment_prompt(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.AugmentPromptResponse)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_augment_prompt_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagServiceRestInterceptor(),
+ )
+ client = VertexRagServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncVertexRagServiceRestInterceptor, "post_augment_prompt"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagServiceRestInterceptor, "pre_augment_prompt"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_service.AugmentPromptRequest.pb(
+ vertex_rag_service.AugmentPromptRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_service.AugmentPromptResponse.to_json(
+ vertex_rag_service.AugmentPromptResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_service.AugmentPromptRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_service.AugmentPromptResponse()
+
+ await client.augment_prompt(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_corroborate_content_rest_asyncio_bad_request(
+ request_type=vertex_rag_service.CorroborateContentRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.corroborate_content(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ vertex_rag_service.CorroborateContentRequest,
+ dict,
+ ],
+)
+async def test_corroborate_content_rest_asyncio_call_success(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = vertex_rag_service.CorroborateContentResponse(
+ corroboration_score=0.2046,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = vertex_rag_service.CorroborateContentResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = await client.corroborate_content(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vertex_rag_service.CorroborateContentResponse)
+ assert math.isclose(response.corroboration_score, 0.2046, rel_tol=1e-6)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("null_interceptor", [True, False])
+async def test_corroborate_content_rest_asyncio_interceptors(null_interceptor):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ transport = transports.AsyncVertexRagServiceRestTransport(
+ credentials=async_anonymous_credentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.AsyncVertexRagServiceRestInterceptor(),
+ )
+ client = VertexRagServiceAsyncClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.AsyncVertexRagServiceRestInterceptor, "post_corroborate_content"
+ ) as post, mock.patch.object(
+ transports.AsyncVertexRagServiceRestInterceptor, "pre_corroborate_content"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = vertex_rag_service.CorroborateContentRequest.pb(
+ vertex_rag_service.CorroborateContentRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = vertex_rag_service.CorroborateContentResponse.to_json(
+ vertex_rag_service.CorroborateContentResponse()
+ )
+ req.return_value.read = mock.AsyncMock(return_value=return_value)
+
+ request = vertex_rag_service.CorroborateContentRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = vertex_rag_service.CorroborateContentResponse()
+
+ await client.corroborate_content(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_get_location_rest_asyncio_bad_request(
+ request_type=locations_pb2.GetLocationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_location(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.GetLocationRequest,
+ dict,
+ ],
+)
+async def test_get_location_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.Location()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_location(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_rest_asyncio_bad_request(
+ request_type=locations_pb2.ListLocationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict({"name": "projects/sample1"}, request)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_locations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ locations_pb2.ListLocationsRequest,
+ dict,
+ ],
+)
+async def test_list_locations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = locations_pb2.ListLocationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_locations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_get_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.set_iam_policy(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+ ],
+)
+async def test_set_iam_policy_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.set_iam_policy(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_rest_asyncio_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"resource": "projects/sample1/locations/sample2/featurestores/sample3"},
+ request,
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.test_iam_permissions(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+ ],
+)
+async def test_test_iam_permissions_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {
+ "resource": "projects/sample1/locations/sample2/featurestores/sample3"
+ }
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.CancelOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.cancel_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.CancelOperationRequest,
+ dict,
+ ],
+)
+async def test_cancel_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.cancel_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.DeleteOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.delete_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.DeleteOperationRequest,
+ dict,
+ ],
+)
+async def test_delete_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = "{}"
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.delete_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_get_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.GetOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.get_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.GetOperationRequest,
+ dict,
+ ],
+)
+async def test_get_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.get_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_rest_asyncio_bad_request(
+ request_type=operations_pb2.ListOperationsRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.list_operations(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.ListOperationsRequest,
+ dict,
+ ],
+)
+async def test_list_operations_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.ListOperationsResponse()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.list_operations(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_rest_asyncio_bad_request(
+ request_type=operations_pb2.WaitOperationRequest,
+):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+ request = request_type()
+ request = json_format.ParseDict(
+ {"name": "projects/sample1/locations/sample2/operations/sample3"}, request
+ )
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.read = mock.AsyncMock(return_value=b"{}")
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ await client.wait_operation(request)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ operations_pb2.WaitOperationRequest,
+ dict,
+ ],
+)
+async def test_wait_operation_rest_asyncio(request_type):
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"}
+ request = request_type(**request_init)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(AsyncAuthorizedSession, "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation()
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.read = mock.AsyncMock(
+ return_value=json_return_value.encode("UTF-8")
+ )
+
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = await client.wait_operation(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_initialize_client_w_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_retrieve_contexts_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.retrieve_contexts), "__call__"
+ ) as call:
+ await client.retrieve_contexts(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.RetrieveContextsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_augment_prompt_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.augment_prompt), "__call__") as call:
+ await client.augment_prompt(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.AugmentPromptRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_corroborate_content_empty_call_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.corroborate_content), "__call__"
+ ) as call:
+ await client.corroborate_content(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = vertex_rag_service.CorroborateContentRequest()
+
+ assert args[0] == request_msg
+
+
+def test_unsupported_parameter_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with pytest.raises(core_exceptions.AsyncRestUnsupportedParameterError, match="google.api_core.client_options.ClientOptions.quota_project_id") as exc: # type: ignore
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="rest_asyncio",
+ client_options=options,
+ )
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.VertexRagServiceGrpcTransport,
+ )
+
+
+def test_vertex_rag_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.VertexRagServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_vertex_rag_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.vertex_rag_service.transports.VertexRagServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.VertexRagServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "retrieve_contexts",
+ "augment_prompt",
+ "corroborate_content",
+ "set_iam_policy",
+ "get_iam_policy",
+ "test_iam_permissions",
+ "get_location",
+ "list_locations",
+ "get_operation",
+ "wait_operation",
+ "cancel_operation",
+ "delete_operation",
+ "list_operations",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ "kind",
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_vertex_rag_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.vertex_rag_service.transports.VertexRagServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.VertexRagServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_vertex_rag_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.aiplatform_v1beta1.services.vertex_rag_service.transports.VertexRagServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.VertexRagServiceTransport()
+ adc.assert_called_once()
+
+
+def test_vertex_rag_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ VertexRagServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagServiceGrpcTransport,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_vertex_rag_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagServiceGrpcTransport,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ transports.VertexRagServiceRestTransport,
+ ],
+)
+def test_vertex_rag_service_transport_auth_gdch_credentials(transport_class):
+ host = "https://language.com"
+ api_audience_tests = [None, "https://language2.com"]
+ api_audience_expect = [host, "https://language2.com"]
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(
+ return_value=gdch_mock
+ )
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.VertexRagServiceGrpcTransport, grpc_helpers),
+ (transports.VertexRagServiceGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_vertex_rag_service_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagServiceGrpcTransport,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_vertex_rag_service_grpc_transport_client_cert_source_for_mtls(transport_class):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_vertex_rag_service_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.VertexRagServiceRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_vertex_rag_service_host_no_port(transport_name):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+ ],
+)
+def test_vertex_rag_service_host_with_port(transport_name):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="aiplatform.googleapis.com:8000"
+ ),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ "aiplatform.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://aiplatform.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_vertex_rag_service_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = VertexRagServiceClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = VertexRagServiceClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.retrieve_contexts._session
+ session2 = client2.transport.retrieve_contexts._session
+ assert session1 != session2
+ session1 = client1.transport.augment_prompt._session
+ session2 = client2.transport.augment_prompt._session
+ assert session1 != session2
+ session1 = client1.transport.corroborate_content._session
+ session2 = client2.transport.corroborate_content._session
+ assert session1 != session2
+
+
+def test_vertex_rag_service_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.VertexRagServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_vertex_rag_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.VertexRagServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagServiceGrpcTransport,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_vertex_rag_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.VertexRagServiceGrpcTransport,
+ transports.VertexRagServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_vertex_rag_service_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_rag_corpus_path():
+ project = "squid"
+ location = "clam"
+ rag_corpus = "whelk"
+ expected = "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}".format(
+ project=project,
+ location=location,
+ rag_corpus=rag_corpus,
+ )
+ actual = VertexRagServiceClient.rag_corpus_path(project, location, rag_corpus)
+ assert expected == actual
+
+
+def test_parse_rag_corpus_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "rag_corpus": "nudibranch",
+ }
+ path = VertexRagServiceClient.rag_corpus_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagServiceClient.parse_rag_corpus_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = VertexRagServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = VertexRagServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "winkle"
+ expected = "folders/{folder}".format(
+ folder=folder,
+ )
+ actual = VertexRagServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = VertexRagServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "scallop"
+ expected = "organizations/{organization}".format(
+ organization=organization,
+ )
+ actual = VertexRagServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = VertexRagServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "squid"
+ expected = "projects/{project}".format(
+ project=project,
+ )
+ actual = VertexRagServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = VertexRagServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+ actual = VertexRagServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = VertexRagServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VertexRagServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.VertexRagServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.VertexRagServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = VertexRagServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+def test_delete_operation(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_operation_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_delete_operation_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_operation_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_cancel_operation_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_wait_operation_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_get_operation_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_get_operation_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+
+def test_list_operations_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_operations_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+
+def test_list_locations_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations",
+ ) in kw["metadata"]
+
+
+def test_list_locations_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+
+def test_get_location_field_headers():
+ client = VertexRagServiceClient(credentials=ga_credentials.AnonymousCredentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = VertexRagServiceAsyncClient(credentials=async_anonymous_credentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=locations/abc",
+ ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ response = await client.get_iam_policy(
+ request={
+ "resource": "resource_value",
+ "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions(transport: str = "grpc"):
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "resource=resource/value",
+ ) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ response = client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ response = await client.test_iam_permissions(
+ request={
+ "resource": "resource_value",
+ "permissions": ["permissions_value"],
+ }
+ )
+ call.assert_called()
+
+
+def test_transport_close_grpc():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_grpc_asyncio():
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close_rest():
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_transport_close_rest_asyncio():
+ if not HAS_ASYNC_REST_EXTRA:
+ pytest.skip(
+ "the library must be installed with the `async_rest` extra to test this feature."
+ )
+ client = VertexRagServiceAsyncClient(
+ credentials=async_anonymous_credentials(), transport="rest_asyncio"
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "_session")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "rest",
+ "grpc",
+ ]
+ for transport in transports:
+ client = VertexRagServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (VertexRagServiceClient, transports.VertexRagServiceGrpcTransport),
+ (VertexRagServiceAsyncClient, transports.VertexRagServiceGrpcAsyncIOTransport),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/definition_v1/__init__.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/definition_v1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6cf068242c282e78ed205a7a66af26b6f1928f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/definition_v1/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/definition_v1beta1/__init__.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/definition_v1beta1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6cf068242c282e78ed205a7a66af26b6f1928f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/definition_v1beta1/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/instance_v1/__init__.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/instance_v1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6cf068242c282e78ed205a7a66af26b6f1928f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/instance_v1/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/instance_v1beta1/__init__.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/instance_v1beta1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6cf068242c282e78ed205a7a66af26b6f1928f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/instance_v1beta1/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/params_v1/__init__.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/params_v1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6cf068242c282e78ed205a7a66af26b6f1928f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/params_v1/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/params_v1beta1/__init__.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/params_v1beta1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6cf068242c282e78ed205a7a66af26b6f1928f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/params_v1beta1/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/prediction_v1/__init__.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/prediction_v1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6cf068242c282e78ed205a7a66af26b6f1928f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/prediction_v1/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/gapic/prediction_v1beta1/__init__.py b/testbed/googleapis__python-aiplatform/tests/unit/gapic/prediction_v1beta1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6cf068242c282e78ed205a7a66af26b6f1928f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/gapic/prediction_v1beta1/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f93b89b825386baeee6379d696d52f0fd67ebf6
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py
@@ -0,0 +1,300 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import importlib
+from typing import Optional
+from unittest import mock
+
+from google import auth
+import vertexai
+from google.cloud.aiplatform import initializer
+from vertexai.preview import reasoning_engines
+from vertexai.preview.generative_models import grounding
+from vertexai.generative_models import Tool
+from vertexai.reasoning_engines import _utils
+import pytest
+
+
+from langchain_core import prompts
+from langchain.load import dump as langchain_load_dump
+from langchain.agents.format_scratchpad import format_to_openai_function_messages
+from langchain.tools.base import StructuredTool
+
+
+_DEFAULT_PLACE_TOOL_ACTIVITY = "museums"
+_DEFAULT_PLACE_TOOL_PAGE_SIZE = 3
+_DEFAULT_PLACE_PHOTO_MAXWIDTH = 400
+_TEST_LOCATION = "us-central1"
+_TEST_PROJECT = "test-project"
+_TEST_MODEL = "gemini-1.0-pro"
+_TEST_SYSTEM_INSTRUCTION = "You are a helpful bot."
+
+
+def place_tool_query(
+ city: str,
+ activity: str = _DEFAULT_PLACE_TOOL_ACTIVITY,
+ page_size: int = _DEFAULT_PLACE_TOOL_PAGE_SIZE,
+):
+ """Searches the city for recommendations on the activity."""
+ return {"city": city, "activity": activity, "page_size": page_size}
+
+
+def place_photo_query(
+ photo_reference: str,
+ maxwidth: int = _DEFAULT_PLACE_PHOTO_MAXWIDTH,
+ maxheight: Optional[int] = None,
+):
+ """Returns the photo for a given reference."""
+ result = {"photo_reference": photo_reference, "maxwidth": maxwidth}
+ if maxheight:
+ result["maxheight"] = maxheight
+ return result
+
+
+@pytest.fixture(scope="module")
+def google_auth_mock():
+ with mock.patch.object(auth, "default") as google_auth_mock:
+ credentials_mock = mock.Mock()
+ credentials_mock.with_quota_project.return_value = None
+ google_auth_mock.return_value = (
+ credentials_mock,
+ _TEST_PROJECT,
+ )
+ yield google_auth_mock
+
+
+@pytest.fixture
+def vertexai_init_mock():
+ with mock.patch.object(vertexai, "init") as vertexai_init_mock:
+ yield vertexai_init_mock
+
+
+@pytest.fixture
+def langchain_dump_mock():
+ with mock.patch.object(langchain_load_dump, "dumpd") as langchain_dump_mock:
+ yield langchain_dump_mock
+
+
+@pytest.fixture
+def cloud_trace_exporter_mock():
+ with mock.patch.object(
+ _utils,
+ "_import_cloud_trace_exporter_or_warn",
+ ) as cloud_trace_exporter_mock:
+ yield cloud_trace_exporter_mock
+
+
+@pytest.fixture
+def tracer_provider_mock():
+ with mock.patch("opentelemetry.sdk.trace.TracerProvider") as tracer_provider_mock:
+ yield tracer_provider_mock
+
+
+@pytest.fixture
+def simple_span_processor_mock():
+ with mock.patch(
+ "opentelemetry.sdk.trace.export.SimpleSpanProcessor"
+ ) as simple_span_processor_mock:
+ yield simple_span_processor_mock
+
+
+@pytest.fixture
+def langchain_instrumentor_mock():
+ with mock.patch.object(
+ _utils,
+ "_import_openinference_langchain_or_warn",
+ ) as langchain_instrumentor_mock:
+ yield langchain_instrumentor_mock
+
+
+@pytest.fixture
+def langchain_instrumentor_none_mock():
+ with mock.patch.object(
+ _utils,
+ "_import_openinference_langchain_or_warn",
+ ) as langchain_instrumentor_mock:
+ langchain_instrumentor_mock.return_value = None
+ yield langchain_instrumentor_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestLangchainAgent:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(vertexai)
+ vertexai.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+ self.prompt = {
+ "input": lambda x: x["input"],
+ "agent_scratchpad": (
+ lambda x: format_to_openai_function_messages(x["intermediate_steps"])
+ ),
+ } | prompts.ChatPromptTemplate.from_messages(
+ [
+ ("user", "{input}"),
+ prompts.MessagesPlaceholder(variable_name="agent_scratchpad"),
+ ]
+ )
+ self.output_parser = mock.Mock()
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_initialization(self):
+ agent = reasoning_engines.LangchainAgent(model=_TEST_MODEL)
+ assert agent._model_name == _TEST_MODEL
+ assert agent._project == _TEST_PROJECT
+ assert agent._location == _TEST_LOCATION
+ assert agent._runnable is None
+
+ def test_initialization_with_tools(self):
+ tools = [
+ place_tool_query,
+ StructuredTool.from_function(place_photo_query),
+ Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval()),
+ ]
+ agent = reasoning_engines.LangchainAgent(
+ model=_TEST_MODEL,
+ system_instruction=_TEST_SYSTEM_INSTRUCTION,
+ tools=tools,
+ model_builder=lambda **kwargs: kwargs,
+ runnable_builder=lambda **kwargs: kwargs,
+ )
+ for tool, agent_tool in zip(tools, agent._tools):
+ assert isinstance(agent_tool, type(tool))
+ assert agent._runnable is None
+ agent.set_up()
+ assert agent._runnable is not None
+
+ def test_set_up(self):
+ agent = reasoning_engines.LangchainAgent(
+ model=_TEST_MODEL,
+ prompt=self.prompt,
+ output_parser=self.output_parser,
+ model_builder=lambda **kwargs: kwargs,
+ runnable_builder=lambda **kwargs: kwargs,
+ )
+ assert agent._runnable is None
+ agent.set_up()
+ assert agent._runnable is not None
+
+ def test_clone(self):
+ agent = reasoning_engines.LangchainAgent(
+ model=_TEST_MODEL,
+ prompt=self.prompt,
+ output_parser=self.output_parser,
+ model_builder=lambda **kwargs: kwargs,
+ runnable_builder=lambda **kwargs: kwargs,
+ )
+ agent.set_up()
+ assert agent._runnable is not None
+ agent_clone = agent.clone()
+ assert agent._runnable is not None
+ assert agent_clone._runnable is None
+ agent_clone.set_up()
+ assert agent_clone._runnable is not None
+
+ def test_query(self, langchain_dump_mock):
+ agent = reasoning_engines.LangchainAgent(
+ model=_TEST_MODEL,
+ prompt=self.prompt,
+ output_parser=self.output_parser,
+ )
+ agent._runnable = mock.Mock()
+ mocks = mock.Mock()
+ mocks.attach_mock(mock=agent._runnable, attribute="invoke")
+ agent.query(input="test query")
+ mocks.assert_has_calls(
+ [mock.call.invoke.invoke(input={"input": "test query"}, config=None)]
+ )
+
+ def test_stream_query(self, langchain_dump_mock):
+ agent = reasoning_engines.LangchainAgent(model=_TEST_MODEL)
+ agent._runnable = mock.Mock()
+ agent._runnable.stream.return_value = []
+ list(agent.stream_query(input="test stream query"))
+ agent._runnable.stream.assert_called_once_with(
+ input={"input": "test stream query"},
+ config=None,
+ )
+
+ @pytest.mark.usefixtures("caplog")
+ def test_enable_tracing(
+ self,
+ caplog,
+ cloud_trace_exporter_mock,
+ tracer_provider_mock,
+ simple_span_processor_mock,
+ langchain_instrumentor_mock,
+ ):
+ agent = reasoning_engines.LangchainAgent(
+ model=_TEST_MODEL,
+ prompt=self.prompt,
+ output_parser=self.output_parser,
+ enable_tracing=True,
+ )
+ assert agent._instrumentor is None
+ # TODO(b/384730642): Re-enable this test once the parent issue is fixed.
+ # agent.set_up()
+ # assert agent._instrumentor is not None
+ # assert (
+ # "enable_tracing=True but proceeding with tracing disabled"
+ # not in caplog.text
+ # )
+
+ @pytest.mark.usefixtures("caplog")
+ def test_enable_tracing_warning(self, caplog, langchain_instrumentor_none_mock):
+ agent = reasoning_engines.LangchainAgent(
+ model=_TEST_MODEL,
+ prompt=self.prompt,
+ output_parser=self.output_parser,
+ enable_tracing=True,
+ )
+ assert agent._instrumentor is None
+ # TODO(b/384730642): Re-enable this test once the parent issue is fixed.
+ # agent.set_up()
+ # assert "enable_tracing=True but proceeding with tracing disabled" in caplog.text
+
+
+def _return_input_no_typing(input_):
+ """Returns input back to user."""
+ return input_
+
+
+class TestConvertToolsOrRaiseErrors:
+ def test_raise_untyped_input_args(self, vertexai_init_mock):
+ with pytest.raises(TypeError, match=r"has untyped input_arg"):
+ reasoning_engines.LangchainAgent(
+ model=_TEST_MODEL,
+ tools=[_return_input_no_typing],
+ )
+
+
+class TestSystemInstructionAndPromptRaisesErrors:
+ def test_raise_both_system_instruction_and_prompt_error(self, vertexai_init_mock):
+ with pytest.raises(
+ ValueError,
+ match=r"Only one of `prompt` or `system_instruction` should be specified.",
+ ):
+ reasoning_engines.LangchainAgent(
+ model=_TEST_MODEL,
+ system_instruction=_TEST_SYSTEM_INSTRUCTION,
+ prompt=prompts.ChatPromptTemplate.from_messages(
+ [
+ ("user", "{input}"),
+ ]
+ ),
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/conftest.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bebe10e1f4ce8d8b6fd5dce2f23acdbd17761e1
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/conftest.py
@@ -0,0 +1,131 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google import auth
+from google.api_core import exceptions
+from google.api_core import operation as ga_operation
+from google.auth import credentials as auth_credentials
+from google.cloud import resourcemanager
+from google.cloud.aiplatform import vertex_ray
+from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import (
+ PersistentResourceServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import (
+ PersistentResource,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import (
+ ResourceRuntime,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource_service import (
+ DeletePersistentResourceRequest,
+)
+import test_constants as tc
+import mock
+import pytest
+
+
+# -*- coding: utf-8 -*-
+
+# STOPPING
+_TEST_RESPONSE_STOPPING = PersistentResource()
+_TEST_RESPONSE_STOPPING.name = tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+resource_runtime = ResourceRuntime()
+_TEST_RESPONSE_STOPPING.resource_runtime = resource_runtime
+_TEST_RESPONSE_STOPPING.state = "STOPPING"
+
+# ERROR
+_TEST_RESPONSE_ERROR = PersistentResource()
+_TEST_RESPONSE_ERROR.name = tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+resource_runtime = ResourceRuntime()
+_TEST_RESPONSE_ERROR.resource_runtime = resource_runtime
+_TEST_RESPONSE_ERROR.state = "ERROR"
+
+
+@pytest.fixture(scope="module")
+def google_auth_mock():
+ with mock.patch.object(auth, "default") as auth_mock:
+ auth_mock.return_value = (
+ auth_credentials.AnonymousCredentials(),
+ tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ )
+ yield auth_mock
+
+
+@pytest.fixture
+def get_project_number_mock():
+ with mock.patch.object(
+ resourcemanager.ProjectsClient, "get_project"
+ ) as get_project_number_mock:
+ test_project = resourcemanager.Project(
+ project_id=tc.ProjectConstants.TEST_GCP_PROJECT_ID
+ )
+ test_project.name = f"projects/{tc.ProjectConstants.TEST_GCP_PROJECT_NUMBER}"
+ get_project_number_mock.return_value = test_project
+ yield get_project_number_mock
+
+
+@pytest.fixture
+def api_client_mock():
+ yield mock.create_autospec(
+ PersistentResourceServiceClient, spec_set=True, instance=True
+ )
+
+
+@pytest.fixture
+def persistent_client_mock(api_client_mock):
+ with mock.patch.object(
+ vertex_ray.util._gapic_utils,
+ "create_persistent_resource_client",
+ ) as persistent_client_mock:
+
+ # get_persistent_resource
+ api_client_mock.get_persistent_resource.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL
+ )
+ # delete_persistent_resource
+ delete_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_persistent_resource_lro_mock.result.return_value = (
+ DeletePersistentResourceRequest()
+ )
+ api_client_mock.delete_persistent_resource.return_value = (
+ delete_persistent_resource_lro_mock
+ )
+
+ persistent_client_mock.return_value = api_client_mock
+ yield persistent_client_mock
+
+
+@pytest.fixture
+def persistent_client_stopping_mock(api_client_mock):
+ with mock.patch.object(
+ vertex_ray.util._gapic_utils, "create_persistent_resource_client"
+ ) as persistent_client_stopping_mock:
+ api_client_mock.get_persistent_resource.return_value = _TEST_RESPONSE_STOPPING
+ persistent_client_stopping_mock.return_value = api_client_mock
+ yield persistent_client_stopping_mock
+
+
+@pytest.fixture
+def persistent_client_error_mock(api_client_mock):
+ with mock.patch.object(
+ vertex_ray.util._gapic_utils, "create_persistent_resource_client"
+ ) as persistent_client_error_mock:
+ # get_persistent_resource
+ api_client_mock.get_persistent_resource.return_value = _TEST_RESPONSE_ERROR
+ # delete_persistent_resource
+ api_client_mock.delete_persistent_resource.side_effect = exceptions.NotFound
+
+ persistent_client_error_mock.return_value = api_client_mock
+ yield persistent_client_error_mock
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_bigquery.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_bigquery.py
new file mode 100644
index 0000000000000000000000000000000000000000..d111bc8051fea76055cee7a5f9c123fd0f78ad9f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_bigquery.py
@@ -0,0 +1,293 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import importlib
+
+from google.api_core import exceptions
+from google.api_core import operation
+from google.cloud import bigquery
+from google.cloud import bigquery_storage
+from google.cloud import aiplatform
+from google.cloud.aiplatform.vertex_ray import bigquery_datasource
+from google.cloud.aiplatform.vertex_ray.bigquery_datasink import (
+ _BigQueryDatasink,
+)
+import test_constants as tc
+from google.cloud.bigquery import job
+from google.cloud.bigquery_storage_v1.types import stream as gcbqs_stream
+import mock
+import pytest
+import pyarrow as pa
+import ray
+
+
+_TEST_BQ_DATASET_ID = "mockdataset"
+_TEST_BQ_TABLE_ID = "mocktable"
+_TEST_BQ_DATASET = _TEST_BQ_DATASET_ID + "." + _TEST_BQ_TABLE_ID
+_TEST_BQ_TEMP_DESTINATION = (
+ tc.ProjectConstants.TEST_GCP_PROJECT_ID + ".tempdataset.temptable"
+)
+_TEST_DISPLAY_NAME = "display_name"
+
+
+@pytest.fixture(autouse=True)
+def bq_client_full_mock(monkeypatch):
+ client_mock = mock.create_autospec(bigquery.Client)
+ client_mock.return_value = client_mock
+
+ def bq_get_dataset_mock(dataset_id):
+ if dataset_id != _TEST_BQ_DATASET_ID:
+ raise exceptions.NotFound(
+ "[Ray on Vertex AI]: Dataset {} is not found. Please ensure that it"
+ " exists.".format(_TEST_BQ_DATASET)
+ )
+
+ def bq_get_table_mock(table_id):
+ if table_id != _TEST_BQ_DATASET:
+ raise exceptions.NotFound(
+ "[Ray on Vertex AI]: Table {} is not found. Please ensure that it"
+ " exists.".format(_TEST_BQ_DATASET)
+ )
+
+ def bq_create_dataset_mock(dataset_id, **kwargs):
+ if dataset_id == "existingdataset":
+ raise exceptions.Conflict("Dataset already exists")
+ return mock.Mock(operation.Operation)
+
+ def bq_delete_table_mock(table, **kwargs):
+ return None
+
+ def bq_query_mock(query):
+ fake_job_ref = job._JobReference(
+ "fake_job_id",
+ tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ "us-central1",
+ )
+ fake_query_job = job.QueryJob(fake_job_ref, query, None)
+ try:
+ fake_query_job.configuration.destination = _TEST_BQ_TEMP_DESTINATION
+ except AttributeError:
+ fake_query_job._configuration.destination = _TEST_BQ_TEMP_DESTINATION
+ return fake_query_job
+
+ client_mock.get_dataset = bq_get_dataset_mock
+ client_mock.get_table = bq_get_table_mock
+ client_mock.create_dataset = bq_create_dataset_mock
+ client_mock.delete_table = bq_delete_table_mock
+ client_mock.query = bq_query_mock
+
+ monkeypatch.setattr(bigquery, "Client", client_mock)
+ client_mock.reset_mock()
+ return client_mock
+
+
+@pytest.fixture(autouse=True)
+def bqs_client_full_mock(monkeypatch):
+ client_mock = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ client_mock.return_value = client_mock
+
+ def bqs_create_read_session(max_stream_count=0, **kwargs):
+ read_session_proto = gcbqs_stream.ReadSession()
+ read_session_proto.streams = [
+ gcbqs_stream.ReadStream() for _ in range(max_stream_count)
+ ]
+ return read_session_proto
+
+ client_mock.create_read_session = bqs_create_read_session
+
+ monkeypatch.setattr(bigquery_storage, "BigQueryReadClient", client_mock)
+ client_mock.reset_mock()
+ return client_mock
+
+
+@pytest.fixture
+def bq_query_result_mock():
+ with mock.patch.object(bigquery.job.QueryJob, "result") as query_result_mock:
+ yield query_result_mock
+
+
+@pytest.fixture
+def bq_query_result_mock_fail():
+ with mock.patch.object(bigquery.job.QueryJob, "result") as query_result_mock_fail:
+ query_result_mock_fail.side_effect = exceptions.BadRequest("400 Syntax error")
+ yield query_result_mock_fail
+
+
+@pytest.fixture
+def ray_remote_function_mock():
+ with mock.patch.object(ray.remote_function.RemoteFunction, "_remote") as remote_fn:
+ remote_fn.return_value = 1
+ yield remote_fn
+
+
+@pytest.fixture
+def ray_get_mock():
+ with mock.patch.object(ray, "get") as ray_get:
+ ray_get.return_value = None
+ yield ray_get
+
+
+class TestReadBigQuery:
+ """Tests for BigQuery Read."""
+
+ def setup_method(self):
+ importlib.reload(aiplatform.initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize(
+ "parallelism",
+ [1, 2, 3, 4, 10, 100],
+ )
+ def test_create_reader(self, parallelism):
+ bq_ds = bigquery_datasource._BigQueryDatasource(
+ project_id=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ dataset=_TEST_BQ_DATASET,
+ )
+ read_tasks_list = bq_ds.get_read_tasks(parallelism)
+ assert len(read_tasks_list) == parallelism
+
+ @pytest.mark.parametrize(
+ "parallelism",
+ [1, 2, 3, 4, 10, 100],
+ )
+ def test_create_reader_initialized(self, parallelism):
+ """If initialized, create_reader doesn't need to specify project_id."""
+ aiplatform.init(
+ project=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ staging_bucket=tc.ProjectConstants.TEST_ARTIFACT_URI,
+ )
+ bq_ds = bigquery_datasource._BigQueryDatasource(
+ dataset=_TEST_BQ_DATASET,
+ )
+ read_tasks_list = bq_ds.get_read_tasks(parallelism)
+ assert len(read_tasks_list) == parallelism
+
+ @pytest.mark.parametrize(
+ "parallelism",
+ [1, 2, 3, 4, 10, 100],
+ )
+ def test_create_reader_query(self, parallelism, bq_query_result_mock):
+ bq_ds = bigquery_datasource._BigQueryDatasource(
+ project_id=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ query="SELECT * FROM mockdataset.mocktable",
+ )
+ read_tasks_list = bq_ds.get_read_tasks(parallelism)
+ bq_query_result_mock.assert_called_once()
+ assert len(read_tasks_list) == parallelism
+
+ @pytest.mark.parametrize(
+ "parallelism",
+ [1, 2, 3, 4, 10, 100],
+ )
+ def test_create_reader_query_bad_request(
+ self,
+ parallelism,
+ bq_query_result_mock_fail,
+ ):
+ bq_ds = bigquery_datasource._BigQueryDatasource(
+ project_id=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ query="SELECT * FROM mockdataset.mocktable",
+ )
+ with pytest.raises(exceptions.BadRequest):
+ bq_ds.get_read_tasks(parallelism)
+ bq_query_result_mock_fail.assert_called()
+
+ def test_dataset_query_kwargs_provided(self):
+ with pytest.raises(ValueError) as exception:
+ bigquery_datasource._BigQueryDatasource(
+ project_id=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ dataset=_TEST_BQ_DATASET,
+ query="SELECT * FROM mockdataset.mocktable",
+ )
+ expected_message = (
+ "[Ray on Vertex AI]: Query and dataset kwargs cannot both be provided"
+ " (must be mutually exclusive)."
+ )
+ assert str(exception.value) == expected_message
+
+ def test_create_reader_dataset_not_found(self):
+ parallelism = 4
+ bq_ds = bigquery_datasource._BigQueryDatasource(
+ project_id=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ dataset="nonexistentdataset.mocktable",
+ )
+ with pytest.raises(ValueError) as exception:
+ bq_ds.get_read_tasks(parallelism)
+ expected_message = (
+ "[Ray on Vertex AI]: Dataset nonexistentdataset is not found. Please"
+ " ensure that it exists."
+ )
+ assert str(exception.value) == expected_message
+
+ def test_create_reader_table_not_found(self):
+ parallelism = 4
+ bq_ds = bigquery_datasource._BigQueryDatasource(
+ project_id=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ dataset="mockdataset.nonexistenttable",
+ )
+ with pytest.raises(ValueError) as exception:
+ bq_ds.get_read_tasks(parallelism)
+ expected_message = (
+ "[Ray on Vertex AI]: Table mockdataset.nonexistenttable is not found."
+ " Please ensure that it exists."
+ )
+ assert str(exception.value) == expected_message
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestWriteBigQuery:
+ """Tests for BigQuery Write."""
+
+ def setup_method(self):
+ importlib.reload(aiplatform.initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ def test_write(self, ray_get_mock, ray_remote_function_mock):
+ if _BigQueryDatasink is None:
+ return
+ bq_datasink = _BigQueryDatasink(
+ project_id=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ dataset=_TEST_BQ_DATASET,
+ )
+ arr = pa.array([2, 4, 5, 100])
+ block = pa.Table.from_arrays([arr], names=["data"])
+ status = bq_datasink.write(
+ blocks=[block],
+ ctx=None,
+ )
+ assert status == "ok"
+
+ def test_write_dataset_exists(self, ray_get_mock, ray_remote_function_mock):
+ if _BigQueryDatasink is None:
+ return
+ bq_datasink = _BigQueryDatasink(
+ project_id=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ dataset="existingdataset" + "." + _TEST_BQ_TABLE_ID,
+ )
+ arr = pa.array([2, 4, 5, 100])
+ block = pa.Table.from_arrays([arr], names=["data"])
+ status = bq_datasink.write(
+ blocks=[block],
+ ctx=None,
+ )
+ assert status == "ok"
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_cluster_init.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_cluster_init.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cbfab8ba02b97164633f0b7da6ec1bb1617744f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_cluster_init.py
@@ -0,0 +1,814 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import copy
+import importlib
+import re
+
+from google.api_core import operation as ga_operation
+from google.cloud import aiplatform
+from google.cloud.aiplatform import vertex_ray
+from google.cloud.aiplatform.vertex_ray.util.resources import (
+ Resources,
+ NodeImages,
+)
+from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import (
+ PersistentResourceServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.types import persistent_resource_service
+import test_constants as tc
+import mock
+import pytest
+
+from google.protobuf import field_mask_pb2 # type: ignore
+
+
+# -*- coding: utf-8 -*-
+_EXPECTED_MASK = field_mask_pb2.FieldMask(paths=["resource_pools.replica_count"])
+
+# for manual scaling
+_TEST_RESPONSE_RUNNING_1_POOL_RESIZE = copy.deepcopy(
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL
+)
+_TEST_RESPONSE_RUNNING_1_POOL_RESIZE.resource_pools[0].replica_count = 2
+_TEST_RESPONSE_RUNNING_2_POOLS_RESIZE = copy.deepcopy(
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_2_POOLS
+)
+_TEST_RESPONSE_RUNNING_2_POOLS_RESIZE.resource_pools[1].replica_count = 1
+
+_TEST_RESPONSE_RUNNING_1_POOL_RESIZE_0_WORKER = copy.deepcopy(
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL
+)
+_TEST_RESPONSE_RUNNING_1_POOL_RESIZE_0_WORKER.resource_pools[0].replica_count = 1
+
+_TEST_V2_4_WARNING_MESSAGE = (
+ "After google-cloud-aiplatform>1.53.0, using Ray version = 2.4 will result"
+ " in an error. Please use Ray version = 2.33.0 (default)."
+)
+
+
+@pytest.fixture
+def create_persistent_resource_1_pool_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "create_persistent_resource",
+ ) as create_persistent_resource_1_pool_mock:
+ create_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation)
+ create_persistent_resource_lro_mock.result.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL
+ )
+ create_persistent_resource_1_pool_mock.return_value = (
+ create_persistent_resource_lro_mock
+ )
+ yield create_persistent_resource_1_pool_mock
+
+
+@pytest.fixture
+def get_persistent_resource_1_pool_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "get_persistent_resource",
+ ) as get_persistent_resource_1_pool_mock:
+ get_persistent_resource_1_pool_mock.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL
+ )
+ yield get_persistent_resource_1_pool_mock
+
+
+@pytest.fixture
+def get_persistent_resource_1_pool_custom_image_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "get_persistent_resource",
+ ) as get_persistent_resource_1_pool_custom_image_mock:
+ get_persistent_resource_1_pool_custom_image_mock.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL_CUSTOM_IMAGES
+ )
+ yield get_persistent_resource_1_pool_custom_image_mock
+
+
+@pytest.fixture
+def create_persistent_resource_1_pool_byosa_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "create_persistent_resource",
+ ) as create_persistent_resource_1_pool_byosa_mock:
+ create_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation)
+ create_persistent_resource_lro_mock.result.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL_BYOSA
+ )
+ create_persistent_resource_1_pool_byosa_mock.return_value = (
+ create_persistent_resource_lro_mock
+ )
+ yield create_persistent_resource_1_pool_byosa_mock
+
+
+@pytest.fixture
+def get_persistent_resource_1_pool_byosa_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "get_persistent_resource",
+ ) as get_persistent_resource_1_pool_byosa_mock:
+ get_persistent_resource_1_pool_byosa_mock.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL_BYOSA
+ )
+ yield get_persistent_resource_1_pool_byosa_mock
+
+
+@pytest.fixture
+def create_persistent_resource_2_pools_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "create_persistent_resource",
+ ) as create_persistent_resource_2_pools_mock:
+ create_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation)
+ create_persistent_resource_lro_mock.result.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_2_POOLS
+ )
+ create_persistent_resource_2_pools_mock.return_value = (
+ create_persistent_resource_lro_mock
+ )
+ yield create_persistent_resource_2_pools_mock
+
+
+@pytest.fixture
+def create_persistent_resource_2_pools_custom_image_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "create_persistent_resource",
+ ) as create_persistent_resource_2_pools_custom_image_mock:
+ create_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation)
+ create_persistent_resource_lro_mock.result.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_2_POOLS_CUSTOM_IMAGE
+ )
+ create_persistent_resource_2_pools_custom_image_mock.return_value = (
+ create_persistent_resource_lro_mock
+ )
+ yield create_persistent_resource_2_pools_custom_image_mock
+
+
+@pytest.fixture
+def get_persistent_resource_2_pools_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "get_persistent_resource",
+ ) as get_persistent_resource_2_pools_mock:
+ get_persistent_resource_2_pools_mock.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_2_POOLS
+ )
+ yield get_persistent_resource_2_pools_mock
+
+
+@pytest.fixture
+def get_persistent_resource_2_pools_custom_image_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "get_persistent_resource",
+ ) as get_persistent_resource_2_pools_custom_image_mock:
+ get_persistent_resource_2_pools_custom_image_mock.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_2_POOLS_CUSTOM_IMAGE
+ )
+ yield get_persistent_resource_2_pools_custom_image_mock
+
+
+@pytest.fixture
+def list_persistent_resources_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "list_persistent_resources",
+ ) as list_persistent_resources_mock:
+ list_persistent_resources_mock.return_value = [
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL,
+ tc.ClusterConstants.TEST_RESPONSE_NO_RAY_RUNNING, # should be ignored
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_2_POOLS,
+ ]
+ yield list_persistent_resources_mock
+
+
+@pytest.fixture
+def create_persistent_resource_exception_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "create_persistent_resource",
+ ) as create_persistent_resource_exception_mock:
+ create_persistent_resource_exception_mock.side_effect = Exception
+ yield create_persistent_resource_exception_mock
+
+
+@pytest.fixture
+def get_persistent_resource_exception_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "get_persistent_resource",
+ ) as get_persistent_resource_exception_mock:
+ get_persistent_resource_exception_mock.side_effect = Exception
+ yield get_persistent_resource_exception_mock
+
+
+@pytest.fixture
+def list_persistent_resources_exception_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "list_persistent_resources",
+ ) as list_persistent_resources_exception_mock:
+ list_persistent_resources_exception_mock.side_effect = Exception
+ yield list_persistent_resources_exception_mock
+
+
+@pytest.fixture
+def update_persistent_resource_1_pool_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "update_persistent_resource",
+ ) as update_persistent_resource_1_pool_mock:
+ update_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation)
+ update_persistent_resource_lro_mock.result.return_value = (
+ _TEST_RESPONSE_RUNNING_1_POOL_RESIZE
+ )
+ update_persistent_resource_1_pool_mock.return_value = (
+ update_persistent_resource_lro_mock
+ )
+ yield update_persistent_resource_1_pool_mock
+
+
+@pytest.fixture
+def update_persistent_resource_1_pool_0_worker_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "update_persistent_resource",
+ ) as update_persistent_resource_1_pool_0_worker_mock:
+ update_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation)
+ update_persistent_resource_lro_mock.result.return_value = (
+ _TEST_RESPONSE_RUNNING_1_POOL_RESIZE_0_WORKER
+ )
+ update_persistent_resource_1_pool_0_worker_mock.return_value = (
+ update_persistent_resource_lro_mock
+ )
+ yield update_persistent_resource_1_pool_0_worker_mock
+
+
+@pytest.fixture
+def update_persistent_resource_2_pools_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "update_persistent_resource",
+ ) as update_persistent_resource_2_pools_mock:
+ update_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation)
+ update_persistent_resource_lro_mock.result.return_value = (
+ _TEST_RESPONSE_RUNNING_2_POOLS_RESIZE
+ )
+ update_persistent_resource_2_pools_mock.return_value = (
+ update_persistent_resource_lro_mock
+ )
+ yield update_persistent_resource_2_pools_mock
+
+
+def cluster_eq(returned_cluster, expected_cluster):
+ assert vars(returned_cluster.head_node_type) == vars(
+ expected_cluster.head_node_type
+ )
+ assert vars(returned_cluster.worker_node_types[0]) == vars(
+ expected_cluster.worker_node_types[0]
+ )
+ assert (
+ returned_cluster.cluster_resource_name == expected_cluster.cluster_resource_name
+ )
+ assert returned_cluster.python_version == expected_cluster.python_version
+ assert returned_cluster.ray_version == expected_cluster.ray_version
+ assert returned_cluster.network == expected_cluster.network
+ assert returned_cluster.state == expected_cluster.state
+
+
+@pytest.mark.parametrize("ray_version", ["2.9", "2.33"])
+@pytest.mark.usefixtures("google_auth_mock", "get_project_number_mock")
+class TestClusterManagement:
+ def setup_method(self, ray_version):
+ importlib.reload(aiplatform.initializer)
+ importlib.reload(aiplatform)
+ aiplatform.init()
+
+ def teardown_method(self, ray_version):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures("get_persistent_resource_1_pool_mock")
+ def test_create_ray_cluster_1_pool_gpu_success(
+ self, create_persistent_resource_1_pool_mock, ray_version
+ ):
+ """If head and worker nodes are duplicate, merge to head pool."""
+ cluster_name = vertex_ray.create_ray_cluster(
+ head_node_type=tc.ClusterConstants.TEST_HEAD_NODE_TYPE_1_POOL,
+ worker_node_types=tc.ClusterConstants.TEST_WORKER_NODE_TYPES_1_POOL,
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ cluster_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ ray_version=ray_version,
+ )
+
+ assert tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS == cluster_name
+
+ test_persistent_resource = tc.ClusterConstants.TEST_REQUEST_RUNNING_1_POOL
+ if ray_version == "2.9":
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "head-node"
+ ] = tc.ClusterConstants.TEST_GPU_IMAGE_2_9
+ else:
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "head-node"
+ ] = tc.ClusterConstants.TEST_GPU_IMAGE_2_33
+
+ request = persistent_resource_service.CreatePersistentResourceRequest(
+ parent=tc.ProjectConstants.TEST_PARENT,
+ persistent_resource=test_persistent_resource,
+ persistent_resource_id=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ )
+
+ create_persistent_resource_1_pool_mock.assert_called_with(
+ request,
+ )
+
+ @pytest.mark.usefixtures("get_persistent_resource_1_pool_custom_image_mock")
+ def test_create_ray_cluster_1_pool_custom_image_success(
+ self, create_persistent_resource_1_pool_mock, ray_version
+ ):
+ """If head and worker nodes are duplicate, merge to head pool."""
+ custom_images = NodeImages(
+ head=tc.ClusterConstants.TEST_CUSTOM_IMAGE,
+ worker=tc.ClusterConstants.TEST_CUSTOM_IMAGE,
+ )
+ cluster_name = vertex_ray.create_ray_cluster(
+ head_node_type=tc.ClusterConstants.TEST_HEAD_NODE_TYPE_1_POOL,
+ worker_node_types=tc.ClusterConstants.TEST_WORKER_NODE_TYPES_1_POOL,
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ cluster_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ custom_images=custom_images,
+ nfs_mounts=[tc.ClusterConstants.TEST_NFS_MOUNT],
+ )
+
+ assert tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS == cluster_name
+
+ request = persistent_resource_service.CreatePersistentResourceRequest(
+ parent=tc.ProjectConstants.TEST_PARENT,
+ persistent_resource=tc.ClusterConstants.TEST_REQUEST_RUNNING_1_POOL_CUSTOM_IMAGES,
+ persistent_resource_id=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ )
+
+ create_persistent_resource_1_pool_mock.assert_called_with(
+ request,
+ )
+
+ @pytest.mark.usefixtures("get_persistent_resource_1_pool_mock")
+ def test_create_ray_cluster_1_pool_gpu_with_labels_success(
+ self, create_persistent_resource_1_pool_mock, ray_version
+ ):
+ """If head and worker nodes are duplicate, merge to head pool."""
+ # Also test disable logging and metrics collection.
+ cluster_name = vertex_ray.create_ray_cluster(
+ head_node_type=tc.ClusterConstants.TEST_HEAD_NODE_TYPE_1_POOL,
+ worker_node_types=tc.ClusterConstants.TEST_WORKER_NODE_TYPES_1_POOL,
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ cluster_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ labels=tc.ClusterConstants.TEST_LABELS,
+ enable_metrics_collection=False,
+ enable_logging=False,
+ ray_version=ray_version,
+ )
+
+ assert tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS == cluster_name
+
+ test_persistent_resource = (
+ tc.ClusterConstants.TEST_REQUEST_RUNNING_1_POOL_WITH_LABELS
+ )
+ if ray_version == "2.9":
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "head-node"
+ ] = tc.ClusterConstants.TEST_GPU_IMAGE_2_9
+ else:
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "head-node"
+ ] = tc.ClusterConstants.TEST_GPU_IMAGE_2_33
+
+ request = persistent_resource_service.CreatePersistentResourceRequest(
+ parent=tc.ProjectConstants.TEST_PARENT,
+ persistent_resource=test_persistent_resource,
+ persistent_resource_id=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ )
+
+ create_persistent_resource_1_pool_mock.assert_called_with(
+ request,
+ )
+
+ @pytest.mark.usefixtures("get_persistent_resource_2_pools_custom_image_mock")
+ def test_create_ray_cluster_2_pools_custom_images_success(
+ self, create_persistent_resource_2_pools_custom_image_mock, ray_version
+ ):
+ """If head and worker nodes are not duplicate, create separate resource_pools."""
+ cluster_name = vertex_ray.create_ray_cluster(
+ head_node_type=tc.ClusterConstants.TEST_HEAD_NODE_TYPE_2_POOLS_CUSTOM_IMAGE,
+ worker_node_types=tc.ClusterConstants.TEST_WORKER_NODE_TYPES_2_POOLS_CUSTOM_IMAGE,
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ reserved_ip_ranges=["vertex-dedicated-range"],
+ cluster_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ )
+
+ assert tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS == cluster_name
+ request = persistent_resource_service.CreatePersistentResourceRequest(
+ parent=tc.ProjectConstants.TEST_PARENT,
+ persistent_resource=tc.ClusterConstants.TEST_REQUEST_RUNNING_2_POOLS_CUSTOM_IMAGE,
+ persistent_resource_id=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ )
+
+ create_persistent_resource_2_pools_custom_image_mock.assert_called_with(
+ request,
+ )
+
+ @pytest.mark.usefixtures("get_persistent_resource_2_pools_mock")
+ def test_create_ray_cluster_2_pools_success(
+ self, create_persistent_resource_2_pools_mock, ray_version
+ ):
+ """If head and worker nodes are not duplicate, create separate resource_pools."""
+ # Also test PSC-I.
+ psc_interface_config = vertex_ray.PscIConfig(
+ network_attachment=tc.ClusterConstants.TEST_PSC_NETWORK_ATTACHMENT
+ )
+ cluster_name = vertex_ray.create_ray_cluster(
+ head_node_type=tc.ClusterConstants.TEST_HEAD_NODE_TYPE_2_POOLS,
+ worker_node_types=tc.ClusterConstants.TEST_WORKER_NODE_TYPES_2_POOLS,
+ cluster_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ psc_interface_config=psc_interface_config,
+ ray_version=ray_version,
+ )
+
+ test_persistent_resource = tc.ClusterConstants.TEST_REQUEST_RUNNING_2_POOLS
+ if ray_version == "2.9":
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "head-node"
+ ] = tc.ClusterConstants.TEST_CPU_IMAGE_2_9
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "worker-pool1"
+ ] = tc.ClusterConstants.TEST_GPU_IMAGE_2_9
+ else:
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "head-node"
+ ] = tc.ClusterConstants.TEST_CPU_IMAGE_2_33
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "worker-pool1"
+ ] = tc.ClusterConstants.TEST_GPU_IMAGE_2_33
+
+ assert tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS == cluster_name
+ request = persistent_resource_service.CreatePersistentResourceRequest(
+ parent=tc.ProjectConstants.TEST_PARENT,
+ persistent_resource=test_persistent_resource,
+ persistent_resource_id=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ )
+
+ create_persistent_resource_2_pools_mock.assert_called_with(
+ request,
+ )
+
+ @pytest.mark.usefixtures("persistent_client_mock")
+ def test_create_ray_cluster_initialized_success(
+ self, get_project_number_mock, api_client_mock, ray_version
+ ):
+ """If initialized, create_ray_cluster doesn't need many call args."""
+ aiplatform.init(
+ project=tc.ProjectConstants.TEST_GCP_PROJECT_ID_OVERRIDE,
+ location=tc.ProjectConstants.TEST_GCP_REGION_OVERRIDE,
+ staging_bucket=tc.ProjectConstants.TEST_ARTIFACT_URI,
+ )
+
+ _ = vertex_ray.create_ray_cluster(
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ )
+
+ create_method_mock = api_client_mock.create_persistent_resource
+
+ # Assert that project override took effect.
+ get_project_number_mock.assert_called_once_with(
+ name="projects/{}".format(tc.ProjectConstants.TEST_GCP_PROJECT_ID_OVERRIDE)
+ )
+ # Assert that location override took effect.
+ assert (
+ tc.ProjectConstants.TEST_GCP_REGION_OVERRIDE
+ in create_method_mock.call_args.args[0].parent
+ )
+ assert (
+ "asia-docker"
+ in create_method_mock.call_args.args[
+ 0
+ ].persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "head-node"
+ ]
+ )
+
+ @pytest.mark.usefixtures("get_persistent_resource_1_pool_byosa_mock")
+ def test_create_ray_cluster_byosa_success(
+ self, create_persistent_resource_1_pool_byosa_mock, ray_version
+ ):
+ """If head and worker nodes are duplicate, merge to head pool."""
+ cluster_name = vertex_ray.create_ray_cluster(
+ head_node_type=tc.ClusterConstants.TEST_HEAD_NODE_TYPE_1_POOL,
+ worker_node_types=tc.ClusterConstants.TEST_WORKER_NODE_TYPES_1_POOL,
+ service_account=tc.ProjectConstants.TEST_SERVICE_ACCOUNT,
+ cluster_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ ray_version=ray_version,
+ )
+
+ assert tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS == cluster_name
+
+ test_persistent_resource = tc.ClusterConstants.TEST_REQUEST_RUNNING_1_POOL_BYOSA
+ if ray_version == "2.9":
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "head-node"
+ ] = tc.ClusterConstants.TEST_GPU_IMAGE_2_9
+ else:
+ test_persistent_resource.resource_runtime_spec.ray_spec.resource_pool_images[
+ "head-node"
+ ] = tc.ClusterConstants.TEST_GPU_IMAGE_2_33
+
+ request = persistent_resource_service.CreatePersistentResourceRequest(
+ parent=tc.ProjectConstants.TEST_PARENT,
+ persistent_resource=test_persistent_resource,
+ persistent_resource_id=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ )
+
+ create_persistent_resource_1_pool_byosa_mock.assert_called_with(
+ request,
+ )
+
+ def test_create_ray_cluster_2_4_deprecated_error(self, ray_version):
+ with pytest.raises(RuntimeError) as e:
+ vertex_ray.create_ray_cluster(
+ head_node_type=Resources(node_count=3),
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ ray_version="2.4",
+ )
+ e.match(regexp=re.escape(_TEST_V2_4_WARNING_MESSAGE))
+
+ def test_create_ray_cluster_head_multinode_error(self, ray_version):
+ with pytest.raises(ValueError) as e:
+ vertex_ray.create_ray_cluster(
+ head_node_type=Resources(node_count=3),
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ )
+ e.match(regexp=r"Resources.node_count must be 1.")
+
+ def test_create_ray_cluster_python_version_error(self, ray_version):
+ with pytest.raises(ValueError) as e:
+ vertex_ray.create_ray_cluster(
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ python_version="3.8",
+ )
+ e.match(regexp=r"The supported Python version is 3")
+
+ def test_create_ray_cluster_ray_version_error(self, ray_version):
+ with pytest.raises(ValueError) as e:
+ vertex_ray.create_ray_cluster(
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ ray_version="2.1",
+ )
+ e.match(regexp=r"The supported Ray versions are ")
+
+ def test_create_ray_cluster_same_pool_different_disk_error(self, ray_version):
+ with pytest.raises(ValueError) as e:
+ vertex_ray.create_ray_cluster(
+ head_node_type=Resources(machine_type="n1-highmem-32", node_count=1),
+ worker_node_types=[
+ Resources(
+ machine_type="n1-highmem-32",
+ node_count=32,
+ boot_disk_size_gb=1000,
+ )
+ ],
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ )
+ e.match(regexp=r"Worker disk size must match the head node's disk size if")
+
+ @pytest.mark.usefixtures("create_persistent_resource_exception_mock")
+ def test_create_ray_cluster_state_error(self, ray_version):
+ with pytest.raises(ValueError) as e:
+ vertex_ray.create_ray_cluster(
+ network=tc.ProjectConstants.TEST_VPC_NETWORK,
+ )
+
+ e.match(regexp=r"Failed in cluster creation due to: ")
+
+ def test_delete_ray_cluster_success(self, persistent_client_mock, ray_version):
+ vertex_ray.delete_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+
+ persistent_client_mock.assert_called_once()
+
+ @pytest.mark.usefixtures("persistent_client_error_mock")
+ def test_delete_ray_cluster_error(self, ray_version):
+ with pytest.raises(ValueError) as e:
+ vertex_ray.delete_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+
+ e.match(regexp=r"Failed in cluster deletion due to: ")
+
+ def test_get_ray_cluster_success(
+ self, get_persistent_resource_1_pool_mock, ray_version
+ ):
+ cluster = vertex_ray.get_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+
+ get_persistent_resource_1_pool_mock.assert_called_once()
+ cluster_eq(cluster, tc.ClusterConstants.TEST_CLUSTER)
+
+ def test_get_ray_cluster_with_custom_image_success(
+ self, get_persistent_resource_2_pools_custom_image_mock, ray_version
+ ):
+ cluster = vertex_ray.get_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+
+ get_persistent_resource_2_pools_custom_image_mock.assert_called_once()
+ cluster_eq(cluster, tc.ClusterConstants.TEST_CLUSTER_CUSTOM_IMAGE)
+
+ def test_get_ray_cluster_byosa_success(
+ self, get_persistent_resource_1_pool_byosa_mock, ray_version
+ ):
+ cluster = vertex_ray.get_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+
+ get_persistent_resource_1_pool_byosa_mock.assert_called_once()
+ cluster_eq(cluster, tc.ClusterConstants.TEST_CLUSTER_BYOSA)
+
+ @pytest.mark.usefixtures("get_persistent_resource_exception_mock")
+ def test_get_ray_cluster_error(self, ray_version):
+ with pytest.raises(ValueError) as e:
+ vertex_ray.get_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+
+ e.match(regexp=r"Failed in getting the cluster due to: ")
+
+ def test_list_ray_clusters_success(
+ self, list_persistent_resources_mock, ray_version
+ ):
+ clusters = vertex_ray.list_ray_clusters()
+
+ list_persistent_resources_mock.assert_called_once()
+
+ # first ray cluster
+ cluster_eq(clusters[0], tc.ClusterConstants.TEST_CLUSTER)
+ # second ray cluster
+ cluster_eq(clusters[1], tc.ClusterConstants.TEST_CLUSTER_2)
+
+ def test_list_ray_clusters_initialized_success(
+ self, get_project_number_mock, list_persistent_resources_mock, ray_version
+ ):
+ aiplatform.init(
+ project=tc.ProjectConstants.TEST_GCP_PROJECT_ID_OVERRIDE,
+ location=tc.ProjectConstants.TEST_GCP_REGION_OVERRIDE,
+ staging_bucket=tc.ProjectConstants.TEST_ARTIFACT_URI,
+ )
+ _ = vertex_ray.list_ray_clusters()
+
+ # Assert that project override took effect.
+ get_project_number_mock.assert_called_once_with(
+ name="projects/{}".format(tc.ProjectConstants.TEST_GCP_PROJECT_ID_OVERRIDE)
+ )
+ # Assert that location override took effect.
+ assert (
+ tc.ProjectConstants.TEST_GCP_REGION_OVERRIDE
+ in list_persistent_resources_mock.call_args.args[0].parent
+ )
+
+ @pytest.mark.usefixtures("list_persistent_resources_exception_mock")
+ def test_list_ray_clusters_error(self, ray_version):
+ with pytest.raises(ValueError) as e:
+ vertex_ray.list_ray_clusters()
+
+ e.match(regexp=r"Failed in listing the clusters due to: ")
+
+ @pytest.mark.usefixtures("get_persistent_resource_1_pool_mock")
+ def test_update_ray_cluster_1_pool(
+ self, update_persistent_resource_1_pool_mock, ray_version
+ ):
+ new_worker_node_types = []
+ for worker_node_type in tc.ClusterConstants.TEST_CLUSTER.worker_node_types:
+ # resize worker node to node_count = 1
+ worker_node_type.node_count = 1
+ new_worker_node_types.append(worker_node_type)
+
+ returned_name = vertex_ray.update_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS,
+ worker_node_types=new_worker_node_types,
+ )
+
+ request = persistent_resource_service.UpdatePersistentResourceRequest(
+ persistent_resource=_TEST_RESPONSE_RUNNING_1_POOL_RESIZE,
+ update_mask=_EXPECTED_MASK,
+ )
+ update_persistent_resource_1_pool_mock.assert_called_once_with(request)
+
+ assert returned_name == tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+
+ @pytest.mark.usefixtures("get_persistent_resource_1_pool_mock")
+ def test_update_ray_cluster_1_pool_to_0_worker(
+ self, update_persistent_resource_1_pool_mock, ray_version
+ ):
+
+ new_worker_node_types = []
+ for worker_node_type in tc.ClusterConstants.TEST_CLUSTER.worker_node_types:
+ # resize worker node to node_count = 0
+ worker_node_type.node_count = 0
+ new_worker_node_types.append(worker_node_type)
+
+ returned_name = vertex_ray.update_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS,
+ worker_node_types=new_worker_node_types,
+ )
+
+ request = persistent_resource_service.UpdatePersistentResourceRequest(
+ persistent_resource=_TEST_RESPONSE_RUNNING_1_POOL_RESIZE_0_WORKER,
+ update_mask=_EXPECTED_MASK,
+ )
+ update_persistent_resource_1_pool_mock.assert_called_once_with(request)
+
+ assert returned_name == tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+
+ @pytest.mark.usefixtures("get_persistent_resource_2_pools_mock")
+ def test_update_ray_cluster_2_pools(
+ self, update_persistent_resource_2_pools_mock, ray_version
+ ):
+
+ new_worker_node_types = []
+ for worker_node_type in tc.ClusterConstants.TEST_CLUSTER_2.worker_node_types:
+ # resize worker node to node_count = 1
+ worker_node_type.node_count = 1
+ new_worker_node_types.append(worker_node_type)
+
+ returned_name = vertex_ray.update_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS,
+ worker_node_types=new_worker_node_types,
+ )
+
+ request = persistent_resource_service.UpdatePersistentResourceRequest(
+ persistent_resource=_TEST_RESPONSE_RUNNING_2_POOLS_RESIZE,
+ update_mask=_EXPECTED_MASK,
+ )
+ update_persistent_resource_2_pools_mock.assert_called_once_with(request)
+
+ assert returned_name == tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+
+ @pytest.mark.usefixtures("get_persistent_resource_2_pools_mock")
+ def test_update_ray_cluster_2_pools_0_worker_fail(self, ray_version):
+
+ new_worker_node_types = []
+ for worker_node_type in tc.ClusterConstants.TEST_CLUSTER_2.worker_node_types:
+ # resize worker node to node_count = 0
+ worker_node_type.node_count = 0
+ new_worker_node_types.append(worker_node_type)
+
+ with pytest.raises(ValueError) as e:
+ vertex_ray.update_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS,
+ worker_node_types=new_worker_node_types,
+ )
+
+ e.match(regexp=r"must update to >= 1 nodes.")
+
+ @pytest.mark.usefixtures("get_persistent_resource_1_pool_mock")
+ def test_update_ray_cluster_duplicate_worker_node_types_error(self, ray_version):
+ new_worker_node_types = (
+ tc.ClusterConstants.TEST_CLUSTER_2.worker_node_types
+ + tc.ClusterConstants.TEST_CLUSTER_2.worker_node_types
+ )
+ with pytest.raises(ValueError) as e:
+ vertex_ray.update_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS,
+ worker_node_types=new_worker_node_types,
+ )
+
+ e.match(regexp=r"Worker_node_types have duplicate machine specs")
+
+ @pytest.mark.usefixtures("get_persistent_resource_1_pool_mock")
+ def test_update_ray_cluster_mismatch_worker_node_types_count_error(
+ self, ray_version
+ ):
+ with pytest.raises(ValueError) as e:
+ new_worker_node_types = tc.ClusterConstants.TEST_CLUSTER_2.worker_node_types
+ vertex_ray.update_ray_cluster(
+ cluster_resource_name=tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS,
+ worker_node_types=new_worker_node_types,
+ )
+
+ e.match(
+ regexp=r"does not match the number of the existing worker_node_type"
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_constants.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..09c64ad7c8485b967d1174587650f2863f9fcad8
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_constants.py
@@ -0,0 +1,487 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import dataclasses
+import sys
+
+from google.cloud.aiplatform.vertex_ray.util.resources import Cluster
+from google.cloud.aiplatform.vertex_ray.util.resources import (
+ AutoscalingSpec,
+ NfsMount,
+ PscIConfig,
+ Resources,
+)
+from google.cloud.aiplatform_v1beta1.types.machine_resources import DiskSpec
+from google.cloud.aiplatform_v1beta1.types.machine_resources import (
+ NfsMount as GapicNfsMount,
+)
+from google.cloud.aiplatform_v1beta1.types.machine_resources import (
+ MachineSpec,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import (
+ PersistentResource,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import (
+ RayLogsSpec,
+ RayMetricSpec,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import RaySpec
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import (
+ ResourcePool,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import (
+ ResourceRuntime,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import (
+ ResourceRuntimeSpec,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import (
+ ServiceAccountSpec,
+)
+from google.cloud.aiplatform_v1beta1.types.service_networking import (
+ PscInterfaceConfig,
+)
+import ray
+import pytest
+
+
+rovminversion = pytest.mark.skipif(
+ sys.version_info > (3, 10), reason="Requires python3.10 or lower"
+)
+# TODO(b/363340317)
+xgbversion = pytest.mark.skipif(
+ ray.__version__ != "2.9.3", reason="Requires xgboost 1.7 or higher"
+)
+
+predictionrayversion = pytest.mark.skipif(
+ ray.__version__ != "2.9.3", reason="Not currently supported on Ray 2.33"
+)
+
+
+@dataclasses.dataclass(frozen=True)
+class ProjectConstants:
+ """Defines project-specific constants used by tests."""
+
+ TEST_VPC_NETWORK = "mock-vpc-network"
+ TEST_GCP_PROJECT_ID = "mock-test-project-id"
+ TEST_GCP_PROJECT_ID_OVERRIDE = "mock-test-project-id-2"
+ TEST_GCP_REGION = "us-central1"
+ TEST_GCP_REGION_OVERRIDE = "asia-east1"
+ TEST_GCP_PROJECT_NUMBER = "12345"
+ TEST_PARENT = f"projects/{TEST_GCP_PROJECT_NUMBER}/locations/{TEST_GCP_REGION}"
+ TEST_ARTIFACT_URI = "gs://path/to/artifact/uri"
+ TEST_BAD_ARTIFACT_URI = "/path/to/artifact/uri"
+ TEST_MODEL_GCS_URI = "gs://test_model_dir"
+ TEST_MODEL_ID = (
+ f"projects/{TEST_GCP_PROJECT_NUMBER}/locations/{TEST_GCP_REGION}/models/456"
+ )
+ TEST_SERVICE_ACCOUNT = "service-account@project.iam.gserviceaccount.com"
+
+
+@dataclasses.dataclass(frozen=True)
+class ClusterConstants:
+ """Defines cluster constants used by tests."""
+
+ TEST_LABELS = {"my_key": "my_value"}
+ TEST_VERTEX_RAY_HEAD_NODE_IP = "1.2.3.4:10001"
+ TEST_VERTEX_RAY_JOB_CLIENT_IP = "1.2.3.4:8888"
+ TEST_VERTEX_RAY_DASHBOARD_ADDRESS = (
+ "48b400ad90b8dd3c-dot-us-central1.aiplatform-training.googleusercontent.com"
+ )
+ TEST_VERTEX_RAY_CLIENT_ENDPOINT = (
+ "88888.us-central1-1234567.staging-ray.vertexai.goog:443"
+ )
+ TEST_VERTEX_RAY_PR_ID = "user-persistent-resource-1234567890"
+ TEST_VERTEX_RAY_PR_ADDRESS = (
+ f"{ProjectConstants.TEST_PARENT}/persistentResources/" + TEST_VERTEX_RAY_PR_ID
+ )
+ TEST_NFS_MOUNT = NfsMount(
+ server="10.10.10.10", path="nfs_path", mount_point="nfs_mount_point"
+ )
+ TEST_GAPIC_NFS_MOUNT = GapicNfsMount(
+ server="10.10.10.10", path="nfs_path", mount_point="nfs_mount_point"
+ )
+ TEST_CPU_IMAGE_2_9 = "us-docker.pkg.dev/vertex-ai/training/ray-cpu.2-9.py310:latest"
+ TEST_GPU_IMAGE_2_9 = "us-docker.pkg.dev/vertex-ai/training/ray-gpu.2-9.py310:latest"
+ TEST_CPU_IMAGE_2_33 = (
+ "us-docker.pkg.dev/vertex-ai/training/ray-cpu.2-33.py310:latest"
+ )
+ TEST_GPU_IMAGE_2_33 = (
+ "us-docker.pkg.dev/vertex-ai/training/ray-gpu.2-33.py310:latest"
+ )
+ TEST_CUSTOM_IMAGE = "us-docker.pkg.dev/my-project/ray-custom-image.2.9:latest"
+ TEST_PSC_NETWORK_ATTACHMENT = "my-network-attachment"
+ # RUNNING Persistent Cluster w/o Ray
+ TEST_RESPONSE_NO_RAY_RUNNING = PersistentResource(
+ name=TEST_VERTEX_RAY_PR_ADDRESS,
+ resource_runtime_spec=ResourceRuntimeSpec(),
+ resource_runtime=ResourceRuntime(),
+ state="RUNNING",
+ )
+ # RUNNING
+ # 1_POOL: merged worker_node_types and head_node_type with duplicate MachineSpec
+ TEST_HEAD_NODE_TYPE_1_POOL = Resources(
+ accelerator_type="NVIDIA_TESLA_P100", accelerator_count=1
+ )
+ TEST_WORKER_NODE_TYPES_1_POOL = [
+ Resources(
+ accelerator_type="NVIDIA_TESLA_P100", accelerator_count=1, node_count=2
+ )
+ ]
+ TEST_RESOURCE_POOL_0 = ResourcePool(
+ id="head-node",
+ machine_spec=MachineSpec(
+ machine_type="n1-standard-16",
+ accelerator_type="NVIDIA_TESLA_P100",
+ accelerator_count=1,
+ ),
+ disk_spec=DiskSpec(
+ boot_disk_type="pd-ssd",
+ boot_disk_size_gb=100,
+ ),
+ replica_count=3,
+ )
+ TEST_REQUEST_RUNNING_1_POOL = PersistentResource(
+ resource_pools=[TEST_RESOURCE_POOL_0],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={"head-node": TEST_GPU_IMAGE_2_9},
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ray_logs_spec=RayLogsSpec(disabled=False),
+ ),
+ ),
+ psc_interface_config=None,
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ )
+ TEST_REQUEST_RUNNING_1_POOL_WITH_LABELS = PersistentResource(
+ resource_pools=[TEST_RESOURCE_POOL_0],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={"head-node": TEST_GPU_IMAGE_2_9},
+ ray_metric_spec=RayMetricSpec(disabled=True),
+ ray_logs_spec=RayLogsSpec(disabled=True),
+ ),
+ ),
+ psc_interface_config=None,
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ labels=TEST_LABELS,
+ )
+ TEST_REQUEST_RUNNING_1_POOL_CUSTOM_IMAGES = PersistentResource(
+ resource_pools=[TEST_RESOURCE_POOL_0],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={"head-node": TEST_CUSTOM_IMAGE},
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ray_logs_spec=RayLogsSpec(disabled=False),
+ nfs_mounts=[TEST_GAPIC_NFS_MOUNT],
+ ),
+ ),
+ psc_interface_config=None,
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ )
+ TEST_REQUEST_RUNNING_1_POOL_BYOSA = PersistentResource(
+ resource_pools=[TEST_RESOURCE_POOL_0],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={"head-node": TEST_GPU_IMAGE_2_9},
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ray_logs_spec=RayLogsSpec(disabled=False),
+ ),
+ service_account_spec=ServiceAccountSpec(
+ enable_custom_service_account=True,
+ service_account=ProjectConstants.TEST_SERVICE_ACCOUNT,
+ ),
+ ),
+ psc_interface_config=None,
+ network=None,
+ )
+ # Get response has generated name, and URIs
+ TEST_RESPONSE_RUNNING_1_POOL = PersistentResource(
+ name=TEST_VERTEX_RAY_PR_ADDRESS,
+ resource_pools=[TEST_RESOURCE_POOL_0],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={"head-node": TEST_GPU_IMAGE_2_9},
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ray_logs_spec=RayLogsSpec(disabled=False),
+ ),
+ ),
+ psc_interface_config=None,
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ resource_runtime=ResourceRuntime(
+ access_uris={
+ "RAY_DASHBOARD_URI": TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ "RAY_HEAD_NODE_INTERNAL_IP": TEST_VERTEX_RAY_HEAD_NODE_IP,
+ }
+ ),
+ state="RUNNING",
+ )
+ # Get response has generated name, and URIs
+ TEST_RESPONSE_RUNNING_1_POOL_CUSTOM_IMAGES = PersistentResource(
+ name=TEST_VERTEX_RAY_PR_ADDRESS,
+ resource_pools=[TEST_RESOURCE_POOL_0],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={"head-node": TEST_CUSTOM_IMAGE},
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ nfs_mounts=[TEST_GAPIC_NFS_MOUNT],
+ ),
+ ),
+ psc_interface_config=None,
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ resource_runtime=ResourceRuntime(
+ access_uris={
+ "RAY_DASHBOARD_URI": TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ "RAY_HEAD_NODE_INTERNAL_IP": TEST_VERTEX_RAY_HEAD_NODE_IP,
+ }
+ ),
+ state="RUNNING",
+ )
+ TEST_RESPONSE_RUNNING_1_POOL_BYOSA = PersistentResource(
+ name=TEST_VERTEX_RAY_PR_ADDRESS,
+ resource_pools=[TEST_RESOURCE_POOL_0],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={"head-node": TEST_GPU_IMAGE_2_9},
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ),
+ service_account_spec=ServiceAccountSpec(
+ enable_custom_service_account=True,
+ service_account=ProjectConstants.TEST_SERVICE_ACCOUNT,
+ ),
+ ),
+ psc_interface_config=None,
+ network=None,
+ resource_runtime=ResourceRuntime(
+ access_uris={
+ "RAY_DASHBOARD_URI": TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ "RAY_CLIENT_ENDPOINT": TEST_VERTEX_RAY_CLIENT_ENDPOINT,
+ }
+ ),
+ state="RUNNING",
+ )
+ TEST_RESPONSE_1_POOL_BYOSA_PRIVATE = PersistentResource(
+ name=TEST_VERTEX_RAY_PR_ADDRESS,
+ resource_pools=[TEST_RESOURCE_POOL_0],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={"head-node": TEST_GPU_IMAGE_2_9},
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ),
+ service_account_spec=ServiceAccountSpec(
+ enable_custom_service_account=True,
+ service_account=ProjectConstants.TEST_SERVICE_ACCOUNT,
+ ),
+ ),
+ psc_interface_config=None,
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ resource_runtime=ResourceRuntime(
+ access_uris={
+ "RAY_DASHBOARD_URI": TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ "RAY_CLIENT_ENDPOINT": TEST_VERTEX_RAY_CLIENT_ENDPOINT,
+ }
+ ),
+ state="RUNNING",
+ )
+ # 2_POOL: worker_node_types and head_node_type have different MachineSpecs
+ TEST_HEAD_NODE_TYPE_2_POOLS = Resources()
+ TEST_WORKER_NODE_TYPES_2_POOLS = [
+ Resources(
+ machine_type="n1-standard-16",
+ autoscaling_spec=AutoscalingSpec(min_replica_count=1, max_replica_count=4),
+ accelerator_type="NVIDIA_TESLA_P100",
+ accelerator_count=1,
+ )
+ ]
+ TEST_HEAD_NODE_TYPE_2_POOLS_CUSTOM_IMAGE = Resources(custom_image=TEST_CUSTOM_IMAGE)
+ TEST_WORKER_NODE_TYPES_2_POOLS_CUSTOM_IMAGE = [
+ Resources(
+ machine_type="n1-standard-16",
+ autoscaling_spec=AutoscalingSpec(min_replica_count=1, max_replica_count=4),
+ accelerator_type="NVIDIA_TESLA_P100",
+ accelerator_count=1,
+ custom_image=TEST_CUSTOM_IMAGE,
+ )
+ ]
+ TEST_RESOURCE_POOL_1 = ResourcePool(
+ id="head-node",
+ machine_spec=MachineSpec(
+ machine_type="n1-standard-16",
+ ),
+ disk_spec=DiskSpec(
+ boot_disk_type="pd-ssd",
+ boot_disk_size_gb=100,
+ ),
+ replica_count=1,
+ )
+ TEST_RESOURCE_POOL_2 = ResourcePool(
+ id="worker-pool1",
+ machine_spec=MachineSpec(
+ machine_type="n1-standard-16",
+ accelerator_type="NVIDIA_TESLA_P100",
+ accelerator_count=1,
+ ),
+ disk_spec=DiskSpec(
+ boot_disk_type="pd-ssd",
+ boot_disk_size_gb=100,
+ ),
+ autoscaling_spec=ResourcePool.AutoscalingSpec(
+ min_replica_count=1,
+ max_replica_count=4,
+ ),
+ )
+ TEST_REQUEST_RUNNING_2_POOLS = PersistentResource(
+ resource_pools=[TEST_RESOURCE_POOL_1, TEST_RESOURCE_POOL_2],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={
+ "head-node": TEST_CPU_IMAGE_2_9,
+ "worker-pool1": TEST_GPU_IMAGE_2_9,
+ },
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ray_logs_spec=RayLogsSpec(disabled=False),
+ ),
+ ),
+ psc_interface_config=PscInterfaceConfig(
+ network_attachment=TEST_PSC_NETWORK_ATTACHMENT
+ ),
+ )
+ TEST_REQUEST_RUNNING_2_POOLS_CUSTOM_IMAGE = PersistentResource(
+ resource_pools=[TEST_RESOURCE_POOL_1, TEST_RESOURCE_POOL_2],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={
+ "head-node": TEST_CUSTOM_IMAGE,
+ "worker-pool1": TEST_CUSTOM_IMAGE,
+ },
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ray_logs_spec=RayLogsSpec(disabled=False),
+ ),
+ ),
+ psc_interface_config=None,
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ reserved_ip_ranges=["vertex-dedicated-range"],
+ )
+ # Responses
+ TEST_RESOURCE_POOL_2.replica_count = 1
+ TEST_RESPONSE_RUNNING_2_POOLS = PersistentResource(
+ name=TEST_VERTEX_RAY_PR_ADDRESS,
+ resource_pools=[TEST_RESOURCE_POOL_1, TEST_RESOURCE_POOL_2],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={
+ "head-node": TEST_CPU_IMAGE_2_9,
+ "worker-pool1": TEST_GPU_IMAGE_2_9,
+ },
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ),
+ ),
+ psc_interface_config=PscInterfaceConfig(
+ network_attachment=TEST_PSC_NETWORK_ATTACHMENT
+ ),
+ network=None,
+ reserved_ip_ranges=None,
+ resource_runtime=ResourceRuntime(
+ access_uris={
+ "RAY_DASHBOARD_URI": TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ }
+ ),
+ state="RUNNING",
+ )
+ TEST_RESPONSE_RUNNING_2_POOLS_CUSTOM_IMAGE = PersistentResource(
+ name=TEST_VERTEX_RAY_PR_ADDRESS,
+ resource_pools=[TEST_RESOURCE_POOL_1, TEST_RESOURCE_POOL_2],
+ resource_runtime_spec=ResourceRuntimeSpec(
+ ray_spec=RaySpec(
+ resource_pool_images={
+ "head-node": TEST_CUSTOM_IMAGE,
+ "worker-pool1": TEST_CUSTOM_IMAGE,
+ },
+ ray_metric_spec=RayMetricSpec(disabled=False),
+ ),
+ ),
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ reserved_ip_ranges=["vertex-dedicated-range"],
+ resource_runtime=ResourceRuntime(
+ access_uris={
+ "RAY_DASHBOARD_URI": TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ "RAY_HEAD_NODE_INTERNAL_IP": TEST_VERTEX_RAY_HEAD_NODE_IP,
+ }
+ ),
+ state="RUNNING",
+ )
+ TEST_CLUSTER = Cluster(
+ cluster_resource_name=TEST_VERTEX_RAY_PR_ADDRESS,
+ python_version="3.10",
+ ray_version="2.9",
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ reserved_ip_ranges=None,
+ service_account=None,
+ state="RUNNING",
+ head_node_type=TEST_HEAD_NODE_TYPE_1_POOL,
+ worker_node_types=TEST_WORKER_NODE_TYPES_1_POOL,
+ dashboard_address=TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ ray_metric_enabled=True,
+ ray_logs_enabled=True,
+ )
+ TEST_CLUSTER_2 = Cluster(
+ cluster_resource_name=TEST_VERTEX_RAY_PR_ADDRESS,
+ python_version="3.10",
+ ray_version="2.9",
+ network="",
+ reserved_ip_ranges="",
+ service_account=None,
+ state="RUNNING",
+ head_node_type=TEST_HEAD_NODE_TYPE_2_POOLS,
+ worker_node_types=TEST_WORKER_NODE_TYPES_2_POOLS,
+ dashboard_address=TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ ray_metric_enabled=True,
+ ray_logs_enabled=True,
+ psc_interface_config=PscIConfig(network_attachment=TEST_PSC_NETWORK_ATTACHMENT),
+ )
+ TEST_CLUSTER_CUSTOM_IMAGE = Cluster(
+ cluster_resource_name=TEST_VERTEX_RAY_PR_ADDRESS,
+ network=ProjectConstants.TEST_VPC_NETWORK,
+ reserved_ip_ranges=["vertex-dedicated-range"],
+ service_account=None,
+ state="RUNNING",
+ head_node_type=TEST_HEAD_NODE_TYPE_2_POOLS_CUSTOM_IMAGE,
+ worker_node_types=TEST_WORKER_NODE_TYPES_2_POOLS_CUSTOM_IMAGE,
+ dashboard_address=TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ ray_metric_enabled=True,
+ ray_logs_enabled=True,
+ labels={},
+ )
+ TEST_CLUSTER_BYOSA = Cluster(
+ cluster_resource_name=TEST_VERTEX_RAY_PR_ADDRESS,
+ python_version="3.10",
+ ray_version="2.9",
+ network="",
+ reserved_ip_ranges="",
+ service_account=ProjectConstants.TEST_SERVICE_ACCOUNT,
+ state="RUNNING",
+ head_node_type=TEST_HEAD_NODE_TYPE_1_POOL,
+ worker_node_types=TEST_WORKER_NODE_TYPES_1_POOL,
+ dashboard_address=TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ ray_metric_enabled=True,
+ ray_logs_enabled=True,
+ )
+ TEST_BEARER_TOKEN = "test-bearer-token"
+ TEST_HEADERS = {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {}".format(TEST_BEARER_TOKEN),
+ }
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_dashboard_sdk.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_dashboard_sdk.py
new file mode 100644
index 0000000000000000000000000000000000000000..a910a9d99e6c1945e3b28229ef71d814d56e5505
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_dashboard_sdk.py
@@ -0,0 +1,160 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import importlib
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import vertex_ray
+import test_constants as tc
+import mock
+import pytest
+from ray.dashboard.modules import dashboard_sdk as oss_dashboard_sdk
+
+
+# -*- coding: utf-8 -*-
+
+
+@pytest.fixture
+def ray_get_job_submission_client_cluster_info_mock():
+ with mock.patch.object(
+ oss_dashboard_sdk, "get_job_submission_client_cluster_info"
+ ) as ray_get_job_submission_client_cluster_info_mock:
+ yield ray_get_job_submission_client_cluster_info_mock
+
+
+@pytest.fixture
+def get_persistent_resource_status_running_mock():
+ with mock.patch.object(
+ vertex_ray.util._gapic_utils, "get_persistent_resource"
+ ) as get_persistent_resource:
+ get_persistent_resource.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL
+ )
+ yield get_persistent_resource
+
+
+@pytest.fixture
+def get_persistent_resource_status_running_byosa_public_mock():
+ # Cluster with BYOSA and no peering
+ with mock.patch.object(
+ vertex_ray.util._gapic_utils, "get_persistent_resource"
+ ) as get_persistent_resource:
+ get_persistent_resource.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL_BYOSA
+ )
+ yield get_persistent_resource
+
+
+@pytest.fixture
+def get_bearer_token_mock():
+ with mock.patch.object(
+ vertex_ray.util._validation_utils, "get_bearer_token"
+ ) as get_bearer_token_mock:
+ get_bearer_token_mock.return_value = tc.ClusterConstants.TEST_BEARER_TOKEN
+ yield get_bearer_token_mock
+
+
+class TestGetJobSubmissionClientClusterInfo:
+ def setup_method(self):
+ importlib.reload(aiplatform.initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.usefixtures(
+ "get_persistent_resource_status_running_mock", "google_auth_mock"
+ )
+ def test_job_submission_client_cluster_info_with_full_resource_name(
+ self,
+ ray_get_job_submission_client_cluster_info_mock,
+ get_bearer_token_mock,
+ ):
+ vertex_ray.get_job_submission_client_cluster_info(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+ get_bearer_token_mock.assert_called_once_with()
+ ray_get_job_submission_client_cluster_info_mock.assert_called_once_with(
+ address=tc.ClusterConstants.TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ _use_tls=True,
+ headers=tc.ClusterConstants.TEST_HEADERS,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_persistent_resource_status_running_mock", "google_auth_mock"
+ )
+ def test_job_submission_client_cluster_info_with_cluster_name(
+ self,
+ ray_get_job_submission_client_cluster_info_mock,
+ get_project_number_mock,
+ get_bearer_token_mock,
+ ):
+ aiplatform.init(project=tc.ProjectConstants.TEST_GCP_PROJECT_ID)
+
+ vertex_ray.get_job_submission_client_cluster_info(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID
+ )
+ get_project_number_mock.assert_called_once_with(
+ name="projects/{}".format(tc.ProjectConstants.TEST_GCP_PROJECT_ID)
+ )
+ get_bearer_token_mock.assert_called_once_with()
+ ray_get_job_submission_client_cluster_info_mock.assert_called_once_with(
+ address=tc.ClusterConstants.TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ _use_tls=True,
+ headers=tc.ClusterConstants.TEST_HEADERS,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_persistent_resource_status_running_mock", "google_auth_mock"
+ )
+ def test_job_submission_client_cluster_info_with_dashboard_address(
+ self,
+ ray_get_job_submission_client_cluster_info_mock,
+ get_bearer_token_mock,
+ ):
+ aiplatform.init(project=tc.ProjectConstants.TEST_GCP_PROJECT_ID)
+
+ vertex_ray.get_job_submission_client_cluster_info(
+ tc.ClusterConstants.TEST_VERTEX_RAY_DASHBOARD_ADDRESS
+ )
+ get_bearer_token_mock.assert_called_once_with()
+ ray_get_job_submission_client_cluster_info_mock.assert_called_once_with(
+ address=tc.ClusterConstants.TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ _use_tls=True,
+ headers=tc.ClusterConstants.TEST_HEADERS,
+ )
+
+ @pytest.mark.usefixtures(
+ "get_persistent_resource_status_running_byosa_public_mock", "google_auth_mock"
+ )
+ def test_job_submission_client_cluster_info_with_cluster_name_byosa_public(
+ self,
+ ray_get_job_submission_client_cluster_info_mock,
+ get_bearer_token_mock,
+ get_project_number_mock,
+ ):
+ aiplatform.init(project=tc.ProjectConstants.TEST_GCP_PROJECT_ID)
+
+ vertex_ray.get_job_submission_client_cluster_info(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID
+ )
+ get_project_number_mock.assert_called_once_with(
+ name="projects/{}".format(tc.ProjectConstants.TEST_GCP_PROJECT_ID)
+ )
+ get_bearer_token_mock.assert_called_once_with()
+ ray_get_job_submission_client_cluster_info_mock.assert_called_once_with(
+ address=tc.ClusterConstants.TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ _use_tls=True,
+ headers=tc.ClusterConstants.TEST_HEADERS,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_prediction_utils.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_prediction_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..873f1c252d54c8c4b2aa78c467304c9865fb2b32
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_prediction_utils.py
@@ -0,0 +1,70 @@
+"""Test utils for Prediction Tests.
+"""
+
+import numpy as np
+import sklearn
+from sklearn import linear_model
+import tensorflow as tf
+import torch
+import xgboost
+
+
+def create_tf_model() -> tf.keras.Model:
+ """Create toy neural network : 1-layer."""
+ model = tf.keras.Sequential(
+ [tf.keras.layers.Dense(1, activation="linear", input_shape=(4,))]
+ )
+ model.compile(optimizer="Adam", loss="mean_squared_error", metrics=["mse"])
+ return model
+
+
+def train_tf_model(model: tf.keras.Model) -> None:
+ """Trains a Keras Model."""
+ n = 1
+ train_x = np.random.normal(0, 1, size=(n, 4))
+ train_y = np.random.uniform(0, 1, size=(n, 1))
+ model.fit(train_x, train_y, epochs=1)
+
+
+def get_tensorflow_trained_model() -> tf.keras.Model:
+ """Returns a tensorflow trained model."""
+ model = create_tf_model()
+ train_tf_model(model)
+ return model
+
+
+def get_sklearn_estimator() -> sklearn.base.BaseEstimator:
+ """Returns a sklearn estimator."""
+ estimator = linear_model.LinearRegression()
+ x = [[1, 2], [3, 4], [5, 6]]
+ y = [7, 8, 9]
+ estimator.fit(x, y)
+ return estimator
+
+
+def get_xgboost_model() -> xgboost.XGBClassifier:
+ train_x = np.array([[1, 2], [3, 4]])
+ train_y = np.array([0, 1])
+ return xgboost.XGBClassifier().fit(train_x, train_y)
+
+
+input_size = 1
+layer_size = 1
+output_size = 1
+num_epochs = 1
+
+
+class TorchModel(torch.nn.Module):
+ def __init__(self):
+ super(TorchModel, self).__init__()
+ self.layer1 = torch.nn.Linear(input_size, layer_size)
+ self.relu = torch.nn.ReLU()
+ self.layer2 = torch.nn.Linear(layer_size, output_size)
+
+ def forward(self, input_data):
+ return self.layer2(self.relu(self.layer1(input_data)))
+
+
+def get_pytorch_trained_model() -> torch.nn.Module:
+ """Returns a pytorch trained model."""
+ return TorchModel()
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_ray_prediction.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_ray_prediction.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6be5c72285b9ea78b8b2e988b6c8b47044c2ef7
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_ray_prediction.py
@@ -0,0 +1,516 @@
+"""Tests for prediction."""
+
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+import pickle
+import tempfile
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform.vertex_ray.predict import (
+ sklearn as prediction_sklearn,
+)
+from google.cloud.aiplatform.vertex_ray.predict import (
+ tensorflow as prediction_tensorflow,
+)
+from google.cloud.aiplatform.vertex_ray.predict import (
+ torch as prediction_torch,
+)
+from google.cloud.aiplatform.vertex_ray.predict import (
+ xgboost as prediction_xgboost,
+)
+from google.cloud.aiplatform.utils import gcs_utils
+import test_constants as tc
+import test_prediction_utils
+
+import mock
+import numpy as np
+import pytest
+import ray
+import tensorflow as tf
+import torch
+import xgboost
+
+
+@pytest.fixture()
+def upload_tensorflow_saved_model_mock():
+ with mock.patch.object(
+ aiplatform.Model, "upload_tensorflow_saved_model"
+ ) as upload_tensorflow_saved_model_mock:
+ upload_tensorflow_saved_model_mock.return_value = None
+ yield upload_tensorflow_saved_model_mock
+
+
+@pytest.fixture()
+def ray_tensorflow_checkpoint():
+ defined_model = test_prediction_utils.get_tensorflow_trained_model()
+ checkpoint = ray.train.tensorflow.TensorflowCheckpoint.from_model(defined_model)
+ return checkpoint
+
+
+@pytest.fixture()
+def ray_checkpoint_from_dict():
+ try:
+ return ray.train.Checkpoint.from_directory("/tmp/checkpoint")
+ except AttributeError:
+ raise RuntimeError("Unsupported Ray version.")
+
+
+@pytest.fixture()
+def save_tf_model():
+ with mock.patch.object(tf.keras.Model, "save") as save_tf_model_mock:
+ save_tf_model_mock.return_value = None
+ yield save_tf_model_mock
+
+
+@pytest.fixture()
+def ray_sklearn_checkpoint():
+ estimator = test_prediction_utils.get_sklearn_estimator()
+ temp_dir = tempfile.mkdtemp()
+ checkpoint = ray.train.sklearn.SklearnCheckpoint.from_estimator(
+ estimator, path=temp_dir
+ )
+ return checkpoint
+
+
+@pytest.fixture()
+def ray_xgboost_checkpoint():
+ if ray.__version__ == "2.9.3":
+ from ray.train import xgboost as ray_xgboost
+
+ model = test_prediction_utils.get_xgboost_model()
+ checkpoint = ray_xgboost.XGBoostCheckpoint.from_model(model.get_booster())
+ return checkpoint
+ else:
+ return None
+
+
+@pytest.fixture()
+def pickle_dump():
+ with mock.patch.object(pickle, "dump") as pickle_dump:
+ pickle_dump.return_value = None
+ yield pickle_dump
+
+
+@pytest.fixture
+def mock_vertex_model():
+ model = mock.MagicMock(aiplatform.Model)
+ model.uri = tc.ProjectConstants.TEST_MODEL_GCS_URI
+ model.container_spec.image_uri = "us-docker.xxx/sklearn-cpu.1-0:latest"
+ model.labels = {"registered_by_vertex_ai": "true"}
+ yield model
+
+
+@pytest.fixture()
+def upload_sklearn_mock(mock_vertex_model):
+ with mock.patch.object(
+ aiplatform.Model, "upload_scikit_learn_model_file"
+ ) as upload_sklearn_mock:
+ upload_sklearn_mock.return_value = mock_vertex_model
+ yield upload_sklearn_mock
+
+
+@pytest.fixture
+def mock_xgboost_vertex_model():
+ model = mock.MagicMock(aiplatform.Model)
+ model.uri = tc.ProjectConstants.TEST_MODEL_GCS_URI
+ model.container_spec.image_uri = "us-docker.xxx/xgboost-cpu.1-6:latest"
+ model.labels = {"registered_by_vertex_ai": "true"}
+ yield model
+
+
+@pytest.fixture()
+def upload_xgboost_mock(mock_xgboost_vertex_model):
+ with mock.patch.object(
+ aiplatform.Model, "upload_xgboost_model_file"
+ ) as upload_xgboost_mock:
+ upload_xgboost_mock.return_value = mock_xgboost_vertex_model
+ yield upload_xgboost_mock
+
+
+@pytest.fixture()
+def gcs_utils_upload_to_gcs():
+ with mock.patch.object(gcs_utils, "upload_to_gcs") as gcs_utils_upload_to_gcs:
+ gcs_utils.return_value = None
+ yield gcs_utils_upload_to_gcs
+
+
+@pytest.fixture()
+def ray_torch_checkpoint():
+ defined_model = test_prediction_utils.get_pytorch_trained_model()
+ checkpoint = ray.train.torch.TorchCheckpoint.from_model(defined_model)
+ return checkpoint
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestPredictionFunctionality:
+ """Tests for Prediction."""
+
+ def setup_method(self):
+ importlib.reload(aiplatform.initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ # Tensorflow Tests
+ @tc.rovminversion
+ def test_convert_checkpoint_to_tf_model_raise_exception(
+ self, ray_checkpoint_from_dict
+ ) -> None:
+ """Test if a checkpoint is not an instance of TensflowCheckpoint should
+ fail with exception ValueError."""
+ with pytest.raises(ValueError) as ve:
+ prediction_tensorflow.register._get_tensorflow_model_from(
+ ray_checkpoint_from_dict
+ )
+
+ assert ve.match(
+ regexp=r".* arg checkpoint should be a "
+ "ray.train.tensorflow.TensorflowCheckpoint .*"
+ )
+
+ @tc.rovminversion
+ def test_convert_checkpoint_to_tensorflow_model_succeed(
+ self, ray_tensorflow_checkpoint
+ ) -> None:
+ """Test if a TensorflowCheckpoint conversion is successful."""
+ # Act
+ model = prediction_tensorflow.register._get_tensorflow_model_from(
+ ray_tensorflow_checkpoint, model=test_prediction_utils.create_tf_model
+ )
+
+ # Assert
+ assert model is not None
+ values = model.predict([[1, 1, 1, 1]])
+ assert values[0] is not None
+
+ @tc.rovminversion
+ def test_register_tensorflow_succeed(
+ self,
+ ray_tensorflow_checkpoint,
+ upload_tensorflow_saved_model_mock,
+ save_tf_model,
+ ) -> None:
+ """Test if a TensorflowCheckpoint upload is successful."""
+ # Act
+ prediction_tensorflow.register_tensorflow(
+ ray_tensorflow_checkpoint,
+ artifact_uri=tc.ProjectConstants.TEST_ARTIFACT_URI,
+ _model=test_prediction_utils.create_tf_model,
+ use_gpu=False,
+ )
+
+ # Assert
+ upload_tensorflow_saved_model_mock.assert_called_once()
+ save_tf_model.assert_called_once_with(
+ f"{tc.ProjectConstants.TEST_ARTIFACT_URI}/ray-on-vertex-registered-tensorflow-model"
+ )
+
+ @tc.rovminversion
+ def test_register_tensorflow_initialized_succeed(
+ self,
+ ray_tensorflow_checkpoint,
+ upload_tensorflow_saved_model_mock,
+ save_tf_model,
+ ) -> None:
+ """Test if a TensorflowCheckpoint upload is successful when artifact_uri is None but initialized."""
+ aiplatform.init(
+ project=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ staging_bucket=tc.ProjectConstants.TEST_ARTIFACT_URI,
+ )
+ # Act
+ prediction_tensorflow.register_tensorflow(
+ ray_tensorflow_checkpoint,
+ _model=test_prediction_utils.create_tf_model,
+ use_gpu=False,
+ )
+
+ # Assert
+ upload_tensorflow_saved_model_mock.assert_called_once()
+ save_tf_model.assert_called_once_with(
+ f"{tc.ProjectConstants.TEST_ARTIFACT_URI}/ray-on-vertex-registered-tensorflow-model"
+ )
+
+ def test_register_tensorflowartifact_uri_is_none_raise_error(
+ self, ray_tensorflow_checkpoint
+ ) -> None:
+ """Test if a TensorflowCheckpoint upload gives ValueError."""
+ # Act and Assert
+ with pytest.raises(ValueError) as ve:
+ prediction_tensorflow.register_tensorflow(
+ checkpoint=ray_tensorflow_checkpoint,
+ artifact_uri=None,
+ _model=test_prediction_utils.create_tf_model,
+ )
+ assert ve.match(regexp=r".*'artifact_uri' should " "start with 'gs://'.*")
+
+ def test_register_tensorflowartifact_uri_not_gcs_uri_raise_error(
+ self, ray_tensorflow_checkpoint
+ ) -> None:
+ """Test if a TensorflowCheckpoint upload gives ValueError."""
+ # Act and Assert
+ with pytest.raises(ValueError) as ve:
+ prediction_tensorflow.register_tensorflow(
+ checkpoint=ray_tensorflow_checkpoint,
+ artifact_uri=tc.ProjectConstants.TEST_BAD_ARTIFACT_URI,
+ _model=test_prediction_utils.create_tf_model,
+ )
+ assert ve.match(regexp=r".*'artifact_uri' should " "start with 'gs://'.*")
+
+ # Sklearn Tests
+ @tc.rovminversion
+ def test_convert_checkpoint_to_sklearn_raise_exception(
+ self, ray_checkpoint_from_dict
+ ) -> None:
+ """Test if a checkpoint is not an instance of SklearnCheckpoint should
+ fail with exception ValueError."""
+
+ with pytest.raises(ValueError) as ve:
+ prediction_sklearn.register._get_estimator_from(ray_checkpoint_from_dict)
+ assert ve.match(
+ regexp=r".* arg checkpoint should be a "
+ "ray.train.sklearn.SklearnCheckpoint .*"
+ )
+
+ @tc.rovminversion
+ def test_convert_checkpoint_to_sklearn_model_succeed(
+ self, ray_sklearn_checkpoint
+ ) -> None:
+ """Test if a SklearnCheckpoint conversion is successful."""
+ # Act
+ estimator = prediction_sklearn.register._get_estimator_from(
+ ray_sklearn_checkpoint
+ )
+
+ # Assert
+ assert estimator is not None
+ y_pred = estimator.predict([[10, 11]])
+ assert y_pred[0] is not None
+
+ @tc.rovminversion
+ def test_register_sklearn_succeed(
+ self,
+ ray_sklearn_checkpoint,
+ upload_sklearn_mock,
+ pickle_dump,
+ gcs_utils_upload_to_gcs,
+ ) -> None:
+ """Test if a SklearnCheckpoint upload is successful."""
+ # Act
+ vertex_ai_model = prediction_sklearn.register_sklearn(
+ ray_sklearn_checkpoint,
+ artifact_uri=tc.ProjectConstants.TEST_ARTIFACT_URI,
+ )
+
+ # Assert
+ vertex_ai_model.uri = tc.ProjectConstants.TEST_MODEL_GCS_URI
+ vertex_ai_model.container_spec.image_uri = (
+ "us-docker.xxx/sklearn-cpu.1-0:latest"
+ )
+ upload_sklearn_mock.assert_called_once()
+ pickle_dump.assert_called_once()
+ gcs_utils_upload_to_gcs.assert_called_once()
+
+ @tc.rovminversion
+ def test_register_sklearn_initialized_succeed(
+ self,
+ ray_sklearn_checkpoint,
+ upload_sklearn_mock,
+ pickle_dump,
+ gcs_utils_upload_to_gcs,
+ ) -> None:
+ """Test if a SklearnCheckpoint upload is successful when artifact_uri is None but initialized."""
+ aiplatform.init(
+ project=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ staging_bucket=tc.ProjectConstants.TEST_ARTIFACT_URI,
+ )
+ # Act
+ vertex_ai_model = prediction_sklearn.register_sklearn(
+ ray_sklearn_checkpoint,
+ )
+
+ # Assert
+ vertex_ai_model.uri = tc.ProjectConstants.TEST_MODEL_GCS_URI
+ vertex_ai_model.container_spec.image_uri = (
+ "us-docker.xxx/sklearn-cpu.1-0:latest"
+ )
+ upload_sklearn_mock.assert_called_once()
+ pickle_dump.assert_called_once()
+ gcs_utils_upload_to_gcs.assert_called_once()
+
+ @tc.predictionrayversion
+ def test_register_sklearnartifact_uri_is_none_raise_error(
+ self, ray_sklearn_checkpoint
+ ) -> None:
+ """Test if a SklearnCheckpoint upload gives ValueError."""
+ # Act and Assert
+ with pytest.raises(ValueError) as ve:
+ prediction_sklearn.register_sklearn(
+ checkpoint=ray_sklearn_checkpoint,
+ artifact_uri=None,
+ )
+ assert ve.match(regexp=r".*'artifact_uri' should " "start with 'gs://'.*")
+
+ @tc.predictionrayversion
+ def test_register_sklearnartifact_uri_not_gcs_uri_raise_error(
+ self, ray_sklearn_checkpoint
+ ) -> None:
+ """Test if a SklearnCheckpoint upload gives ValueError."""
+ # Act and Assert
+ with pytest.raises(ValueError) as ve:
+ prediction_sklearn.register_sklearn(
+ checkpoint=ray_sklearn_checkpoint,
+ artifact_uri=tc.ProjectConstants.TEST_BAD_ARTIFACT_URI,
+ )
+ assert ve.match(regexp=r".*'artifact_uri' should " "start with 'gs://'.*")
+
+ # XGBoost Tests
+ @tc.xgbversion
+ @tc.rovminversion
+ def test_convert_checkpoint_to_xgboost_raise_exception(
+ self, ray_checkpoint_from_dict
+ ) -> None:
+ """Test if a checkpoint is not an instance of XGBoostCheckpoint should
+
+ fail with exception ValueError.
+ """
+
+ with pytest.raises(ValueError) as ve:
+ prediction_xgboost.register._get_xgboost_model_from(
+ ray_checkpoint_from_dict
+ )
+ assert ve.match(
+ regexp=r".* arg checkpoint should be a "
+ "ray.train.xgboost.XGBoostCheckpoint .*"
+ )
+
+ @tc.xgbversion
+ def test_convert_checkpoint_to_xgboost_model_succeed(
+ self, ray_xgboost_checkpoint
+ ) -> None:
+ """Test if a XGBoostCheckpoint conversion is successful."""
+ # Act
+ model = prediction_xgboost.register._get_xgboost_model_from(
+ ray_xgboost_checkpoint
+ )
+
+ # Assert
+ assert model is not None
+ y_pred = model.predict(xgboost.DMatrix(np.array([[1, 2]])))
+ assert y_pred[0] is not None
+
+ @tc.xgbversion
+ def test_register_xgboost_succeed(
+ self,
+ ray_xgboost_checkpoint,
+ upload_xgboost_mock,
+ pickle_dump,
+ gcs_utils_upload_to_gcs,
+ ) -> None:
+ """Test if a XGBoostCheckpoint upload is successful."""
+ # Act
+ vertex_ai_model = prediction_xgboost.register_xgboost(
+ ray_xgboost_checkpoint,
+ artifact_uri=tc.ProjectConstants.TEST_ARTIFACT_URI,
+ )
+
+ # Assert
+ vertex_ai_model.uri = tc.ProjectConstants.TEST_MODEL_GCS_URI
+ vertex_ai_model.container_spec.image_uri = (
+ "us-docker.xxx/xgboost-cpu.1-6:latest"
+ )
+ upload_xgboost_mock.assert_called_once()
+ pickle_dump.assert_called_once()
+ gcs_utils_upload_to_gcs.assert_called_once()
+
+ @tc.xgbversion
+ def test_register_xgboost_initialized_succeed(
+ self,
+ ray_xgboost_checkpoint,
+ upload_xgboost_mock,
+ pickle_dump,
+ gcs_utils_upload_to_gcs,
+ ) -> None:
+ """Test if a XGBoostCheckpoint upload is successful when artifact_uri is None but initialized."""
+ aiplatform.init(
+ project=tc.ProjectConstants.TEST_GCP_PROJECT_ID,
+ staging_bucket=tc.ProjectConstants.TEST_ARTIFACT_URI,
+ )
+ # Act
+ vertex_ai_model = prediction_xgboost.register_xgboost(
+ ray_xgboost_checkpoint,
+ )
+
+ # Assert
+ vertex_ai_model.uri = tc.ProjectConstants.TEST_MODEL_GCS_URI
+ vertex_ai_model.container_spec.image_uri = (
+ "us-docker.xxx/xgboost-cpu.1-6:latest"
+ )
+ upload_xgboost_mock.assert_called_once()
+ pickle_dump.assert_called_once()
+ gcs_utils_upload_to_gcs.assert_called_once()
+
+ @tc.xgbversion
+ def test_register_xgboostartifact_uri_is_none_raise_error(
+ self, ray_xgboost_checkpoint
+ ) -> None:
+ """Test if a XGBoostCheckpoint upload gives ValueError."""
+ # Act and Assert
+ with pytest.raises(ValueError) as ve:
+ prediction_xgboost.register_xgboost(
+ checkpoint=ray_xgboost_checkpoint,
+ artifact_uri=None,
+ )
+ assert ve.match(regexp=r".*'artifact_uri' should " "start with 'gs://'.*")
+
+ @tc.xgbversion
+ def test_register_xgboostartifact_uri_not_gcs_uri_raise_error(
+ self, ray_xgboost_checkpoint
+ ) -> None:
+ """Test if a XGBoostCheckpoint upload gives ValueError."""
+ # Act and Assert
+ with pytest.raises(ValueError) as ve:
+ prediction_xgboost.register_xgboost(
+ checkpoint=ray_xgboost_checkpoint,
+ artifact_uri=tc.ProjectConstants.TEST_BAD_ARTIFACT_URI,
+ )
+ assert ve.match(regexp=r".*'artifact_uri' should " "start with 'gs://'.*")
+
+ # Pytorch Tests
+ @tc.rovminversion
+ def test_convert_checkpoint_to_torch_model_raises_exception(
+ self, ray_checkpoint_from_dict
+ ) -> None:
+ """Test if a checkpoint is not an instance of TorchCheckpoint should
+ fail with exception ValueError."""
+ with pytest.raises(ValueError):
+ prediction_torch.register.get_pytorch_model_from(ray_checkpoint_from_dict)
+
+ @tc.predictionrayversion
+ def test_convert_checkpoint_to_pytorch_model_succeed(
+ self, ray_torch_checkpoint
+ ) -> None:
+ """Test if a TorchCheckpoint conversion is successful."""
+ # Act
+ model = prediction_torch.register.get_pytorch_model_from(ray_torch_checkpoint)
+
+ # Assert
+ assert model is not None
+ values = model(torch.tensor([10000], dtype=torch.float))
+ print(values[0])
+ assert values[0] is not None
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_ray_utils.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_ray_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..713aa37ddcf42ebefadc9d1b3bee6735210f8dc2
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_ray_utils.py
@@ -0,0 +1,46 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud.aiplatform import vertex_ray
+import test_constants as tc
+import pytest
+
+
+class TestUtils:
+ def test_get_persistent_resource_success(self, persistent_client_mock):
+ response = vertex_ray.util._gapic_utils.get_persistent_resource(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+
+ persistent_client_mock.assert_called_once()
+ assert response == tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL
+
+ def test_get_persistent_resource_stopping(self, persistent_client_stopping_mock):
+ with pytest.raises(RuntimeError) as e:
+ vertex_ray.util._gapic_utils.get_persistent_resource(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+
+ persistent_client_stopping_mock.assert_called_once()
+ e.match(regexp=r"The cluster is stopping.")
+
+ def test_get_persistent_resource_error(self, persistent_client_error_mock):
+ with pytest.raises(RuntimeError) as e:
+ vertex_ray.util._gapic_utils.get_persistent_resource(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ )
+
+ persistent_client_error_mock.assert_called_once()
+ e.match(regexp=r"The cluster encountered an error.")
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_vertex_ray_client.py b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_vertex_ray_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..c38b1727cbb1b047528b81121222ac861a5ad01a
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertex_ray/test_vertex_ray_client.py
@@ -0,0 +1,229 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import importlib
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import vertex_ray
+import test_constants as tc
+import mock
+import pytest
+import ray
+
+
+# -*- coding: utf-8 -*-
+
+_TEST_CLIENT_CONTEXT = ray.client_builder.ClientContext(
+ dashboard_url=tc.ClusterConstants.TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ python_version="MOCK_PYTHON_VERSION",
+ ray_version="MOCK_RAY_VERSION",
+ ray_commit="MOCK_RAY_COMMIT",
+ protocol_version="MOCK_PROTOCOL_VERSION",
+ _num_clients=1,
+ _context_to_restore=None,
+)
+
+_TEST_VERTEX_RAY_CLIENT_CONTEXT = vertex_ray.client_builder._VertexRayClientContext(
+ persistent_resource_id="MOCK_PERSISTENT_RESOURCE_ID",
+ ray_head_uris={
+ "RAY_DASHBOARD_URI": tc.ClusterConstants.TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ "RAY_HEAD_NODE_INTERNAL_IP": tc.ClusterConstants.TEST_VERTEX_RAY_HEAD_NODE_IP,
+ },
+ ray_client_context=_TEST_CLIENT_CONTEXT,
+)
+
+_TEST_VERTEX_RAY_CLIENT_CONTEXT_PUBLIC = (
+ vertex_ray.client_builder._VertexRayClientContext(
+ persistent_resource_id="MOCK_PERSISTENT_RESOURCE_ID",
+ ray_head_uris={
+ "RAY_DASHBOARD_URI": tc.ClusterConstants.TEST_VERTEX_RAY_DASHBOARD_ADDRESS,
+ "RAY_CLIENT_ENDPOINT": tc.ClusterConstants.TEST_VERTEX_RAY_CLIENT_ENDPOINT,
+ },
+ ray_client_context=_TEST_CLIENT_CONTEXT,
+ )
+)
+
+
+@pytest.fixture
+def ray_client_init_mock():
+ with mock.patch.object(ray.ClientBuilder, "__init__") as ray_client_init:
+ ray_client_init.return_value = None
+ yield ray_client_init
+
+
+@pytest.fixture
+def ray_client_connect_mock():
+ with mock.patch.object(ray.ClientBuilder, "connect") as ray_client_connect:
+ ray_client_connect.return_value = _TEST_CLIENT_CONTEXT
+ yield ray_client_connect
+
+
+@pytest.fixture
+def get_persistent_resource_status_running_mock():
+ with mock.patch.object(
+ vertex_ray.util._gapic_utils, "get_persistent_resource"
+ ) as resolve_head_ip:
+ resolve_head_ip.return_value = tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL
+ yield resolve_head_ip
+
+
+@pytest.fixture
+def get_persistent_resource_status_running_no_ray_mock():
+ with mock.patch.object(
+ vertex_ray.util._gapic_utils, "get_persistent_resource"
+ ) as resolve_head_ip:
+ resolve_head_ip.return_value = tc.ClusterConstants.TEST_RESPONSE_NO_RAY_RUNNING
+ yield resolve_head_ip
+
+
+@pytest.fixture
+def get_persistent_resource_status_running_byosa_public_mock():
+ with mock.patch.object(
+ vertex_ray.util._gapic_utils, "get_persistent_resource"
+ ) as resolve_head_ip:
+ resolve_head_ip.return_value = tc.ClusterConstants.TEST_RESPONSE_1_POOL_BYOSA
+ yield resolve_head_ip
+
+
+@pytest.fixture
+def get_persistent_resource_status_running_byosa_private_mock():
+ with mock.patch.object(
+ vertex_ray.util._gapic_utils, "get_persistent_resource"
+ ) as resolve_head_ip:
+ resolve_head_ip.return_value = (
+ tc.ClusterConstants.TEST_RESPONSE_1_POOL_BYOSA_PRIVATE
+ )
+ yield resolve_head_ip
+
+
+class TestClientBuilder:
+ def setup_method(self):
+ importlib.reload(aiplatform.initializer)
+ importlib.reload(aiplatform)
+
+ def teardown_method(self):
+ aiplatform.initializer.global_pool.shutdown(wait=True)
+
+ @tc.rovminversion
+ @pytest.mark.usefixtures("get_persistent_resource_status_running_mock")
+ def test_init_with_full_resource_name(
+ self,
+ ray_client_init_mock,
+ ):
+ vertex_ray.ClientBuilder(tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS)
+ ray_client_init_mock.assert_called_once_with(
+ tc.ClusterConstants.TEST_VERTEX_RAY_HEAD_NODE_IP,
+ )
+
+ @tc.rovminversion
+ @pytest.mark.usefixtures(
+ "get_persistent_resource_status_running_mock", "google_auth_mock"
+ )
+ def test_init_with_cluster_name(
+ self,
+ ray_client_init_mock,
+ get_project_number_mock,
+ ):
+ aiplatform.init(project=tc.ProjectConstants.TEST_GCP_PROJECT_ID)
+
+ vertex_ray.ClientBuilder(tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID)
+ get_project_number_mock.assert_called_once_with(
+ name="projects/{}".format(tc.ProjectConstants.TEST_GCP_PROJECT_ID)
+ )
+ ray_client_init_mock.assert_called_once_with(
+ tc.ClusterConstants.TEST_VERTEX_RAY_HEAD_NODE_IP,
+ )
+
+ @tc.rovminversion
+ @pytest.mark.usefixtures("get_persistent_resource_status_running_mock")
+ def test_connect_running(self, ray_client_connect_mock):
+ connect_result = vertex_ray.ClientBuilder(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ ).connect()
+ ray_client_connect_mock.assert_called_once_with()
+ assert connect_result == _TEST_VERTEX_RAY_CLIENT_CONTEXT
+ assert (
+ connect_result.persistent_resource_id
+ == tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID
+ )
+
+ @tc.rovminversion
+ @pytest.mark.usefixtures("get_persistent_resource_status_running_no_ray_mock")
+ def test_connect_running_no_ray(self, ray_client_connect_mock):
+ expected_message = (
+ "Ray Cluster ",
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ " failed to start Head node properly.",
+ )
+ with pytest.raises(ValueError) as exception:
+ vertex_ray.ClientBuilder(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ ).connect()
+
+ ray_client_connect_mock.assert_called_once_with()
+ assert str(exception.value) == expected_message
+
+ @tc.rovminversion
+ @pytest.mark.usefixtures("get_persistent_resource_status_running_byosa_public_mock")
+ def test_connect_running_byosa_public(self, ray_client_connect_mock):
+ connect_result = vertex_ray.ClientBuilder(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ ).connect()
+ ray_client_connect_mock.assert_called_once_with()
+ assert connect_result == _TEST_VERTEX_RAY_CLIENT_CONTEXT_PUBLIC
+ assert (
+ connect_result.persistent_resource_id
+ == tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID
+ )
+
+ @tc.rovminversion
+ @pytest.mark.usefixtures(
+ "get_persistent_resource_status_running_byosa_private_mock"
+ )
+ def test_connect_running_byosa_private(self, ray_client_connect_mock):
+ expected_message = (
+ "Ray Cluster ",
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID,
+ " failed to start Head node properly because custom service"
+ " account isn't supported in peered VPC network. ",
+ )
+ with pytest.raises(ValueError) as exception:
+ vertex_ray.ClientBuilder(
+ tc.ClusterConstants.TEST_VERTEX_RAY_PR_ADDRESS
+ ).connect()
+
+ ray_client_connect_mock.assert_called_once_with()
+ assert str(exception.value) == expected_message
+
+ @tc.rovminversion
+ @pytest.mark.parametrize(
+ "address",
+ [
+ "bad/format/address",
+ "must/have/exactly/five/backslashes/no/more/or/less",
+ "do/not/append/a/trailing/backslash/",
+ tc.ClusterConstants.TEST_VERTEX_RAY_HEAD_NODE_IP, # cannot input raw head node ip
+ ],
+ )
+ def test_bad_format_address(self, address):
+ expected_message = (
+ "[Ray on Vertex AI]: Address must be in the following format: "
+ "vertex_ray://projects//locations//"
+ "persistentResources/ or vertex_ray://."
+ )
+
+ with pytest.raises(ValueError) as exception:
+ vertex_ray.ClientBuilder(address)
+
+ assert str(exception.value) == expected_message
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/conftest.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d5bb4af45568d12d58e6705e505be24ecef64cf
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/conftest.py
@@ -0,0 +1,559 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import copy
+import os
+import shutil
+import tempfile
+from typing import Any
+from unittest import mock
+from unittest.mock import patch
+import uuid
+
+from google import auth
+from google.api_core import operation as ga_operation
+from google.auth import credentials as auth_credentials
+from google.cloud import storage
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform.compat.services import (
+ feature_online_store_admin_service_client,
+)
+from google.cloud.aiplatform.compat.services import (
+ feature_registry_service_client,
+)
+from google.cloud.aiplatform_v1beta1.services.feature_registry_service import (
+ FeatureRegistryServiceClient,
+)
+from google.cloud.aiplatform.compat.services import job_service_client
+from google.cloud.aiplatform.compat.types import (
+ custom_job as gca_custom_job_compat,
+)
+from google.cloud.aiplatform.compat.types import io as gca_io_compat
+from google.cloud.aiplatform.compat.types import (
+ job_state as gca_job_state_compat,
+)
+from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import (
+ PersistentResourceServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.types.persistent_resource import (
+ PersistentResource,
+ ResourcePool,
+ ResourceRuntimeSpec,
+ ServiceAccountSpec,
+)
+from feature_store_constants import (
+ _TEST_BIGTABLE_FOS1,
+ _TEST_EMBEDDING_FV1,
+ _TEST_ESF_OPTIMIZED_FOS,
+ _TEST_ESF_OPTIMIZED_FOS2,
+ _TEST_FG1,
+ _TEST_FG1_F1,
+ _TEST_FG1_F2,
+ _TEST_FG1_FM1,
+ _TEST_FV1,
+ _TEST_FV3,
+ _TEST_OPTIMIZED_EMBEDDING_FV,
+ _TEST_OPTIMIZED_FV1,
+ _TEST_OPTIMIZED_FV2,
+ _TEST_PSC_OPTIMIZED_FOS,
+)
+import pytest
+
+
+_TEST_PROJECT = "test-project"
+_TEST_PROJECT_NUMBER = "12345678"
+_TEST_LOCATION = "us-central1"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_DISPLAY_NAME = f"{_TEST_PARENT}/customJobs/12345"
+_TEST_BUCKET_NAME = "gs://test_bucket"
+_TEST_BASE_OUTPUT_DIR = f"{_TEST_BUCKET_NAME}/test_base_output_dir"
+_TEST_SERVICE_ACCOUNT = f"{_TEST_PROJECT_NUMBER}-compute@developer.gserviceaccount.com"
+
+_TEST_INPUTS = [
+ "--arg_0=string_val_0",
+ "--arg_1=string_val_1",
+ "--arg_2=int_val_0",
+ "--arg_3=int_val_1",
+]
+_TEST_IMAGE_URI = "test_image_uri"
+_TEST_MACHINE_TYPE = "test_machine_type"
+_TEST_WORKER_POOL_SPEC = [
+ {
+ "machine_spec": {
+ "machine_type": _TEST_MACHINE_TYPE,
+ },
+ "replica_count": 1,
+ "container_spec": {
+ "image_uri": _TEST_IMAGE_URI,
+ "args": _TEST_INPUTS,
+ },
+ }
+]
+_TEST_CUSTOM_JOB_PROTO = gca_custom_job_compat.CustomJob(
+ display_name=_TEST_DISPLAY_NAME,
+ job_spec={
+ "worker_pool_specs": _TEST_WORKER_POOL_SPEC,
+ "base_output_directory": gca_io_compat.GcsDestination(
+ output_uri_prefix=_TEST_BASE_OUTPUT_DIR
+ ),
+ },
+ labels={"trained_by_vertex_ai": "true"},
+)
+
+_TEST_REQUEST_RUNNING_DEFAULT = PersistentResource(
+ resource_runtime_spec=ResourceRuntimeSpec(service_account_spec=ServiceAccountSpec())
+)
+resource_pool = ResourcePool()
+resource_pool.machine_spec.machine_type = "n1-standard-4"
+resource_pool.replica_count = 1
+resource_pool.disk_spec.boot_disk_type = "pd-ssd"
+resource_pool.disk_spec.boot_disk_size_gb = 100
+_TEST_REQUEST_RUNNING_DEFAULT.resource_pools = [resource_pool]
+
+
+_TEST_PERSISTENT_RESOURCE_RUNNING = PersistentResource(state="RUNNING")
+_TEST_PERSISTENT_RESOURCE_SERVICE_ACCOUNT_RUNNING = PersistentResource(
+ state="RUNNING",
+ resource_runtime_spec=ResourceRuntimeSpec(
+ service_account_spec=ServiceAccountSpec(
+ enable_custom_service_account=True, service_account=_TEST_SERVICE_ACCOUNT
+ )
+ ),
+)
+
+
+@pytest.fixture(scope="module")
+def google_auth_mock():
+ with mock.patch.object(auth, "default") as auth_mock:
+ auth_mock.return_value = (
+ auth_credentials.AnonymousCredentials(),
+ "test-project",
+ )
+ yield auth_mock
+
+
+@pytest.fixture
+def mock_storage_blob(mock_filesystem):
+ """Mocks the storage Blob API.
+
+ Replaces the Blob factory method by a simpler method that records the
+ destination_file_uri and, instead of uploading the file to gcs, copying it
+ to the fake local file system.
+ """
+
+ class MockStorageBlob:
+ """Mocks storage.Blob."""
+
+ def __init__(self, destination_file_uri: str, client: Any):
+ del client
+ self.destination_file_uri = destination_file_uri
+
+ @classmethod
+ def from_string(cls, destination_file_uri: str, client: Any):
+ if destination_file_uri.startswith("gs://"):
+ # Do not copy files to gs:// since it's not a valid path in the fake
+ # filesystem.
+ destination_file_uri = destination_file_uri.split("/")[-1]
+ return cls(destination_file_uri, client)
+
+ def upload_from_filename(self, filename: str):
+ shutil.copy(filename, self.destination_file_uri)
+
+ def download_to_filename(self, filename: str):
+ """To be replaced by an implementation of testing needs."""
+ raise NotImplementedError
+
+ with mock.patch.object(storage, "Blob", new=MockStorageBlob) as storage_blob:
+ yield storage_blob
+
+
+@pytest.fixture
+def mock_storage_blob_tmp_dir(tmp_path):
+ """Mocks the storage Blob API.
+
+ Replaces the Blob factory method by a simpler method that records the
+ destination_file_uri and, instead of uploading the file to gcs, copying it
+ to a temporaray path in the local file system.
+ """
+
+ class MockStorageBlob:
+ """Mocks storage.Blob."""
+
+ def __init__(self, destination_file_uri: str, client: Any):
+ del client
+ self.destination_file_uri = destination_file_uri
+
+ @classmethod
+ def from_string(cls, destination_file_uri: str, client: Any):
+ if destination_file_uri.startswith("gs://"):
+ # Do not copy files to gs:// since it's not a valid path in the fake
+ # filesystem.
+ destination_file_uri = os.fspath(
+ tmp_path / destination_file_uri.split("/")[-1]
+ )
+ return cls(destination_file_uri, client)
+
+ def upload_from_filename(self, filename: str):
+ shutil.copy(filename, self.destination_file_uri)
+
+ def download_to_filename(self, filename: str):
+ """To be replaced by an implementation of testing needs."""
+ raise NotImplementedError
+
+ with mock.patch.object(storage, "Blob", new=MockStorageBlob) as storage_blob:
+ yield storage_blob
+
+
+@pytest.fixture
+def mock_gcs_upload():
+ def fake_upload_to_gcs(local_filename: str, gcs_destination: str):
+ if gcs_destination.startswith("gs://") or gcs_destination.startswith("gcs/"):
+ raise ValueError("Please don't use the real gcs path with mock_gcs_upload.")
+ # instead of upload, just copy the file.
+ shutil.copyfile(local_filename, gcs_destination)
+
+ with mock.patch(
+ "google.cloud.aiplatform.utils.gcs_utils.upload_to_gcs",
+ new=fake_upload_to_gcs,
+ ) as gcs_upload:
+ yield gcs_upload
+
+
+@pytest.fixture
+def mock_temp_dir():
+ with mock.patch.object(tempfile, "TemporaryDirectory") as temp_dir_mock:
+ yield temp_dir_mock
+
+
+@pytest.fixture
+def mock_named_temp_file():
+ with mock.patch.object(tempfile, "NamedTemporaryFile") as named_temp_file_mock:
+ yield named_temp_file_mock
+
+
+@pytest.fixture
+def mock_create_custom_job():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_custom_job"
+ ) as create_custom_job_mock:
+ custom_job_proto = copy.deepcopy(_TEST_CUSTOM_JOB_PROTO)
+ custom_job_proto.name = _TEST_DISPLAY_NAME
+ custom_job_proto.state = gca_job_state_compat.JobState.JOB_STATE_PENDING
+ create_custom_job_mock.return_value = custom_job_proto
+ yield create_custom_job_mock
+
+
+@pytest.fixture
+def mock_get_custom_job_succeeded():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_custom_job"
+ ) as get_custom_job_mock:
+ custom_job_proto = copy.deepcopy(_TEST_CUSTOM_JOB_PROTO)
+ custom_job_proto.name = _TEST_DISPLAY_NAME
+ custom_job_proto.state = gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED
+ get_custom_job_mock.return_value = custom_job_proto
+ yield get_custom_job_mock
+
+
+@pytest.fixture
+def mock_blob_upload_from_filename():
+ with mock.patch.object(storage.Blob, "upload_from_filename") as upload_mock:
+ yield upload_mock
+
+
+@pytest.fixture
+def mock_blob_download_to_filename():
+ with mock.patch.object(storage.Blob, "download_to_filename") as download_mock:
+ yield download_mock
+
+
+@pytest.fixture
+def mock_uuid():
+ with mock.patch.object(uuid, "uuid4") as uuid_mock:
+ uuid_mock.return_value = 0
+ yield uuid_mock
+
+
+@pytest.fixture
+def base_logger_mock():
+ with patch.object(
+ base._LOGGER,
+ "info",
+ wraps=base._LOGGER.info,
+ ) as logger_mock:
+ yield logger_mock
+
+
+@pytest.fixture
+def persistent_resource_running_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "get_persistent_resource",
+ ) as persistent_resource_running_mock:
+ persistent_resource_running_mock.return_value = (
+ _TEST_PERSISTENT_RESOURCE_RUNNING
+ )
+ yield persistent_resource_running_mock
+
+
+@pytest.fixture
+def persistent_resource_service_account_running_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "get_persistent_resource",
+ ) as persistent_resource_service_account_running_mock:
+ persistent_resource_service_account_running_mock.return_value = (
+ _TEST_PERSISTENT_RESOURCE_SERVICE_ACCOUNT_RUNNING
+ )
+ yield persistent_resource_service_account_running_mock
+
+
+@pytest.fixture
+def persistent_resource_exception_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "get_persistent_resource",
+ ) as persistent_resource_exception_mock:
+ persistent_resource_exception_mock.side_effect = Exception
+ yield persistent_resource_exception_mock
+
+
+@pytest.fixture
+def create_persistent_resource_default_mock():
+ with mock.patch.object(
+ PersistentResourceServiceClient,
+ "create_persistent_resource",
+ ) as create_persistent_resource_default_mock:
+ create_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation)
+ create_persistent_resource_lro_mock.result.return_value = (
+ _TEST_REQUEST_RUNNING_DEFAULT
+ )
+ create_persistent_resource_default_mock.return_value = (
+ create_persistent_resource_lro_mock
+ )
+ yield create_persistent_resource_default_mock
+
+
+@pytest.fixture
+def get_fos_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_online_store",
+ ) as get_fos_mock:
+ get_fos_mock.return_value = _TEST_BIGTABLE_FOS1
+ yield get_fos_mock
+
+
+@pytest.fixture
+def get_esf_optimized_fos_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_online_store",
+ ) as get_fos_mock:
+ get_fos_mock.return_value = _TEST_ESF_OPTIMIZED_FOS
+ yield get_fos_mock
+
+
+@pytest.fixture
+def get_psc_optimized_fos_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_online_store",
+ ) as get_fos_mock:
+ get_fos_mock.return_value = _TEST_PSC_OPTIMIZED_FOS
+ yield get_fos_mock
+
+
+@pytest.fixture
+def get_esf_optimized_fos_no_endpoint_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_online_store",
+ ) as get_fos_mock:
+ get_fos_mock.return_value = _TEST_ESF_OPTIMIZED_FOS2
+ yield get_fos_mock
+
+
+@pytest.fixture
+def create_bigtable_fos_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "create_feature_online_store",
+ ) as create_bigtable_fos_mock:
+ create_fos_lro_mock = mock.Mock(ga_operation.Operation)
+ create_fos_lro_mock.result.return_value = _TEST_BIGTABLE_FOS1
+ create_bigtable_fos_mock.return_value = create_fos_lro_mock
+ yield create_bigtable_fos_mock
+
+
+@pytest.fixture
+def create_esf_optimized_fos_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "create_feature_online_store",
+ ) as create_esf_optimized_fos_mock:
+ create_fos_lro_mock = mock.Mock(ga_operation.Operation)
+ create_fos_lro_mock.result.return_value = _TEST_ESF_OPTIMIZED_FOS
+ create_esf_optimized_fos_mock.return_value = create_fos_lro_mock
+ yield create_esf_optimized_fos_mock
+
+
+@pytest.fixture
+def create_psc_optimized_fos_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "create_feature_online_store",
+ ) as create_psc_optimized_fos_mock:
+ create_fos_lro_mock = mock.Mock(ga_operation.Operation)
+ create_fos_lro_mock.result.return_value = _TEST_PSC_OPTIMIZED_FOS
+ create_psc_optimized_fos_mock.return_value = create_fos_lro_mock
+ yield create_psc_optimized_fos_mock
+
+
+@pytest.fixture
+def get_fv_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_view",
+ ) as get_fv_mock:
+ get_fv_mock.return_value = _TEST_FV1
+ yield get_fv_mock
+
+
+@pytest.fixture
+def get_rag_fv_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_view",
+ ) as get_rag_fv_mock:
+ get_rag_fv_mock.return_value = _TEST_FV3
+ yield get_rag_fv_mock
+
+
+@pytest.fixture
+def create_bq_fv_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "create_feature_view",
+ ) as create_bq_fv_mock:
+ create_bq_fv_lro_mock = mock.Mock(ga_operation.Operation)
+ create_bq_fv_lro_mock.result.return_value = _TEST_FV1
+ create_bq_fv_mock.return_value = create_bq_fv_lro_mock
+ yield create_bq_fv_mock
+
+
+@pytest.fixture
+def create_rag_fv_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "create_feature_view",
+ ) as create_rag_fv_mock:
+ create_rag_fv_lro_mock = mock.Mock(ga_operation.Operation)
+ create_rag_fv_lro_mock.result.return_value = _TEST_FV3
+ create_rag_fv_mock.return_value = create_rag_fv_lro_mock
+ yield create_rag_fv_mock
+
+
+@pytest.fixture
+def create_embedding_fv_from_bq_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "create_feature_view",
+ ) as create_embedding_fv_mock:
+ create_embedding_fv_mock_lro = mock.Mock(ga_operation.Operation)
+ create_embedding_fv_mock_lro.result.return_value = _TEST_OPTIMIZED_EMBEDDING_FV
+ create_embedding_fv_mock.return_value = create_embedding_fv_mock_lro
+ yield create_embedding_fv_mock
+
+
+@pytest.fixture
+def get_optimized_embedding_fv_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_view",
+ ) as get_fv_mock:
+ get_fv_mock.return_value = _TEST_OPTIMIZED_EMBEDDING_FV
+ yield get_fv_mock
+
+
+@pytest.fixture
+def get_optimized_fv_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_view",
+ ) as get_optimized_fv_mock:
+ get_optimized_fv_mock.return_value = _TEST_OPTIMIZED_FV1
+ yield get_optimized_fv_mock
+
+
+@pytest.fixture
+def get_embedding_fv_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_view",
+ ) as get_embedding_fv_mock:
+ get_embedding_fv_mock.return_value = _TEST_EMBEDDING_FV1
+ yield get_embedding_fv_mock
+
+
+@pytest.fixture
+def get_optimized_fv_no_endpointmock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_view",
+ ) as get_optimized_fv_no_endpointmock:
+ get_optimized_fv_no_endpointmock.return_value = _TEST_OPTIMIZED_FV2
+ yield get_optimized_fv_no_endpointmock
+
+
+@pytest.fixture
+def get_fg_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "get_feature_group",
+ ) as get_fg_mock:
+ get_fg_mock.return_value = _TEST_FG1
+ yield get_fg_mock
+
+
+@pytest.fixture
+def get_feature_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "get_feature",
+ ) as get_fg_mock:
+ get_fg_mock.return_value = _TEST_FG1_F1
+ yield get_fg_mock
+
+
+@pytest.fixture
+def get_feature_with_version_column_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "get_feature",
+ ) as get_fg_mock:
+ get_fg_mock.return_value = _TEST_FG1_F2
+ yield get_fg_mock
+
+
+@pytest.fixture
+def get_feature_monitor_mock():
+ with patch.object(
+ FeatureRegistryServiceClient,
+ "get_feature_monitor",
+ ) as get_fg_mock:
+ get_fg_mock.return_value = _TEST_FG1_FM1
+ yield get_fg_mock
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/feature_store_constants.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/feature_store_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..4203f2d014c117e095924d6f39a1f3a44379adec
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/feature_store_constants.py
@@ -0,0 +1,477 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud.aiplatform.compat import types
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+
+# Test feature online store 1
+_TEST_BIGTABLE_FOS1_ID = "my_fos1"
+_TEST_BIGTABLE_FOS1_PATH = (
+ f"{_TEST_PARENT}/featureOnlineStores/{_TEST_BIGTABLE_FOS1_ID}"
+)
+_TEST_BIGTABLE_FOS1_LABELS = {"my_key": "my_fos1"}
+_TEST_BIGTABLE_FOS1 = types.feature_online_store.FeatureOnlineStore(
+ name=_TEST_BIGTABLE_FOS1_PATH,
+ bigtable=types.feature_online_store.FeatureOnlineStore.Bigtable(
+ auto_scaling=types.feature_online_store.FeatureOnlineStore.Bigtable.AutoScaling(
+ min_node_count=1,
+ max_node_count=2,
+ cpu_utilization_target=50,
+ )
+ ),
+ labels=_TEST_BIGTABLE_FOS1_LABELS,
+)
+
+# Test feature online store 2
+_TEST_BIGTABLE_FOS2_ID = "my_fos2"
+_TEST_BIGTABLE_FOS2_PATH = (
+ f"{_TEST_PARENT}/featureOnlineStores/{_TEST_BIGTABLE_FOS2_ID}"
+)
+_TEST_BIGTABLE_FOS2_LABELS = {"my_key": "my_fos2"}
+_TEST_BIGTABLE_FOS2 = types.feature_online_store.FeatureOnlineStore(
+ name=_TEST_BIGTABLE_FOS2_PATH,
+ bigtable=types.feature_online_store.FeatureOnlineStore.Bigtable(
+ auto_scaling=types.feature_online_store.FeatureOnlineStore.Bigtable.AutoScaling(
+ min_node_count=2,
+ max_node_count=3,
+ cpu_utilization_target=60,
+ )
+ ),
+ labels=_TEST_BIGTABLE_FOS2_LABELS,
+)
+
+# Test feature online store 3
+_TEST_BIGTABLE_FOS3_ID = "my_fos3"
+_TEST_BIGTABLE_FOS3_PATH = (
+ f"{_TEST_PARENT}/featureOnlineStores/{_TEST_BIGTABLE_FOS3_ID}"
+)
+_TEST_BIGTABLE_FOS3_LABELS = {"my_key": "my_fos3"}
+_TEST_BIGTABLE_FOS3 = types.feature_online_store.FeatureOnlineStore(
+ name=_TEST_BIGTABLE_FOS3_PATH,
+ bigtable=types.feature_online_store.FeatureOnlineStore.Bigtable(
+ auto_scaling=types.feature_online_store.FeatureOnlineStore.Bigtable.AutoScaling(
+ min_node_count=3,
+ max_node_count=4,
+ cpu_utilization_target=70,
+ )
+ ),
+ labels=_TEST_BIGTABLE_FOS3_LABELS,
+)
+
+# Test feature online store for optimized with esf endpoint
+_TEST_ESF_OPTIMIZED_FOS_ID = "my_esf_optimized_fos"
+_TEST_ESF_OPTIMIZED_FOS_PATH = (
+ f"{_TEST_PARENT}/featureOnlineStores/{_TEST_ESF_OPTIMIZED_FOS_ID}"
+)
+_TEST_ESF_OPTIMIZED_FOS_LABELS = {"my_key": "my_esf_optimized_fos"}
+_TEST_ESF_OPTIMIZED_FOS = types.feature_online_store.FeatureOnlineStore(
+ name=_TEST_ESF_OPTIMIZED_FOS_PATH,
+ optimized=types.feature_online_store.FeatureOnlineStore.Optimized(),
+ dedicated_serving_endpoint=types.feature_online_store_v1.FeatureOnlineStore.DedicatedServingEndpoint(
+ public_endpoint_domain_name="test-esf-endpoint",
+ ),
+ labels=_TEST_ESF_OPTIMIZED_FOS_LABELS,
+)
+
+# Test feature online store for optimized with psc endpoint
+_TEST_PSC_OPTIMIZED_FOS_ID = "my_psc_optimized_fos"
+_TEST_PSC_OPTIMIZED_FOS_PATH = (
+ f"{_TEST_PARENT}/featureOnlineStores/{_TEST_PSC_OPTIMIZED_FOS_ID}"
+)
+_TEST_PSC_OPTIMIZED_FOS_LABELS = {"my_key": "my_psc_optimized_fos"}
+_TEST_PSC_PROJECT_ALLOWLIST = ["project-1", "project-2"]
+_TEST_PSC_OPTIMIZED_FOS = types.feature_online_store_v1.FeatureOnlineStore(
+ name=_TEST_PSC_OPTIMIZED_FOS_PATH,
+ optimized=types.feature_online_store_v1.FeatureOnlineStore.Optimized(),
+ dedicated_serving_endpoint=types.feature_online_store_v1.FeatureOnlineStore.DedicatedServingEndpoint(
+ private_service_connect_config=types.service_networking_v1.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PSC_PROJECT_ALLOWLIST,
+ ),
+ ),
+ labels=_TEST_PSC_OPTIMIZED_FOS_LABELS,
+)
+
+_TEST_FOS_LIST = [_TEST_BIGTABLE_FOS1, _TEST_BIGTABLE_FOS2, _TEST_BIGTABLE_FOS3]
+
+# Test feature online store for optimized with esf endpoint but sync has not run yet.
+_TEST_ESF_OPTIMIZED_FOS2_ID = "my_esf_optimised_fos2"
+_TEST_ESF_OPTIMIZED_FOS2_PATH = (
+ f"{_TEST_PARENT}/featureOnlineStores/{_TEST_ESF_OPTIMIZED_FOS2_ID}"
+)
+_TEST_ESF_OPTIMIZED_FOS2_LABELS = {"my_key": "my_esf_optimized_fos2"}
+_TEST_ESF_OPTIMIZED_FOS2 = types.feature_online_store_v1.FeatureOnlineStore(
+ name=_TEST_ESF_OPTIMIZED_FOS2_PATH,
+ optimized=types.feature_online_store_v1.FeatureOnlineStore.Optimized(),
+ dedicated_serving_endpoint=types.feature_online_store_v1.FeatureOnlineStore.DedicatedServingEndpoint(),
+ labels=_TEST_ESF_OPTIMIZED_FOS_LABELS,
+)
+
+
+# Test feature view 1
+_TEST_FV1_ID = "my_fv1"
+_TEST_FV1_PATH = f"{_TEST_BIGTABLE_FOS1_PATH}/featureViews/my_fv1"
+_TEST_FV1_LABELS = {"my_key": "my_fv1"}
+_TEST_FV1_BQ_URI = f"bq://{_TEST_PROJECT}.my_dataset.my_table"
+_TEST_FV1_ENTITY_ID_COLUMNS = ["entity_id"]
+_TEST_FV1 = types.feature_view.FeatureView(
+ name=_TEST_FV1_PATH,
+ big_query_source=types.feature_view.FeatureView.BigQuerySource(
+ uri=_TEST_FV1_BQ_URI,
+ entity_id_columns=_TEST_FV1_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FV1_LABELS,
+)
+
+# Test feature view 2
+_TEST_FV2_ID = "my_fv2"
+_TEST_FV2_PATH = f"{_TEST_BIGTABLE_FOS1_PATH}/featureViews/my_fv2"
+_TEST_FV2_LABELS = {"my_key": "my_fv2"}
+_TEST_FV2_BQ_URI = f"bq://{_TEST_PROJECT}.my_dataset.my_table"
+_TEST_FV2_ENTITY_ID_COLUMNS = ["entity_id"]
+_TEST_FV2 = types.feature_view.FeatureView(
+ name=_TEST_FV2_PATH,
+ big_query_source=types.feature_view.FeatureView.BigQuerySource(
+ uri=_TEST_FV2_BQ_URI,
+ entity_id_columns=_TEST_FV2_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FV2_LABELS,
+)
+
+# Test feature view 3
+_TEST_FV3_ID = "my_fv3"
+_TEST_FV3_PATH = f"{_TEST_BIGTABLE_FOS1_PATH}/featureViews/my_fv3"
+_TEST_FV3_LABELS = {"my_key": "my_fv3"}
+_TEST_FV3_BQ_URI = f"bq://{_TEST_PROJECT}.my_dataset.my_table"
+_TEST_FV3 = types.feature_view.FeatureView(
+ name=_TEST_FV3_PATH,
+ vertex_rag_source=types.feature_view.FeatureView.VertexRagSource(
+ uri=_TEST_FV3_BQ_URI,
+ ),
+ labels=_TEST_FV3_LABELS,
+)
+
+
+_TEST_FV_LIST = [_TEST_FV1, _TEST_FV2, _TEST_FV3]
+
+# Test feature view sync 1
+_TEST_FV_SYNC1_ID = "my_fv_sync1"
+_TEST_FV_SYNC1_PATH = f"{_TEST_FV1_PATH}/featureViewSyncs/my_fv_sync1"
+_TEST_FV_SYNC1 = types.feature_view_sync.FeatureViewSync(
+ name=_TEST_FV_SYNC1_PATH,
+)
+_TEST_FV_SYNC1_RESPONSE = (
+ types.feature_online_store_admin_service.SyncFeatureViewResponse(
+ feature_view_sync=_TEST_FV_SYNC1_PATH,
+ )
+)
+
+# Test feature view sync 2
+_TEST_FV_SYNC2_ID = "my_fv_sync2"
+_TEST_FV_SYNC2_PATH = f"{_TEST_FV2_PATH}/featureViewSyncs/my_fv_sync2"
+_TEST_FV_SYNC2 = types.feature_view_sync.FeatureViewSync(
+ name=_TEST_FV_SYNC2_PATH,
+)
+
+_TEST_FV_SYNC_LIST = [_TEST_FV_SYNC1, _TEST_FV_SYNC2]
+
+# Test optimized feature view 1
+_TEST_OPTIMIZED_FV1_ID = "optimized_fv1"
+_TEST_OPTIMIZED_FV1_PATH = f"{_TEST_ESF_OPTIMIZED_FOS_PATH}/featureViews/optimized_fv1"
+_TEST_OPTIMIZED_FV1 = types.feature_view.FeatureView(
+ name=_TEST_OPTIMIZED_FV1_PATH,
+ big_query_source=types.feature_view.FeatureView.BigQuerySource(
+ uri=_TEST_FV1_BQ_URI,
+ entity_id_columns=_TEST_FV1_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FV1_LABELS,
+)
+
+# Test optimized feature view 2
+_TEST_OPTIMIZED_FV2_ID = "optimized_fv2"
+_TEST_OPTIMIZED_FV2_PATH = f"{_TEST_ESF_OPTIMIZED_FOS2_PATH}/featureViews/optimized_fv2"
+_TEST_OPTIMIZED_FV2 = types.feature_view.FeatureView(
+ name=_TEST_OPTIMIZED_FV2_PATH,
+ big_query_source=types.feature_view.FeatureView.BigQuerySource(
+ uri=_TEST_FV1_BQ_URI,
+ entity_id_columns=_TEST_FV1_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FV1_LABELS,
+)
+
+# Test embedding feature view 1
+_TEST_EMBEDDING_FV1_ID = "embedding_fv1"
+_TEST_EMBEDDING_FV1_PATH = f"{_TEST_ESF_OPTIMIZED_FOS_PATH}/featureViews/embedding_fv1"
+_TEST_EMBEDDING_FV1 = types.feature_view.FeatureView(
+ name=_TEST_EMBEDDING_FV1_PATH,
+ big_query_source=types.feature_view.FeatureView.BigQuerySource(
+ uri=_TEST_FV1_BQ_URI,
+ entity_id_columns=_TEST_FV1_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FV1_LABELS,
+)
+
+_TEST_STRING_FILTER = (
+ types.feature_online_store_service.NearestNeighborQuery.StringFilter(
+ name="filter_name",
+ allow_tokens=["allow_token_1", "allow_token_2"],
+ )
+)
+
+# Test optimized embedding feature view
+_TEST_OPTIMIZED_EMBEDDING_FV_ID = "optimized_embedding_fv"
+_TEST_OPTIMIZED_EMBEDDING_FV_PATH = (
+ f"{_TEST_ESF_OPTIMIZED_FOS_PATH}/featureViews/{_TEST_OPTIMIZED_EMBEDDING_FV_ID}"
+)
+_TEST_OPTIMIZED_EMBEDDING_FV = types.feature_view.FeatureView(
+ name=_TEST_OPTIMIZED_EMBEDDING_FV_PATH,
+ big_query_source=types.feature_view.FeatureView.BigQuerySource(
+ uri=_TEST_FV1_BQ_URI,
+ entity_id_columns=_TEST_FV1_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FV1_LABELS,
+ index_config=types.feature_view.FeatureView.IndexConfig(
+ embedding_column="embedding_column",
+ filter_columns=["col1", "col2"],
+ crowding_column="crowding_column",
+ embedding_dimension=123,
+ distance_measure_type=types.feature_view.FeatureView.IndexConfig.DistanceMeasureType.DOT_PRODUCT_DISTANCE,
+ ),
+)
+
+# Response for FetchFeatureValues
+_TEST_FV_FETCH1 = types.feature_online_store_service_v1.FetchFeatureValuesResponse(
+ key_values=types.feature_online_store_service_v1.FetchFeatureValuesResponse.FeatureNameValuePairList(
+ features=[
+ types.feature_online_store_service_v1.FetchFeatureValuesResponse.FeatureNameValuePairList.FeatureNameValuePair(
+ name="key1",
+ value=types.featurestore_online_service.FeatureValue(
+ string_value="value1",
+ ),
+ ),
+ ]
+ )
+)
+
+# Response for SearchNearestEntitiesResponse
+_TEST_FV_SEARCH1 = types.feature_online_store_service_v1.SearchNearestEntitiesResponse(
+ nearest_neighbors=types.feature_online_store_service_v1.NearestNeighbors(
+ neighbors=[
+ types.feature_online_store_service_v1.NearestNeighbors.Neighbor(
+ entity_id="neighbor_entity_id_1",
+ distance=0.1,
+ ),
+ ]
+ )
+)
+
+_TEST_FG1_ID = "my_fg1"
+_TEST_FG1_PATH = f"{_TEST_PARENT}/featureGroups/{_TEST_FG1_ID}"
+_TEST_FG1_BQ_URI = f"bq://{_TEST_PROJECT}.my_dataset.my_table_for_fg1"
+_TEST_FG1_ENTITY_ID_COLUMNS = ["entity_id"]
+_TEST_FG1_LABELS = {"my_key": "my_fg1"}
+_TEST_FG1 = types.feature_group.FeatureGroup(
+ name=_TEST_FG1_PATH,
+ big_query=types.feature_group.FeatureGroup.BigQuery(
+ big_query_source=types.io.BigQuerySource(
+ input_uri=_TEST_FG1_BQ_URI,
+ ),
+ entity_id_columns=_TEST_FG1_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FG1_LABELS,
+)
+
+
+_TEST_FG2_ID = "my_fg2"
+_TEST_FG2_PATH = f"{_TEST_PARENT}/featureGroups/{_TEST_FG2_ID}"
+_TEST_FG2_BQ_URI = f"bq://{_TEST_PROJECT}.my_dataset.my_table_for_fg2"
+_TEST_FG2_ENTITY_ID_COLUMNS = ["entity_id1", "entity_id2"]
+_TEST_FG2_LABELS = {"my_key2": "my_fg2"}
+_TEST_FG2 = types.feature_group.FeatureGroup(
+ name=_TEST_FG2_PATH,
+ big_query=types.feature_group.FeatureGroup.BigQuery(
+ big_query_source=types.io.BigQuerySource(
+ input_uri=_TEST_FG2_BQ_URI,
+ ),
+ entity_id_columns=_TEST_FG2_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FG2_LABELS,
+)
+
+
+_TEST_FG3_ID = "my_fg3"
+_TEST_FG3_PATH = f"{_TEST_PARENT}/featureGroups/{_TEST_FG3_ID}"
+_TEST_FG3_BQ_URI = f"bq://{_TEST_PROJECT}.my_dataset.my_table_for_fg3"
+_TEST_FG3_ENTITY_ID_COLUMNS = ["entity_id1", "entity_id2", "entity_id3"]
+_TEST_FG3_LABELS = {"my_key3": "my_fg3"}
+_TEST_FG3 = types.feature_group.FeatureGroup(
+ name=_TEST_FG3_PATH,
+ big_query=types.feature_group.FeatureGroup.BigQuery(
+ big_query_source=types.io.BigQuerySource(
+ input_uri=_TEST_FG3_BQ_URI,
+ ),
+ entity_id_columns=_TEST_FG3_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FG3_LABELS,
+)
+
+_TEST_FG_LIST = [_TEST_FG1, _TEST_FG2, _TEST_FG3]
+
+_TEST_FG1_F1_ID = "my_fg1_f1"
+_TEST_FG1_F1_PATH = (
+ f"{_TEST_PARENT}/featureGroups/{_TEST_FG1_ID}/features/{_TEST_FG1_F1_ID}"
+)
+_TEST_FG1_F1_DESCRIPTION = "My feature 1 in feature group 1"
+_TEST_FG1_F1_LABELS = {"my_fg1_feature": "f1"}
+_TEST_FG1_F1_POINT_OF_CONTACT = "fg1-f1-announce-list"
+_TEST_FG1_F1 = types.feature.Feature(
+ name=_TEST_FG1_F1_PATH,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+)
+
+
+_TEST_FG1_F2_ID = "my_fg1_f2"
+_TEST_FG1_F2_PATH = (
+ f"{_TEST_PARENT}/featureGroups/{_TEST_FG1_ID}/features/{_TEST_FG1_F2_ID}"
+)
+_TEST_FG1_F2_DESCRIPTION = "My feature 2 in feature group 1"
+_TEST_FG1_F2_LABELS = {"my_fg1_feature": "f2"}
+_TEST_FG1_F2_POINT_OF_CONTACT = "fg1-f2-announce-list"
+_TEST_FG1_F2_VERSION_COLUMN_NAME = "specific_column_for_feature_2"
+_TEST_FG1_F2 = types.feature.Feature(
+ name=_TEST_FG1_F2_PATH,
+ version_column_name=_TEST_FG1_F2_VERSION_COLUMN_NAME,
+ description=_TEST_FG1_F2_DESCRIPTION,
+ labels=_TEST_FG1_F2_LABELS,
+ point_of_contact=_TEST_FG1_F2_POINT_OF_CONTACT,
+)
+
+_TEST_FG1_FEATURE_LIST = [_TEST_FG1_F1, _TEST_FG1_F2]
+
+_TEST_FG1_FM1_ID = "my_fg1_fm1"
+_TEST_FG1_FM1_PATH = (
+ f"{_TEST_PARENT}/featureGroups/{_TEST_FG1_ID}/featureMonitors/{_TEST_FG1_FM1_ID}"
+)
+_TEST_FG1_FM1_DESCRIPTION = "My feature monitor 1 in feature group 1"
+_TEST_FG1_FM1_LABELS = {"my_fg1_feature_monitor": "fm1"}
+_TEST_FG1_FM1 = types.feature_monitor.FeatureMonitor(
+ name=_TEST_FG1_FM1_PATH,
+ description=_TEST_FG1_FM1_DESCRIPTION,
+ labels=_TEST_FG1_FM1_LABELS,
+ schedule_config=types.feature_monitor.ScheduleConfig(cron="0 0 * * *"),
+ feature_selection_config=types.feature_monitor.FeatureSelectionConfig(
+ feature_configs=[
+ types.feature_monitor.FeatureSelectionConfig.FeatureConfig(
+ feature_id="my_fg1_f1",
+ drift_threshold=0.3,
+ ),
+ types.feature_monitor.FeatureSelectionConfig.FeatureConfig(
+ feature_id="my_fg1_f2",
+ drift_threshold=0.4,
+ ),
+ ]
+ ),
+)
+_TEST_FG1_FM1_FEATURE_SELECTION_CONFIGS = [("my_fg1_f1", 0.3), ("my_fg1_f2", 0.4)]
+_TEST_FG1_FM1_SCHEDULE_CONFIG = "0 0 * * *"
+_TEST_FG1_FM2_ID = "my_fg1_fm2"
+_TEST_FG1_FM2_PATH = (
+ f"{_TEST_PARENT}/featureGroups/{_TEST_FG1_ID}/featureMonitors/{_TEST_FG1_FM2_ID}"
+)
+_TEST_FG1_FM2_DESCRIPTION = "My feature monitor 2 in feature group 1"
+_TEST_FG1_FM2_LABELS = {"my_fg1_feature_monitor": "fm2"}
+_TEST_FG1_FM2_FEATURE_SELECTION_CONFIGS = [("my_fg1_f2", 0.5)]
+_TEST_FG1_FM2_SCHEDULE_CONFIG = "8 0 * * *"
+_TEST_FG1_FM2 = types.feature_monitor.FeatureMonitor(
+ name=_TEST_FG1_FM2_PATH,
+ description=_TEST_FG1_FM2_DESCRIPTION,
+ labels=_TEST_FG1_FM2_LABELS,
+ schedule_config=types.feature_monitor.ScheduleConfig(cron="8 0 * * *"),
+ feature_selection_config=types.feature_monitor.FeatureSelectionConfig(
+ feature_configs=[
+ types.feature_monitor.FeatureSelectionConfig.FeatureConfig(
+ feature_id="my_fg1_f2",
+ drift_threshold=0.5,
+ ),
+ ]
+ ),
+)
+_TEST_FG1_FM_LIST = [_TEST_FG1_FM1, _TEST_FG1_FM2]
+
+_TEST_FG1_FMJ1_ID = "1234567890"
+_TEST_FG1_FMJ1_PATH = f"{_TEST_PARENT}/featureGroups/{_TEST_FG1_ID}/featureMonitors/{_TEST_FG1_FM1_ID}/featureMonitorJobs/{_TEST_FG1_FMJ1_ID}"
+_TEST_FG1_FMJ1_DESCRIPTION = "My feature monitor job 1 in feature monitor 1"
+_TEST_FG1_FMJ1_LABELS = {"my_fg1_feature_monitor_job": "fmj1"}
+_TEST_FG1_F1_FEATURE_STATS_AND_ANOMALY = types.feature_monitor.FeatureStatsAndAnomaly(
+ feature_id="my_fg1_f1",
+ distribution_deviation=0.5,
+ drift_detection_threshold=0.4,
+ drift_detected=True,
+ feature_monitor_job_id=_TEST_FG1_FMJ1_ID,
+ feature_monitor_id=_TEST_FG1_FM1_ID,
+)
+_TEST_FG1_F2_FEATURE_STATS_AND_ANOMALY = types.feature_monitor.FeatureStatsAndAnomaly(
+ feature_id="my_fg1_f2",
+ distribution_deviation=0.2,
+ drift_detection_threshold=0.4,
+ drift_detected=False,
+ feature_monitor_job_id=_TEST_FG1_FMJ1_ID,
+ feature_monitor_id=_TEST_FG1_FM1_ID,
+)
+_TEST_FG1_FMJ1_FEATURE_STATS_AND_ANOMALIES = [
+ _TEST_FG1_F1_FEATURE_STATS_AND_ANOMALY,
+ _TEST_FG1_F2_FEATURE_STATS_AND_ANOMALY,
+]
+_TEST_FG1_FMJ1 = types.feature_monitor_job.FeatureMonitorJob(
+ name=_TEST_FG1_FMJ1_PATH,
+ description=_TEST_FG1_FMJ1_DESCRIPTION,
+ labels=_TEST_FG1_FMJ1_LABELS,
+ job_summary=types.feature_monitor_job.FeatureMonitorJob.JobSummary(
+ feature_stats_and_anomalies=_TEST_FG1_FMJ1_FEATURE_STATS_AND_ANOMALIES
+ ),
+)
+_TEST_FG1_FMJ2_ID = "1234567891"
+_TEST_FG1_FMJ2_PATH = f"{_TEST_PARENT}/featureGroups/{_TEST_FG1_ID}/featureMonitors/{_TEST_FG1_FM1_ID}/featureMonitorJobs/{_TEST_FG1_FMJ2_ID}"
+_TEST_FG1_FMJ2_DESCRIPTION = "My feature monitor job 2 in feature monitor 1"
+_TEST_FG1_FMJ2_LABELS = {"my_fg1_feature_monitor_job": "fmj2"}
+_TEST_FG1_FMJ2 = types.feature_monitor_job.FeatureMonitorJob(
+ name=_TEST_FG1_FMJ2_PATH,
+ description=_TEST_FG1_FMJ2_DESCRIPTION,
+ labels=_TEST_FG1_FMJ2_LABELS,
+)
+_TEST_FG1_FMJ_LIST = [_TEST_FG1_FMJ1, _TEST_FG1_FMJ2]
+
+_TEST_FG1_F1_FEATURE_STATS_AND_ANOMALY = types.feature_monitor.FeatureStatsAndAnomaly(
+ feature_id="my_fg1_f1",
+ distribution_deviation=0.5,
+ drift_detection_threshold=0.4,
+ drift_detected=True,
+ feature_monitor_job_id="1234567890",
+ feature_monitor_id="1234567891",
+)
+_TEST_FG1_F1_WITH_STATS = types.feature_v1beta1.Feature(
+ name=_TEST_FG1_F1_PATH,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ feature_stats_and_anomaly=[_TEST_FG1_F1_FEATURE_STATS_AND_ANOMALY],
+)
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_batch_prediction.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_batch_prediction.py
new file mode 100644
index 0000000000000000000000000000000000000000..b71ca67ada227411ad6f285052de4360148ca844
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_batch_prediction.py
@@ -0,0 +1,725 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Unit tests for generative model batch prediction."""
+# pylint: disable=protected-access
+
+import importlib
+import pytest
+from unittest import mock
+
+from google.cloud import aiplatform
+import vertexai
+from google.cloud.aiplatform import base as aiplatform_base
+from google.cloud.aiplatform import initializer as aiplatform_initializer
+from google.cloud.aiplatform.compat.services import (
+ job_service_client,
+ model_service_client,
+)
+from google.cloud.aiplatform.compat.types import (
+ batch_prediction_job as gca_batch_prediction_job_compat,
+ io as gca_io_compat,
+ job_state as gca_job_state_compat,
+ model as gca_model,
+)
+from vertexai.preview import batch_prediction
+from vertexai.generative_models import GenerativeModel
+
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_BUCKET = "gs://test-bucket"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_DISPLAY_NAME = "test-display-name"
+
+_TEST_GEMINI_MODEL_NAME = "gemini-1.0-pro"
+_TEST_GEMINI_MODEL_RESOURCE_NAME = f"publishers/google/models/{_TEST_GEMINI_MODEL_NAME}"
+_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME = "projects/123/locations/us-central1/models/456"
+_TEST_PALM_MODEL_NAME = "text-bison"
+_TEST_PALM_MODEL_RESOURCE_NAME = f"publishers/google/models/{_TEST_PALM_MODEL_NAME}"
+_TEST_LLAMA_MODEL_NAME = "llama3-405b-instruct-maas"
+_TEST_LLAMA_MODEL_RESOURCE_NAME = f"publishers/meta/models/{_TEST_LLAMA_MODEL_NAME}"
+_TEST_CLAUDE_MODEL_NAME = "claude-3-opus"
+_TEST_CLAUDE_MODEL_RESOURCE_NAME = (
+ f"publishers/anthropic/models/{_TEST_CLAUDE_MODEL_NAME}"
+)
+
+_TEST_GCS_INPUT_URI = "gs://test-bucket/test-input.jsonl"
+_TEST_GCS_INPUT_URI_2 = "gs://test-bucket/test-input-2.jsonl"
+_TEST_GCS_OUTPUT_PREFIX = "gs://test-bucket/test-output"
+_TEST_BQ_INPUT_URI = "bq://test-project.test-dataset.test-input"
+_TEST_BQ_OUTPUT_PREFIX = "bq://test-project.test-dataset.test-output"
+_TEST_INVALID_URI = "invalid-uri"
+
+
+_TEST_BATCH_PREDICTION_JOB_ID = "123456789"
+_TEST_BATCH_PREDICTION_JOB_NAME = (
+ f"{_TEST_PARENT}/batchPredictionJobs/{_TEST_BATCH_PREDICTION_JOB_ID}"
+)
+_TEST_JOB_STATE_RUNNING = gca_job_state_compat.JobState(3)
+_TEST_JOB_STATE_SUCCESS = gca_job_state_compat.JobState(4)
+
+_TEST_GAPIC_BATCH_PREDICTION_JOB = gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_GEMINI_MODEL_RESOURCE_NAME,
+ state=_TEST_JOB_STATE_RUNNING,
+)
+
+
+# TODO(b/339230025) Mock the whole service instead of methods.
+@pytest.fixture
+def generate_display_name_mock():
+ with mock.patch.object(
+ aiplatform_base.VertexAiResourceNoun, "_generate_display_name"
+ ) as generate_display_name_mock:
+ generate_display_name_mock.return_value = _TEST_DISPLAY_NAME
+ yield generate_display_name_mock
+
+
+@pytest.fixture
+def complete_bq_uri_mock():
+ with mock.patch.object(
+ batch_prediction.BatchPredictionJob, "_complete_bq_uri"
+ ) as complete_bq_uri_mock:
+ complete_bq_uri_mock.return_value = _TEST_BQ_OUTPUT_PREFIX
+ yield complete_bq_uri_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_with_bq_output_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_batch_prediction_job"
+ ) as get_job_mock:
+ get_job_mock.return_value = gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_GEMINI_MODEL_RESOURCE_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ output_info=gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo(
+ bigquery_output_table=_TEST_BQ_OUTPUT_PREFIX
+ ),
+ )
+ yield get_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_with_gcs_output_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_batch_prediction_job"
+ ) as get_job_mock:
+ get_job_mock.return_value = gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_GEMINI_MODEL_RESOURCE_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ output_info=gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo(
+ gcs_output_directory=_TEST_GCS_OUTPUT_PREFIX
+ ),
+ )
+ yield get_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_with_llama_model_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_batch_prediction_job"
+ ) as get_job_mock:
+ get_job_mock.return_value = gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_LLAMA_MODEL_RESOURCE_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ output_info=gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo(
+ gcs_output_directory=_TEST_GCS_OUTPUT_PREFIX
+ ),
+ )
+ yield get_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_with_claude_model_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_batch_prediction_job"
+ ) as get_job_mock:
+ get_job_mock.return_value = gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_CLAUDE_MODEL_RESOURCE_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ output_info=gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo(
+ gcs_output_directory=_TEST_GCS_OUTPUT_PREFIX
+ ),
+ )
+ yield get_job_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_with_tuned_gemini_model_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_batch_prediction_job"
+ ) as get_job_mock:
+ get_job_mock.return_value = gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ output_info=gca_batch_prediction_job_compat.BatchPredictionJob.OutputInfo(
+ gcs_output_directory=_TEST_GCS_OUTPUT_PREFIX
+ ),
+ )
+ yield get_job_mock
+
+
+@pytest.fixture
+def get_gemini_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ name=_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME,
+ model_source_info=gca_model.ModelSourceInfo(
+ source_type=gca_model.ModelSourceInfo.ModelSourceType.GENIE
+ ),
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_non_gemini_model_mock():
+ with mock.patch.object(
+ model_service_client.ModelServiceClient, "get_model"
+ ) as get_model_mock:
+ get_model_mock.return_value = gca_model.Model(
+ name=_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME,
+ )
+ yield get_model_mock
+
+
+@pytest.fixture
+def get_batch_prediction_job_invalid_model_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "get_batch_prediction_job"
+ ) as get_job_mock:
+ get_job_mock.return_value = gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_PALM_MODEL_RESOURCE_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ )
+ yield get_job_mock
+
+
+@pytest.fixture
+def create_batch_prediction_job_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "create_batch_prediction_job"
+ ) as create_job_mock:
+ create_job_mock.return_value = _TEST_GAPIC_BATCH_PREDICTION_JOB
+ yield create_job_mock
+
+
+@pytest.fixture
+def cancel_batch_prediction_job_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "cancel_batch_prediction_job"
+ ) as cancel_job_mock:
+ yield cancel_job_mock
+
+
+@pytest.fixture
+def delete_batch_prediction_job_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "delete_batch_prediction_job"
+ ) as delete_job_mock:
+ yield delete_job_mock
+
+
+@pytest.fixture
+def list_batch_prediction_jobs_mock():
+ with mock.patch.object(
+ job_service_client.JobServiceClient, "list_batch_prediction_jobs"
+ ) as list_jobs_mock:
+ list_jobs_mock.return_value = [
+ _TEST_GAPIC_BATCH_PREDICTION_JOB,
+ gca_batch_prediction_job_compat.BatchPredictionJob(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_PALM_MODEL_RESOURCE_NAME,
+ state=_TEST_JOB_STATE_SUCCESS,
+ ),
+ ]
+ yield list_jobs_mock
+
+
+@pytest.mark.usefixtures(
+ "google_auth_mock", "generate_display_name_mock", "complete_bq_uri_mock"
+)
+class TestBatchPredictionJob:
+ """Unit tests for BatchPredictionJob."""
+
+ def setup_method(self):
+ importlib.reload(aiplatform_initializer)
+ importlib.reload(aiplatform)
+ importlib.reload(vertexai)
+ vertexai.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ def teardown_method(self):
+ aiplatform_initializer.global_pool.shutdown(wait=True)
+
+ def test_init_batch_prediction_job(
+ self, get_batch_prediction_job_with_gcs_output_mock
+ ):
+ batch_prediction.BatchPredictionJob(_TEST_BATCH_PREDICTION_JOB_ID)
+
+ get_batch_prediction_job_with_gcs_output_mock.assert_called_once_with(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME, retry=aiplatform_base._DEFAULT_RETRY
+ )
+
+ def test_init_batch_prediction_job_with_llama_model(
+ self,
+ get_batch_prediction_job_with_llama_model_mock,
+ ):
+ batch_prediction.BatchPredictionJob(_TEST_BATCH_PREDICTION_JOB_ID)
+
+ get_batch_prediction_job_with_llama_model_mock.assert_called_once_with(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME, retry=aiplatform_base._DEFAULT_RETRY
+ )
+
+ def test_init_batch_prediction_job_with_claude_model(
+ self,
+ get_batch_prediction_job_with_claude_model_mock,
+ ):
+ batch_prediction.BatchPredictionJob(_TEST_BATCH_PREDICTION_JOB_ID)
+
+ get_batch_prediction_job_with_claude_model_mock.assert_called_once_with(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME, retry=aiplatform_base._DEFAULT_RETRY
+ )
+
+ def test_init_batch_prediction_job_with_tuned_gemini_model(
+ self,
+ get_batch_prediction_job_with_tuned_gemini_model_mock,
+ get_gemini_model_mock,
+ ):
+ batch_prediction.BatchPredictionJob(_TEST_BATCH_PREDICTION_JOB_ID)
+
+ get_batch_prediction_job_with_tuned_gemini_model_mock.assert_called_once_with(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME, retry=aiplatform_base._DEFAULT_RETRY
+ )
+ get_gemini_model_mock.assert_called_once_with(
+ name=_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME,
+ retry=aiplatform_base._DEFAULT_RETRY,
+ )
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_invalid_model_mock")
+ def test_init_batch_prediction_job_invalid_model(self):
+ with pytest.raises(
+ ValueError,
+ match=(
+ f"BatchPredictionJob '{_TEST_BATCH_PREDICTION_JOB_ID}' "
+ f"runs with the model '{_TEST_PALM_MODEL_RESOURCE_NAME}', "
+ "which is not a GenAI model."
+ ),
+ ):
+ batch_prediction.BatchPredictionJob(_TEST_BATCH_PREDICTION_JOB_ID)
+
+ @pytest.mark.usefixtures(
+ "get_batch_prediction_job_with_tuned_gemini_model_mock",
+ "get_non_gemini_model_mock",
+ )
+ def test_init_batch_prediction_job_with_invalid_tuned_model(
+ self,
+ ):
+ with pytest.raises(
+ ValueError,
+ match=(
+ f"BatchPredictionJob '{_TEST_BATCH_PREDICTION_JOB_ID}' "
+ f"runs with the model '{_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME}', "
+ "which is not a GenAI model."
+ ),
+ ):
+ batch_prediction.BatchPredictionJob(_TEST_BATCH_PREDICTION_JOB_ID)
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_with_gcs_output_mock")
+ def test_submit_batch_prediction_job_with_gcs_input(
+ self, create_batch_prediction_job_mock
+ ):
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_GEMINI_MODEL_NAME,
+ input_dataset=_TEST_GCS_INPUT_URI,
+ output_uri_prefix=_TEST_GCS_OUTPUT_PREFIX,
+ )
+
+ assert job.gca_resource == _TEST_GAPIC_BATCH_PREDICTION_JOB
+ assert job.state == _TEST_JOB_STATE_RUNNING
+ assert not job.has_ended
+ assert not job.has_succeeded
+
+ job.refresh()
+ assert job.state == _TEST_JOB_STATE_SUCCESS
+ assert job.has_ended
+ assert job.has_succeeded
+ assert job.output_location == _TEST_GCS_OUTPUT_PREFIX
+
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_GEMINI_MODEL_RESOURCE_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io_compat.GcsSource(uris=[_TEST_GCS_INPUT_URI]),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io_compat.GcsDestination(
+ output_uri_prefix=_TEST_GCS_OUTPUT_PREFIX
+ ),
+ predictions_format="jsonl",
+ ),
+ )
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_with_bq_output_mock")
+ def test_submit_batch_prediction_job_with_bq_input(
+ self, create_batch_prediction_job_mock
+ ):
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_GEMINI_MODEL_NAME,
+ input_dataset=_TEST_BQ_INPUT_URI,
+ output_uri_prefix=_TEST_BQ_OUTPUT_PREFIX,
+ )
+
+ assert job.gca_resource == _TEST_GAPIC_BATCH_PREDICTION_JOB
+ assert job.state == _TEST_JOB_STATE_RUNNING
+ assert not job.has_ended
+ assert not job.has_succeeded
+
+ job.refresh()
+ assert job.state == _TEST_JOB_STATE_SUCCESS
+ assert job.has_ended
+ assert job.has_succeeded
+ assert job.output_location == _TEST_BQ_OUTPUT_PREFIX
+
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_GEMINI_MODEL_RESOURCE_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="bigquery",
+ bigquery_source=gca_io_compat.BigQuerySource(
+ input_uri=_TEST_BQ_INPUT_URI
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ bigquery_destination=gca_io_compat.BigQueryDestination(
+ output_uri=_TEST_BQ_OUTPUT_PREFIX
+ ),
+ predictions_format="bigquery",
+ ),
+ )
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ def test_submit_batch_prediction_job_with_gcs_input_without_output_uri_prefix(
+ self, create_batch_prediction_job_mock
+ ):
+ vertexai.init(staging_bucket=_TEST_BUCKET)
+ model = GenerativeModel(_TEST_GEMINI_MODEL_NAME)
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=model,
+ input_dataset=[_TEST_GCS_INPUT_URI, _TEST_GCS_INPUT_URI_2],
+ )
+
+ assert job.gca_resource == _TEST_GAPIC_BATCH_PREDICTION_JOB
+
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_GEMINI_MODEL_RESOURCE_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io_compat.GcsSource(
+ uris=[_TEST_GCS_INPUT_URI, _TEST_GCS_INPUT_URI_2]
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io_compat.GcsDestination(
+ output_uri_prefix=f"{_TEST_BUCKET}/gen-ai-batch-prediction"
+ ),
+ predictions_format="jsonl",
+ ),
+ )
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ def test_submit_batch_prediction_job_with_bq_input_without_output_uri_prefix(
+ self, create_batch_prediction_job_mock
+ ):
+ model = GenerativeModel(_TEST_GEMINI_MODEL_NAME)
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=model,
+ input_dataset=_TEST_BQ_INPUT_URI,
+ )
+
+ assert job.gca_resource == _TEST_GAPIC_BATCH_PREDICTION_JOB
+
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_GEMINI_MODEL_RESOURCE_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="bigquery",
+ bigquery_source=gca_io_compat.BigQuerySource(
+ input_uri=_TEST_BQ_INPUT_URI
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ bigquery_destination=gca_io_compat.BigQueryDestination(
+ output_uri=_TEST_BQ_OUTPUT_PREFIX
+ ),
+ predictions_format="bigquery",
+ ),
+ )
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ def test_submit_batch_prediction_job_with_llama_model(
+ self,
+ create_batch_prediction_job_mock,
+ ):
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_LLAMA_MODEL_RESOURCE_NAME,
+ input_dataset=_TEST_BQ_INPUT_URI,
+ )
+
+ assert job.gca_resource == _TEST_GAPIC_BATCH_PREDICTION_JOB
+
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_LLAMA_MODEL_RESOURCE_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="bigquery",
+ bigquery_source=gca_io_compat.BigQuerySource(
+ input_uri=_TEST_BQ_INPUT_URI
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ bigquery_destination=gca_io_compat.BigQueryDestination(
+ output_uri=_TEST_BQ_OUTPUT_PREFIX
+ ),
+ predictions_format="bigquery",
+ ),
+ )
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ def test_submit_batch_prediction_job_with_claude_model(
+ self,
+ create_batch_prediction_job_mock,
+ ):
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_CLAUDE_MODEL_RESOURCE_NAME,
+ input_dataset=_TEST_BQ_INPUT_URI,
+ )
+
+ assert job.gca_resource == _TEST_GAPIC_BATCH_PREDICTION_JOB
+
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob(
+ display_name=_TEST_DISPLAY_NAME,
+ model=_TEST_CLAUDE_MODEL_RESOURCE_NAME,
+ input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig(
+ instances_format="bigquery",
+ bigquery_source=gca_io_compat.BigQuerySource(
+ input_uri=_TEST_BQ_INPUT_URI
+ ),
+ ),
+ output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig(
+ bigquery_destination=gca_io_compat.BigQueryDestination(
+ output_uri=_TEST_BQ_OUTPUT_PREFIX
+ ),
+ predictions_format="bigquery",
+ ),
+ )
+ create_batch_prediction_job_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ batch_prediction_job=expected_gapic_batch_prediction_job,
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("create_batch_prediction_job_mock")
+ def test_submit_batch_prediction_job_with_tuned_model(
+ self,
+ get_gemini_model_mock,
+ ):
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME,
+ input_dataset=_TEST_BQ_INPUT_URI,
+ )
+
+ assert job.gca_resource == _TEST_GAPIC_BATCH_PREDICTION_JOB
+ get_gemini_model_mock.assert_called_once_with(
+ name=_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME,
+ retry=aiplatform_base._DEFAULT_RETRY,
+ )
+
+ def test_submit_batch_prediction_job_with_invalid_source_model(self):
+ with pytest.raises(
+ ValueError,
+ match=(
+ "Abbreviated model names are only supported for Gemini models. "
+ "Please provide the full publisher model name."
+ ),
+ ):
+ batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_PALM_MODEL_NAME,
+ input_dataset=_TEST_GCS_INPUT_URI,
+ )
+
+ def test_submit_batch_prediction_job_with_invalid_abbreviated_model_name(self):
+ with pytest.raises(
+ ValueError,
+ match=(
+ "Abbreviated model names are only supported for Gemini models. "
+ "Please provide the full publisher model name."
+ ),
+ ):
+ batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_LLAMA_MODEL_NAME,
+ input_dataset=_TEST_GCS_INPUT_URI,
+ )
+
+ @pytest.mark.usefixtures("get_non_gemini_model_mock")
+ def test_submit_batch_prediction_job_with_non_gemini_tuned_model(self):
+ with pytest.raises(
+ ValueError,
+ match=(
+ f"Model '{_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME}' "
+ "is not a Generative AI model."
+ ),
+ ):
+ batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_TUNED_GEMINI_MODEL_RESOURCE_NAME,
+ input_dataset=_TEST_GCS_INPUT_URI,
+ )
+
+ def test_submit_batch_prediction_job_with_invalid_model_name(self):
+ invalid_model_name = "invalid/model/name"
+ with pytest.raises(
+ ValueError,
+ match=(f"Invalid format for model name: {invalid_model_name}."),
+ ):
+ batch_prediction.BatchPredictionJob.submit(
+ source_model=invalid_model_name,
+ input_dataset=_TEST_GCS_INPUT_URI,
+ )
+
+ def test_submit_batch_prediction_job_with_invalid_input_dataset(self):
+ with pytest.raises(
+ ValueError,
+ match=(
+ f"Unsupported input URI: {_TEST_INVALID_URI}. "
+ "Supported formats: 'gs://path/to/input/data.jsonl' and "
+ "'bq://projectId.bqDatasetId.bqTableId'"
+ ),
+ ):
+ batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_GEMINI_MODEL_NAME,
+ input_dataset=_TEST_INVALID_URI,
+ )
+
+ invalid_bq_uris = ["bq://projectId.dataset1", "bq://projectId.dataset2"]
+ with pytest.raises(
+ ValueError,
+ match=("Multiple Bigquery input datasets are not supported."),
+ ):
+ batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_GEMINI_MODEL_NAME,
+ input_dataset=invalid_bq_uris,
+ )
+
+ def test_submit_batch_prediction_job_with_invalid_output_uri_prefix(self):
+ with pytest.raises(
+ ValueError,
+ match=(
+ f"Unsupported output URI: {_TEST_INVALID_URI}. "
+ "Supported formats: 'gs://path/to/output/data' and "
+ "'bq://projectId.bqDatasetId'"
+ ),
+ ):
+ batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_GEMINI_MODEL_NAME,
+ input_dataset=_TEST_GCS_INPUT_URI,
+ output_uri_prefix=_TEST_INVALID_URI,
+ )
+
+ def test_submit_batch_prediction_job_without_output_uri_prefix_and_bucket(self):
+ with pytest.raises(
+ ValueError,
+ match=(
+ "Please either specify output_uri_prefix or "
+ "set staging_bucket in vertexai.init()."
+ ),
+ ):
+ batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_GEMINI_MODEL_NAME,
+ input_dataset=_TEST_GCS_INPUT_URI,
+ )
+
+ @pytest.mark.usefixtures("create_batch_prediction_job_mock")
+ def test_cancel_batch_prediction_job(self, cancel_batch_prediction_job_mock):
+ job = batch_prediction.BatchPredictionJob.submit(
+ source_model=_TEST_GEMINI_MODEL_NAME,
+ input_dataset=_TEST_GCS_INPUT_URI,
+ output_uri_prefix=_TEST_GCS_OUTPUT_PREFIX,
+ )
+ job.cancel()
+
+ cancel_batch_prediction_job_mock.assert_called_once_with(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ )
+
+ @pytest.mark.usefixtures("get_batch_prediction_job_with_gcs_output_mock")
+ def test_delete_batch_prediction_job(self, delete_batch_prediction_job_mock):
+ job = batch_prediction.BatchPredictionJob(_TEST_BATCH_PREDICTION_JOB_ID)
+ job.delete()
+
+ delete_batch_prediction_job_mock.assert_called_once_with(
+ name=_TEST_BATCH_PREDICTION_JOB_NAME,
+ )
+
+ def tes_list_batch_prediction_jobs(self, list_batch_prediction_jobs_mock):
+ jobs = batch_prediction.BatchPredictionJob.list()
+
+ assert len(jobs) == 1
+ assert jobs[0].gca_resource == _TEST_GAPIC_BATCH_PREDICTION_JOB
+
+ list_batch_prediction_jobs_mock.assert_called_once_with(
+ request={"parent": _TEST_PARENT}
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_evaluation.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..13ca753f7b22115d051aa38024a09509c3405285
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_evaluation.py
@@ -0,0 +1,1955 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+import threading
+import time
+from unittest import mock
+
+from google import auth
+from google.auth import credentials as auth_credentials
+from google.cloud import aiplatform
+import vertexai
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.metadata import metadata
+from google.cloud.aiplatform_v1.services import (
+ evaluation_service as gapic_evaluation_services,
+)
+from google.cloud.aiplatform_v1.types import (
+ evaluation_service as gapic_evaluation_service_types,
+)
+from google.cloud.aiplatform_v1beta1.services import (
+ evaluation_service as gapic_evaluation_services_preview,
+)
+from google.cloud.aiplatform_v1beta1.types import (
+ evaluation_service as gapic_evaluation_service_types_preview,
+)
+from vertexai import evaluation
+from vertexai import generative_models
+from vertexai.evaluation import _base as eval_base
+from vertexai.evaluation import _evaluation
+from vertexai.evaluation import eval_task
+from vertexai.evaluation import utils
+from vertexai.evaluation.metrics import _rouge
+from vertexai.evaluation.metrics import metric_prompt_template
+from vertexai.evaluation.metrics import (
+ metric_prompt_template_examples,
+)
+from vertexai.evaluation.metrics import pairwise_metric
+from vertexai.evaluation.metrics import pointwise_metric
+from vertexai.preview import evaluation as evaluation_preview
+from vertexai.preview import reasoning_engines
+import numpy as np
+import pandas as pd
+import pytest
+
+
+EvalTask = eval_task.EvalTask
+EvalTaskPreview = evaluation_preview.eval_task.EvalTask
+Pointwise = metric_prompt_template_examples.MetricPromptTemplateExamples.Pointwise
+PointwisePreview = (
+ evaluation_preview.metrics.metric_prompt_template_examples.MetricPromptTemplateExamples.Pointwise
+)
+Pairwise = metric_prompt_template_examples.MetricPromptTemplateExamples.Pairwise
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+_TEST_BUCKET = "gs://test-bucket"
+_TEST_FILE_NAME = "test-file-name.csv"
+_AUTORATER_INSTRUCTION = """
+You are an expert evaluator. Your task is to evaluate the quality of the responses generated by AI models.
+"""
+_METRIC_DEFINITION = "You will be assessing Text Quality"
+_CRITERIA = {
+ "Coherence": ("The response presents ideas in a logical and organized manner."),
+ "Fluency": "The text flows smoothly and naturally",
+}
+_POINTWISE_RATING_RUBRIC = {
+ "3": "(Good). Well-written.",
+ "2": "(Ok). Adequate writing with decent coherence and fluency.",
+ "1": "(Bad). Poorly written.",
+}
+_PAIRWISE_RATING_RUBRIC = {
+ "A": "Response A answers better than response B.",
+ "SAME": "Response A and B answers equally well.",
+ "B": "Response B answers better than response A.",
+}
+_EVALUATION_STEPS = {
+ "STEP 1": "Assess grammar correctness",
+ "STEP 2": "Assess word choice and flow",
+}
+_TEST_POINTWISE_METRIC = pointwise_metric.PointwiseMetric(
+ metric="test_pointwise_metric",
+ metric_prompt_template=metric_prompt_template.PointwiseMetricPromptTemplate(
+ metric_definition=_METRIC_DEFINITION,
+ criteria=_CRITERIA,
+ rating_rubric=_POINTWISE_RATING_RUBRIC,
+ evaluation_steps=_EVALUATION_STEPS,
+ ),
+)
+_TEST_POINTWISE_METRIC_FREE_STRING = pointwise_metric.PointwiseMetric(
+ metric="test_pointwise_metric_str", metric_prompt_template="abc: {abc}"
+)
+_TEST_PAIRWISE_METRIC = pairwise_metric.PairwiseMetric(
+ metric="test_pairwise_metric",
+ metric_prompt_template=metric_prompt_template.PairwiseMetricPromptTemplate(
+ metric_definition=_METRIC_DEFINITION,
+ criteria=_CRITERIA,
+ rating_rubric=_PAIRWISE_RATING_RUBRIC,
+ evaluation_steps=_EVALUATION_STEPS,
+ ),
+)
+_TEST_COMET = pointwise_metric.Comet(
+ version="COMET_22_SRC_REF",
+ source_language="en",
+ target_language="zh",
+)
+_TEST_METRICX = pointwise_metric.MetricX(
+ version="METRICX_24_SRC",
+ source_language="en",
+ target_language="zh",
+)
+_TEST_METRICS = (
+ "exact_match",
+ "bleu",
+ "rouge_1",
+ "rouge_2",
+ "rouge_l",
+ "rouge_l_sum",
+ Pointwise.COHERENCE,
+ Pointwise.FLUENCY,
+ Pointwise.SAFETY,
+ Pointwise.GROUNDEDNESS,
+ Pointwise.SUMMARIZATION_QUALITY,
+ Pointwise.VERBOSITY,
+ Pointwise.QUESTION_ANSWERING_QUALITY,
+ _TEST_POINTWISE_METRIC,
+ _TEST_PAIRWISE_METRIC,
+)
+_TEST_EVAL_DATASET_WITHOUT_PROMPT = pd.DataFrame(
+ {
+ "response": ["test", "text"],
+ "reference": ["test", "ref"],
+ "context": ["test", "context"],
+ "instruction": ["test", "instruction"],
+ }
+)
+_TEST_EVAL_DATASET_WITHOUT_RESPONSE = pd.DataFrame(
+ {
+ "prompt": ["test", "prompt"],
+ "reference": ["test", "ref"],
+ "context": ["test", "context"],
+ "instruction": ["test", "instruction"],
+ }
+)
+_TEST_AGENT_EVAL_DATASET_WITHOUT_RESPONSE = pd.DataFrame(
+ {
+ "prompt": ["test_input1", "test_input2"],
+ "reference_trajectory": [
+ [{"tool_name": "test_tool1"}, {"tool_name": "test_tool2"}],
+ [{"tool_name": "test_tool3"}, {"tool_name": "test_tool4"}],
+ ],
+ },
+)
+_TEST_EVAL_DATASET_ALL_INCLUDED = pd.DataFrame(
+ {
+ "prompt": ["test_prompt", "text_prompt"],
+ "response": ["test", "text"],
+ "reference": ["test", "ref"],
+ "context": ["test", "context"],
+ "instruction": ["test", "instruction"],
+ "source": ["test", "source"],
+ }
+)
+_TEST_EVAL_DATASET_SINGLE = pd.DataFrame({"prompt": ["test_prompt", "text_prompt"]})
+_TEST_JSONL_FILE_CONTENT = """{"prompt": "prompt", "reference": "reference"}\n
+{"prompt":"test", "reference": "test"}\n
+"""
+_TEST_CSV_FILE_CONTENT = """reference,context,instruction\ntest,test,test\n
+text,text,text\n
+"""
+_TEST_EXPERIMENT = "test-experiment"
+_TEST_CSV = pd.DataFrame(
+ columns={
+ "response": ["text"],
+ "reference": ["ref"],
+ }
+)
+_EXPECTED_POINTWISE_PROMPT_TEMPLATE = """
+# Instruction
+hello
+
+
+# Evaluation
+## Metric Definition
+this is eval metric
+
+## Criteria
+metric1: summarization
+
+## Rating Rubric
+0: bad
+1: good
+
+## Evaluation Steps
+step1: start
+step2: finish
+
+## Evaluation Examples
+Q: hi A: hello
+
+
+# User Inputs and AI-generated Response
+## User Inputs
+### country
+{country}
+
+
+
+
+## AI-generated Response
+{response}
+"""
+_EXPECTED_POINTWISE_PROMPT_TEMPLATE_WITH_DEFAULT_VALUES = """
+# Instruction
+You are an expert evaluator. Your task is to evaluate the quality of the responses generated by AI models. We will provide you with the user prompt and an AI-generated responses.
+You should first read the user input carefully for analyzing the task, and then evaluate the quality of the responses based on the Criteria provided in the Evaluation section below.
+You will assign the response a rating following the Rating Rubric and Evaluation Steps. Give step by step explanations for your rating, and only choose ratings from the Rating Rubric.
+
+
+# Evaluation
+## Criteria
+Coherence: The response presents ideas in a logical and organized manner.
+Fluency: The text flows smoothly and naturally
+
+## Rating Rubric
+1: (Bad). Poorly written.
+2: (Ok). Adequate writing with decent coherence and fluency.
+3: (Good). Well-written.
+
+## Evaluation Steps
+Step 1: Assess the response in aspects of all criteria provided. Provide assessment according to each criterion.
+Step 2: Score based on the rating rubric. Give a brief rationale to explain your evaluation considering each individual criterion.
+
+
+# User Inputs and AI-generated Response
+## User Inputs
+
+
+
+## AI-generated Response
+{response}
+"""
+
+_EXPECTED_PAIRWISE_PROMPT_TEMPLATE = """
+# Instruction
+hello
+
+
+# Evaluation
+## Metric Definition
+this is eval metric
+
+## Criteria
+metric1: summarization
+
+## Rating Rubric
+A: good
+B: good
+
+## Evaluation Steps
+step1: start
+step2: finish
+
+## Evaluation Examples
+Q: hi A: hello
+
+
+# User Inputs and AI-generated Responses
+## User Inputs
+### country
+{country}
+
+
+## AI-generated Responses
+### Response A
+{baseline_model_response}
+
+### Response B
+{response}
+"""
+
+_EXPECTED_PAIRWISE_PROMPT_TEMPLATE_WITH_DEFAULT_VALUES = """
+# Instruction
+You are an expert evaluator. Your task is to evaluate the quality of the responses generated by two AI models. We will provide you with the user input and a pair of AI-generated responses (Response A and Response B).
+You should first read the user input carefully for analyzing the task, and then evaluate the quality of the responses based on based on the Criteria provided in the Evaluation section below.
+You will first judge responses individually, following the Rating Rubric and Evaluation Steps. Then you will give step by step explanations for your judgement, compare results to declare the winner based on the Rating Rubric and Evaluation Steps.
+
+
+# Evaluation
+## Criteria
+Coherence: The response presents ideas in a logical and organized manner.
+Fluency: The text flows smoothly and naturally
+
+## Rating Rubric
+A: Response A answers better than response B.
+B: Response B answers better than response A.
+SAME: Response A and B answers equally well.
+
+## Evaluation Steps
+Step 1: Analyze Response A based on all the Criteria.
+Step 2: Analyze Response B based on all the Criteria.
+Step 3: Compare the overall performance of Response A and Response B based on your analyses and assessment.
+Step 4: Output your preference of "A", "SAME" or "B" to the pairwise_choice field according to the Rating Rubrics.
+Step 5: Output your assessment reasoning in the explanation field
+
+
+# User Inputs and AI-generated Responses
+## User Inputs
+
+## AI-generated Responses
+### Response A
+{baseline_model_response}
+
+### Response B
+{response}
+"""
+
+_MOCK_RUNNABLE_INFERENCE_RESPONSE = [
+ {
+ "input": "test_input",
+ "output": "test_output",
+ "intermediate_steps": [
+ [{"kwargs": {"tool": "test_tool1"}, "tool_output": "test_tool_output"}],
+ [{"kwargs": {"tool": "test_tool2"}, "tool_output": "test_tool_output"}],
+ ],
+ },
+ {
+ "input": "test_input",
+ "output": "test_output",
+ "intermediate_steps": [
+ [{"kwargs": {"tool": "test_tool2"}, "tool_output": "test_tool_output"}],
+ [{"kwargs": {"tool": "test_tool3"}, "tool_output": "test_tool_output"}],
+ ],
+ },
+]
+
+_MOCK_EXACT_MATCH_RESULT = (
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ exact_match_results=gapic_evaluation_service_types.ExactMatchResults(
+ exact_match_metric_values=[
+ gapic_evaluation_service_types.ExactMatchMetricValue(score=1.0),
+ ]
+ )
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ exact_match_results=gapic_evaluation_service_types.ExactMatchResults(
+ exact_match_metric_values=[
+ gapic_evaluation_service_types.ExactMatchMetricValue(score=0.0),
+ ]
+ )
+ ),
+)
+_MOCK_TRAJECTORY_EXACT_MATCH_RESULT = (
+ gapic_evaluation_service_types_preview.EvaluateInstancesResponse(
+ trajectory_exact_match_results=gapic_evaluation_service_types_preview.TrajectoryExactMatchResults(
+ trajectory_exact_match_metric_values=[
+ gapic_evaluation_service_types_preview.TrajectoryExactMatchMetricValue(
+ score=1.0
+ ),
+ ]
+ )
+ ),
+ gapic_evaluation_service_types_preview.EvaluateInstancesResponse(
+ trajectory_exact_match_results=gapic_evaluation_service_types_preview.TrajectoryExactMatchResults(
+ trajectory_exact_match_metric_values=[
+ gapic_evaluation_service_types_preview.TrajectoryExactMatchMetricValue(
+ score=0.0
+ ),
+ ]
+ )
+ ),
+)
+_MOCK_POINTWISE_RESULT = (
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ pointwise_metric_result=gapic_evaluation_service_types.PointwiseMetricResult(
+ score=5, explanation="explanation"
+ )
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ pointwise_metric_result=gapic_evaluation_service_types.PointwiseMetricResult(
+ score=4, explanation="explanation"
+ )
+ ),
+)
+_MOCK_PAIRWISE_RESULT = (
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ pairwise_metric_result=gapic_evaluation_service_types.PairwiseMetricResult(
+ pairwise_choice=gapic_evaluation_service_types.PairwiseChoice.BASELINE,
+ explanation="explanation",
+ )
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ pairwise_metric_result=gapic_evaluation_service_types.PairwiseMetricResult(
+ pairwise_choice=gapic_evaluation_service_types.PairwiseChoice.BASELINE,
+ explanation="explanation",
+ )
+ ),
+)
+_MOCK_SUMMARIZATION_QUALITY_RESULT = (
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ pointwise_metric_result=gapic_evaluation_service_types.PointwiseMetricResult(
+ score=5, explanation="explanation"
+ )
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ pointwise_metric_result=gapic_evaluation_service_types.PointwiseMetricResult(
+ score=4, explanation="explanation"
+ )
+ ),
+)
+_MOCK_COHERENCE_RESULT = (
+ gapic_evaluation_service_types_preview.EvaluateInstancesResponse(
+ pointwise_metric_result=gapic_evaluation_service_types_preview.PointwiseMetricResult(
+ score=5, explanation="explanation"
+ )
+ ),
+ gapic_evaluation_service_types_preview.EvaluateInstancesResponse(
+ pointwise_metric_result=gapic_evaluation_service_types_preview.PointwiseMetricResult(
+ score=4, explanation="explanation"
+ )
+ ),
+)
+_MOCK_PAIRWISE_SUMMARIZATION_QUALITY_RESULT = (
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ pairwise_metric_result=gapic_evaluation_service_types.PairwiseMetricResult(
+ pairwise_choice=gapic_evaluation_service_types.PairwiseChoice.BASELINE,
+ explanation="explanation",
+ )
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ pairwise_metric_result=gapic_evaluation_service_types.PairwiseMetricResult(
+ pairwise_choice=gapic_evaluation_service_types.PairwiseChoice.CANDIDATE,
+ explanation="explanation",
+ )
+ ),
+)
+_MOCK_MODEL_INFERENCE_RESPONSE = generative_models.GenerationResponse.from_dict(
+ {
+ "candidates": [
+ {
+ "content": {"parts": [{"text": "test_response"}]},
+ }
+ ]
+ }
+)
+MOCK_EVAL_RESULT = eval_base.EvalResult(
+ summary_metrics={
+ "row_count": 1,
+ "mock_metric/mean": 1.0,
+ "mock_metric/std": np.nan,
+ },
+ metrics_table=pd.DataFrame(
+ {
+ "response": ["test"],
+ "mock_metric": [1.0],
+ }
+ ),
+)
+_EXPECTED_ROUGE_REQUESTS = (
+ gapic_evaluation_service_types.EvaluateInstancesRequest(
+ location=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}",
+ rouge_input=gapic_evaluation_service_types.RougeInput(
+ metric_spec=gapic_evaluation_service_types.RougeSpec(
+ rouge_type="rougeLsum", use_stemmer=True, split_summaries=True
+ ),
+ instances=[
+ gapic_evaluation_service_types.RougeInstance(
+ prediction="test_response", reference="test"
+ ),
+ ],
+ ),
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesRequest(
+ location=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}",
+ rouge_input=gapic_evaluation_service_types.RougeInput(
+ metric_spec=gapic_evaluation_service_types.RougeSpec(
+ rouge_type="rougeLsum", use_stemmer=True, split_summaries=True
+ ),
+ instances=[
+ gapic_evaluation_service_types.RougeInstance(
+ prediction="test_response", reference="ref"
+ ),
+ ],
+ ),
+ ),
+)
+_MOCK_ROUGE_RESULT = (
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ rouge_results=gapic_evaluation_service_types.RougeResults(
+ rouge_metric_values=[
+ gapic_evaluation_service_types.RougeMetricValue(score=1.0)
+ ]
+ )
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ rouge_results=gapic_evaluation_service_types.RougeResults(
+ rouge_metric_values=[
+ gapic_evaluation_service_types.RougeMetricValue(score=0.5)
+ ]
+ )
+ ),
+)
+_EXPECTED_COLUMN_MAPPING = {
+ "context": "context",
+ "reference": "reference",
+ "response": "response",
+ "instruction": "instruction",
+ "prompt": "prompt",
+ "source": "source",
+}
+_MOCK_MODEL_BASED_TRANSLATION_RESULT = (
+ # The order of the responses is important.
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ comet_result=gapic_evaluation_service_types.CometResult(score=0.1)
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ metricx_result=gapic_evaluation_service_types.MetricxResult(score=5)
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ comet_result=gapic_evaluation_service_types.CometResult(score=0.9)
+ ),
+ gapic_evaluation_service_types.EvaluateInstancesResponse(
+ metricx_result=gapic_evaluation_service_types.MetricxResult(score=20)
+ ),
+)
+
+
+@pytest.fixture(scope="module")
+def google_auth_mock():
+ with mock.patch.object(auth, "default") as google_auth_mock:
+ google_auth_mock.return_value = (
+ auth_credentials.AnonymousCredentials(),
+ _TEST_PROJECT,
+ )
+ yield google_auth_mock
+
+
+@pytest.fixture
+def mock_experiment_tracker():
+ with mock.patch.object(
+ metadata, "_experiment_tracker", autospec=True
+ ) as mock_experiment_tracker:
+ yield mock_experiment_tracker
+
+
+@pytest.fixture
+def mock_storage_blob_upload_from_filename():
+ with mock.patch(
+ "google.cloud.storage.Blob.upload_from_filename"
+ ) as mock_blob_upload_from_filename, mock.patch(
+ "google.cloud.storage.Bucket.exists", return_value=True
+ ):
+ yield mock_blob_upload_from_filename
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestEvaluation:
+ def setup_method(self):
+ vertexai.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_create_eval_task(self):
+
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED,
+ metrics=_TEST_METRICS,
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ assert test_eval_task.dataset.equals(_TEST_EVAL_DATASET_ALL_INCLUDED)
+ assert test_eval_task.metrics == _TEST_METRICS
+ assert test_eval_task.experiment == _TEST_EXPERIMENT
+ assert test_eval_task._metric_column_mapping == _EXPECTED_COLUMN_MAPPING
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_exact_match_metric(self, api_transport):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ eval_dataset = pd.DataFrame(
+ {
+ "response": ["test", "text"],
+ "reference": ["test", "ref"],
+ }
+ )
+ test_metrics = ["exact_match"]
+ test_eval_task = EvalTask(dataset=eval_dataset, metrics=test_metrics)
+ mock_metric_results = _MOCK_EXACT_MATCH_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate()
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert test_result.summary_metrics["exact_match/mean"] == 0.5
+ assert test_result.summary_metrics["exact_match/std"] == pytest.approx(0.7, 0.1)
+ assert list(test_result.metrics_table.columns.values) == [
+ "response",
+ "reference",
+ "exact_match/score",
+ ]
+ assert test_result.metrics_table[["response", "reference"]].equals(eval_dataset)
+ assert list(test_result.metrics_table["exact_match/score"].values) == [
+ 1.0,
+ 0.0,
+ ]
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_pointwise_metrics(self, api_transport):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ test_metrics = [_TEST_POINTWISE_METRIC]
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED, metrics=test_metrics
+ )
+ mock_metric_results = _MOCK_POINTWISE_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate()
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert test_result.summary_metrics["test_pointwise_metric/mean"] == 4.5
+ assert test_result.summary_metrics[
+ "test_pointwise_metric/std"
+ ] == pytest.approx(0.7, 0.1)
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "prompt",
+ "response",
+ "context",
+ "instruction",
+ "reference",
+ "test_pointwise_metric/score",
+ "test_pointwise_metric/explanation",
+ "source",
+ ]
+ )
+ assert test_result.metrics_table["response"].equals(
+ _TEST_EVAL_DATASET_ALL_INCLUDED["response"]
+ )
+ assert test_result.metrics_table["prompt"].equals(
+ _TEST_EVAL_DATASET_ALL_INCLUDED["prompt"]
+ )
+ assert list(
+ test_result.metrics_table["test_pointwise_metric/score"].values
+ ) == [5, 4]
+ assert list(
+ test_result.metrics_table["test_pointwise_metric/explanation"].values
+ ) == [
+ "explanation",
+ "explanation",
+ ]
+
+ def test_compute_pointwise_metrics_free_string(self):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED,
+ metrics=[_TEST_POINTWISE_METRIC_FREE_STRING],
+ metric_column_mapping={"abc": "prompt"},
+ )
+ mock_metric_results = _MOCK_POINTWISE_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate()
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert test_result.summary_metrics["test_pointwise_metric_str/mean"] == 4.5
+ assert test_result.summary_metrics[
+ "test_pointwise_metric_str/std"
+ ] == pytest.approx(0.7, 0.1)
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "prompt",
+ "response",
+ "context",
+ "instruction",
+ "reference",
+ "test_pointwise_metric_str/score",
+ "test_pointwise_metric_str/explanation",
+ "source",
+ ]
+ )
+ assert test_result.metrics_table["response"].equals(
+ _TEST_EVAL_DATASET_ALL_INCLUDED["response"]
+ )
+ assert test_result.metrics_table["prompt"].equals(
+ _TEST_EVAL_DATASET_ALL_INCLUDED["prompt"]
+ )
+ assert list(
+ test_result.metrics_table["test_pointwise_metric_str/score"].values
+ ) == [5, 4]
+ assert list(
+ test_result.metrics_table["test_pointwise_metric_str/explanation"].values
+ ) == [
+ "explanation",
+ "explanation",
+ ]
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_pointwise_metrics_metric_prompt_template_example(
+ self, api_transport
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ mock_model = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_model.generate_content.return_value = _MOCK_MODEL_INFERENCE_RESPONSE
+ mock_model._model_name = "publishers/google/model/gemini-1.0-pro"
+ test_metrics = [Pointwise.SUMMARIZATION_QUALITY]
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_WITHOUT_RESPONSE, metrics=test_metrics
+ )
+ mock_metric_results = _MOCK_SUMMARIZATION_QUALITY_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate(
+ model=mock_model,
+ prompt_template="{instruction} test prompt template {context}",
+ )
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert test_result.summary_metrics["summarization_quality/mean"] == 4.5
+ assert test_result.summary_metrics[
+ "summarization_quality/std"
+ ] == pytest.approx(0.7, 0.1)
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "context",
+ "instruction",
+ "reference",
+ "prompt",
+ "response",
+ "summarization_quality/score",
+ "summarization_quality/explanation",
+ ]
+ )
+ assert list(
+ test_result.metrics_table["summarization_quality/score"].values
+ ) == [5, 4]
+ assert list(
+ test_result.metrics_table["summarization_quality/explanation"].values
+ ) == [
+ "explanation",
+ "explanation",
+ ]
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_pointwise_metrics_without_model_inference(self, api_transport):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ test_metrics = [Pointwise.SUMMARIZATION_QUALITY]
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED, metrics=test_metrics
+ )
+ mock_metric_results = _MOCK_SUMMARIZATION_QUALITY_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate()
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert test_result.summary_metrics["summarization_quality/mean"] == 4.5
+ assert test_result.summary_metrics[
+ "summarization_quality/std"
+ ] == pytest.approx(0.7, 0.1)
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "context",
+ "instruction",
+ "reference",
+ "prompt",
+ "response",
+ "summarization_quality/score",
+ "summarization_quality/explanation",
+ "source",
+ ]
+ )
+ assert list(
+ test_result.metrics_table["summarization_quality/score"].values
+ ) == [5, 4]
+ assert list(
+ test_result.metrics_table["summarization_quality/explanation"].values
+ ) == [
+ "explanation",
+ "explanation",
+ ]
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_model_based_translation_metrics_without_model_inference(
+ self, api_transport
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ test_metrics = [_TEST_COMET, _TEST_METRICX]
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED, metrics=test_metrics
+ )
+
+ mock_metric_results = _MOCK_MODEL_BASED_TRANSLATION_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate()
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert test_result.summary_metrics["comet/mean"] == 0.5
+ assert test_result.summary_metrics["metricx/mean"] == 12.5
+ assert test_result.summary_metrics["comet/std"] == pytest.approx(0.5, 0.6)
+ assert test_result.summary_metrics["metricx/std"] == pytest.approx(10, 11)
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "context",
+ "instruction",
+ "reference",
+ "prompt",
+ "response",
+ "source",
+ "comet/score",
+ "metricx/score",
+ ]
+ )
+ assert list(test_result.metrics_table["comet/score"].values) == [0.1, 0.9]
+ assert list(test_result.metrics_table["metricx/score"].values) == [5, 20]
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_automatic_metrics_with_custom_metric_spec(self, api_transport):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ mock_model = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_model.generate_content.return_value = _MOCK_MODEL_INFERENCE_RESPONSE
+ mock_model._model_name = "publishers/google/model/gemini-1.0-pro"
+ test_metrics = [
+ _rouge.Rouge(
+ rouge_type="rougeLsum",
+ use_stemmer=True,
+ split_summaries=True,
+ )
+ ]
+ test_eval_task = evaluation.EvalTask(
+ dataset=_TEST_EVAL_DATASET_WITHOUT_RESPONSE, metrics=test_metrics
+ )
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=_MOCK_ROUGE_RESULT,
+ ) as mock_evaluate_instances:
+ test_result = test_eval_task.evaluate(
+ model=mock_model,
+ )
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert test_result.summary_metrics["rouge/mean"] == 0.75
+ assert test_result.summary_metrics["rouge/std"] == pytest.approx(0.35, 0.1)
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "prompt",
+ "reference",
+ "response",
+ "context",
+ "instruction",
+ "rouge/score",
+ ]
+ )
+ assert list(test_result.metrics_table["rouge/score"].values) == [1, 0.5]
+
+ api_requests = [
+ call.kwargs["request"] for call in mock_evaluate_instances.call_args_list
+ ]
+ assert api_requests == list(_EXPECTED_ROUGE_REQUESTS)
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_pairwise_metrics(self, api_transport):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ mock_baseline_model = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_baseline_model.generate_content.return_value = (
+ _MOCK_MODEL_INFERENCE_RESPONSE
+ )
+ mock_baseline_model._model_name = "publishers/google/model/gemini-pro"
+ mock_candidate_model = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_candidate_model.generate_content.return_value = (
+ _MOCK_MODEL_INFERENCE_RESPONSE
+ )
+ mock_candidate_model._model_name = "publishers/google/model/gemini-pro"
+ _TEST_PAIRWISE_METRIC._baseline_model = mock_baseline_model
+ test_metrics = [_TEST_PAIRWISE_METRIC]
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_WITHOUT_RESPONSE, metrics=test_metrics
+ )
+ mock_metric_results = _MOCK_PAIRWISE_SUMMARIZATION_QUALITY_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate(
+ model=mock_candidate_model,
+ prompt_template="{instruction} test prompt template {context}",
+ )
+ _TEST_PAIRWISE_METRIC._baseline_model = None
+ assert test_result.summary_metrics["row_count"] == 2
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "context",
+ "instruction",
+ "prompt",
+ "response",
+ "reference",
+ "baseline_model_response",
+ "test_pairwise_metric/pairwise_choice",
+ "test_pairwise_metric/explanation",
+ ]
+ )
+ assert list(
+ test_result.metrics_table["test_pairwise_metric/pairwise_choice"].values
+ ) == ["BASELINE", "CANDIDATE"]
+ assert list(
+ test_result.metrics_table["test_pairwise_metric/explanation"].values
+ ) == [
+ "explanation",
+ "explanation",
+ ]
+ assert set(test_result.summary_metrics.keys()) == set(
+ [
+ "row_count",
+ "test_pairwise_metric/candidate_model_win_rate",
+ "test_pairwise_metric/baseline_model_win_rate",
+ ]
+ )
+ assert (
+ test_result.summary_metrics["test_pairwise_metric/candidate_model_win_rate"]
+ == 0.5
+ )
+ assert (
+ test_result.summary_metrics["test_pairwise_metric/baseline_model_win_rate"]
+ == 0.5
+ )
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_pairwise_metrics_metric_prompt_template_example(
+ self, api_transport
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ eval_dataset = _TEST_EVAL_DATASET_WITHOUT_RESPONSE.copy(deep=True)
+ eval_dataset.insert(1, "baseline_model_response", ["baseline", "response"])
+ mock_candidate_model = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_candidate_model.generate_content.return_value = (
+ _MOCK_MODEL_INFERENCE_RESPONSE
+ )
+ mock_candidate_model._model_name = "publishers/google/model/gemini-pro"
+ test_metrics = [Pairwise.SUMMARIZATION_QUALITY]
+ test_eval_task = EvalTask(dataset=eval_dataset, metrics=test_metrics)
+ mock_metric_results = _MOCK_PAIRWISE_SUMMARIZATION_QUALITY_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate(
+ model=mock_candidate_model,
+ prompt_template="{instruction} test prompt template {context}",
+ )
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "context",
+ "instruction",
+ "prompt",
+ "response",
+ "reference",
+ "baseline_model_response",
+ "pairwise_summarization_quality/pairwise_choice",
+ "pairwise_summarization_quality/explanation",
+ ]
+ )
+ assert list(
+ test_result.metrics_table[
+ "pairwise_summarization_quality/pairwise_choice"
+ ].values
+ ) == ["BASELINE", "CANDIDATE"]
+ assert list(
+ test_result.metrics_table[
+ "pairwise_summarization_quality/explanation"
+ ].values
+ ) == [
+ "explanation",
+ "explanation",
+ ]
+ assert set(test_result.summary_metrics.keys()) == set(
+ [
+ "row_count",
+ "pairwise_summarization_quality/candidate_model_win_rate",
+ "pairwise_summarization_quality/baseline_model_win_rate",
+ ]
+ )
+ assert (
+ test_result.summary_metrics[
+ "pairwise_summarization_quality/candidate_model_win_rate"
+ ]
+ == 0.5
+ )
+ assert (
+ test_result.summary_metrics[
+ "pairwise_summarization_quality/baseline_model_win_rate"
+ ]
+ == 0.5
+ )
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_pairwise_metrics_without_model_inference(self, api_transport):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ eval_dataset = _TEST_EVAL_DATASET_ALL_INCLUDED.copy(deep=True)
+ eval_dataset.insert(1, "baseline_model_response", ["baseline", "response"])
+ test_metrics = [Pairwise.SUMMARIZATION_QUALITY]
+ test_eval_task = EvalTask(dataset=eval_dataset, metrics=test_metrics)
+ mock_metric_results = _MOCK_PAIRWISE_SUMMARIZATION_QUALITY_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate()
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "prompt",
+ "response",
+ "baseline_model_response",
+ "reference",
+ "context",
+ "instruction",
+ "pairwise_summarization_quality/pairwise_choice",
+ "pairwise_summarization_quality/explanation",
+ "source",
+ ]
+ )
+ assert list(
+ test_result.metrics_table[
+ "pairwise_summarization_quality/pairwise_choice"
+ ].values
+ ) == ["BASELINE", "CANDIDATE"]
+ assert list(
+ test_result.metrics_table[
+ "pairwise_summarization_quality/explanation"
+ ].values
+ ) == [
+ "explanation",
+ "explanation",
+ ]
+ assert set(test_result.summary_metrics.keys()) == set(
+ [
+ "row_count",
+ "pairwise_summarization_quality/candidate_model_win_rate",
+ "pairwise_summarization_quality/baseline_model_win_rate",
+ ]
+ )
+ assert (
+ test_result.summary_metrics[
+ "pairwise_summarization_quality/candidate_model_win_rate"
+ ]
+ == 0.5
+ )
+ assert (
+ test_result.summary_metrics[
+ "pairwise_summarization_quality/baseline_model_win_rate"
+ ]
+ == 0.5
+ )
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_compute_multiple_metrics(self, api_transport):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ mock_baseline_model = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_baseline_model.generate_content.return_value = (
+ _MOCK_MODEL_INFERENCE_RESPONSE
+ )
+ mock_baseline_model._model_name = "publishers/google/model/gemini-pro"
+ _TEST_PAIRWISE_METRIC._baseline_model = mock_baseline_model
+ mock_model = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_model.generate_content.return_value = _MOCK_MODEL_INFERENCE_RESPONSE
+ mock_model._model_name = "publishers/google/model/gemini-pro"
+ test_metrics = [
+ "exact_match",
+ Pointwise.SUMMARIZATION_QUALITY,
+ _TEST_PAIRWISE_METRIC,
+ ]
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_WITHOUT_RESPONSE, metrics=test_metrics
+ )
+ mock_metric_results = (
+ _MOCK_EXACT_MATCH_RESULT[0],
+ _MOCK_SUMMARIZATION_QUALITY_RESULT[0],
+ _MOCK_PAIRWISE_SUMMARIZATION_QUALITY_RESULT[0],
+ _MOCK_EXACT_MATCH_RESULT[1],
+ _MOCK_SUMMARIZATION_QUALITY_RESULT[1],
+ _MOCK_PAIRWISE_SUMMARIZATION_QUALITY_RESULT[1],
+ )
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate(
+ model=mock_model,
+ prompt_template="{instruction} test prompt template {context}",
+ )
+
+ _TEST_PAIRWISE_METRIC._baseline_model = None
+ assert test_result.summary_metrics["row_count"] == 2
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "prompt",
+ "response",
+ "baseline_model_response",
+ "reference",
+ "context",
+ "instruction",
+ "exact_match/score",
+ "summarization_quality/score",
+ "summarization_quality/explanation",
+ "test_pairwise_metric/pairwise_choice",
+ "test_pairwise_metric/explanation",
+ ]
+ )
+ assert list(test_result.metrics_table["exact_match/score"].values) == [
+ 1.0,
+ 0.0,
+ ]
+
+ assert list(
+ test_result.metrics_table["test_pairwise_metric/pairwise_choice"].values
+ ) == ["BASELINE", "CANDIDATE"]
+ assert list(
+ test_result.metrics_table["test_pairwise_metric/explanation"].values
+ ) == [
+ "explanation",
+ "explanation",
+ ]
+ assert (
+ test_result.summary_metrics["test_pairwise_metric/candidate_model_win_rate"]
+ == 0.5
+ )
+ assert (
+ test_result.summary_metrics["test_pairwise_metric/baseline_model_win_rate"]
+ == 0.5
+ )
+
+ assert list(
+ test_result.metrics_table["summarization_quality/score"].values
+ ) == [5, 4]
+ assert list(
+ test_result.metrics_table["summarization_quality/explanation"].values
+ ) == [
+ "explanation",
+ "explanation",
+ ]
+
+ def test_eval_result_experiment_run_logging(self):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED,
+ metrics=[Pointwise.FLUENCY],
+ experiment=_TEST_EXPERIMENT,
+ )
+
+ with mock.patch.multiple(
+ metadata._experiment_tracker,
+ _experiment=mock.MagicMock(name=_TEST_EXPERIMENT),
+ _experiment_run=None,
+ set_experiment=mock.DEFAULT,
+ reset=mock.DEFAULT,
+ ):
+ with mock.patch.multiple(
+ vertexai.preview,
+ start_run=mock.MagicMock(),
+ log_params=mock.DEFAULT,
+ log_metrics=mock.DEFAULT,
+ ) as mock_metadata:
+ with mock.patch.object(
+ target=_evaluation,
+ attribute="evaluate",
+ side_effect=[MOCK_EVAL_RESULT],
+ ):
+ _ = test_eval_task.evaluate()
+
+ mock_metadata["log_metrics"].assert_called_once_with(
+ {"row_count": 1, "mock_metric/mean": 1.0, "mock_metric/std": "NaN"}
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestAgentEvaluation:
+ def setup_method(self):
+ vertexai.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_runnable_response_eval_with_runnable_inference(self, api_transport):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ mock_runnable = mock.create_autospec(reasoning_engines.Queryable, instance=True)
+ mock_runnable.query.return_value = _MOCK_RUNNABLE_INFERENCE_RESPONSE
+
+ test_metrics = [PointwisePreview.COHERENCE]
+ test_eval_task = EvalTaskPreview(
+ dataset=_TEST_AGENT_EVAL_DATASET_WITHOUT_RESPONSE, metrics=test_metrics
+ )
+ mock_metric_results = _MOCK_COHERENCE_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services_preview.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate(
+ runnable=mock_runnable,
+ prompt_template="test prompt template",
+ )
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert test_result.summary_metrics["coherence/mean"] == 4.5
+ assert test_result.summary_metrics["coherence/std"] == pytest.approx(0.7, 0.1)
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "prompt",
+ "reference_trajectory",
+ "response",
+ "latency_in_seconds",
+ "failure",
+ "predicted_trajectory",
+ "coherence/score",
+ "coherence/explanation",
+ ]
+ )
+ assert list(test_result.metrics_table["coherence/score"].values) == [5, 4]
+ assert list(test_result.metrics_table["coherence/explanation"].values) == [
+ "explanation",
+ "explanation",
+ ]
+
+ @pytest.mark.parametrize("api_transport", ["grpc", "rest"])
+ def test_runnable_trajectory_eval_with_runnable_inference(self, api_transport):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ api_transport=api_transport,
+ )
+ mock_runnable = mock.create_autospec(reasoning_engines.Queryable, instance=True)
+ mock_runnable.query.return_value = _MOCK_RUNNABLE_INFERENCE_RESPONSE
+
+ test_metrics = ["trajectory_exact_match"]
+ test_eval_task = EvalTaskPreview(
+ dataset=_TEST_AGENT_EVAL_DATASET_WITHOUT_RESPONSE, metrics=test_metrics
+ )
+ mock_metric_results = _MOCK_TRAJECTORY_EXACT_MATCH_RESULT
+ with mock.patch.object(
+ target=gapic_evaluation_services_preview.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate(runnable=mock_runnable)
+
+ assert test_result.summary_metrics["row_count"] == 2
+ assert test_result.summary_metrics["trajectory_exact_match/mean"] == 0.5
+ assert test_result.summary_metrics[
+ "trajectory_exact_match/std"
+ ] == pytest.approx(0.7, 0.1)
+ assert set(test_result.metrics_table.columns.values) == set(
+ [
+ "prompt",
+ "response",
+ "latency_in_seconds",
+ "failure",
+ "predicted_trajectory",
+ "reference_trajectory",
+ "trajectory_exact_match/score",
+ ]
+ )
+ assert list(
+ test_result.metrics_table["trajectory_exact_match/score"].values
+ ) == [1.0, 0.0]
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestEvaluationErrors:
+ def setup_method(self):
+ vertexai.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_evaluate_empty_metrics(self):
+ test_eval_task = EvalTask(dataset=_TEST_EVAL_DATASET_WITHOUT_PROMPT, metrics=[])
+ with pytest.raises(ValueError, match="Metrics cannot be empty."):
+ test_eval_task.evaluate()
+
+ def test_evaluate_invalid_metrics(self):
+ metric_name = "invalid_metric"
+ with pytest.raises(
+ ValueError,
+ match=f"Metric name: {metric_name} is not supported.",
+ ):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_WITHOUT_PROMPT, metrics=[metric_name]
+ )
+ test_eval_task.evaluate()
+
+ def test_evaluate_duplicate_string_metric(self):
+ metrics = [
+ "exact_match",
+ "exact_match",
+ ]
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_WITHOUT_PROMPT, metrics=metrics
+ )
+ with pytest.raises(
+ ValueError,
+ match="Duplicate string metric name found: 'exact_match'",
+ ):
+ test_eval_task.evaluate()
+
+ def test_evaluate_duplicate_metric_instances(self):
+ metrics = [
+ Pointwise.SUMMARIZATION_QUALITY,
+ Pointwise.SUMMARIZATION_QUALITY,
+ ]
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED, metrics=metrics
+ )
+ with pytest.raises(
+ ValueError,
+ match=(
+ "Duplicate Metric instances of the same metric name found:"
+ " 'summarization_quality'"
+ ),
+ ):
+ test_eval_task.evaluate()
+
+ def test_evaluate_invalid_experiment_run_name(self):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_WITHOUT_PROMPT, metrics=_TEST_METRICS
+ )
+ with pytest.raises(ValueError, match="Experiment is not set"):
+ test_eval_task.evaluate(experiment_run_name="invalid_experiment_run_name")
+
+ with pytest.raises(ValueError, match="Experiment is not set"):
+ test_eval_task.display_runs()
+
+ def test_evaluate_experiment_name_already_exists(self, mock_experiment_tracker):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_WITHOUT_PROMPT,
+ metrics=_TEST_METRICS,
+ experiment="test_eval_experiment_name",
+ )
+ mock_experiment_tracker.experiment_run.return_value = "experiment_run_1"
+ with pytest.raises(ValueError, match="Experiment run already exists"):
+ test_eval_task.evaluate(experiment_run_name="experiment_run_2")
+
+ def test_evaluate_response_column_and_model_provided(self):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED,
+ metrics=[_TEST_POINTWISE_METRIC],
+ )
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ (
+ "The `model` parameter or `baseline_model` in pairwise metric is"
+ " specified, but the evaluation `dataset` contains model response"
+ " column or baseline model response column `response`"
+ " to perform bring-your-own-response(BYOR) evaluation. If you would"
+ " like to perform evaluation using the dataset with the"
+ " existing model response column or or baseline model response column"
+ " `response`, please remove `model` parameter in `EvalTask.evaluate()`"
+ " function or `baseline_model` in `PairwiseMetric`."
+ )
+ ),
+ ):
+ test_eval_task.evaluate(model=mock.MagicMock())
+
+ def test_evaluate_baseline_response_column_and_baseline_model_provided(self):
+ _TEST_PAIRWISE_METRIC._baseline_model = mock.MagicMock()
+ eval_dataset = _TEST_EVAL_DATASET_WITHOUT_RESPONSE.copy(deep=True)
+ eval_dataset.insert(1, "baseline_model_response", ["baseline", "response"])
+ test_eval_task = EvalTask(
+ dataset=eval_dataset,
+ metrics=[_TEST_PAIRWISE_METRIC],
+ )
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ (
+ "The `model` parameter or `baseline_model` in pairwise metric is"
+ " specified, but the evaluation `dataset` contains model response"
+ " column or baseline model response column `baseline_model_response`"
+ " to perform bring-your-own-response(BYOR) evaluation. If you would"
+ " like to perform evaluation using the dataset with the"
+ " existing model response column or or baseline model response column"
+ " `baseline_model_response`, please remove `model` parameter in"
+ " `EvalTask.evaluate()` function or `baseline_model` in"
+ " `PairwiseMetric`."
+ )
+ ),
+ ):
+ test_eval_task.evaluate(model=mock.MagicMock())
+ _TEST_PAIRWISE_METRIC._baseline_model = None
+
+ def test_evaluate_baseline_model_provided_but_no_baseline_response_column(self):
+ mock_baseline_model = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_baseline_model.generate_content.return_value = (
+ _MOCK_MODEL_INFERENCE_RESPONSE
+ )
+ mock_baseline_model._model_name = "publishers/google/model/gemini-pro"
+ _TEST_PAIRWISE_METRIC._baseline_model = mock_baseline_model
+
+ mock_candidate_model = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_candidate_model.generate_content.return_value = (
+ _MOCK_MODEL_INFERENCE_RESPONSE
+ )
+ mock_candidate_model._model_name = "publishers/google/model/gemini-1.0-pro"
+ mock_metric_results = _MOCK_PAIRWISE_RESULT
+ eval_dataset = _TEST_EVAL_DATASET_WITHOUT_RESPONSE.copy(deep=True)
+ test_eval_task = EvalTask(
+ dataset=eval_dataset,
+ metrics=[_TEST_PAIRWISE_METRIC],
+ )
+ with mock.patch.object(
+ target=gapic_evaluation_services.EvaluationServiceClient,
+ attribute="evaluate_instances",
+ side_effect=mock_metric_results,
+ ):
+ test_result = test_eval_task.evaluate(
+ model=mock_candidate_model,
+ )
+ _TEST_PAIRWISE_METRIC._baseline_model = None
+ assert test_result.summary_metrics["row_count"] == 2
+
+ def test_evaluate_response_column_and_model_not_provided(self):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_SINGLE,
+ metrics=[_TEST_POINTWISE_METRIC],
+ )
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ (
+ "Cannot find the `response` column in the evaluation dataset"
+ " to fill the metric prompt template for"
+ " `test_pointwise_metric` metric."
+ )
+ ),
+ ):
+ test_eval_task.evaluate()
+
+ def test_evaluate_baseline_model_response_column_not_provided(
+ self,
+ ):
+ test_eval_dataset = _TEST_EVAL_DATASET_SINGLE.copy(deep=True)
+ test_eval_dataset.insert(1, "response", ["test", "response"])
+ test_eval_task = EvalTask(
+ dataset=test_eval_dataset,
+ metrics=[_TEST_PAIRWISE_METRIC],
+ )
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ (
+ "Cannot find the `baseline_model_response` column in the"
+ " evaluation dataset to fill the metric prompt template for"
+ " `test_pairwise_metric` metric."
+ )
+ ),
+ ):
+ test_eval_task.evaluate()
+
+ @pytest.mark.parametrize("eval_task_version", [EvalTask, EvalTaskPreview])
+ def test_evaluate_response_column_not_provided(self, eval_task_version):
+ test_eval_dataset = _TEST_EVAL_DATASET_SINGLE
+ test_eval_task = eval_task_version(
+ dataset=test_eval_dataset,
+ metrics=["exact_match"],
+ )
+ with pytest.raises(
+ KeyError,
+ match=re.escape(
+ (
+ "Required column `response` not found in the evaluation "
+ "dataset. The columns in the evaluation dataset are ['prompt']"
+ )
+ ),
+ ):
+ test_eval_task.evaluate()
+
+ @pytest.mark.parametrize("eval_task_version", [EvalTask, EvalTaskPreview])
+ def test_evaluate_reference_column_not_provided(self, eval_task_version):
+ test_eval_dataset = pd.DataFrame({"response": ["test", "text"]})
+ test_eval_task = eval_task_version(
+ dataset=test_eval_dataset,
+ metrics=["exact_match"],
+ )
+ with pytest.raises(
+ KeyError,
+ match=re.escape(
+ (
+ "Required column `reference` not found in the evaluation "
+ "dataset. The columns in the evaluation dataset are ['response']"
+ )
+ ),
+ ):
+ test_eval_task.evaluate()
+
+ def test_evaluate_reference_or_source_column_not_provided(
+ self,
+ ):
+ test_eval_dataset = pd.DataFrame({"response": ["test", "text"]})
+ test_eval_task = EvalTask(
+ dataset=test_eval_dataset,
+ metrics=[_TEST_COMET, _TEST_METRICX],
+ )
+ with pytest.raises(
+ KeyError,
+ match=re.escape(
+ (
+ "Required column `source` not found in the evaluation "
+ "dataset. The columns in the evaluation dataset are ['response']"
+ )
+ ),
+ ):
+ test_eval_task.evaluate()
+
+ def test_evaluate_invalid_prompt_template_variables(self):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_SINGLE,
+ metrics=[Pointwise.FLUENCY],
+ )
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ (
+ "Failed to assemble prompt template: The following column(s) are"
+ " missing: invalid_variable. Please verify prompt_template"
+ " variables {'invalid_variable'} and evaluation dataset"
+ " column names {'prompt'}."
+ )
+ ),
+ ):
+ test_eval_task.evaluate(
+ prompt_template="test_prompt_template {invalid_variable}",
+ )
+
+ def test_evaluate_pairwise_metrics_with_multiple_baseline_models(self):
+ mock_baseline_model_1 = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_baseline_model_1._model_name = "publishers/google/model/gemini-1.0-pro"
+ mock_baseline_model_2 = mock.create_autospec(
+ generative_models.GenerativeModel, instance=True
+ )
+ mock_baseline_model_2._model_name = "publishers/google/model/gemini-1.5-pro"
+ test_metrics = [
+ pairwise_metric.PairwiseMetric(
+ metric="pairwise_metric_1",
+ metric_prompt_template="test_prompt_template",
+ baseline_model=mock_baseline_model_1,
+ ),
+ pairwise_metric.PairwiseMetric(
+ metric="pairwise_metric_2",
+ metric_prompt_template="test_prompt_template",
+ baseline_model=mock_baseline_model_2,
+ ),
+ ]
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED, metrics=test_metrics
+ )
+ with pytest.raises(
+ ValueError,
+ match="Not all `PairwiseMetric` instances have the same `baseline_model`",
+ ):
+ test_eval_task.evaluate()
+
+ def test_evaluate_invalid_model_and_dataset_input(self):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_WITHOUT_PROMPT,
+ metrics=[_TEST_POINTWISE_METRIC],
+ )
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ (
+ "The `model` parameter or `baseline_model` in pairwise metric is"
+ " specified, but the evaluation `dataset` contains model response"
+ " column or baseline model response column `response`"
+ " to perform bring-your-own-response(BYOR) evaluation. If you would"
+ " like to perform evaluation using the dataset with the"
+ " existing model response column or or baseline model response column"
+ " `response`, please remove `model` parameter in `EvalTask.evaluate()`"
+ " function or `baseline_model` in `PairwiseMetric`."
+ )
+ ),
+ ):
+ test_eval_task.evaluate(
+ model=generative_models.GenerativeModel(model_name="invalid_model_name")
+ )
+
+ def test_unmatched_metric_column_mapping(self):
+ test_eval_task = EvalTask(
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED,
+ metrics=[_TEST_POINTWISE_METRIC_FREE_STRING],
+ )
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ (
+ "The `model` parameter or `baseline_model` in pairwise metric is"
+ " specified, but the evaluation `dataset` contains model response"
+ " column or baseline model response column `response`"
+ " to perform bring-your-own-response(BYOR) evaluation. If you would"
+ " like to perform evaluation using the dataset with the"
+ " existing model response column or or baseline model response column"
+ " `response`, please remove `model` parameter in `EvalTask.evaluate()`"
+ " function or `baseline_model` in `PairwiseMetric`."
+ )
+ ),
+ ):
+ test_eval_task.evaluate(
+ model=generative_models.GenerativeModel(model_name="invalid_model_name")
+ )
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestEvaluationUtils:
+ def setup_method(self):
+ vertexai.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_create_evaluation_service_client(self):
+ client = utils.create_evaluation_service_client()
+ assert isinstance(client, utils._EvaluationServiceClientWithOverride)
+
+ def test_load_dataset_from_dataframe(self):
+ data = {"col1": [1, 2], "col2": ["a", "b"]}
+ df = pd.DataFrame(data)
+ loaded_df = utils.load_dataset(df)
+ assert loaded_df.equals(df)
+
+ def test_load_dataset_from_dict(self):
+ data = {"col1": [1, 2], "col2": ["a", "b"]}
+ loaded_df = utils.load_dataset(data)
+ assert isinstance(loaded_df, pd.DataFrame)
+ assert loaded_df.to_dict("list") == data
+
+ def test_load_dataset_from_gcs_jsonl(self):
+ source = "gs://test_bucket/test_file.jsonl"
+ with mock.patch.object(
+ utils,
+ "_read_gcs_file_contents",
+ return_value=_TEST_JSONL_FILE_CONTENT,
+ ):
+ loaded_df = utils.load_dataset(source)
+
+ assert isinstance(loaded_df, pd.DataFrame)
+ assert loaded_df.to_dict("list") == {
+ "prompt": ["prompt", "test"],
+ "reference": ["reference", "test"],
+ }
+
+ def test_load_dataset_from_gcs_csv(self):
+ source = "gs://test_bucket/test_file.csv"
+ with mock.patch.object(
+ utils, "_read_gcs_file_contents", return_value=_TEST_CSV_FILE_CONTENT
+ ):
+ loaded_df = utils.load_dataset(source)
+
+ assert isinstance(loaded_df, pd.DataFrame)
+ assert loaded_df.to_dict("list") == {
+ "reference": ["test", "text"],
+ "context": ["test", "text"],
+ "instruction": ["test", "text"],
+ }
+
+ def test_load_dataset_from_bigquery(self):
+ source = "bq://project-id.dataset.table_name"
+ with mock.patch.object(
+ utils, "_load_bigquery", return_value=_TEST_EVAL_DATASET_WITHOUT_PROMPT
+ ):
+ loaded_df = utils.load_dataset(source)
+
+ assert isinstance(loaded_df, pd.DataFrame)
+ assert loaded_df.equals(_TEST_EVAL_DATASET_WITHOUT_PROMPT)
+
+ def test_initialization(self):
+ limiter = utils.RateLimiter(rate=2)
+ assert limiter.seconds_per_event == 0.5
+
+ with pytest.raises(ValueError, match="Rate must be a positive number"):
+ utils.RateLimiter(-1)
+ with pytest.raises(ValueError, match="Rate must be a positive number"):
+ utils.RateLimiter(0)
+
+ def test_admit(self):
+ rate_limiter = utils.RateLimiter(rate=2)
+
+ assert rate_limiter._admit() == 0
+
+ time.sleep(0.1)
+ delay = rate_limiter._admit()
+ assert delay == pytest.approx(0.4, 0.01)
+
+ time.sleep(0.5)
+ delay = rate_limiter._admit()
+ assert delay == 0
+
+ def test_sleep_and_advance(self):
+ rate_limiter = utils.RateLimiter(rate=2)
+
+ start_time = time.time()
+ rate_limiter.sleep_and_advance()
+ assert (time.time() - start_time) < 0.1
+
+ start_time = time.time()
+ rate_limiter.sleep_and_advance()
+ assert (time.time() - start_time) >= 0.5
+
+ def test_thread_safety(self):
+ rate_limiter = utils.RateLimiter(rate=2)
+ start_time = time.time()
+
+ def target():
+ rate_limiter.sleep_and_advance()
+
+ threads = [threading.Thread(target=target) for _ in range(10)]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+
+ # Verify that the total minimum time should be 4.5 seconds
+ # (9 intervals of 0.5 seconds each).
+ total_time = time.time() - start_time
+ assert total_time >= 4.5
+
+ # TODO(b/361123127) Add test_to_metrics_spec back
+
+ def test_initialize_metric_column_mapping(self):
+ metric_column_mapping = {
+ "prompt": "prompt2",
+ "response": "response1",
+ "reference": "reference",
+ }
+ converted_metric_column_mapping = utils.initialize_metric_column_mapping(
+ metric_column_mapping=metric_column_mapping,
+ dataset=_TEST_EVAL_DATASET_ALL_INCLUDED,
+ )
+ assert converted_metric_column_mapping == _EXPECTED_COLUMN_MAPPING
+
+
+class TestPromptTemplate:
+ def test_init(self):
+ template_str = "Hello, {name}!"
+ prompt_template = evaluation.PromptTemplate(template_str)
+ assert prompt_template.template == template_str
+
+ def test_get_variables(self):
+ template_str = "Hello, {name}! Today is {day}."
+ prompt_template = evaluation.PromptTemplate(template_str)
+ assert prompt_template.variables == {"name", "day"}
+
+ def test_format(self):
+ template_str = "Hello, {name}! Today is {day}."
+ prompt_template = evaluation.PromptTemplate(template_str)
+ assembled_prompt = prompt_template.assemble(name="John", day="Monday")
+ assert str(assembled_prompt) == "Hello, John! Today is Monday."
+
+ def test_format_missing_variable(self):
+ template_str = "Hello, {name}!"
+ prompt_template = evaluation.PromptTemplate(template_str)
+ assembled_prompt = prompt_template.assemble()
+ assert str(assembled_prompt) == "Hello, {name}!"
+ assert prompt_template.variables == {"name"}
+
+ def test_partial_format(self):
+ template_str = "Hello, {name}! Today is {day}."
+ prompt_template = evaluation.PromptTemplate(template_str)
+ partially_assembled_prompt = prompt_template.assemble(name="John")
+
+ assert isinstance(partially_assembled_prompt, evaluation.PromptTemplate)
+ assert str(partially_assembled_prompt) == "Hello, John! Today is {day}."
+ assert partially_assembled_prompt.variables == {"day"}
+
+ assembled_prompt = partially_assembled_prompt.assemble(day="Monday")
+ assert str(assembled_prompt) == "Hello, John! Today is Monday."
+
+ def test_str(self):
+ template_str = "Hello, world!"
+ prompt_template = evaluation.PromptTemplate(template_str)
+ assert str(prompt_template) == template_str
+
+ def test_repr(self):
+ template_str = "Hello, {name}!"
+ prompt_template = evaluation.PromptTemplate(template_str)
+ assert repr(prompt_template) == f"PromptTemplate('{template_str}')"
+
+ def test_pointwise_metric_prompt_template(self):
+ pointwise_metric_prompt_template = evaluation.PointwiseMetricPromptTemplate(
+ criteria={"metric1": "summarization"},
+ rating_rubric={"1": "good", "0": "bad"},
+ input_variables=["country"],
+ instruction="hello",
+ metric_definition="this is eval metric",
+ evaluation_steps={"step1": "start", "step2": "finish"},
+ few_shot_examples=["Q: hi A: hello"],
+ )
+ assert (
+ str(pointwise_metric_prompt_template)
+ == _EXPECTED_POINTWISE_PROMPT_TEMPLATE.strip()
+ )
+
+ def test_pointwise_metric_prompt_template_with_default_values(self):
+ pointwise_metric_prompt_template = evaluation.PointwiseMetricPromptTemplate(
+ criteria=_CRITERIA,
+ rating_rubric=_POINTWISE_RATING_RUBRIC,
+ )
+ assert (
+ str(pointwise_metric_prompt_template)
+ == _EXPECTED_POINTWISE_PROMPT_TEMPLATE_WITH_DEFAULT_VALUES.strip()
+ )
+
+ def test_pairtwise_metric_prompt_template(self):
+ pairwise_metric_prompt_template = evaluation.PairwiseMetricPromptTemplate(
+ criteria={"metric1": "summarization"},
+ rating_rubric={"A": "good", "B": "good"},
+ input_variables=["country"],
+ instruction="hello",
+ metric_definition="this is eval metric",
+ evaluation_steps={"step1": "start", "step2": "finish"},
+ few_shot_examples=["Q: hi A: hello"],
+ )
+ assert (
+ str(pairwise_metric_prompt_template)
+ == _EXPECTED_PAIRWISE_PROMPT_TEMPLATE.strip()
+ )
+
+ def test_pairtwise_metric_prompt_template_with_default_values(self):
+ pairwise_metric_prompt_template = evaluation.PairwiseMetricPromptTemplate(
+ criteria=_CRITERIA,
+ rating_rubric=_PAIRWISE_RATING_RUBRIC,
+ )
+ assert (
+ str(pairwise_metric_prompt_template)
+ == _EXPECTED_PAIRWISE_PROMPT_TEMPLATE_WITH_DEFAULT_VALUES.strip()
+ )
+
+ def test_upload_results(self, mock_storage_blob_upload_from_filename):
+ evaluation.utils.upload_evaluation_results(
+ _TEST_CSV, _TEST_BUCKET, _TEST_FILE_NAME
+ )
+ assert mock_storage_blob_upload_from_filename.called_once_with(_TEST_CSV)
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_extensions.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_extensions.py
new file mode 100644
index 0000000000000000000000000000000000000000..8de81baee4bb740797c5b38166ff8588bc6ba18e
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_extensions.py
@@ -0,0 +1,446 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import importlib
+import json
+from unittest import mock
+
+from google import auth
+from google.api_core import operation as ga_operation
+from google.auth import credentials as auth_credentials
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import utils as aip_utils
+from google.cloud.aiplatform_v1beta1 import types
+from google.cloud.aiplatform_v1beta1.services import (
+ extension_execution_service,
+)
+from google.cloud.aiplatform_v1beta1.services import (
+ extension_registry_service,
+)
+from vertexai.generative_models import _generative_models
+from vertexai.preview import extensions
+from vertexai.reasoning_engines import _utils
+import pytest
+
+
+_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials())
+_TEST_AUTH_CONFIG = types.AuthConfig(auth_type="GOOGLE_SERVICE_ACCOUNT_AUTH")
+_TEST_RESOURCE_ID = "1028944691210842416"
+_TEST_OPEN_API_GCS_URI = "gs://vertex-extension-experiment/code_interpreter.yaml"
+_TEST_OPEN_API_YAML = """
+ openapi: 3.0.0
+ info:
+ title: SomeApi
+ version: 1.0.0
+ servers:
+ - url: https://www.someapi.com
+ paths:
+ /path1:
+ get:
+ summary: Request description
+ operationId: requestSomething
+ parameters:
+ - name: request_parameter
+ in: query
+ required: true
+ schema:
+ type: string
+ responses:
+ '200':
+ description: Response description
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ response_parameter:
+ type: string"""
+_TEST_EXTENSION_MANIFEST_NAME = "code_interpreter_tool"
+_TEST_EXTENSION_MANIFEST_DESCRIPTION = "Google Code Interpreter Extension"
+_TEST_EXTENSION_MANIFEST_WITH_GCS_URI_OBJ = types.ExtensionManifest(
+ name=_TEST_EXTENSION_MANIFEST_NAME,
+ description=_TEST_EXTENSION_MANIFEST_DESCRIPTION,
+ api_spec=types.ExtensionManifest.ApiSpec(
+ open_api_gcs_uri=_TEST_OPEN_API_GCS_URI,
+ ),
+ auth_config=_TEST_AUTH_CONFIG,
+)
+_TEST_EXTENSION_MANIFEST_WITH_YAML_OBJ = types.ExtensionManifest(
+ name=_TEST_EXTENSION_MANIFEST_NAME,
+ description=_TEST_EXTENSION_MANIFEST_DESCRIPTION,
+ api_spec=types.ExtensionManifest.ApiSpec(
+ open_api_yaml=_TEST_OPEN_API_YAML,
+ ),
+ auth_config=_TEST_AUTH_CONFIG,
+)
+_TEST_EXTENSION_MANIFEST_WITH_NO_API_SPEC = types.ExtensionManifest(
+ name=_TEST_EXTENSION_MANIFEST_NAME,
+ description=_TEST_EXTENSION_MANIFEST_DESCRIPTION,
+ auth_config=_TEST_AUTH_CONFIG,
+)
+_TEST_LOCATION = "us-central1"
+_TEST_PROJECT = "test-project"
+_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
+_TEST_EXTENSION_RESOURCE_NAME = f"{_TEST_PARENT}/extensions/{_TEST_RESOURCE_ID}"
+_TEST_EXTENSION_DISPLAY_NAME = "Extension Display Name"
+_TEST_EXTENSION_OBJ = types.Extension(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ display_name=_TEST_EXTENSION_DISPLAY_NAME,
+ manifest=_TEST_EXTENSION_MANIFEST_WITH_GCS_URI_OBJ,
+)
+_TEST_EXTENSION_WITH_YAML_API_SPEC_OBJ = types.Extension(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ display_name=_TEST_EXTENSION_DISPLAY_NAME,
+ manifest=_TEST_EXTENSION_MANIFEST_WITH_YAML_OBJ,
+)
+_TEST_EXTENSION_WITH_NO_API_SPEC_OBJ = types.Extension(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ display_name=_TEST_EXTENSION_DISPLAY_NAME,
+ manifest=_TEST_EXTENSION_MANIFEST_WITH_NO_API_SPEC,
+)
+_TEST_EXTENSION_OPERATION_ID = "search"
+_TEST_QUERY_PROMPT = "Find the first fibonacci number greater than 999"
+_TEST_EXTENSION_OPERATION_PARAMS = {"query": _TEST_QUERY_PROMPT}
+_TEST_RESPONSE_CONTENT = json.dumps(
+ {
+ "execution_error": "",
+ "execution_result": "The first fibonacci number greater than 999 is 1597\n",
+ "generated_code": "```python\n"
+ "def fibonacci(n):\n"
+ " a, b = 0, 1\n"
+ " for _ in range(n):\n"
+ " a, b = b, a + b\n"
+ " return a\n"
+ "\n"
+ "# Find the first fibonacci number greater than 999\n"
+ "n = 1\n"
+ "while fibonacci(n) <= 999:\n"
+ " n += 1\n"
+ "\n"
+ 'print(f"The first fibonacci number greater than 999 is '
+ '{fibonacci(n)}")\n'
+ "```",
+ "output_files": [],
+ }
+)
+_TEST_EXECUTE_EXTENSION_RESPONSE = types.ExecuteExtensionResponse(
+ content=_TEST_RESPONSE_CONTENT,
+)
+
+
+@pytest.fixture(scope="module")
+def google_auth_mock():
+ with mock.patch.object(auth, "default") as google_auth_mock:
+ google_auth_mock.return_value = (
+ auth_credentials.AnonymousCredentials(),
+ _TEST_PROJECT,
+ )
+ yield google_auth_mock
+
+
+@pytest.fixture
+def get_extension_mock():
+ with mock.patch.object(
+ extension_registry_service.ExtensionRegistryServiceClient,
+ "get_extension",
+ ) as get_extension_mock:
+ api_client_mock = mock.Mock(
+ spec=extension_registry_service.ExtensionRegistryServiceClient,
+ )
+ api_client_mock.get_extension.return_value = _TEST_EXTENSION_OBJ
+ get_extension_mock.return_value = api_client_mock
+ yield get_extension_mock
+
+
+@pytest.fixture
+def create_extension_mock():
+ with mock.patch.object(
+ extension_registry_service.ExtensionRegistryServiceClient,
+ "import_extension",
+ ) as create_extension_mock:
+ create_extension_lro_mock = mock.Mock(ga_operation.Operation)
+ create_extension_lro_mock.result.return_value = _TEST_EXTENSION_OBJ
+ create_extension_mock.return_value = create_extension_lro_mock
+ yield create_extension_mock
+
+
+@pytest.fixture
+def execute_extension_mock():
+ with mock.patch.object(
+ extension_execution_service.ExtensionExecutionServiceClient, "execute_extension"
+ ) as execute_extension_mock:
+ execute_extension_mock.return_value.content = _TEST_RESPONSE_CONTENT
+ yield execute_extension_mock
+
+
+@pytest.fixture
+def query_extension_mock():
+ with mock.patch.object(
+ extension_execution_service.ExtensionExecutionServiceClient, "query_extension"
+ ) as query_extension_mock:
+ query_extension_mock.return_value.steps = [
+ types.Content(
+ role="user",
+ parts=[
+ types.Part(
+ text=_TEST_QUERY_PROMPT,
+ )
+ ],
+ ),
+ types.Content(
+ role="extension",
+ parts=[
+ types.Part(
+ text=_TEST_RESPONSE_CONTENT,
+ )
+ ],
+ ),
+ ]
+ query_extension_mock.return_value.failure_message = ""
+ yield query_extension_mock
+
+
+@pytest.fixture
+def delete_extension_mock():
+ with mock.patch.object(
+ extension_registry_service.ExtensionRegistryServiceClient,
+ "delete_extension",
+ ) as delete_extension_mock:
+ delete_extension_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_extension_lro_mock.result.return_value = None
+ delete_extension_mock.return_value = delete_extension_lro_mock
+ yield delete_extension_mock
+
+
+@pytest.fixture
+def to_dict_mock():
+ with mock.patch.object(_utils, "to_dict") as to_dict_mock:
+ to_dict_mock.return_value = {}
+ yield to_dict_mock
+
+
+@pytest.fixture
+def load_yaml_mock():
+ with mock.patch.object(
+ aip_utils.yaml_utils,
+ "load_yaml",
+ autospec=True,
+ ) as load_yaml_mock:
+ load_yaml_mock.return_value = lambda x: x
+ yield load_yaml_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestExtension:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_get_extension(self, get_extension_mock):
+ extensions.Extension(_TEST_RESOURCE_ID)
+ get_extension_mock.assert_called_once_with(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+
+ def test_create_extension(
+ self,
+ create_extension_mock,
+ get_extension_mock,
+ load_yaml_mock,
+ ):
+ extensions.Extension.create(
+ extension_name=_TEST_EXTENSION_RESOURCE_NAME,
+ display_name=_TEST_EXTENSION_DISPLAY_NAME,
+ manifest=_TEST_EXTENSION_MANIFEST_WITH_GCS_URI_OBJ,
+ )
+ create_extension_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ extension=_TEST_EXTENSION_OBJ,
+ )
+ get_extension_mock.assert_called_once_with(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+
+ def test_delete_after_create_extension(
+ self,
+ create_extension_mock,
+ get_extension_mock,
+ delete_extension_mock,
+ load_yaml_mock,
+ ):
+ test_extension = extensions.Extension.create(
+ extension_name=_TEST_EXTENSION_RESOURCE_NAME,
+ display_name=_TEST_EXTENSION_DISPLAY_NAME,
+ manifest=_TEST_EXTENSION_MANIFEST_WITH_GCS_URI_OBJ,
+ )
+ create_extension_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ extension=_TEST_EXTENSION_OBJ,
+ )
+ get_extension_mock.assert_any_call(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+ # Manually set _gca_resource here to prevent the mocks from propagating.
+ test_extension._gca_resource = _TEST_EXTENSION_OBJ
+ test_extension.delete()
+ delete_extension_mock.assert_called_once_with(
+ name=test_extension.resource_name,
+ )
+
+ def test_delete_after_get_extension(
+ self,
+ get_extension_mock,
+ delete_extension_mock,
+ load_yaml_mock,
+ ):
+ test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+ get_extension_mock.assert_any_call(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+ # Manually set _gca_resource here to prevent the mocks from propagating.
+ test_extension._gca_resource = _TEST_EXTENSION_OBJ
+ test_extension.delete()
+ delete_extension_mock.assert_called_once_with(
+ name=test_extension.resource_name,
+ )
+
+ def test_execute_extension(
+ self,
+ get_extension_mock,
+ execute_extension_mock,
+ load_yaml_mock,
+ ):
+ test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+ get_extension_mock.assert_called_once_with(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+ # Manually set _gca_resource here to prevent the mocks from propagating.
+ test_extension._gca_resource = _TEST_EXTENSION_OBJ
+ test_extension.execute(
+ operation_id=_TEST_EXTENSION_OPERATION_ID,
+ operation_params=_TEST_EXTENSION_OPERATION_PARAMS,
+ runtime_auth_config=_TEST_AUTH_CONFIG,
+ )
+ execute_extension_mock.assert_called_once_with(
+ types.ExecuteExtensionRequest(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ operation_id=_TEST_EXTENSION_OPERATION_ID,
+ operation_params=_utils.to_proto(
+ _TEST_EXTENSION_OPERATION_PARAMS,
+ ),
+ runtime_auth_config=_TEST_AUTH_CONFIG,
+ ),
+ )
+
+ def test_query_extension(
+ self,
+ get_extension_mock,
+ query_extension_mock,
+ load_yaml_mock,
+ ):
+ test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+ get_extension_mock.assert_called_once_with(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+ # Manually set _gca_resource here to prevent the mocks from propagating.
+ test_extension._gca_resource = _TEST_EXTENSION_OBJ
+ response = test_extension.query(
+ contents=[
+ _generative_models.Content(
+ parts=[
+ _generative_models.Part.from_text(
+ _TEST_QUERY_PROMPT,
+ )
+ ],
+ role="user",
+ )
+ ],
+ )
+ assert response.steps[-1].parts[0].text == _TEST_RESPONSE_CONTENT
+
+ query_extension_mock.assert_called_once_with(
+ types.QueryExtensionRequest(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ contents=[
+ types.Content(
+ role="user",
+ parts=[
+ types.Part(
+ text=_TEST_QUERY_PROMPT,
+ )
+ ],
+ )
+ ],
+ ),
+ )
+
+ def test_api_spec_from_yaml(self, get_extension_mock, load_yaml_mock):
+ test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+ get_extension_mock.assert_called_once_with(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+ # Manually set _gca_resource here to prevent the mocks from propagating.
+ test_extension._gca_resource = _TEST_EXTENSION_WITH_YAML_API_SPEC_OBJ
+ test_extension.api_spec() == {}
+
+ def test_no_api_spec(self, get_extension_mock, load_yaml_mock):
+ test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+ get_extension_mock.assert_called_once_with(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+ # Manually set _gca_resource here to prevent the mocks from propagating.
+ test_extension._gca_resource = _TEST_EXTENSION_WITH_NO_API_SPEC_OBJ
+ test_extension.api_spec() == {}
+
+ def test_api_spec_from_gcs_uri(
+ self,
+ get_extension_mock,
+ load_yaml_mock,
+ ):
+ test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+ get_extension_mock.assert_called_once_with(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+ # Manually set _gca_resource here to prevent the mocks from propagating.
+ test_extension._gca_resource = _TEST_EXTENSION_OBJ
+ test_extension.api_spec()
+ load_yaml_mock.assert_called_once_with(_TEST_OPEN_API_GCS_URI)
+
+ def test_operation_schemas(self, get_extension_mock):
+ test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+ get_extension_mock.assert_called_once_with(
+ name=_TEST_EXTENSION_RESOURCE_NAME,
+ retry=aiplatform.base._DEFAULT_RETRY,
+ )
+ # Manually set _gca_resource here to prevent the mocks from propagating.
+ test_extension._gca_resource = _TEST_EXTENSION_OBJ
+ test_extension.operation_schemas()
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3f3e00b4f1350c90f168751d92b8f992e923c94
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature.py
@@ -0,0 +1,291 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+from typing import Dict, List, Optional
+from unittest import mock
+from unittest.mock import call, patch
+
+from google.api_core import operation as ga_operation
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform.compat import types
+from google.cloud.aiplatform.compat.services import (
+ feature_registry_service_client,
+)
+from google.cloud.aiplatform_v1beta1.services.feature_registry_service import (
+ FeatureRegistryServiceClient,
+)
+from feature_store_constants import (
+ _TEST_FG1_F1_DESCRIPTION,
+ _TEST_FG1_F1_FEATURE_STATS_AND_ANOMALY,
+ _TEST_FG1_F1_ID,
+ _TEST_FG1_F1_LABELS,
+ _TEST_FG1_F1_PATH,
+ _TEST_FG1_F1_POINT_OF_CONTACT,
+ _TEST_FG1_F1_WITH_STATS,
+ _TEST_FG1_F2_DESCRIPTION,
+ _TEST_FG1_F2_ID,
+ _TEST_FG1_F2_LABELS,
+ _TEST_FG1_F2_PATH,
+ _TEST_FG1_F2_POINT_OF_CONTACT,
+ _TEST_FG1_F2_VERSION_COLUMN_NAME,
+ _TEST_FG1_ID,
+ _TEST_LOCATION,
+ _TEST_PROJECT,
+)
+from vertexai.resources.preview import (
+ Feature,
+ FeatureGroup,
+)
+import pytest
+
+
+@pytest.fixture
+def delete_feature_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "delete_feature",
+ ) as delete_feature_mock:
+ delete_feature_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_feature_mock.return_value = delete_feature_lro_mock
+ yield delete_feature_mock
+
+
+@pytest.fixture
+def get_feature_with_stats_and_anomalies_mock():
+ with patch.object(
+ FeatureRegistryServiceClient,
+ "get_feature",
+ ) as get_feature_with_stats_and_anomalies_mock:
+ get_feature_with_stats_and_anomalies_mock.return_value = _TEST_FG1_F1_WITH_STATS
+ yield get_feature_with_stats_and_anomalies_mock
+
+
+pytestmark = pytest.mark.usefixtures("google_auth_mock")
+
+
+def feature_eq(
+ feature_to_check: Feature,
+ name: str,
+ resource_name: str,
+ project: str,
+ location: str,
+ description: str,
+ labels: Dict[str, str],
+ point_of_contact: str,
+ version_column_name: Optional[str] = None,
+ feature_stats_and_anomalies: Optional[
+ List[types.feature_monitor.FeatureStatsAndAnomaly]
+ ] = None,
+):
+ """Check if a Feature has the appropriate values set."""
+ assert feature_to_check.name == name
+ assert feature_to_check.resource_name == resource_name
+ assert feature_to_check.project == project
+ assert feature_to_check.location == location
+ assert feature_to_check.description == description
+ assert feature_to_check.labels == labels
+ assert feature_to_check.point_of_contact == point_of_contact
+
+ if version_column_name:
+ assert feature_to_check.version_column_name == version_column_name
+ if feature_stats_and_anomalies:
+ assert (
+ feature_to_check.feature_stats_and_anomalies == feature_stats_and_anomalies
+ )
+
+
+def test_init_with_feature_id_and_no_fg_id_raises_error(get_feature_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "Since feature 'my_fg1_f1' is not provided as a path, please specify"
+ + " feature_group_id."
+ ),
+ ):
+ Feature(_TEST_FG1_F1_ID)
+
+
+def test_init_with_feature_path_and_fg_id_raises_error(get_feature_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "Since feature 'projects/test-project/locations/us-central1/featureGroups/my_fg1/features/my_fg1_f1' is provided as a path, feature_group_id should not be specified."
+ ),
+ ):
+ Feature(_TEST_FG1_F1_PATH, feature_group_id=_TEST_FG1_ID)
+
+
+def test_init_with_feature_id(get_feature_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ feature = Feature(_TEST_FG1_F1_ID, feature_group_id=_TEST_FG1_ID)
+
+ get_feature_mock.assert_called_once_with(
+ name=_TEST_FG1_F1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+def test_init_with_feature_id_for_explicit_version_column(
+ get_feature_with_version_column_mock,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ feature = Feature(_TEST_FG1_F2_ID, feature_group_id=_TEST_FG1_ID)
+
+ get_feature_with_version_column_mock.assert_called_once_with(
+ name=_TEST_FG1_F2_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F2_ID,
+ resource_name=_TEST_FG1_F2_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F2_DESCRIPTION,
+ labels=_TEST_FG1_F2_LABELS,
+ point_of_contact=_TEST_FG1_F2_POINT_OF_CONTACT,
+ version_column_name=_TEST_FG1_F2_VERSION_COLUMN_NAME,
+ )
+
+
+def test_init_with_feature_path(get_feature_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ feature = Feature(_TEST_FG1_F1_PATH)
+
+ get_feature_mock.assert_called_once_with(
+ name=_TEST_FG1_F1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+def test_init_with_feature_path_for_explicit_version_column(
+ get_feature_with_version_column_mock,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ feature = Feature(_TEST_FG1_F2_PATH)
+
+ get_feature_with_version_column_mock.assert_called_once_with(
+ name=_TEST_FG1_F2_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F2_ID,
+ resource_name=_TEST_FG1_F2_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ version_column_name=_TEST_FG1_F2_VERSION_COLUMN_NAME,
+ description=_TEST_FG1_F2_DESCRIPTION,
+ labels=_TEST_FG1_F2_LABELS,
+ point_of_contact=_TEST_FG1_F2_POINT_OF_CONTACT,
+ )
+
+
+def test_init_with_latest_stats_count(get_feature_with_stats_and_anomalies_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ feature = Feature(name=_TEST_FG1_F1_PATH, latest_stats_count=1)
+
+ get_feature_with_stats_and_anomalies_mock.assert_called_once_with(
+ request=types.featurestore_service_v1beta1.GetFeatureRequest(
+ name=_TEST_FG1_F1_PATH,
+ feature_stats_and_anomaly_spec=types.feature_monitor.FeatureStatsAndAnomalySpec(
+ latest_stats_count=1
+ ),
+ )
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ feature_stats_and_anomalies=[_TEST_FG1_F1_FEATURE_STATS_AND_ANOMALY],
+ )
+
+
+@pytest.mark.parametrize("sync", [True])
+def test_delete_feature(
+ get_fg_mock, get_feature_mock, delete_feature_mock, base_logger_mock, sync
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ feature = FeatureGroup(_TEST_FG1_ID).get_feature(_TEST_FG1_F1_ID)
+ feature.delete(sync=sync)
+
+ if not sync:
+ feature.wait()
+
+ delete_feature_mock.assert_called_once_with(
+ name=_TEST_FG1_F1_PATH,
+ )
+
+ base_logger_mock.assert_has_calls(
+ [
+ call(
+ "Deleting Feature resource:"
+ " projects/test-project/locations/us-central1/featureGroups/my_fg1/features/my_fg1_f1"
+ ),
+ call(
+ "Delete Feature backing LRO:"
+ f" {delete_feature_mock.return_value.operation.name}"
+ ),
+ call(
+ "Feature resource"
+ " projects/test-project/locations/us-central1/featureGroups/my_fg1/features/my_fg1_f1"
+ " deleted."
+ ),
+ ]
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_group.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_group.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ef2a74c54460b4ec5c1bda9652295308d32e242
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_group.py
@@ -0,0 +1,1027 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+from typing import Dict, List
+from unittest import mock
+from unittest.mock import call, patch
+
+from google.auth import credentials as auth_credentials
+from google.api_core import operation as ga_operation
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform_v1beta1.services.feature_registry_service import (
+ FeatureRegistryServiceClient,
+)
+from vertexai.resources.preview.feature_store import (
+ feature_group,
+)
+from vertexai.resources.preview import (
+ FeatureGroup,
+)
+from vertexai.resources.preview.feature_store import (
+ FeatureGroupBigQuerySource,
+)
+import pytest
+from google.cloud.aiplatform.compat.services import (
+ feature_registry_service_client,
+)
+from google.cloud.aiplatform.compat import types
+
+
+from feature_store_constants import (
+ _TEST_PARENT,
+ _TEST_PROJECT,
+ _TEST_LOCATION,
+ _TEST_FG1,
+ _TEST_FG1_ID,
+ _TEST_FG1_PATH,
+ _TEST_FG1_BQ_URI,
+ _TEST_FG1_ENTITY_ID_COLUMNS,
+ _TEST_FG1_LABELS,
+ _TEST_FG2_ID,
+ _TEST_FG2_PATH,
+ _TEST_FG2_BQ_URI,
+ _TEST_FG2_ENTITY_ID_COLUMNS,
+ _TEST_FG2_LABELS,
+ _TEST_FG3_ID,
+ _TEST_FG3_PATH,
+ _TEST_FG3_BQ_URI,
+ _TEST_FG3_ENTITY_ID_COLUMNS,
+ _TEST_FG3_LABELS,
+ _TEST_FG_LIST,
+ _TEST_FG1_F1,
+ _TEST_FG1_F1_ID,
+ _TEST_FG1_F1_PATH,
+ _TEST_FG1_F1_DESCRIPTION,
+ _TEST_FG1_F1_LABELS,
+ _TEST_FG1_F1_POINT_OF_CONTACT,
+ _TEST_FG1_F1_WITH_STATS,
+ _TEST_FG1_F1_FEATURE_STATS_AND_ANOMALY,
+ _TEST_FG1_F2,
+ _TEST_FG1_F2_ID,
+ _TEST_FG1_F2_PATH,
+ _TEST_FG1_F2_DESCRIPTION,
+ _TEST_FG1_F2_LABELS,
+ _TEST_FG1_F2_POINT_OF_CONTACT,
+ _TEST_FG1_F2_VERSION_COLUMN_NAME,
+ _TEST_FG1_FEATURE_LIST,
+ _TEST_FG1_FM1,
+ _TEST_FG1_FM1_ID,
+ _TEST_FG1_FM1_PATH,
+ _TEST_FG1_FM1_DESCRIPTION,
+ _TEST_FG1_FM1_LABELS,
+ _TEST_FG1_FM1_FEATURE_SELECTION_CONFIGS,
+ _TEST_FG1_FM1_SCHEDULE_CONFIG,
+ _TEST_FG1_FM2_ID,
+ _TEST_FG1_FM2_PATH,
+ _TEST_FG1_FM2_DESCRIPTION,
+ _TEST_FG1_FM2_LABELS,
+ _TEST_FG1_FM2_FEATURE_SELECTION_CONFIGS,
+ _TEST_FG1_FM2_SCHEDULE_CONFIG,
+ _TEST_FG1_FM_LIST,
+)
+from test_feature import feature_eq
+from test_feature_monitor import (
+ feature_monitor_eq,
+)
+
+
+pytestmark = pytest.mark.usefixtures("google_auth_mock")
+
+
+@pytest.fixture
+def fg_logger_mock():
+ with patch.object(
+ feature_group._LOGGER,
+ "info",
+ wraps=feature_group._LOGGER.info,
+ ) as logger_mock:
+ yield logger_mock
+
+
+@pytest.fixture
+def create_fg_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "create_feature_group",
+ ) as create_fg_mock:
+ create_fg_lro_mock = mock.Mock(ga_operation.Operation)
+ create_fg_lro_mock.result.return_value = _TEST_FG1
+ create_fg_mock.return_value = create_fg_lro_mock
+ yield create_fg_mock
+
+
+@pytest.fixture
+def list_fg_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "list_feature_groups",
+ ) as list_fg_mock:
+ list_fg_mock.return_value = _TEST_FG_LIST
+ yield list_fg_mock
+
+
+@pytest.fixture
+def delete_fg_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "delete_feature_group",
+ ) as delete_fg_mock:
+ delete_fg_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_fg_mock.return_value = delete_fg_lro_mock
+ yield delete_fg_mock
+
+
+@pytest.fixture
+def create_feature_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "create_feature",
+ ) as create_feature_mock:
+ create_feature_lro_mock = mock.Mock(ga_operation.Operation)
+ create_feature_lro_mock.result.return_value = _TEST_FG1_F1
+ create_feature_mock.return_value = create_feature_lro_mock
+ yield create_feature_mock
+
+
+@pytest.fixture
+def create_feature_monitor_mock():
+ with patch.object(
+ FeatureRegistryServiceClient,
+ "create_feature_monitor",
+ ) as create_feature_monitor_mock:
+ create_feature_monitor_lro_mock = mock.Mock(ga_operation.Operation)
+ create_feature_monitor_lro_mock.result.return_value = _TEST_FG1_FM1
+ create_feature_monitor_mock.return_value = create_feature_monitor_lro_mock
+ yield create_feature_monitor_mock
+
+
+@pytest.fixture
+def create_feature_with_version_column_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "create_feature",
+ ) as create_feature_mock:
+ create_feature_lro_mock = mock.Mock(ga_operation.Operation)
+ create_feature_lro_mock.result.return_value = _TEST_FG1_F2
+ create_feature_mock.return_value = create_feature_lro_mock
+ yield create_feature_mock
+
+
+@pytest.fixture
+def list_features_mock():
+ with patch.object(
+ feature_registry_service_client.FeatureRegistryServiceClient,
+ "list_features",
+ ) as list_features_mock:
+ list_features_mock.return_value = _TEST_FG1_FEATURE_LIST
+ yield list_features_mock
+
+
+@pytest.fixture
+def list_feature_monitors_mock():
+ with patch.object(
+ FeatureRegistryServiceClient,
+ "list_feature_monitors",
+ ) as list_feature_monitors_mock:
+ list_feature_monitors_mock.return_value = _TEST_FG1_FM_LIST
+ yield list_feature_monitors_mock
+
+
+@pytest.fixture
+def get_feature_with_stats_and_anomalies_mock():
+ with patch.object(
+ FeatureRegistryServiceClient,
+ "get_feature",
+ ) as get_feature_with_stats_and_anomalies_mock:
+ get_feature_with_stats_and_anomalies_mock.return_value = _TEST_FG1_F1_WITH_STATS
+ yield get_feature_with_stats_and_anomalies_mock
+
+
+@pytest.fixture()
+def mock_base_instantiate_client():
+ with patch.object(
+ aiplatform.base.VertexAiResourceNoun,
+ "_instantiate_client",
+ ) as base_instantiate_client_mock:
+ base_instantiate_client_mock.return_value = mock.MagicMock()
+ yield base_instantiate_client_mock
+
+
+def fg_eq(
+ fg_to_check: FeatureGroup,
+ name: str,
+ resource_name: str,
+ source_uri: str,
+ entity_id_columns: List[str],
+ project: str,
+ location: str,
+ labels: Dict[str, str],
+):
+ """Check if a FeatureGroup has the appropriate values set."""
+ assert fg_to_check.name == name
+ assert fg_to_check.resource_name == resource_name
+ assert fg_to_check.source == FeatureGroupBigQuerySource(
+ uri=source_uri,
+ entity_id_columns=entity_id_columns,
+ )
+ assert fg_to_check.project == project
+ assert fg_to_check.location == location
+ assert fg_to_check.labels == labels
+
+
+@pytest.mark.parametrize(
+ "feature_group_name",
+ [_TEST_FG1_ID, _TEST_FG1_PATH],
+)
+def test_init(feature_group_name, get_fg_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fg = FeatureGroup(feature_group_name)
+
+ get_fg_mock.assert_called_once_with(
+ name=_TEST_FG1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ fg_eq(
+ fg,
+ name=_TEST_FG1_ID,
+ resource_name=_TEST_FG1_PATH,
+ source_uri=_TEST_FG1_BQ_URI,
+ entity_id_columns=_TEST_FG1_ENTITY_ID_COLUMNS,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FG1_LABELS,
+ )
+
+
+def test_create_fg_no_source_raises_error():
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape("Please specify a valid source."),
+ ):
+ FeatureGroup.create("fg")
+
+
+def test_create_fg_bad_source_raises_error():
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape("Only FeatureGroupBigQuerySource is a supported source."),
+ ):
+ FeatureGroup.create("fg", source=int(1))
+
+
+def test_create_fg_no_source_bq_uri_raises_error():
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape("Please specify URI in BigQuery source."),
+ ):
+ FeatureGroup.create(
+ "fg", source=FeatureGroupBigQuerySource(uri=None, entity_id_columns=None)
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+@pytest.mark.parametrize("sync", [True, False])
+def test_create_fg(
+ create_fg_mock, get_fg_mock, fg_logger_mock, create_request_timeout, sync
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fg = FeatureGroup.create(
+ _TEST_FG1_ID,
+ source=FeatureGroupBigQuerySource(
+ uri=_TEST_FG1_BQ_URI,
+ entity_id_columns=_TEST_FG1_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FG1_LABELS,
+ create_request_timeout=create_request_timeout,
+ sync=sync,
+ )
+
+ if not sync:
+ fg.wait()
+
+ # When creating, the FeatureOnlineStore object doesn't have the path set.
+ expected_fg = types.feature_group.FeatureGroup(
+ name=_TEST_FG1_ID,
+ big_query=types.feature_group.FeatureGroup.BigQuery(
+ big_query_source=types.io.BigQuerySource(
+ input_uri=_TEST_FG1_BQ_URI,
+ ),
+ entity_id_columns=_TEST_FG1_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FG1_LABELS,
+ )
+ create_fg_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ feature_group=expected_fg,
+ feature_group_id=_TEST_FG1_ID,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ fg_logger_mock.assert_has_calls(
+ [
+ call("Creating FeatureGroup"),
+ call(
+ f"Create FeatureGroup backing LRO: {create_fg_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureGroup created. Resource name: projects/test-project/locations/us-central1/featureGroups/my_fg1"
+ ),
+ call("To use this FeatureGroup in another session:"),
+ call(
+ "feature_group = aiplatform.FeatureGroup('projects/test-project/locations/us-central1/featureGroups/my_fg1')"
+ ),
+ ]
+ )
+
+ fg_eq(
+ fg,
+ name=_TEST_FG1_ID,
+ resource_name=_TEST_FG1_PATH,
+ source_uri=_TEST_FG1_BQ_URI,
+ entity_id_columns=_TEST_FG1_ENTITY_ID_COLUMNS,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FG1_LABELS,
+ )
+
+
+def test_list(list_fg_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ feature_groups = FeatureGroup.list()
+
+ list_fg_mock.assert_called_once_with(request={"parent": _TEST_PARENT})
+ assert len(feature_groups) == len(_TEST_FG_LIST)
+ fg_eq(
+ feature_groups[0],
+ name=_TEST_FG1_ID,
+ resource_name=_TEST_FG1_PATH,
+ source_uri=_TEST_FG1_BQ_URI,
+ entity_id_columns=_TEST_FG1_ENTITY_ID_COLUMNS,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FG1_LABELS,
+ )
+ fg_eq(
+ feature_groups[1],
+ name=_TEST_FG2_ID,
+ resource_name=_TEST_FG2_PATH,
+ source_uri=_TEST_FG2_BQ_URI,
+ entity_id_columns=_TEST_FG2_ENTITY_ID_COLUMNS,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FG2_LABELS,
+ )
+ fg_eq(
+ feature_groups[2],
+ name=_TEST_FG3_ID,
+ resource_name=_TEST_FG3_PATH,
+ source_uri=_TEST_FG3_BQ_URI,
+ entity_id_columns=_TEST_FG3_ENTITY_ID_COLUMNS,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FG3_LABELS,
+ )
+
+
+@pytest.mark.parametrize("force", [True, False])
+@pytest.mark.parametrize("sync", [True])
+def test_delete(delete_fg_mock, get_fg_mock, fg_logger_mock, force, sync):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fg = FeatureGroup(_TEST_FG1_ID)
+ fg.delete(force=force, sync=sync)
+
+ if not sync:
+ fg.wait()
+
+ delete_fg_mock.assert_called_once_with(
+ name=_TEST_FG1_PATH,
+ force=force,
+ )
+
+ fg_logger_mock.assert_has_calls(
+ [
+ call(
+ "Deleting FeatureGroup resource: projects/test-project/locations/us-central1/featureGroups/my_fg1"
+ ),
+ call(
+ f"Delete FeatureGroup backing LRO: {delete_fg_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureGroup resource projects/test-project/locations/us-central1/featureGroups/my_fg1 deleted."
+ ),
+ ]
+ )
+
+
+def test_get_feature(get_fg_mock, get_feature_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fg = FeatureGroup(_TEST_FG1_ID)
+ feature = fg.get_feature(_TEST_FG1_F1_ID)
+
+ get_feature_mock.assert_called_once_with(
+ name=_TEST_FG1_F1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+def test_get_feature_with_latest_stats_count(
+ get_fg_mock, get_feature_with_stats_and_anomalies_mock
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fg = FeatureGroup(_TEST_FG1_ID)
+ feature = fg.get_feature(_TEST_FG1_F1_ID, latest_stats_count=1)
+
+ get_feature_with_stats_and_anomalies_mock.assert_called_once_with(
+ request=types.featurestore_service_v1beta1.GetFeatureRequest(
+ name=_TEST_FG1_F1_PATH,
+ feature_stats_and_anomaly_spec=types.feature_monitor.FeatureStatsAndAnomalySpec(
+ latest_stats_count=1
+ ),
+ )
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ feature_stats_and_anomalies=[_TEST_FG1_F1_FEATURE_STATS_AND_ANOMALY],
+ )
+
+
+def test_get_feature_credentials_set_in_init(mock_base_instantiate_client):
+ credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, credentials=credentials
+ )
+
+ mock_base_instantiate_client.return_value.get_feature_group.return_value = _TEST_FG1
+ mock_base_instantiate_client.return_value.get_feature.return_value = _TEST_FG1_F1
+
+ fg = FeatureGroup(_TEST_FG1_ID)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature = fg.get_feature(_TEST_FG1_F1_ID)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+def test_get_feature_from_feature_group_with_explicit_credentials(
+ mock_base_instantiate_client,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ mock_base_instantiate_client.return_value.get_feature_group.return_value = _TEST_FG1
+ mock_base_instantiate_client.return_value.get_feature.return_value = _TEST_FG1_F1
+
+ credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ fg = FeatureGroup(_TEST_FG1_ID, credentials=credentials)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature = fg.get_feature(_TEST_FG1_F1_ID)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+def test_get_feature_from_feature_group_with_explicit_credentials_overrides_init_credentials(
+ mock_base_instantiate_client,
+):
+ init_credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, credentials=init_credentials
+ )
+
+ mock_base_instantiate_client.return_value.get_feature_group.return_value = _TEST_FG1
+ mock_base_instantiate_client.return_value.get_feature.return_value = _TEST_FG1_F1
+
+ credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ fg = FeatureGroup(_TEST_FG1_ID, credentials=credentials)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature = fg.get_feature(_TEST_FG1_F1_ID)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+def test_get_feature_with_explicit_credentials(mock_base_instantiate_client):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ mock_base_instantiate_client.return_value.get_feature_group.return_value = _TEST_FG1
+ mock_base_instantiate_client.return_value.get_feature.return_value = _TEST_FG1_F1
+
+ fg = FeatureGroup(_TEST_FG1_ID)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=mock.ANY,
+ appended_user_agent=None,
+ )
+
+ credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ feature = fg.get_feature(_TEST_FG1_F1_ID, credentials=credentials)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+def test_get_feature_with_explicit_credentials_overrides_init_credentials(
+ mock_base_instantiate_client,
+):
+ init_credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, credentials=init_credentials
+ )
+
+ mock_base_instantiate_client.return_value.get_feature_group.return_value = _TEST_FG1
+ mock_base_instantiate_client.return_value.get_feature.return_value = _TEST_FG1_F1
+
+ fg = FeatureGroup(_TEST_FG1_ID)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=init_credentials,
+ appended_user_agent=None,
+ )
+
+ credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ feature = fg.get_feature(_TEST_FG1_F1_ID, credentials=credentials)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+def test_get_feature_with_explicit_credentials_overrides_feature_group_credentials(
+ mock_base_instantiate_client,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ mock_base_instantiate_client.return_value.get_feature_group.return_value = _TEST_FG1
+ mock_base_instantiate_client.return_value.get_feature.return_value = _TEST_FG1_F1
+
+ feature_group_credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ fg = FeatureGroup(_TEST_FG1_ID, credentials=feature_group_credentials)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=feature_group_credentials,
+ appended_user_agent=None,
+ )
+
+ credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ feature = fg.get_feature(_TEST_FG1_F1_ID, credentials=credentials)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+def test_get_feature_with_explicit_credentials_overrides_init_and_feature_group_credentials(
+ mock_base_instantiate_client,
+):
+ init_credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ aiplatform.init(
+ project=_TEST_PROJECT, location=_TEST_LOCATION, credentials=init_credentials
+ )
+
+ mock_base_instantiate_client.return_value.get_feature_group.return_value = _TEST_FG1
+ mock_base_instantiate_client.return_value.get_feature.return_value = _TEST_FG1_F1
+
+ feature_group_credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ fg = FeatureGroup(_TEST_FG1_ID, credentials=feature_group_credentials)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=feature_group_credentials,
+ appended_user_agent=None,
+ )
+
+ credentials = mock.MagicMock(spec=auth_credentials.Credentials)
+ feature = fg.get_feature(_TEST_FG1_F1_ID, credentials=credentials)
+ mock_base_instantiate_client.assert_called_with(
+ location=_TEST_LOCATION,
+ credentials=credentials,
+ appended_user_agent=None,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+@pytest.mark.parametrize("sync", [True, False])
+def test_create_feature(
+ get_fg_mock,
+ create_feature_mock,
+ get_feature_mock,
+ fg_logger_mock,
+ create_request_timeout,
+ sync,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fg = FeatureGroup(_TEST_FG1_ID)
+ feature = fg.create_feature(
+ _TEST_FG1_F1_ID,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ create_request_timeout=create_request_timeout,
+ sync=sync,
+ )
+
+ if not sync:
+ feature.wait()
+
+ expected_feature = types.feature.Feature(
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+ create_feature_mock.assert_called_once_with(
+ parent=_TEST_FG1_PATH,
+ feature=expected_feature,
+ feature_id=_TEST_FG1_F1_ID,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+
+ fg_logger_mock.assert_has_calls(
+ [
+ call("Creating Feature"),
+ call(
+ f"Create Feature backing LRO: {create_feature_mock.return_value.operation.name}"
+ ),
+ call(
+ "Feature created. Resource name: projects/test-project/locations/us-central1/featureGroups/my_fg1/features/my_fg1_f1"
+ ),
+ call("To use this Feature in another session:"),
+ call(
+ "feature = aiplatform.Feature('projects/test-project/locations/us-central1/featureGroups/my_fg1/features/my_fg1_f1')"
+ ),
+ ]
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+@pytest.mark.parametrize("sync", [True, False])
+def test_create_feature_with_version_feature_column(
+ get_fg_mock,
+ create_feature_with_version_column_mock,
+ get_feature_with_version_column_mock,
+ fg_logger_mock,
+ create_request_timeout,
+ sync,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fg = FeatureGroup(_TEST_FG1_ID)
+ feature = fg.create_feature(
+ _TEST_FG1_F2_ID,
+ version_column_name=_TEST_FG1_F2_VERSION_COLUMN_NAME,
+ description=_TEST_FG1_F2_DESCRIPTION,
+ labels=_TEST_FG1_F2_LABELS,
+ point_of_contact=_TEST_FG1_F2_POINT_OF_CONTACT,
+ create_request_timeout=create_request_timeout,
+ sync=sync,
+ )
+
+ if not sync:
+ feature.wait()
+
+ expected_feature = types.feature.Feature(
+ version_column_name=_TEST_FG1_F2_VERSION_COLUMN_NAME,
+ description=_TEST_FG1_F2_DESCRIPTION,
+ labels=_TEST_FG1_F2_LABELS,
+ point_of_contact=_TEST_FG1_F2_POINT_OF_CONTACT,
+ )
+ create_feature_with_version_column_mock.assert_called_once_with(
+ parent=_TEST_FG1_PATH,
+ feature=expected_feature,
+ feature_id=_TEST_FG1_F2_ID,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ feature_eq(
+ feature,
+ name=_TEST_FG1_F2_ID,
+ resource_name=_TEST_FG1_F2_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F2_DESCRIPTION,
+ labels=_TEST_FG1_F2_LABELS,
+ point_of_contact=_TEST_FG1_F2_POINT_OF_CONTACT,
+ version_column_name=_TEST_FG1_F2_VERSION_COLUMN_NAME,
+ )
+
+ fg_logger_mock.assert_has_calls(
+ [
+ call("Creating Feature"),
+ call(
+ f"Create Feature backing LRO: {create_feature_with_version_column_mock.return_value.operation.name}"
+ ),
+ call(
+ "Feature created. Resource name: projects/test-project/locations/us-central1/featureGroups/my_fg1/features/my_fg1_f2"
+ ),
+ call("To use this Feature in another session:"),
+ call(
+ "feature = aiplatform.Feature('projects/test-project/locations/us-central1/featureGroups/my_fg1/features/my_fg1_f2')"
+ ),
+ ]
+ )
+
+
+def test_list_features(get_fg_mock, list_features_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ features = FeatureGroup(_TEST_FG1_ID).list_features()
+
+ list_features_mock.assert_called_once_with(request={"parent": _TEST_FG1_PATH})
+ assert len(features) == len(_TEST_FG1_FEATURE_LIST)
+ feature_eq(
+ features[0],
+ name=_TEST_FG1_F1_ID,
+ resource_name=_TEST_FG1_F1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F1_DESCRIPTION,
+ labels=_TEST_FG1_F1_LABELS,
+ point_of_contact=_TEST_FG1_F1_POINT_OF_CONTACT,
+ )
+ feature_eq(
+ features[1],
+ name=_TEST_FG1_F2_ID,
+ resource_name=_TEST_FG1_F2_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_F2_DESCRIPTION,
+ labels=_TEST_FG1_F2_LABELS,
+ point_of_contact=_TEST_FG1_F2_POINT_OF_CONTACT,
+ version_column_name=_TEST_FG1_F2_VERSION_COLUMN_NAME,
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+def test_create_feature_monitor(
+ get_fg_mock,
+ get_feature_monitor_mock,
+ create_feature_monitor_mock,
+ fg_logger_mock,
+ create_request_timeout,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fg = FeatureGroup(_TEST_FG1_ID)
+ feature_monitor = fg.create_feature_monitor(
+ _TEST_FG1_FM1_ID,
+ description=_TEST_FG1_FM1_DESCRIPTION,
+ labels=_TEST_FG1_FM1_LABELS,
+ schedule_config=_TEST_FG1_FM1_SCHEDULE_CONFIG,
+ feature_selection_configs=_TEST_FG1_FM1_FEATURE_SELECTION_CONFIGS,
+ create_request_timeout=create_request_timeout,
+ )
+
+ expected_feature_monitor = types.feature_monitor.FeatureMonitor(
+ description=_TEST_FG1_FM1_DESCRIPTION,
+ labels=_TEST_FG1_FM1_LABELS,
+ schedule_config=types.feature_monitor.ScheduleConfig(
+ cron=_TEST_FG1_FM1_SCHEDULE_CONFIG
+ ),
+ feature_selection_config=types.feature_monitor.FeatureSelectionConfig(
+ feature_configs=[
+ types.feature_monitor.FeatureSelectionConfig.FeatureConfig(
+ feature_id="my_fg1_f1", drift_threshold=0.3
+ ),
+ types.feature_monitor.FeatureSelectionConfig.FeatureConfig(
+ feature_id="my_fg1_f2", drift_threshold=0.4
+ ),
+ ]
+ ),
+ )
+ create_feature_monitor_mock.assert_called_once_with(
+ parent=_TEST_FG1_PATH,
+ feature_monitor_id=_TEST_FG1_FM1_ID,
+ feature_monitor=expected_feature_monitor,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ feature_monitor_eq(
+ feature_monitor,
+ name=_TEST_FG1_FM1_ID,
+ resource_name=_TEST_FG1_FM1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FM1_DESCRIPTION,
+ labels=_TEST_FG1_FM1_LABELS,
+ schedule_config=_TEST_FG1_FM1_SCHEDULE_CONFIG,
+ feature_selection_configs=_TEST_FG1_FM1_FEATURE_SELECTION_CONFIGS,
+ )
+
+ fg_logger_mock.assert_has_calls(
+ [
+ call("Creating FeatureMonitor"),
+ call(
+ f"Create FeatureMonitor backing LRO:"
+ f" {create_feature_monitor_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureMonitor created. Resource name:"
+ " projects/test-project/locations/us-central1/featureGroups/"
+ "my_fg1/featureMonitors/my_fg1_fm1"
+ ),
+ call("To use this FeatureMonitor in another session:"),
+ call(
+ "feature_monitor = aiplatform.FeatureMonitor("
+ "'projects/test-project/locations/us-central1/featureGroups/"
+ "my_fg1/featureMonitors/my_fg1_fm1')"
+ ),
+ ]
+ )
+
+
+def test_list_feature_monitors(
+ get_fg_mock, get_feature_monitor_mock, list_feature_monitors_mock
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ feature_monitors = FeatureGroup(_TEST_FG1_ID).list_feature_monitors()
+
+ list_feature_monitors_mock.assert_called_once_with(
+ request={"parent": _TEST_FG1_PATH}
+ )
+ assert len(feature_monitors) == len(_TEST_FG1_FM_LIST)
+ feature_monitor_eq(
+ feature_monitors[0],
+ name=_TEST_FG1_FM1_ID,
+ resource_name=_TEST_FG1_FM1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FM1_DESCRIPTION,
+ labels=_TEST_FG1_FM1_LABELS,
+ schedule_config=_TEST_FG1_FM1_SCHEDULE_CONFIG,
+ feature_selection_configs=_TEST_FG1_FM1_FEATURE_SELECTION_CONFIGS,
+ )
+ feature_monitor_eq(
+ feature_monitors[1],
+ name=_TEST_FG1_FM2_ID,
+ resource_name=_TEST_FG1_FM2_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FM2_DESCRIPTION,
+ labels=_TEST_FG1_FM2_LABELS,
+ schedule_config=_TEST_FG1_FM2_SCHEDULE_CONFIG,
+ feature_selection_configs=_TEST_FG1_FM2_FEATURE_SELECTION_CONFIGS,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_monitor.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_monitor.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5aaa5490e303883a38f2c10966ccfa881ac8757
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_monitor.py
@@ -0,0 +1,370 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+from typing import Dict, List, Optional, Tuple
+from unittest.mock import patch
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+
+from feature_store_constants import (
+ _TEST_PROJECT,
+ _TEST_LOCATION,
+ _TEST_FG1_ID,
+ _TEST_FG1_FM1_DESCRIPTION,
+ _TEST_FG1_FM1_FEATURE_SELECTION_CONFIGS,
+ _TEST_FG1_FM1_ID,
+ _TEST_FG1_FM1_LABELS,
+ _TEST_FG1_FM1_PATH,
+ _TEST_FG1_FM1_SCHEDULE_CONFIG,
+ _TEST_FG1_FMJ1,
+ _TEST_FG1_FMJ1_DESCRIPTION,
+ _TEST_FG1_FMJ1_FEATURE_STATS_AND_ANOMALIES,
+ _TEST_FG1_FMJ1_ID,
+ _TEST_FG1_FMJ1_LABELS,
+ _TEST_FG1_FMJ_LIST,
+ _TEST_FG1_FMJ1_PATH,
+ _TEST_FG1_FMJ2_DESCRIPTION,
+ _TEST_FG1_FMJ2_LABELS,
+ _TEST_FG1_FMJ2_PATH,
+)
+from vertexai.resources.preview import FeatureMonitor
+from google.cloud.aiplatform_v1beta1.services.feature_registry_service import (
+ FeatureRegistryServiceClient,
+)
+from google.cloud.aiplatform.compat import types
+from vertexai.resources.preview.feature_store import (
+ feature_monitor,
+)
+import pytest
+
+
+pytestmark = pytest.mark.usefixtures("google_auth_mock")
+
+
+@pytest.fixture
+def fm_logger_mock():
+ with patch.object(
+ feature_monitor._LOGGER,
+ "info",
+ wraps=feature_monitor._LOGGER.info,
+ ) as logger_mock:
+ yield logger_mock
+
+
+@pytest.fixture
+def get_feature_monitor_job_mock():
+ with patch.object(
+ FeatureRegistryServiceClient,
+ "get_feature_monitor_job",
+ ) as get_fmj_mock:
+ get_fmj_mock.return_value = _TEST_FG1_FMJ1
+ yield get_fmj_mock
+
+
+@pytest.fixture
+def create_feature_monitor_job_mock():
+ with patch.object(
+ FeatureRegistryServiceClient,
+ "create_feature_monitor_job",
+ ) as create_feature_monitor_job_mock:
+ create_feature_monitor_job_mock.return_value = _TEST_FG1_FMJ1
+ yield create_feature_monitor_job_mock
+
+
+@pytest.fixture
+def list_feature_monitor_jobs_mock():
+ with patch.object(
+ FeatureRegistryServiceClient,
+ "list_feature_monitor_jobs",
+ ) as list_feature_monitor_jobs_mock:
+ list_feature_monitor_jobs_mock.return_value = _TEST_FG1_FMJ_LIST
+ yield list_feature_monitor_jobs_mock
+
+
+def feature_monitor_eq(
+ feature_monitor_to_check: FeatureMonitor,
+ name: str,
+ resource_name: str,
+ project: str,
+ location: str,
+ description: str,
+ labels: Dict[str, str],
+ schedule_config: str,
+ feature_selection_configs: List[Tuple[str, float]],
+):
+ """Check if a Feature Monitor has the appropriate values set."""
+ assert feature_monitor_to_check.name == name
+ assert feature_monitor_to_check.resource_name == resource_name
+ assert feature_monitor_to_check.project == project
+ assert feature_monitor_to_check.location == location
+ assert feature_monitor_to_check.description == description
+ assert feature_monitor_to_check.labels == labels
+ assert feature_monitor_to_check.schedule_config == schedule_config
+ assert (
+ feature_monitor_to_check.feature_selection_configs == feature_selection_configs
+ )
+
+
+def feature_monitor_job_eq(
+ feature_monitor_job_to_check: FeatureMonitor.FeatureMonitorJob,
+ resource_name: str,
+ project: str,
+ location: str,
+ description: str,
+ labels: Dict[str, str],
+ feature_stats_and_anomalies: Optional[
+ List[types.feature_monitor.FeatureStatsAndAnomaly]
+ ] = None,
+):
+ """Check if a Feature Monitor Job has the appropriate values set."""
+ assert feature_monitor_job_to_check.resource_name == resource_name
+ assert feature_monitor_job_to_check.project == project
+ assert feature_monitor_job_to_check.location == location
+ assert feature_monitor_job_to_check.description == description
+ assert feature_monitor_job_to_check.labels == labels
+ if feature_stats_and_anomalies:
+ assert (
+ feature_monitor_job_to_check.feature_stats_and_anomalies
+ == feature_stats_and_anomalies
+ )
+
+
+def test_init_with_feature_monitor_id_and_no_fg_id_raises_error():
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "Since feature monitor 'my_fg1_fm1' is not provided as a path, please"
+ " specify feature_group_id."
+ ),
+ ):
+ FeatureMonitor(_TEST_FG1_FM1_ID)
+
+
+def test_init_with_feature_monitor_path_and_fg_id_raises_error():
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "Since feature monitor 'projects/test-project/locations/us-central1/"
+ "featureGroups/my_fg1/featureMonitors/my_fg1_fm1' is provided as a "
+ "path, feature_group_id should not be specified."
+ ),
+ ):
+ FeatureMonitor(
+ _TEST_FG1_FM1_PATH,
+ feature_group_id=_TEST_FG1_ID,
+ )
+
+
+def test_init_with_feature_monitor_id(get_feature_monitor_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ feature_monitor = FeatureMonitor(
+ _TEST_FG1_FM1_ID,
+ feature_group_id=_TEST_FG1_ID,
+ )
+
+ get_feature_monitor_mock.assert_called_once_with(
+ name=_TEST_FG1_FM1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ feature_monitor_eq(
+ feature_monitor,
+ name=_TEST_FG1_FM1_ID,
+ resource_name=_TEST_FG1_FM1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FM1_DESCRIPTION,
+ labels=_TEST_FG1_FM1_LABELS,
+ schedule_config=_TEST_FG1_FM1_SCHEDULE_CONFIG,
+ feature_selection_configs=_TEST_FG1_FM1_FEATURE_SELECTION_CONFIGS,
+ )
+
+
+def test_init_with_feature_monitor_path(get_feature_monitor_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ feature_monitor = FeatureMonitor(_TEST_FG1_FM1_PATH)
+
+ get_feature_monitor_mock.assert_called_once_with(
+ name=_TEST_FG1_FM1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ feature_monitor_eq(
+ feature_monitor,
+ name=_TEST_FG1_FM1_ID,
+ resource_name=_TEST_FG1_FM1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FM1_DESCRIPTION,
+ labels=_TEST_FG1_FM1_LABELS,
+ schedule_config=_TEST_FG1_FM1_SCHEDULE_CONFIG,
+ feature_selection_configs=_TEST_FG1_FM1_FEATURE_SELECTION_CONFIGS,
+ )
+
+
+def test_init_with_feature_monitor_job_path(get_feature_monitor_job_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ feature_monitor_job = FeatureMonitor.FeatureMonitorJob(_TEST_FG1_FMJ1_PATH)
+
+ get_feature_monitor_job_mock.assert_called_once_with(
+ name=_TEST_FG1_FMJ1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ feature_monitor_job_eq(
+ feature_monitor_job,
+ resource_name=_TEST_FG1_FMJ1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FMJ1_DESCRIPTION,
+ labels=_TEST_FG1_FMJ1_LABELS,
+ feature_stats_and_anomalies=_TEST_FG1_FMJ1_FEATURE_STATS_AND_ANOMALIES,
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+def test_create_feature_monitor_job(
+ get_feature_monitor_mock,
+ get_feature_monitor_job_mock,
+ create_feature_monitor_job_mock,
+ create_request_timeout,
+ fm_logger_mock,
+):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ fm = FeatureMonitor(
+ _TEST_FG1_FM1_ID,
+ feature_group_id=_TEST_FG1_ID,
+ )
+ feature_monitor_job = fm.create_feature_monitor_job(
+ description=_TEST_FG1_FMJ1_DESCRIPTION,
+ labels=_TEST_FG1_FMJ1_LABELS,
+ create_request_timeout=create_request_timeout,
+ )
+
+ expected_feature_monitor_job = types.feature_monitor_job.FeatureMonitorJob(
+ description=_TEST_FG1_FMJ1_DESCRIPTION,
+ labels=_TEST_FG1_FMJ1_LABELS,
+ )
+ create_feature_monitor_job_mock.assert_called_once_with(
+ parent=_TEST_FG1_FM1_PATH,
+ feature_monitor_job=expected_feature_monitor_job,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ feature_monitor_job_eq(
+ feature_monitor_job,
+ resource_name=_TEST_FG1_FMJ1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FMJ1_DESCRIPTION,
+ labels=_TEST_FG1_FMJ1_LABELS,
+ feature_stats_and_anomalies=_TEST_FG1_FMJ1_FEATURE_STATS_AND_ANOMALIES,
+ )
+
+
+def test_get_feature_monitor_job(
+ get_feature_monitor_mock, get_feature_monitor_job_mock
+):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ fm = FeatureMonitor(
+ _TEST_FG1_FM1_ID,
+ feature_group_id=_TEST_FG1_ID,
+ )
+ feature_monitor_job = fm.get_feature_monitor_job(_TEST_FG1_FMJ1_ID)
+
+ get_feature_monitor_job_mock.assert_called_once_with(
+ name=_TEST_FG1_FMJ1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ feature_monitor_job_eq(
+ feature_monitor_job,
+ resource_name=_TEST_FG1_FMJ1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FMJ1_DESCRIPTION,
+ labels=_TEST_FG1_FMJ1_LABELS,
+ feature_stats_and_anomalies=_TEST_FG1_FMJ1_FEATURE_STATS_AND_ANOMALIES,
+ )
+
+
+def test_list_feature_monitors_jobs(
+ get_feature_monitor_mock, list_feature_monitor_jobs_mock
+):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ feature_monitor_jobs = FeatureMonitor(
+ _TEST_FG1_FM1_ID,
+ feature_group_id=_TEST_FG1_ID,
+ ).list_feature_monitor_jobs()
+
+ list_feature_monitor_jobs_mock.assert_called_once_with(
+ request={"parent": _TEST_FG1_FM1_PATH}
+ )
+ assert len(feature_monitor_jobs) == len(_TEST_FG1_FMJ_LIST)
+ feature_monitor_job_eq(
+ feature_monitor_jobs[0],
+ resource_name=_TEST_FG1_FMJ1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FMJ1_DESCRIPTION,
+ labels=_TEST_FG1_FMJ1_LABELS,
+ )
+ feature_monitor_job_eq(
+ feature_monitor_jobs[1],
+ resource_name=_TEST_FG1_FMJ2_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ description=_TEST_FG1_FMJ2_DESCRIPTION,
+ labels=_TEST_FG1_FMJ2_LABELS,
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_online_store.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_online_store.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd79f245e3af5ad72655bc5a4e2b8ee16f6e81cc
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_online_store.py
@@ -0,0 +1,681 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+from typing import Dict
+from unittest import mock
+from unittest.mock import call
+from unittest.mock import patch
+
+from google.api_core import operation as ga_operation
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform.compat import types
+from google.cloud.aiplatform.compat.services import (
+ feature_online_store_admin_service_client,
+)
+from feature_store_constants import (
+ _TEST_BIGTABLE_FOS1_ID,
+ _TEST_BIGTABLE_FOS1_LABELS,
+ _TEST_BIGTABLE_FOS1_PATH,
+ _TEST_BIGTABLE_FOS2_ID,
+ _TEST_BIGTABLE_FOS2_LABELS,
+ _TEST_BIGTABLE_FOS2_PATH,
+ _TEST_BIGTABLE_FOS3_ID,
+ _TEST_BIGTABLE_FOS3_LABELS,
+ _TEST_BIGTABLE_FOS3_PATH,
+ _TEST_ESF_OPTIMIZED_FOS_ID,
+ _TEST_ESF_OPTIMIZED_FOS_LABELS,
+ _TEST_ESF_OPTIMIZED_FOS_PATH,
+ _TEST_FOS_LIST,
+ _TEST_FV1_BQ_URI,
+ _TEST_FV1_ENTITY_ID_COLUMNS,
+ _TEST_FV1_ID,
+ _TEST_FV1_LABELS,
+ _TEST_FV1_PATH,
+ _TEST_FV3_BQ_URI,
+ _TEST_FV3_ID,
+ _TEST_FV3_LABELS,
+ _TEST_FV3_PATH,
+ _TEST_LOCATION,
+ _TEST_OPTIMIZED_EMBEDDING_FV_ID,
+ _TEST_OPTIMIZED_EMBEDDING_FV_PATH,
+ _TEST_PARENT,
+ _TEST_PROJECT,
+ _TEST_PSC_OPTIMIZED_FOS_ID,
+ _TEST_PSC_OPTIMIZED_FOS_LABELS,
+ _TEST_PSC_OPTIMIZED_FOS_PATH,
+ _TEST_PSC_PROJECT_ALLOWLIST,
+)
+from test_feature_view import fv_eq
+from vertexai.resources.preview import (
+ DistanceMeasureType,
+ FeatureOnlineStore,
+ FeatureOnlineStoreType,
+ FeatureViewBigQuerySource,
+ FeatureViewVertexRagSource,
+ IndexConfig,
+ TreeAhConfig,
+)
+from vertexai.resources.preview.feature_store import (
+ feature_online_store,
+)
+import pytest
+
+
+@pytest.fixture
+def fos_logger_mock():
+ with patch.object(
+ feature_online_store._LOGGER,
+ "info",
+ wraps=feature_online_store._LOGGER.info,
+ ) as logger_mock:
+ yield logger_mock
+
+
+@pytest.fixture
+def list_fos_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "list_feature_online_stores",
+ ) as list_fos_mock:
+ list_fos_mock.return_value = _TEST_FOS_LIST
+ yield list_fos_mock
+
+
+@pytest.fixture
+def delete_fos_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "delete_feature_online_store",
+ ) as delete_fos_mock:
+ delete_fos_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_fos_mock.return_value = delete_fos_lro_mock
+ yield delete_fos_mock
+
+
+def fos_eq(
+ fos_to_check: FeatureOnlineStore,
+ name: str,
+ resource_name: str,
+ project: str,
+ location: str,
+ labels: Dict[str, str],
+ type: FeatureOnlineStoreType,
+):
+ """Check if a FeatureOnlineStore has the appropriate values set."""
+ assert fos_to_check.name == name
+ assert fos_to_check.resource_name == resource_name
+ assert fos_to_check.project == project
+ assert fos_to_check.location == location
+ assert fos_to_check.labels == labels
+ assert fos_to_check.feature_online_store_type == type
+
+
+pytestmark = pytest.mark.usefixtures("google_auth_mock")
+
+
+@pytest.mark.parametrize(
+ "online_store_name",
+ [_TEST_BIGTABLE_FOS1_ID, _TEST_BIGTABLE_FOS1_PATH],
+)
+def test_init(online_store_name, get_fos_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fos = FeatureOnlineStore(online_store_name)
+
+ get_fos_mock.assert_called_once_with(
+ name=_TEST_BIGTABLE_FOS1_PATH, retry=base._DEFAULT_RETRY
+ )
+
+ fos_eq(
+ fos,
+ name=_TEST_BIGTABLE_FOS1_ID,
+ resource_name=_TEST_BIGTABLE_FOS1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_BIGTABLE_FOS1_LABELS,
+ type=FeatureOnlineStoreType.BIGTABLE,
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+def test_create(
+ create_request_timeout,
+ create_bigtable_fos_mock,
+ get_fos_mock,
+ fos_logger_mock,
+ sync=True,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fos = FeatureOnlineStore.create_bigtable_store(
+ _TEST_BIGTABLE_FOS1_ID,
+ labels=_TEST_BIGTABLE_FOS1_LABELS,
+ create_request_timeout=create_request_timeout,
+ sync=sync,
+ )
+
+ if not sync:
+ fos.wait()
+
+ # When creating, the FeatureOnlineStore object doesn't have the path set.
+ expected_feature_online_store = types.feature_online_store_v1.FeatureOnlineStore(
+ bigtable=types.feature_online_store_v1.FeatureOnlineStore.Bigtable(
+ auto_scaling=types.feature_online_store_v1.FeatureOnlineStore.Bigtable.AutoScaling(
+ min_node_count=1,
+ max_node_count=1,
+ cpu_utilization_target=50,
+ )
+ ),
+ labels=_TEST_BIGTABLE_FOS1_LABELS,
+ )
+ create_bigtable_fos_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ feature_online_store=expected_feature_online_store,
+ feature_online_store_id=_TEST_BIGTABLE_FOS1_ID,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ fos_logger_mock.assert_has_calls(
+ [
+ call("Creating FeatureOnlineStore"),
+ call(
+ f"Create FeatureOnlineStore backing LRO: {create_bigtable_fos_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureOnlineStore created. Resource name: projects/test-project/locations/us-central1/featureOnlineStores/my_fos1"
+ ),
+ call("To use this FeatureOnlineStore in another session:"),
+ call(
+ "feature_online_store = aiplatform.FeatureOnlineStore('projects/test-project/locations/us-central1/featureOnlineStores/my_fos1')"
+ ),
+ ]
+ )
+
+ fos_eq(
+ fos,
+ name=_TEST_BIGTABLE_FOS1_ID,
+ resource_name=_TEST_BIGTABLE_FOS1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_BIGTABLE_FOS1_LABELS,
+ type=FeatureOnlineStoreType.BIGTABLE,
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+def test_create_esf_optimized_store(
+ create_request_timeout,
+ create_esf_optimized_fos_mock,
+ get_esf_optimized_fos_mock,
+ fos_logger_mock,
+ sync=True,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fos = FeatureOnlineStore.create_optimized_store(
+ _TEST_ESF_OPTIMIZED_FOS_ID,
+ labels=_TEST_ESF_OPTIMIZED_FOS_LABELS,
+ create_request_timeout=create_request_timeout,
+ sync=sync,
+ )
+
+ if not sync:
+ fos.wait()
+
+ expected_feature_online_store = types.feature_online_store_v1.FeatureOnlineStore(
+ optimized=types.feature_online_store_v1.FeatureOnlineStore.Optimized(),
+ dedicated_serving_endpoint=types.feature_online_store_v1.FeatureOnlineStore.DedicatedServingEndpoint(),
+ labels=_TEST_ESF_OPTIMIZED_FOS_LABELS,
+ )
+ create_esf_optimized_fos_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ feature_online_store=expected_feature_online_store,
+ feature_online_store_id=_TEST_ESF_OPTIMIZED_FOS_ID,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ fos_logger_mock.assert_has_calls(
+ [
+ call("Creating FeatureOnlineStore"),
+ call(
+ "Create FeatureOnlineStore backing LRO:"
+ f" {create_esf_optimized_fos_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureOnlineStore created. Resource name:"
+ " projects/test-project/locations/us-central1/featureOnlineStores/my_esf_optimized_fos"
+ ),
+ call("To use this FeatureOnlineStore in another session:"),
+ call(
+ "feature_online_store ="
+ " aiplatform.FeatureOnlineStore('projects/test-project/locations/us-central1/featureOnlineStores/my_esf_optimized_fos')"
+ ),
+ ]
+ )
+
+ fos_eq(
+ fos,
+ name=_TEST_ESF_OPTIMIZED_FOS_ID,
+ resource_name=_TEST_ESF_OPTIMIZED_FOS_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_ESF_OPTIMIZED_FOS_LABELS,
+ type=FeatureOnlineStoreType.OPTIMIZED,
+ )
+
+
+def test_create_psc_optimized_store_no_project_allowlist_raises_error():
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "`project_allowlist` cannot be empty when `enable_private_service_connect` is"
+ " set to true."
+ ),
+ ):
+ FeatureOnlineStore.create_optimized_store(
+ _TEST_PSC_OPTIMIZED_FOS_ID,
+ labels=_TEST_PSC_OPTIMIZED_FOS_LABELS,
+ enable_private_service_connect=True,
+ )
+
+
+def test_create_psc_optimized_store_empty_project_allowlist_raises_error():
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "`project_allowlist` cannot be empty when `enable_private_service_connect` is"
+ " set to true."
+ ),
+ ):
+ FeatureOnlineStore.create_optimized_store(
+ _TEST_PSC_OPTIMIZED_FOS_ID,
+ enable_private_service_connect=True,
+ project_allowlist=[],
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+@pytest.mark.parametrize("sync", [True, False])
+def test_create_psc_optimized_store(
+ create_psc_optimized_fos_mock,
+ get_psc_optimized_fos_mock,
+ fos_logger_mock,
+ create_request_timeout,
+ sync,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ fos = FeatureOnlineStore.create_optimized_store(
+ _TEST_PSC_OPTIMIZED_FOS_ID,
+ labels=_TEST_PSC_OPTIMIZED_FOS_LABELS,
+ create_request_timeout=create_request_timeout,
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PSC_PROJECT_ALLOWLIST,
+ )
+
+ if not sync:
+ fos.wait()
+
+ expected_feature_online_store = types.feature_online_store_v1.FeatureOnlineStore(
+ optimized=types.feature_online_store_v1.FeatureOnlineStore.Optimized(),
+ dedicated_serving_endpoint=types.feature_online_store_v1.FeatureOnlineStore.DedicatedServingEndpoint(
+ private_service_connect_config=types.service_networking_v1.PrivateServiceConnectConfig(
+ enable_private_service_connect=True,
+ project_allowlist=_TEST_PSC_PROJECT_ALLOWLIST,
+ )
+ ),
+ labels=_TEST_PSC_OPTIMIZED_FOS_LABELS,
+ )
+ create_psc_optimized_fos_mock.assert_called_once_with(
+ parent=_TEST_PARENT,
+ feature_online_store=expected_feature_online_store,
+ feature_online_store_id=_TEST_PSC_OPTIMIZED_FOS_ID,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ fos_logger_mock.assert_has_calls(
+ [
+ call("Creating FeatureOnlineStore"),
+ call(
+ "Create FeatureOnlineStore backing LRO:"
+ f" {create_psc_optimized_fos_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureOnlineStore created. Resource name:"
+ " projects/test-project/locations/us-central1/featureOnlineStores/my_psc_optimized_fos"
+ ),
+ call("To use this FeatureOnlineStore in another session:"),
+ call(
+ "feature_online_store ="
+ " aiplatform.FeatureOnlineStore('projects/test-project/locations/us-central1/featureOnlineStores/my_psc_optimized_fos')"
+ ),
+ ]
+ )
+
+ fos_eq(
+ fos,
+ name=_TEST_PSC_OPTIMIZED_FOS_ID,
+ resource_name=_TEST_PSC_OPTIMIZED_FOS_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_PSC_OPTIMIZED_FOS_LABELS,
+ type=FeatureOnlineStoreType.OPTIMIZED,
+ )
+
+
+def test_list(list_fos_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ online_stores = FeatureOnlineStore.list()
+
+ list_fos_mock.assert_called_once_with(request={"parent": _TEST_PARENT})
+ assert len(online_stores) == len(_TEST_FOS_LIST)
+ fos_eq(
+ online_stores[0],
+ name=_TEST_BIGTABLE_FOS1_ID,
+ resource_name=_TEST_BIGTABLE_FOS1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_BIGTABLE_FOS1_LABELS,
+ type=FeatureOnlineStoreType.BIGTABLE,
+ )
+ fos_eq(
+ online_stores[1],
+ name=_TEST_BIGTABLE_FOS2_ID,
+ resource_name=_TEST_BIGTABLE_FOS2_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_BIGTABLE_FOS2_LABELS,
+ type=FeatureOnlineStoreType.BIGTABLE,
+ )
+ fos_eq(
+ online_stores[2],
+ name=_TEST_BIGTABLE_FOS3_ID,
+ resource_name=_TEST_BIGTABLE_FOS3_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_BIGTABLE_FOS3_LABELS,
+ type=FeatureOnlineStoreType.BIGTABLE,
+ )
+
+
+@pytest.mark.parametrize("force", [True, False])
+def test_delete(force, delete_fos_mock, get_fos_mock, fos_logger_mock, sync=True):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fos = FeatureOnlineStore(_TEST_BIGTABLE_FOS1_ID)
+ fos.delete(force=force, sync=sync)
+
+ if not sync:
+ fos.wait()
+
+ delete_fos_mock.assert_called_once_with(
+ name=_TEST_BIGTABLE_FOS1_PATH,
+ force=force,
+ )
+
+ fos_logger_mock.assert_has_calls(
+ [
+ call(
+ "Deleting FeatureOnlineStore resource: projects/test-project/locations/us-central1/featureOnlineStores/my_fos1"
+ ),
+ call(
+ f"Delete FeatureOnlineStore backing LRO: {delete_fos_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureOnlineStore resource projects/test-project/locations/us-central1/featureOnlineStores/my_fos1 deleted."
+ ),
+ ]
+ )
+
+
+def test_create_fv_none_source_raises_error(get_fos_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ fos = FeatureOnlineStore(_TEST_BIGTABLE_FOS1_ID)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape("Please specify a valid source."),
+ ):
+ fos.create_feature_view("bq_fv", None)
+
+
+def test_create_fv_wrong_object_type_raises_error(get_fos_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ fos = FeatureOnlineStore(_TEST_BIGTABLE_FOS1_ID)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "Only FeatureViewBigQuerySource and FeatureViewVertexRagSource are supported sources."
+ ),
+ ):
+ fos.create_feature_view("bq_fv", fos)
+
+
+def test_create_bq_fv_bad_uri_raises_error(get_fos_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ fos = FeatureOnlineStore(_TEST_BIGTABLE_FOS1_ID)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape("Please specify URI in BigQuery source."),
+ ):
+ fos.create_feature_view(
+ "bq_fv",
+ FeatureViewBigQuerySource(uri=None, entity_id_columns=["entity_id"]),
+ )
+
+
+@pytest.mark.parametrize("entity_id_columns", [None, []])
+def test_create_bq_fv_bad_entity_id_columns_raises_error(
+ entity_id_columns, get_fos_mock
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ fos = FeatureOnlineStore(_TEST_BIGTABLE_FOS1_ID)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape("Please specify entity ID columns in BigQuery source."),
+ ):
+ fos.create_feature_view(
+ "bq_fv",
+ FeatureViewBigQuerySource(uri="hi", entity_id_columns=entity_id_columns),
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+@pytest.mark.parametrize("sync", [True, False])
+def test_create_bq_fv(
+ create_request_timeout,
+ sync,
+ get_fos_mock,
+ create_bq_fv_mock,
+ get_fv_mock,
+ fos_logger_mock,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ fos = FeatureOnlineStore(_TEST_BIGTABLE_FOS1_ID)
+
+ fv = fos.create_feature_view(
+ _TEST_FV1_ID,
+ FeatureViewBigQuerySource(
+ uri=_TEST_FV1_BQ_URI, entity_id_columns=_TEST_FV1_ENTITY_ID_COLUMNS
+ ),
+ labels=_TEST_FV1_LABELS,
+ create_request_timeout=create_request_timeout,
+ )
+
+ if not sync:
+ fos.wait()
+
+ # When creating, the FeatureView object doesn't have the path set.
+ expected_fv = types.feature_view.FeatureView(
+ big_query_source=types.feature_view.FeatureView.BigQuerySource(
+ uri=_TEST_FV1_BQ_URI,
+ entity_id_columns=_TEST_FV1_ENTITY_ID_COLUMNS,
+ ),
+ labels=_TEST_FV1_LABELS,
+ )
+ create_bq_fv_mock.assert_called_with(
+ parent=_TEST_BIGTABLE_FOS1_PATH,
+ feature_view=expected_fv,
+ feature_view_id=_TEST_FV1_ID,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ fv_eq(
+ fv_to_check=fv,
+ name=_TEST_FV1_ID,
+ resource_name=_TEST_FV1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FV1_LABELS,
+ )
+
+ fos_logger_mock.assert_has_calls(
+ [
+ call("Creating FeatureView"),
+ call(
+ f"Create FeatureView backing LRO: {create_bq_fv_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureView created. Resource name: projects/test-project/locations/us-central1/featureOnlineStores/my_fos1/featureViews/my_fv1"
+ ),
+ call("To use this FeatureView in another session:"),
+ call(
+ "feature_view = aiplatform.FeatureView('projects/test-project/locations/us-central1/featureOnlineStores/my_fos1/featureViews/my_fv1')"
+ ),
+ ]
+ )
+
+
+def test_create_embedding_fv(
+ get_esf_optimized_fos_mock,
+ create_embedding_fv_from_bq_mock,
+ get_optimized_embedding_fv_mock,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ fos = FeatureOnlineStore(_TEST_ESF_OPTIMIZED_FOS_ID)
+
+ embedding_fv = fos.create_feature_view(
+ _TEST_OPTIMIZED_EMBEDDING_FV_ID,
+ FeatureViewBigQuerySource(uri="hi", entity_id_columns=["entity_id"]),
+ index_config=IndexConfig(
+ embedding_column="embedding",
+ dimensions=1536,
+ filter_columns=["currency_code", "gender", "shipping_country_codes"],
+ crowding_column="crowding",
+ distance_measure_type=DistanceMeasureType.SQUARED_L2_DISTANCE,
+ algorithm_config=TreeAhConfig(),
+ ),
+ )
+ fv_eq(
+ fv_to_check=embedding_fv,
+ name=_TEST_OPTIMIZED_EMBEDDING_FV_ID,
+ resource_name=_TEST_OPTIMIZED_EMBEDDING_FV_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FV1_LABELS,
+ )
+
+
+def test_create_rag_fv_bad_uri_raises_error(get_fos_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ fos = FeatureOnlineStore(_TEST_BIGTABLE_FOS1_ID)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape("Please specify URI in Vertex RAG source."),
+ ):
+ fos.create_feature_view(
+ "rag_fv",
+ FeatureViewVertexRagSource(uri=None),
+ )
+
+
+@pytest.mark.parametrize("create_request_timeout", [None, 1.0])
+@pytest.mark.parametrize("sync", [True, False])
+def test_create_rag_fv(
+ create_request_timeout,
+ sync,
+ get_fos_mock,
+ create_rag_fv_mock,
+ get_rag_fv_mock,
+ fos_logger_mock,
+):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ fos = FeatureOnlineStore(_TEST_BIGTABLE_FOS1_ID)
+
+ rag_fv = fos.create_feature_view(
+ _TEST_FV3_ID,
+ FeatureViewVertexRagSource(uri=_TEST_FV3_BQ_URI),
+ labels=_TEST_FV3_LABELS,
+ create_request_timeout=create_request_timeout,
+ )
+
+ if not sync:
+ fos.wait()
+
+ # When creating, the FeatureView object doesn't have the path set.
+ expected_fv = types.feature_view.FeatureView(
+ vertex_rag_source=types.feature_view.FeatureView.VertexRagSource(
+ uri=_TEST_FV3_BQ_URI,
+ ),
+ labels=_TEST_FV3_LABELS,
+ )
+ create_rag_fv_mock.assert_called_with(
+ parent=_TEST_BIGTABLE_FOS1_PATH,
+ feature_view=expected_fv,
+ feature_view_id=_TEST_FV3_ID,
+ metadata=(),
+ timeout=create_request_timeout,
+ )
+
+ fv_eq(
+ fv_to_check=rag_fv,
+ name=_TEST_FV3_ID,
+ resource_name=_TEST_FV3_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FV3_LABELS,
+ )
+
+ fos_logger_mock.assert_has_calls(
+ [
+ call("Creating FeatureView"),
+ call(
+ f"Create FeatureView backing LRO: {create_rag_fv_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureView created. Resource name: projects/test-project/locations/us-central1/featureOnlineStores/my_fos1/featureViews/my_fv3"
+ ),
+ call("To use this FeatureView in another session:"),
+ call(
+ "feature_view = aiplatform.FeatureView('projects/test-project/locations/us-central1/featureOnlineStores/my_fos1/featureViews/my_fv3')"
+ ),
+ ]
+ )
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_view.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_view.py
new file mode 100644
index 0000000000000000000000000000000000000000..c310bbd1e30b216b95b041f951e8075b761cb51d
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_feature_view.py
@@ -0,0 +1,855 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+from typing import Dict
+from unittest import mock
+from unittest.mock import call, patch
+from google.api_core import operation as ga_operation
+
+from google.cloud import aiplatform
+from google.cloud.aiplatform import base
+from vertexai.resources.preview import (
+ FeatureView,
+)
+import vertexai.resources.preview.feature_store.utils as fs_utils
+import pytest
+from google.cloud.aiplatform.compat.services import (
+ feature_online_store_admin_service_client,
+ feature_online_store_service_client,
+)
+from vertexai.resources.preview.feature_store import (
+ feature_view,
+)
+
+from feature_store_constants import (
+ _TEST_BIGTABLE_FOS1_ID,
+ _TEST_BIGTABLE_FOS1_PATH,
+ _TEST_EMBEDDING_FV1_PATH,
+ _TEST_STRING_FILTER,
+ _TEST_FV1_ID,
+ _TEST_FV1_LABELS,
+ _TEST_FV1_PATH,
+ _TEST_FV2_ID,
+ _TEST_FV2_LABELS,
+ _TEST_FV2_PATH,
+ _TEST_FV3_ID,
+ _TEST_FV3_LABELS,
+ _TEST_FV3_PATH,
+ _TEST_FV_FETCH1,
+ _TEST_FV_LIST,
+ _TEST_FV_SEARCH1,
+ _TEST_FV_SYNC1,
+ _TEST_FV_SYNC1_ID,
+ _TEST_FV_SYNC1_PATH,
+ _TEST_FV_SYNC2_ID,
+ _TEST_FV_SYNC2_PATH,
+ _TEST_FV_SYNC_LIST,
+ _TEST_LOCATION,
+ _TEST_OPTIMIZED_FV1_PATH,
+ _TEST_OPTIMIZED_FV2_PATH,
+ _TEST_PROJECT,
+ _TEST_FV_SYNC1_RESPONSE,
+)
+
+
+pytestmark = pytest.mark.usefixtures("google_auth_mock")
+
+
+@pytest.fixture
+def fv_logger_mock():
+ with patch.object(
+ feature_view._LOGGER,
+ "info",
+ wraps=feature_view._LOGGER.info,
+ ) as logger_mock:
+ yield logger_mock
+
+
+@pytest.fixture
+def list_fv_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "list_feature_views",
+ ) as list_fv:
+ list_fv.return_value = _TEST_FV_LIST
+ yield list_fv
+
+
+@pytest.fixture
+def delete_fv_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "delete_feature_view",
+ ) as delete_fv:
+ delete_fv_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_fv.return_value = delete_fv_lro_mock
+ yield delete_fv
+
+
+@pytest.fixture
+def get_fv_sync_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "get_feature_view_sync",
+ ) as get_fv_sync_mock:
+ get_fv_sync_mock.return_value = _TEST_FV_SYNC1
+ yield get_fv_sync_mock
+
+
+@pytest.fixture
+def list_fv_syncs_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "list_feature_view_syncs",
+ ) as list_fv_syncs_mock:
+ list_fv_syncs_mock.return_value = _TEST_FV_SYNC_LIST
+ yield list_fv_syncs_mock
+
+
+@pytest.fixture
+def sync_fv_sync_mock():
+ with patch.object(
+ feature_online_store_admin_service_client.FeatureOnlineStoreAdminServiceClient,
+ "sync_feature_view",
+ ) as sync_fv_sync_mock:
+ sync_fv_sync_mock.return_value = _TEST_FV_SYNC1_RESPONSE
+ yield sync_fv_sync_mock
+
+
+@pytest.fixture
+def fetch_feature_values_mock():
+ with patch.object(
+ feature_online_store_service_client.FeatureOnlineStoreServiceClient,
+ "fetch_feature_values",
+ ) as fetch_feature_values_mock:
+ fetch_feature_values_mock.return_value = _TEST_FV_FETCH1
+ yield fetch_feature_values_mock
+
+
+@pytest.fixture
+def search_nearest_entities_mock():
+ with patch.object(
+ feature_online_store_service_client.FeatureOnlineStoreServiceClient,
+ "search_nearest_entities",
+ ) as search_nearest_entities_mock:
+ search_nearest_entities_mock.return_value = _TEST_FV_SEARCH1
+ yield search_nearest_entities_mock
+
+
+@pytest.fixture
+def transport_mock():
+ with mock.patch(
+ "google.cloud.aiplatform_v1.services.feature_online_store_service.transports.grpc.FeatureOnlineStoreServiceGrpcTransport"
+ ) as transport:
+ transport.return_value = mock.MagicMock(autospec=True)
+ yield transport
+
+
+@pytest.fixture
+def grpc_insecure_channel_mock():
+ import grpc
+
+ with mock.patch.object(grpc, "insecure_channel", autospec=True) as channel:
+ channel.return_value = mock.MagicMock(autospec=True)
+ yield channel
+
+
+@pytest.fixture
+def client_mock():
+ with mock.patch(
+ "google.cloud.aiplatform_v1.services.feature_online_store_service.FeatureOnlineStoreServiceClient"
+ ) as client_mock:
+ yield client_mock
+
+
+@pytest.fixture
+def utils_client_with_override_mock():
+ with mock.patch(
+ "google.cloud.aiplatform.utils.FeatureOnlineStoreClientWithOverride"
+ ) as client_mock:
+ yield client_mock
+
+
+def fv_eq(
+ fv_to_check: FeatureView,
+ name: str,
+ resource_name: str,
+ project: str,
+ location: str,
+ labels: Dict[str, str],
+):
+ """Check if a FeatureView has the appropriate values set."""
+ assert fv_to_check.name == name
+ assert fv_to_check.resource_name == resource_name
+ assert fv_to_check.project == project
+ assert fv_to_check.location == location
+ assert fv_to_check.labels == labels
+
+
+def fv_sync_eq(
+ fv_sync_to_check: FeatureView.FeatureViewSync,
+ name: str,
+ resource_name: str,
+ project: str,
+ location: str,
+):
+ """Check if a FeatureViewSync has the appropriate values set."""
+ assert fv_sync_to_check.name == name
+ assert fv_sync_to_check.resource_name == resource_name
+ assert fv_sync_to_check.project == project
+ assert fv_sync_to_check.location == location
+
+
+def test_init_with_fv_id_and_no_fos_id_raises_error(get_fv_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "Since feature view is not provided as a path, please specify"
+ + " feature_online_store_id."
+ ),
+ ):
+ FeatureView(_TEST_FV1_ID)
+
+
+def test_init_with_fv_id(get_fv_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fv = FeatureView(_TEST_FV1_ID, feature_online_store_id=_TEST_BIGTABLE_FOS1_ID)
+
+ get_fv_mock.assert_called_once_with(
+ name=_TEST_FV1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ fv_eq(
+ fv_to_check=fv,
+ name=_TEST_FV1_ID,
+ resource_name=_TEST_FV1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FV1_LABELS,
+ )
+
+
+def test_init_with_fv_path(get_fv_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fv = FeatureView(_TEST_FV1_PATH)
+
+ get_fv_mock.assert_called_once_with(
+ name=_TEST_FV1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ fv_eq(
+ fv_to_check=fv,
+ name=_TEST_FV1_ID,
+ resource_name=_TEST_FV1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FV1_LABELS,
+ )
+
+
+def test_list(list_fv_mock, get_fos_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ feature_views = FeatureView.list(feature_online_store_id=_TEST_BIGTABLE_FOS1_ID)
+
+ list_fv_mock.assert_called_once_with(request={"parent": _TEST_BIGTABLE_FOS1_PATH})
+ assert len(feature_views) == len(_TEST_FV_LIST)
+
+ fv_eq(
+ feature_views[0],
+ name=_TEST_FV1_ID,
+ resource_name=_TEST_FV1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FV1_LABELS,
+ )
+ fv_eq(
+ feature_views[1],
+ name=_TEST_FV2_ID,
+ resource_name=_TEST_FV2_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FV2_LABELS,
+ )
+ fv_eq(
+ feature_views[2],
+ name=_TEST_FV3_ID,
+ resource_name=_TEST_FV3_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ labels=_TEST_FV3_LABELS,
+ )
+
+
+def test_delete(delete_fv_mock, fv_logger_mock, get_fos_mock, get_fv_mock, sync=True):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fv = FeatureView(name=_TEST_FV1_ID, feature_online_store_id=_TEST_BIGTABLE_FOS1_ID)
+ fv.delete()
+
+ if not sync:
+ fv.wait()
+
+ delete_fv_mock.assert_called_once_with(name=_TEST_FV1_PATH)
+
+ fv_logger_mock.assert_has_calls(
+ [
+ call(
+ "Deleting FeatureView resource: projects/test-project/locations/us-central1/featureOnlineStores/my_fos1/featureViews/my_fv1"
+ ),
+ call(
+ f"Delete FeatureView backing LRO: {delete_fv_mock.return_value.operation.name}"
+ ),
+ call(
+ "FeatureView resource projects/test-project/locations/us-central1/featureOnlineStores/my_fos1/featureViews/my_fv1 deleted."
+ ),
+ ]
+ )
+
+
+def test_get_sync(get_fv_mock, get_fv_sync_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fv_sync = FeatureView(_TEST_FV1_PATH).get_sync(_TEST_FV_SYNC1_ID)
+
+ get_fv_mock.assert_called_once_with(
+ name=_TEST_FV1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ get_fv_sync_mock.assert_called_once_with(
+ name=_TEST_FV_SYNC1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ fv_sync_eq(
+ fv_sync_to_check=fv_sync,
+ name=_TEST_FV_SYNC1_ID,
+ resource_name=_TEST_FV_SYNC1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+
+def test_list_syncs(get_fv_mock, list_fv_syncs_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fv_syncs = FeatureView(_TEST_FV1_PATH).list_syncs()
+
+ get_fv_mock.assert_called_once_with(
+ name=_TEST_FV1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ list_fv_syncs_mock.assert_called_once_with(request={"parent": _TEST_FV1_PATH})
+ assert len(fv_syncs) == len(_TEST_FV_SYNC_LIST)
+
+ fv_sync_eq(
+ fv_sync_to_check=fv_syncs[0],
+ name=_TEST_FV_SYNC1_ID,
+ resource_name=_TEST_FV_SYNC1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+ fv_sync_eq(
+ fv_sync_to_check=fv_syncs[1],
+ name=_TEST_FV_SYNC2_ID,
+ resource_name=_TEST_FV_SYNC2_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+
+def test_on_demand_sync(get_fv_mock, get_fv_sync_mock, sync_fv_sync_mock):
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ fv_sync = FeatureView(_TEST_FV1_PATH).sync()
+
+ get_fv_mock.assert_called_once_with(
+ name=_TEST_FV1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ sync_fv_sync_mock.assert_called_once_with(
+ request={"feature_view": _TEST_FV1_PATH},
+ )
+
+ get_fv_sync_mock.assert_called_once_with(
+ name=_TEST_FV_SYNC1_PATH,
+ retry=base._DEFAULT_RETRY,
+ )
+
+ fv_sync_eq(
+ fv_sync_to_check=fv_sync,
+ name=_TEST_FV_SYNC1_ID,
+ resource_name=_TEST_FV_SYNC1_PATH,
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ )
+
+
+@pytest.mark.parametrize("output_type", ["dict", "proto"])
+def test_fetch_feature_values_bigtable(
+ get_fos_mock, get_fv_mock, fetch_feature_values_mock, fv_logger_mock, output_type
+):
+ if output_type == "dict":
+ fv_dict = FeatureView(_TEST_FV1_PATH).read(key=["key1"]).to_dict()
+ assert fv_dict == {
+ "features": [{"name": "key1", "value": {"string_value": "value1"}}]
+ }
+ elif output_type == "proto":
+ fv_proto = FeatureView(_TEST_FV1_PATH).read(key=["key1"]).to_proto()
+ assert fv_proto == _TEST_FV_FETCH1
+
+ fv_logger_mock.assert_has_calls(
+ [
+ call("Connecting to Bigtable online store name my_fos1"),
+ ]
+ )
+
+
+@pytest.mark.parametrize("output_type", ["dict", "proto"])
+def test_fetch_feature_values_optimized(
+ get_esf_optimized_fos_mock,
+ get_optimized_fv_mock,
+ fetch_feature_values_mock,
+ fv_logger_mock,
+ output_type,
+):
+ if output_type == "dict":
+ fv_dict = FeatureView(_TEST_OPTIMIZED_FV1_PATH).read(key=["key1"]).to_dict()
+ assert fv_dict == {
+ "features": [{"name": "key1", "value": {"string_value": "value1"}}]
+ }
+ elif output_type == "proto":
+ fv_proto = FeatureView(_TEST_OPTIMIZED_FV1_PATH).read(key=["key1"]).to_proto()
+ assert fv_proto == _TEST_FV_FETCH1
+
+ fv_logger_mock.assert_has_calls(
+ [
+ call(
+ "Public endpoint for the optimized online store my_esf_optimized_fos is test-esf-endpoint"
+ ),
+ ]
+ )
+
+
+def test_fetch_feature_values_optimized_no_endpoint(
+ get_esf_optimized_fos_no_endpoint_mock,
+ get_optimized_fv_no_endpointmock,
+ fetch_feature_values_mock,
+):
+ """Tests that the public endpoint is not created for the optimized online store."""
+ with pytest.raises(
+ fs_utils.PublicEndpointNotFoundError,
+ match=re.escape(
+ "Public endpoint is not created yet for the optimized online "
+ "store:my_esf_optimised_fos2. Please run sync and wait for it "
+ "to complete."
+ ),
+ ):
+ FeatureView(_TEST_OPTIMIZED_FV2_PATH).read(key=["key1"]).to_dict()
+
+
+def test_ffv_optimized_psc_with_no_connection_options_raises_error(
+ get_psc_optimized_fos_mock,
+ get_optimized_fv_mock,
+):
+ with pytest.raises(ValueError) as excinfo:
+ FeatureView(_TEST_OPTIMIZED_FV1_PATH).read(key=["key1"])
+
+ assert str(excinfo.value) == (
+ "Use `connection_options` to specify an IP address. Required for optimized online store with private service connect."
+ )
+
+
+def test_ffv_optimized_psc_with_no_connection_transport_raises_error(
+ get_psc_optimized_fos_mock,
+ get_optimized_fv_mock,
+):
+ with pytest.raises(ValueError) as excinfo:
+ FeatureView(_TEST_OPTIMIZED_FV1_PATH).read(
+ key=["key1"],
+ connection_options=fs_utils.ConnectionOptions(
+ host="1.2.3.4", transport=None
+ ),
+ )
+
+ assert str(excinfo.value) == (
+ "Unsupported connection transport type, got transport: None"
+ )
+
+
+def test_ffv_optimized_psc_with_bad_connection_transport_raises_error(
+ get_psc_optimized_fos_mock,
+ get_optimized_fv_mock,
+):
+ with pytest.raises(ValueError) as excinfo:
+ FeatureView(_TEST_OPTIMIZED_FV1_PATH).read(
+ key=["key1"],
+ connection_options=fs_utils.ConnectionOptions(
+ host="1.2.3.4", transport="hi"
+ ),
+ )
+
+ assert str(excinfo.value) == (
+ "Unsupported connection transport type, got transport: hi"
+ )
+
+
+@pytest.mark.parametrize("output_type", ["dict", "proto"])
+def test_ffv_optimized_psc(
+ get_psc_optimized_fos_mock,
+ get_optimized_fv_mock,
+ transport_mock,
+ grpc_insecure_channel_mock,
+ fetch_feature_values_mock,
+ output_type,
+):
+ rsp = FeatureView(_TEST_OPTIMIZED_FV1_PATH).read(
+ key=["key1"],
+ connection_options=fs_utils.ConnectionOptions(
+ host="1.2.3.4",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ ),
+ )
+
+ # Ensure that we create and use insecure channel to the target.
+ grpc_insecure_channel_mock.assert_called_once_with("1.2.3.4:10002")
+ transport_grpc_channel = transport_mock.call_args.kwargs["channel"]
+ assert transport_grpc_channel == grpc_insecure_channel_mock.return_value
+
+ if output_type == "dict":
+ assert rsp.to_dict() == {
+ "features": [{"name": "key1", "value": {"string_value": "value1"}}]
+ }
+ elif output_type == "proto":
+ assert rsp.to_proto() == _TEST_FV_FETCH1
+
+
+def test_same_connection_options_are_equal():
+ opt1 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+ opt2 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+ assert opt1 == opt2
+
+
+def test_different_host_in_connection_options_are_not_equal():
+ opt1 = fs_utils.ConnectionOptions(
+ host="1.1.1.2",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+ opt2 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+
+ assert opt1 != opt2
+
+
+def test_bad_transport_in_compared_connection_options_raises_error():
+ opt1 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+ opt2 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=None,
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ assert opt1 != opt2
+
+ assert str(excinfo.value) == (
+ "Transport 'ConnectionOptions.InsecureGrpcChannel()' cannot be compared to transport 'None'."
+ )
+
+
+def test_bad_transport_in_connection_options_raises_error():
+ opt1 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=None,
+ )
+ opt2 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ assert opt1 != opt2
+
+ assert str(excinfo.value) == ("Unsupported transport supplied: None")
+
+
+def test_same_connection_options_have_same_hash():
+ opt1 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+ opt2 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+
+ d = {}
+ d[opt1] = "hi"
+ assert d[opt2] == "hi"
+
+
+@pytest.mark.parametrize(
+ "hosts",
+ [
+ ("1.1.1.1", "1.1.1.2"),
+ ("1.1.1.2", "1.1.1.1"),
+ ("10.0.0.1", "9.9.9.9"),
+ ],
+)
+def test_different_host_in_connection_options_have_different_hash(hosts):
+ opt1 = fs_utils.ConnectionOptions(
+ host=hosts[0],
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+ opt2 = fs_utils.ConnectionOptions(
+ host=hosts[1],
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+
+ d = {}
+ d[opt1] = "hi"
+ assert opt2 not in d
+
+
+@pytest.mark.parametrize(
+ "transports",
+ [
+ (fs_utils.ConnectionOptions.InsecureGrpcChannel(), None),
+ (None, fs_utils.ConnectionOptions.InsecureGrpcChannel()),
+ (None, "hi"),
+ ("hi", None),
+ ],
+)
+def test_bad_transport_in_connection_options_have_different_hash(transports):
+ opt1 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=transports[0],
+ )
+ opt2 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=transports[1],
+ )
+
+ d = {}
+ d[opt1] = "hi"
+ assert opt2 not in d
+
+
+def test_diff_host_and_bad_transport_in_connection_options_have_different_hash():
+ opt1 = fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=None,
+ )
+ opt2 = fs_utils.ConnectionOptions(
+ host="9.9.9.9",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ )
+
+ d = {}
+ d[opt1] = "hi"
+ assert opt2 not in d
+
+
+def test_ffv_optimized_psc_reuse_client_for_same_connection_options_in_same_ffv(
+ get_psc_optimized_fos_mock,
+ get_optimized_fv_mock,
+ client_mock,
+ transport_mock,
+ grpc_insecure_channel_mock,
+ fetch_feature_values_mock,
+):
+ fv = FeatureView(_TEST_OPTIMIZED_FV1_PATH)
+ fv.read(
+ key=["key1"],
+ connection_options=fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ ),
+ )
+ fv.read(
+ key=["key2"],
+ connection_options=fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ ),
+ )
+
+ # Insecure channel and transport creation should only be done once.
+ assert grpc_insecure_channel_mock.call_args_list == [mock.call("1.1.1.1:10002")]
+ assert transport_mock.call_args_list == [
+ mock.call(channel=grpc_insecure_channel_mock.return_value),
+ ]
+
+
+def test_ffv_optimized_psc_different_client_for_different_connection_options(
+ get_psc_optimized_fos_mock,
+ get_optimized_fv_mock,
+ client_mock,
+ transport_mock,
+ grpc_insecure_channel_mock,
+ fetch_feature_values_mock,
+):
+ # Return two different grpc channels each time insecure channel is called.
+ import grpc
+
+ grpc_chan1 = mock.MagicMock(spec=grpc.Channel)
+ grpc_chan2 = mock.MagicMock(spec=grpc.Channel)
+ grpc_insecure_channel_mock.side_effect = [grpc_chan1, grpc_chan2]
+
+ fv = FeatureView(_TEST_OPTIMIZED_FV1_PATH)
+ fv.read(
+ key=["key1"],
+ connection_options=fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ ),
+ )
+ fv.read(
+ key=["key2"],
+ connection_options=fs_utils.ConnectionOptions(
+ host="1.2.3.4",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ ),
+ )
+
+ # Insecure channel and transport creation should be done twice - one for each different connection.
+ assert grpc_insecure_channel_mock.call_args_list == [
+ mock.call("1.1.1.1:10002"),
+ mock.call("1.2.3.4:10002"),
+ ]
+ assert transport_mock.call_args_list == [
+ mock.call(channel=grpc_chan1),
+ mock.call(channel=grpc_chan2),
+ ]
+
+
+def test_ffv_optimized_psc_bad_gapic_client_raises_error(
+ get_psc_optimized_fos_mock, get_optimized_fv_mock, utils_client_with_override_mock
+):
+ with pytest.raises(ValueError) as excinfo:
+ FeatureView(_TEST_OPTIMIZED_FV1_PATH).read(
+ key=["key1"],
+ connection_options=fs_utils.ConnectionOptions(
+ host="1.1.1.1",
+ transport=fs_utils.ConnectionOptions.InsecureGrpcChannel(),
+ ),
+ )
+
+ assert str(excinfo.value) == (
+ f"Unexpected gapic class '{utils_client_with_override_mock.get_gapic_client_class.return_value}' used by internal client."
+ )
+
+
+@pytest.mark.parametrize("output_type", ["dict", "proto"])
+def test_search_nearest_entities(
+ get_esf_optimized_fos_mock,
+ get_embedding_fv_mock,
+ search_nearest_entities_mock,
+ fv_logger_mock,
+ output_type,
+):
+ if output_type == "dict":
+ fv_dict = (
+ # Test with entity_id input.
+ FeatureView(_TEST_EMBEDDING_FV1_PATH)
+ .search(
+ entity_id="key1",
+ neighbor_count=2,
+ string_filters=[_TEST_STRING_FILTER],
+ per_crowding_attribute_neighbor_count=1,
+ return_full_entity=True,
+ approximate_neighbor_candidates=3,
+ leaf_nodes_search_fraction=0.5,
+ )
+ .to_dict()
+ )
+ assert fv_dict == {
+ "neighbors": [{"distance": 0.1, "entity_id": "neighbor_entity_id_1"}]
+ }
+ elif output_type == "proto":
+ fv_proto = (
+ # Test with embedding_value input.
+ FeatureView(_TEST_EMBEDDING_FV1_PATH)
+ .search(embedding_value=[0.1, 0.2, 0.3])
+ .to_proto()
+ )
+ assert fv_proto == _TEST_FV_SEARCH1
+
+ fv_logger_mock.assert_has_calls(
+ [
+ call(
+ "Public endpoint for the optimized online store my_esf_optimized_fos"
+ " is test-esf-endpoint"
+ ),
+ ]
+ )
+
+
+def test_search_nearest_entities_without_entity_id_or_embedding(
+ get_esf_optimized_fos_mock,
+ get_embedding_fv_mock,
+ search_nearest_entities_mock,
+ fv_logger_mock,
+):
+ try:
+ FeatureView(_TEST_EMBEDDING_FV1_PATH).search().to_proto()
+ assert not search_nearest_entities_mock.called
+ except ValueError as e:
+ error_msg = (
+ "Either entity_id or embedding_value needs to be provided for search."
+ )
+ assert str(e) == error_msg
+
+
+def test_search_nearest_entities_no_endpoint(
+ get_esf_optimized_fos_no_endpoint_mock,
+ get_optimized_fv_no_endpointmock,
+ fetch_feature_values_mock,
+):
+ """Tests that the public endpoint is not created for the optimized online store."""
+ try:
+ FeatureView(_TEST_OPTIMIZED_FV2_PATH).search(entity_id="key1").to_dict()
+ assert not fetch_feature_values_mock.called
+ except fs_utils.PublicEndpointNotFoundError as e:
+ assert isinstance(e, fs_utils.PublicEndpointNotFoundError)
+ error_msg = (
+ "Public endpoint is not created yet for the optimized online "
+ "store:my_esf_optimised_fos2. Please run sync and wait for it "
+ "to complete."
+ )
+ assert str(e) == error_msg
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_model_monitors.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_model_monitors.py
new file mode 100644
index 0000000000000000000000000000000000000000..11a02d02368831111aabc10d42113fa7d1966ee0
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_model_monitors.py
@@ -0,0 +1,1227 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import importlib
+import os
+from unittest import mock
+
+from google import auth
+from google.api_core import operation as ga_operation
+from google.auth import credentials as auth_credentials
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform.compat.services import (
+ model_monitoring_service_client_v1beta1 as model_monitoring_service_client,
+ schedule_service_client_v1beta1 as schedule_service_client,
+)
+from google.cloud.aiplatform.compat.types import (
+ io_v1beta1 as io,
+ model_monitor_v1beta1 as gca_model_monitor,
+ model_monitoring_alert_v1beta1 as gca_model_monitoring_alert,
+ model_monitoring_job_v1beta1 as gca_model_monitoring_job,
+ model_monitoring_service_v1beta1 as gca_model_monitoring_service,
+ model_monitoring_spec_v1beta1 as gca_model_monitoring_spec,
+ model_monitoring_stats_v1beta1 as gca_model_monitoring_stats,
+ schedule_service_v1beta1 as gca_schedule_service,
+ schedule_v1beta1 as gca_schedule,
+ job_state_v1beta1 as gca_job_state,
+ explanation_v1beta1 as explanation,
+)
+from vertexai.resources.preview import (
+ ml_monitoring,
+ ModelMonitor,
+ ModelMonitoringJob,
+)
+import pytest
+
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+
+
+# -*- coding: utf-8 -*-
+
+_TEST_CREDENTIALS = mock.Mock(
+ spec=auth_credentials.AnonymousCredentials(),
+ universe_domain="googleapis.com",
+)
+_TEST_DESCRIPTION = "test description"
+_TEST_JSON_CONTENT_TYPE = "application/json"
+_TEST_LOCATION = "us-central1"
+_TEST_LOCATION_2 = "europe-west4"
+_TEST_PROJECT = "test-project"
+_TEST_REPLICA_COUNT = 1
+_TEST_MODEL_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/123"
+_TEST_MODEL_VERSION_ID = "1"
+_TEST_MODEL_MONITOR_APP = "ortools-on-vertex-v0.1"
+_TEST_MODEL_MONITOR_DISPLAY_NAME = "model-monitor-display-name"
+_TEST_MODEL_MONITOR_USER_ID = "user_456"
+_TEST_MODEL_MONITOR_ID = "456"
+_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME = "job-display-name"
+_TEST_MODEL_MONITORING_JOB_USER_ID = "user_789"
+_TEST_MODEL_MONITORING_JOB_ID = "789"
+_TEST_SCHEDULE_NAME = "000"
+_TEST_OUTPUT_PATH = "tests/output_path"
+_TEST_NOTIFICATION_EMAIL = "123@test.com"
+_TEST_BASELINE_RESOURCE = "tests/baseline"
+_TEST_TARGET_RESOURCE = "tests/target"
+_TEST_TRAINING_DATASET = gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+)
+_TESTDATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "testdata")
+_TEST_MODEL_MONITOR_PARENT = initializer.global_config.common_location_path(
+ project=_TEST_PROJECT, location=_TEST_LOCATION
+)
+_TEST_MODEL_MONITORING_JOB_RESOURCE_NAME = model_monitoring_service_client.ModelMonitoringServiceClient.model_monitoring_job_path(
+ _TEST_PROJECT,
+ _TEST_LOCATION,
+ _TEST_MODEL_MONITOR_ID,
+ _TEST_MODEL_MONITORING_JOB_ID,
+)
+_TEST_MODEL_MONITOR_RESOURCE_NAME = (
+ model_monitoring_service_client.ModelMonitoringServiceClient.model_monitor_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_MODEL_MONITOR_ID
+ )
+)
+_TEST_MODEL_MONITORING_SCHEMA = ml_monitoring.spec.ModelMonitoringSchema(
+ feature_fields=[
+ ml_monitoring.spec.FieldSchema(
+ name="feature1",
+ data_type="string",
+ repeated=False,
+ )
+ ],
+)
+_TEST_CREATE_MODEL_MONITOR_OBJ = gca_model_monitor.ModelMonitor(
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ model_monitoring_target=gca_model_monitor.ModelMonitor.ModelMonitoringTarget(
+ vertex_model=gca_model_monitor.ModelMonitor.ModelMonitoringTarget.VertexModelSource(
+ model=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ )
+ ),
+ training_dataset=gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ ),
+ tabular_objective=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective(
+ feature_drift_spec=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec(
+ categorical_metric_type="l_infinity",
+ numeric_metric_type="jensen_shannon_divergence",
+ default_categorical_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.1,
+ ),
+ default_numeric_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.2,
+ ),
+ )
+ ),
+ output_spec=gca_model_monitoring_spec.ModelMonitoringOutputSpec(
+ gcs_base_directory=io.GcsDestination(output_uri_prefix=_TEST_OUTPUT_PATH)
+ ),
+ notification_spec=gca_model_monitoring_spec.ModelMonitoringNotificationSpec(
+ email_config=gca_model_monitoring_spec.ModelMonitoringNotificationSpec.EmailConfig(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ ),
+)
+_TEST_MODEL_MONITOR_OBJ = gca_model_monitor.ModelMonitor(
+ name=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ model_monitoring_target=gca_model_monitor.ModelMonitor.ModelMonitoringTarget(
+ vertex_model=gca_model_monitor.ModelMonitor.ModelMonitoringTarget.VertexModelSource(
+ model=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ )
+ ),
+ training_dataset=gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ ),
+ model_monitoring_schema=gca_model_monitor.ModelMonitoringSchema(
+ feature_fields=[
+ gca_model_monitor.ModelMonitoringSchema.FieldSchema(
+ name="feature1",
+ data_type="string",
+ repeated=False,
+ )
+ ],
+ ),
+ tabular_objective=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective(
+ feature_drift_spec=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec(
+ categorical_metric_type="l_infinity",
+ numeric_metric_type="jensen_shannon_divergence",
+ default_categorical_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.1,
+ ),
+ default_numeric_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.2,
+ ),
+ )
+ ),
+ output_spec=gca_model_monitoring_spec.ModelMonitoringOutputSpec(
+ gcs_base_directory=io.GcsDestination(output_uri_prefix=_TEST_OUTPUT_PATH)
+ ),
+ notification_spec=gca_model_monitoring_spec.ModelMonitoringNotificationSpec(
+ email_config=gca_model_monitoring_spec.ModelMonitoringNotificationSpec.EmailConfig(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ ),
+ explanation_spec=explanation.ExplanationSpec(
+ parameters=explanation.ExplanationParameters(top_k=10)
+ ),
+)
+_TEST_UPDATED_MODEL_MONITOR_OBJ = gca_model_monitor.ModelMonitor(
+ name=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ model_monitoring_target=gca_model_monitor.ModelMonitor.ModelMonitoringTarget(
+ vertex_model=gca_model_monitor.ModelMonitor.ModelMonitoringTarget.VertexModelSource(
+ model=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ )
+ ),
+ training_dataset=gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ ),
+ model_monitoring_schema=gca_model_monitor.ModelMonitoringSchema(
+ feature_fields=[
+ gca_model_monitor.ModelMonitoringSchema.FieldSchema(
+ name="feature1",
+ data_type="string",
+ repeated=False,
+ )
+ ],
+ ),
+ tabular_objective=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective(
+ feature_drift_spec=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec(
+ categorical_metric_type="l_infinity",
+ numeric_metric_type="jensen_shannon_divergence",
+ default_categorical_alert_condition=gca_model_monitoring_spec.model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.1,
+ ),
+ default_numeric_alert_condition=gca_model_monitoring_spec.model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.2,
+ ),
+ )
+ ),
+ output_spec=gca_model_monitoring_spec.ModelMonitoringOutputSpec(
+ gcs_base_directory=io.GcsDestination(output_uri_prefix=_TEST_OUTPUT_PATH)
+ ),
+ notification_spec=gca_model_monitoring_spec.ModelMonitoringNotificationSpec(
+ email_config=gca_model_monitoring_spec.ModelMonitoringNotificationSpec.EmailConfig(
+ user_emails=[_TEST_NOTIFICATION_EMAIL, "456@test.com"]
+ ),
+ ),
+ explanation_spec=explanation.ExplanationSpec(
+ parameters=explanation.ExplanationParameters(top_k=10)
+ ),
+)
+_TEST_CREATE_MODEL_MONITORING_JOB_OBJ = gca_model_monitoring_job.ModelMonitoringJob(
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ model_monitoring_spec=gca_model_monitoring_spec.ModelMonitoringSpec(
+ objective_spec=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec(
+ tabular_objective=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective(
+ feature_drift_spec=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec(
+ categorical_metric_type="l_infinity",
+ numeric_metric_type="jensen_shannon_divergence",
+ default_categorical_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.1,
+ ),
+ default_numeric_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.2,
+ ),
+ )
+ ),
+ baseline_dataset=gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ ),
+ target_dataset=gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ )
+ ),
+ explanation_spec=explanation.ExplanationSpec(
+ parameters=explanation.ExplanationParameters(top_k=10)
+ ),
+ ),
+ output_spec=gca_model_monitoring_spec.ModelMonitoringOutputSpec(
+ gcs_base_directory=io.GcsDestination(output_uri_prefix=_TEST_OUTPUT_PATH)
+ ),
+ notification_spec=gca_model_monitoring_spec.ModelMonitoringNotificationSpec(
+ email_config=gca_model_monitoring_spec.ModelMonitoringNotificationSpec.EmailConfig(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ )
+ ),
+ ),
+)
+_TEST_MODEL_MONITORING_JOB_OBJ = gca_model_monitoring_job.ModelMonitoringJob(
+ name=_TEST_MODEL_MONITORING_JOB_RESOURCE_NAME,
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ model_monitoring_spec=gca_model_monitoring_spec.ModelMonitoringSpec(
+ objective_spec=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec(
+ tabular_objective=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective(
+ feature_drift_spec=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec(
+ categorical_metric_type="l_infinity",
+ numeric_metric_type="jensen_shannon_divergence",
+ default_categorical_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.1,
+ ),
+ default_numeric_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.2,
+ ),
+ )
+ ),
+ baseline_dataset=gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ ),
+ target_dataset=gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ )
+ ),
+ explanation_spec=explanation.ExplanationSpec(
+ parameters=explanation.ExplanationParameters(top_k=10)
+ ),
+ ),
+ output_spec=gca_model_monitoring_spec.ModelMonitoringOutputSpec(
+ gcs_base_directory=io.GcsDestination(output_uri_prefix=_TEST_OUTPUT_PATH)
+ ),
+ notification_spec=gca_model_monitoring_spec.ModelMonitoringNotificationSpec(
+ email_config=gca_model_monitoring_spec.ModelMonitoringNotificationSpec.EmailConfig(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ )
+ ),
+ ),
+ state=gca_job_state.JobState.JOB_STATE_SUCCEEDED,
+)
+_TEST_CRON = r"America/New_York 1 \* \* \* \*"
+_TEST_SCHEDULE_OBJ = gca_schedule.Schedule(
+ display_name=_TEST_SCHEDULE_NAME,
+ cron=_TEST_CRON,
+ create_model_monitoring_job_request=gca_model_monitoring_service.CreateModelMonitoringJobRequest(
+ parent=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ model_monitoring_job=_TEST_MODEL_MONITORING_JOB_OBJ,
+ ),
+ max_concurrent_run_count=1,
+)
+_TEST_UPDATED_MODEL_MONITORING_JOB_OBJ = gca_model_monitoring_job.ModelMonitoringJob(
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ model_monitoring_spec=gca_model_monitoring_spec.ModelMonitoringSpec(
+ objective_spec=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec(
+ tabular_objective=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective(
+ feature_drift_spec=gca_model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec(
+ categorical_metric_type="l_infinity",
+ numeric_metric_type="jensen_shannon_divergence",
+ default_categorical_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.1,
+ ),
+ default_numeric_alert_condition=gca_model_monitoring_alert.ModelMonitoringAlertCondition(
+ threshold=0.2,
+ ),
+ )
+ ),
+ baseline_dataset=gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ ),
+ target_dataset=gca_model_monitoring_spec.ModelMonitoringInput(
+ columnized_dataset=gca_model_monitoring_spec.ModelMonitoringInput.ModelMonitoringDataset(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ )
+ ),
+ explanation_spec=explanation.ExplanationSpec(
+ parameters=explanation.ExplanationParameters(top_k=10)
+ ),
+ ),
+ output_spec=gca_model_monitoring_spec.ModelMonitoringOutputSpec(
+ gcs_base_directory=io.GcsDestination(output_uri_prefix=_TEST_OUTPUT_PATH)
+ ),
+ notification_spec=gca_model_monitoring_spec.ModelMonitoringNotificationSpec(
+ email_config=gca_model_monitoring_spec.ModelMonitoringNotificationSpec.EmailConfig(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ )
+ ),
+ ),
+)
+_TEST_UPDATED_SCHEDULE_OBJ = gca_schedule.Schedule(
+ display_name=_TEST_SCHEDULE_NAME,
+ cron=r"America/New_York 0 \* \* \* \*",
+ create_model_monitoring_job_request=gca_model_monitoring_service.CreateModelMonitoringJobRequest(
+ parent=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ model_monitoring_job=_TEST_UPDATED_MODEL_MONITORING_JOB_OBJ,
+ ),
+ max_concurrent_run_count=1,
+)
+_TEST_SEARCH_REQUEST = gca_model_monitoring_service.SearchModelMonitoringStatsRequest(
+ model_monitor=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ stats_filter=(
+ gca_model_monitoring_stats.SearchModelMonitoringStatsFilter(
+ tabular_stats_filter=(
+ gca_model_monitoring_stats.SearchModelMonitoringStatsFilter.TabularStatsFilter(
+ model_monitoring_job=_TEST_MODEL_MONITORING_JOB_RESOURCE_NAME,
+ )
+ )
+ )
+ ),
+)
+_TEST_SEARCH_RESPONSE = (
+ gca_model_monitoring_service.SearchModelMonitoringStatsResponse()
+)
+_TEST_SEARCH_ALERTS_REQUEST = (
+ gca_model_monitoring_service.SearchModelMonitoringAlertsRequest(
+ model_monitor=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ model_monitoring_job=_TEST_MODEL_MONITORING_JOB_RESOURCE_NAME,
+ )
+)
+_TEST_SEARCH_ALERTS_RESPONSE = (
+ gca_model_monitoring_service.SearchModelMonitoringAlertsResponse()
+)
+_TEST_LIST_REQUEST = gca_model_monitoring_service.ListModelMonitoringJobsRequest(
+ parent=_TEST_MODEL_MONITOR_RESOURCE_NAME
+)
+_TEST_LIST_RESPONSE = gca_model_monitoring_service.ListModelMonitoringJobsResponse(
+ model_monitoring_jobs=[
+ _TEST_MODEL_MONITORING_JOB_OBJ,
+ _TEST_MODEL_MONITORING_JOB_OBJ,
+ ],
+ next_page_token="1",
+)
+
+
+@pytest.fixture
+def authorized_session_mock():
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession"
+ ) as mock_authorized_session:
+ mock_auth_session = mock_authorized_session(_TEST_CREDENTIALS)
+ yield mock_auth_session
+
+
+@pytest.fixture(scope="module")
+def google_auth_mock():
+ with mock.patch.object(auth, "default") as google_auth_mock:
+ google_auth_mock.return_value = (
+ auth_credentials.AnonymousCredentials(),
+ "test-project",
+ )
+ yield google_auth_mock
+
+
+@pytest.fixture
+def create_client_mock():
+ with mock.patch.object(
+ initializer.global_config, "create_client"
+ ) as create_client_mock:
+ api_client_mock = mock.Mock(
+ spec=model_monitoring_service_client.ModelMonitoringServiceClient
+ )
+ api_client_mock.get_model_monitor.return_value = _TEST_MODEL_MONITOR_OBJ
+ create_client_mock.return_value = api_client_mock
+ yield create_client_mock
+
+
+@pytest.fixture
+def create_model_monitor_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "create_model_monitor",
+ ) as create_model_monitor_mock:
+ create_model_monitor_lro_mock = mock.Mock(ga_operation.Operation)
+ create_model_monitor_lro_mock.result.return_value = _TEST_MODEL_MONITOR_OBJ
+ create_model_monitor_mock.return_value = create_model_monitor_lro_mock
+ yield create_model_monitor_mock
+
+
+@pytest.fixture
+def get_model_monitor_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "get_model_monitor",
+ ) as get_model_monitor_mock:
+ get_model_monitor_mock.return_value = _TEST_MODEL_MONITOR_OBJ
+ yield get_model_monitor_mock
+
+
+@pytest.fixture
+def update_model_monitor_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "update_model_monitor",
+ ) as update_model_monitor_mock:
+ update_model_monitor_lro_mock = mock.Mock(ga_operation.Operation)
+ update_model_monitor_lro_mock.result.return_value = (
+ _TEST_UPDATED_MODEL_MONITOR_OBJ
+ )
+ update_model_monitor_mock.return_value = update_model_monitor_lro_mock
+ yield update_model_monitor_mock
+
+
+@pytest.fixture
+def create_schedule_mock():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "create_schedule"
+ ) as create_schedule_mock:
+ create_schedule_mock.return_value = _TEST_SCHEDULE_OBJ
+ yield create_schedule_mock
+
+
+@pytest.fixture
+def update_schedule_mock():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "update_schedule"
+ ) as update_schedule_mock:
+ update_schedule_mock.return_value = _TEST_UPDATED_SCHEDULE_OBJ
+ yield update_schedule_mock
+
+
+@pytest.fixture
+def get_schedule_mock():
+ with mock.patch.object(
+ schedule_service_client.ScheduleServiceClient, "get_schedule"
+ ) as get_schedule_mock:
+ get_schedule_mock.return_value = _TEST_SCHEDULE_OBJ
+ yield get_schedule_mock
+
+
+@pytest.fixture
+def search_metrics_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "search_model_monitoring_stats",
+ ) as search_metrics_mock:
+ search_metrics_mock.return_value = (
+ model_monitoring_service_client.pagers.SearchModelMonitoringStatsPager(
+ method=search_metrics_mock,
+ request=_TEST_SEARCH_REQUEST,
+ response=_TEST_SEARCH_RESPONSE,
+ )
+ )
+ yield search_metrics_mock
+
+
+@pytest.fixture
+def search_alerts_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "search_model_monitoring_alerts",
+ ) as search_alerts_mock:
+ search_alerts_mock.return_value = (
+ model_monitoring_service_client.pagers.SearchModelMonitoringAlertsPager(
+ method=search_alerts_mock,
+ request=_TEST_SEARCH_ALERTS_REQUEST,
+ response=_TEST_SEARCH_ALERTS_RESPONSE,
+ )
+ )
+ yield search_alerts_mock
+
+
+@pytest.fixture
+def list_model_monitoring_jobs_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "list_model_monitoring_jobs",
+ ) as list_model_monitoring_jobs_mock:
+ list_model_monitoring_jobs_mock.return_value = (
+ model_monitoring_service_client.pagers.ListModelMonitoringJobsPager(
+ method=list_model_monitoring_jobs_mock,
+ request=_TEST_LIST_REQUEST,
+ response=_TEST_LIST_RESPONSE,
+ )
+ )
+ yield list_model_monitoring_jobs_mock
+
+
+@pytest.fixture
+def delete_model_monitor_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "delete_model_monitor",
+ ) as delete_model_monitor_mock:
+ delete_model_monitor_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_model_monitor_lro_mock.result.return_value = empty_pb2.Empty
+ delete_model_monitor_mock.return_value = delete_model_monitor_lro_mock
+ yield delete_model_monitor_mock
+
+
+@pytest.fixture
+def create_model_monitoring_job_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "create_model_monitoring_job",
+ ) as create_model_monitoring_job_mock:
+ create_model_monitoring_job_mock.return_value = _TEST_MODEL_MONITORING_JOB_OBJ
+ yield create_model_monitoring_job_mock
+
+
+@pytest.fixture
+def get_model_monitoring_job_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "get_model_monitoring_job",
+ ) as get_model_monitoring_job_mock:
+ model_monitoring_job_mock = mock.Mock(
+ spec=gca_model_monitoring_job.ModelMonitoringJob
+ )
+ model_monitoring_job_mock.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
+ model_monitoring_job_mock.name = _TEST_MODEL_MONITORING_JOB_RESOURCE_NAME
+ get_model_monitoring_job_mock.return_value = model_monitoring_job_mock
+ yield get_model_monitoring_job_mock
+
+
+@pytest.fixture
+def delete_model_monitoring_job_mock():
+ with mock.patch.object(
+ model_monitoring_service_client.ModelMonitoringServiceClient,
+ "delete_model_monitoring_job",
+ ) as delete_model_monitoring_job_mock:
+ delete_model_monitoring_job_lro_mock = mock.Mock(ga_operation.Operation)
+ delete_model_monitoring_job_lro_mock.result.return_value = empty_pb2.Empty
+ delete_model_monitoring_job_mock.return_value = (
+ delete_model_monitoring_job_lro_mock
+ )
+ yield delete_model_monitoring_job_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock")
+class TestModelMonitor:
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+ aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ def test_constructor_creates_client(self, create_client_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ ModelMonitor(_TEST_MODEL_MONITOR_ID)
+ create_client_mock.assert_any_call(
+ client_class=utils.ModelMonitoringClientWithOverride,
+ credentials=initializer.global_config.credentials,
+ location_override=_TEST_LOCATION,
+ appended_user_agent=None,
+ )
+
+ def test_constructor_create_client_with_custom_location(self, create_client_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ ModelMonitor(_TEST_MODEL_MONITOR_ID, location=_TEST_LOCATION_2)
+ create_client_mock.assert_any_call(
+ client_class=utils.ModelMonitoringClientWithOverride,
+ credentials=initializer.global_config.credentials,
+ location_override=_TEST_LOCATION_2,
+ appended_user_agent=None,
+ )
+
+ def test_constructor_creates_client_with_custom_credentials(
+ self, create_client_mock
+ ):
+ creds = auth_credentials.AnonymousCredentials()
+ ModelMonitor(_TEST_MODEL_MONITOR_ID, credentials=creds)
+ create_client_mock.assert_any_call(
+ client_class=utils.ModelMonitoringClientWithOverride,
+ credentials=creds,
+ location_override=_TEST_LOCATION,
+ appended_user_agent=None,
+ )
+
+ @pytest.mark.usefixtures("create_model_monitor_mock")
+ def test_create_model_monitor(self, create_model_monitor_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ )
+ create_model_monitor_mock.assert_called_once_with(
+ request=gca_model_monitoring_service.CreateModelMonitorRequest(
+ parent=_TEST_MODEL_MONITOR_PARENT,
+ model_monitor=_TEST_CREATE_MODEL_MONITOR_OBJ,
+ ),
+ )
+
+ @pytest.mark.usefixtures("create_model_monitor_mock")
+ def test_create_model_monitor_with_user_id(self, create_model_monitor_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ model_monitor_id=_TEST_MODEL_MONITOR_USER_ID,
+ )
+ create_model_monitor_mock.assert_called_once_with(
+ request=gca_model_monitoring_service.CreateModelMonitorRequest(
+ parent=_TEST_MODEL_MONITOR_PARENT,
+ model_monitor=_TEST_CREATE_MODEL_MONITOR_OBJ,
+ model_monitor_id=_TEST_MODEL_MONITOR_USER_ID,
+ ),
+ )
+
+ @pytest.mark.usefixtures(
+ "create_model_monitor_mock",
+ "get_model_monitor_mock",
+ "update_model_monitor_mock",
+ )
+ def test_update_model_monitor(self, update_model_monitor_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitor = ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ )
+ assert isinstance(test_model_monitor, ModelMonitor)
+ test_model_monitor.update(
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL, "456@test.com"]
+ ),
+ )
+ update_model_monitor_mock.assert_called_once_with(
+ model_monitor=_TEST_UPDATED_MODEL_MONITOR_OBJ,
+ update_mask=field_mask_pb2.FieldMask(paths=["notification_spec"]),
+ )
+
+ @pytest.mark.usefixtures("create_schedule_mock", "create_model_monitor_mock")
+ def test_create_schedule(self, create_schedule_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitor = ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ explanation_spec=explanation.ExplanationSpec(
+ parameters=explanation.ExplanationParameters(top_k=10)
+ ),
+ )
+ test_model_monitor.create_schedule(
+ display_name=_TEST_SCHEDULE_NAME,
+ model_monitoring_job_display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ cron=_TEST_CRON,
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ )
+ create_schedule_mock.assert_called_once_with(
+ request=gca_schedule_service.CreateScheduleRequest(
+ parent=_TEST_MODEL_MONITOR_PARENT,
+ schedule=gca_schedule.Schedule(
+ display_name=_TEST_SCHEDULE_NAME,
+ cron=_TEST_CRON,
+ create_model_monitoring_job_request=gca_model_monitoring_service.CreateModelMonitoringJobRequest(
+ parent=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ model_monitoring_job=_TEST_CREATE_MODEL_MONITORING_JOB_OBJ,
+ ),
+ max_concurrent_run_count=1,
+ ),
+ )
+ )
+
+ @pytest.mark.usefixtures(
+ "create_schedule_mock",
+ "update_schedule_mock",
+ "get_schedule_mock",
+ "create_model_monitor_mock",
+ )
+ def test_update_schedule(self, update_schedule_mock, get_schedule_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitor = ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ )
+ test_model_monitor.create_schedule(
+ display_name=_TEST_SCHEDULE_NAME,
+ model_monitoring_job_display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ cron=_TEST_CRON,
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ )
+ test_model_monitor.update_schedule(
+ schedule_name=_TEST_SCHEDULE_NAME,
+ cron=r"America/New_York 0 \* \* \* \*",
+ baseline_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ categorical_metric_type="l_infinity",
+ numeric_metric_type="jensen_shannon_divergence",
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ )
+ update_schedule_mock.assert_called_once_with(
+ schedule=_TEST_UPDATED_SCHEDULE_OBJ,
+ update_mask=field_mask_pb2.FieldMask(
+ paths=["cron", "create_model_monitoring_job_request"]
+ ),
+ )
+ assert get_schedule_mock.call_count == 1
+
+ @pytest.mark.usefixtures(
+ "create_model_monitoring_job_mock",
+ "create_model_monitor_mock",
+ "get_model_monitoring_job_mock",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_model_monitoring_job(self, create_model_monitoring_job_mock, sync):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitor = ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ )
+ test_model_monitoring_job = test_model_monitor.run(
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ baseline_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ explanation_spec=explanation.ExplanationSpec(
+ parameters=explanation.ExplanationParameters(top_k=10)
+ ),
+ sync=sync,
+ )
+
+ if not sync:
+ test_model_monitoring_job.wait()
+
+ create_model_monitoring_job_mock.assert_called_once_with(
+ request=gca_model_monitoring_service.CreateModelMonitoringJobRequest(
+ parent=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ model_monitoring_job=_TEST_CREATE_MODEL_MONITORING_JOB_OBJ,
+ )
+ )
+
+ @pytest.mark.usefixtures(
+ "create_model_monitoring_job_mock",
+ "create_model_monitor_mock",
+ "get_model_monitoring_job_mock",
+ )
+ def test_run_model_monitoring_job_with_user_id(
+ self, create_model_monitoring_job_mock
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitor = ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ )
+ test_model_monitor.run(
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ baseline_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ explanation_spec=explanation.ExplanationSpec(
+ parameters=explanation.ExplanationParameters(top_k=10)
+ ),
+ model_monitoring_job_id=_TEST_MODEL_MONITORING_JOB_USER_ID,
+ sync=True,
+ )
+ create_model_monitoring_job_mock.assert_called_once_with(
+ request=gca_model_monitoring_service.CreateModelMonitoringJobRequest(
+ parent=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ model_monitoring_job=_TEST_CREATE_MODEL_MONITORING_JOB_OBJ,
+ model_monitoring_job_id=_TEST_MODEL_MONITORING_JOB_USER_ID,
+ )
+ )
+
+ @pytest.mark.usefixtures(
+ "create_model_monitoring_job_mock",
+ "create_model_monitor_mock",
+ "search_metrics_mock",
+ "get_model_monitoring_job_mock",
+ )
+ def test_search_metrics(self, search_metrics_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitor = ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ )
+ test_model_monitor.run(
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ sync=True,
+ )
+ test_model_monitor.search_metrics(
+ model_monitoring_job_name=_TEST_MODEL_MONITORING_JOB_RESOURCE_NAME
+ )
+ search_metrics_mock.assert_called_once_with(request=_TEST_SEARCH_REQUEST)
+
+ @pytest.mark.usefixtures(
+ "create_model_monitoring_job_mock",
+ "create_model_monitor_mock",
+ "search_alerts_mock",
+ "get_model_monitoring_job_mock",
+ )
+ def test_search_alerts(self, search_alerts_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitor = ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ )
+ test_model_monitor.run(
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ sync=True,
+ )
+ test_model_monitor.search_alerts(
+ model_monitoring_job_name=_TEST_MODEL_MONITORING_JOB_RESOURCE_NAME
+ )
+ search_alerts_mock.assert_called_once_with(request=_TEST_SEARCH_ALERTS_REQUEST)
+
+ @pytest.mark.usefixtures("create_model_monitor_mock", "delete_model_monitor_mock")
+ @pytest.mark.parametrize("force", [True, False])
+ def test_delete_model_monitor(self, delete_model_monitor_mock, force):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitor = ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ )
+ test_model_monitor.delete(force=force)
+ delete_model_monitor_mock.assert_called_once_with(
+ request=gca_model_monitoring_service.DeleteModelMonitorRequest(
+ name=_TEST_MODEL_MONITOR_RESOURCE_NAME, force=force
+ )
+ )
+
+ @pytest.mark.usefixtures(
+ "create_model_monitoring_job_mock", "get_model_monitoring_job_mock"
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_create_model_monitoring_job(self, create_model_monitoring_job_mock, sync):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitoring_job = ModelMonitoringJob.create(
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ model_monitor_name=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ baseline_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ explanation_spec=explanation.ExplanationSpec(
+ parameters=explanation.ExplanationParameters(top_k=10)
+ ),
+ sync=sync,
+ )
+
+ if not sync:
+ test_model_monitoring_job.wait()
+
+ create_model_monitoring_job_mock.assert_called_once_with(
+ request=gca_model_monitoring_service.CreateModelMonitoringJobRequest(
+ parent=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ model_monitoring_job=_TEST_CREATE_MODEL_MONITORING_JOB_OBJ,
+ )
+ )
+
+ @pytest.mark.usefixtures(
+ "create_model_monitor_mock",
+ "create_model_monitoring_job_mock",
+ "delete_model_monitoring_job_mock",
+ "get_model_monitoring_job_mock",
+ )
+ def test_delete_model_monitoring_job(self, delete_model_monitoring_job_mock):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_job = ModelMonitoringJob.create(
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ model_monitor_name=_TEST_MODEL_MONITOR_RESOURCE_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ baseline_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ sync=True,
+ )
+ test_job.delete()
+ delete_model_monitoring_job_mock.assert_called_once_with(
+ name=_TEST_MODEL_MONITORING_JOB_RESOURCE_NAME
+ )
+
+ @pytest.mark.usefixtures(
+ "create_model_monitor_mock",
+ "get_model_monitoring_job_mock",
+ "create_model_monitoring_job_mock",
+ )
+ def test_get_model_monitoring_job(self):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+ test_model_monitor = ModelMonitor.create(
+ training_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_BASELINE_RESOURCE
+ ),
+ model_name=_TEST_MODEL_NAME,
+ model_version_id=_TEST_MODEL_VERSION_ID,
+ display_name=_TEST_MODEL_MONITOR_DISPLAY_NAME,
+ tabular_objective_spec=ml_monitoring.spec.TabularObjective(
+ feature_drift_spec=ml_monitoring.spec.DataDriftSpec(
+ default_categorical_alert_threshold=0.1,
+ default_numeric_alert_threshold=0.2,
+ ),
+ ),
+ output_spec=ml_monitoring.spec.OutputSpec(gcs_base_dir=_TEST_OUTPUT_PATH),
+ notification_spec=ml_monitoring.spec.NotificationSpec(
+ user_emails=[_TEST_NOTIFICATION_EMAIL]
+ ),
+ )
+ test_model_monitor.run(
+ display_name=_TEST_MODEL_MONITORING_JOB_DISPLAY_NAME,
+ target_dataset=ml_monitoring.spec.MonitoringInput(
+ vertex_dataset=_TEST_TARGET_RESOURCE
+ ),
+ sync=True,
+ )
+ test_model_monitoring_job = test_model_monitor.get_model_monitoring_job(
+ model_monitoring_job_name=_TEST_MODEL_MONITORING_JOB_RESOURCE_NAME
+ )
+ assert isinstance(test_model_monitoring_job, ModelMonitoringJob)
+
+
+# TODO: Add unit tests for visualization methods.
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_vertexai.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_vertexai.py
new file mode 100644
index 0000000000000000000000000000000000000000..53f03a332eec59c0d22d990304911064f26f88a0
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/test_vertexai.py
@@ -0,0 +1,79 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Unit tests for generative model tuning."""
+# pylint: disable=protected-access,bad-continuation,g-import-not-at-top,reimported
+
+import sys
+from unittest import mock
+
+import pytest
+
+
+def test_vertexai_has_lazy_preview():
+ """Tests that `vertexai.preview` works but uis lazy-loaded."""
+ # * Cleaning up the `vertexai` module state.
+ # Important: If we run this test after some other tests that import the
+ # `vertexai.preview` module, then Python's importing mechanism adds the
+ # imported module to the `vertexai` as the `preview` attribute,
+ # and this prevents the `__getattr__` from being called.
+ # So to test that `__getattr__` is actually called, we must delete that
+ # attribute (which might or might not exist). Note that using `hasattr`
+ # triggers `__getattr__`.
+ # Removing the `vertexai.preview` attribute.
+ import vertexai as vertexai1
+
+ try:
+ delattr(vertexai1, "preview")
+ except AttributeError:
+ pass
+
+ main_module_name = vertexai1.__name__ # == "vertexai"
+ preview_module_name = main_module_name + ".preview"
+ del vertexai1
+
+ with mock.patch.dict(sys.modules):
+ # First we must remove the cached modules.
+ # Otherwise, importing a module is a no-op.
+ # https://docs.python.org/3/reference/import.html#the-module-cache
+ # Removal is harder due to Copybara transforms.
+ # Note that we must delete the entries, not set them to None.
+ try:
+ del sys.modules[main_module_name]
+ except KeyError:
+ pass
+ try:
+ del sys.modules[preview_module_name]
+ except KeyError:
+ pass
+
+ # * Verifying that importing `vertexai` does not import the `preview` module.
+ import vertexai
+
+ assert preview_module_name not in sys.modules
+
+ # * Also verifying that the `preview` attribute does not exist on the module,
+ # because if the attribute exists, then accessing `preview` will
+ # skip `__getattr__` (and importing).
+ # Important: We cannot use `hasattr` to check whether the `preview`
+ # attribute exist since that triggers `__getattr__` which creates the attribute.
+ # We should use `object.__getattribute__` for checking instead.
+ with pytest.raises(AttributeError):
+ object.__getattribute__(vertexai, "preview")
+
+ # * Verifying that it's still possible to access `vertexai.preview`
+ assert dir(vertexai.preview)
+
+ # * Verifying that accessing `vertexai.preview` caused the module to be loaded.
+ assert preview_module_name in sys.modules
diff --git a/testbed/googleapis__python-aiplatform/tests/unit/vertexai/tuning/test_tuning.py b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/tuning/test_tuning.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d6e74bb59201899aa5f226d04f255dbf232a56e
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/tests/unit/vertexai/tuning/test_tuning.py
@@ -0,0 +1,320 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Unit tests for generative model tuning."""
+# pylint: disable=protected-access,bad-continuation
+
+import copy
+import datetime
+import importlib
+from typing import Dict, Iterable
+from unittest import mock
+import uuid
+
+from google import auth
+from google.auth import credentials as auth_credentials
+from google.cloud import aiplatform
+import vertexai
+from google.cloud.aiplatform import compat
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import utils as aiplatform_utils
+from google.cloud.aiplatform.metadata import experiment_resources
+from google.cloud.aiplatform_v1beta1.services import gen_ai_tuning_service
+from google.cloud.aiplatform_v1beta1.types import job_state
+from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job
+from vertexai.preview import tuning
+from vertexai.preview.tuning import (
+ sft as preview_supervised_tuning,
+)
+from vertexai.tuning import sft as supervised_tuning
+from vertexai.tuning import _distillation
+from google.cloud import storage
+
+import pytest
+
+from unittest.mock import patch
+
+from google.rpc import status_pb2
+
+
+_TEST_PROJECT = "test-project"
+_TEST_LOCATION = "us-central1"
+
+
+@pytest.fixture(scope="module")
+def google_auth_mock():
+ with mock.patch.object(auth, "default") as auth_mock:
+ auth_mock.return_value = (
+ auth_credentials.AnonymousCredentials(),
+ "test-project",
+ )
+ yield auth_mock
+
+
+_global_tuning_jobs: Dict[str, gca_tuning_job.TuningJob] = {}
+
+
+class MockGenAiTuningServiceClient(gen_ai_tuning_service.GenAiTuningServiceClient):
+ @property
+ def _tuning_jobs(self) -> Dict[str, gca_tuning_job.TuningJob]:
+ return _global_tuning_jobs
+
+ def create_tuning_job(
+ self,
+ *,
+ parent: str,
+ tuning_job: gca_tuning_job.TuningJob,
+ **_,
+ ) -> gca_tuning_job.TuningJob:
+ tuning_job = copy.deepcopy(tuning_job)
+ resource_id = uuid.uuid4().hex
+ resource_name = f"{parent}/tuningJobs/{resource_id}"
+ tuning_job.name = resource_name
+ current_time = datetime.datetime.now(datetime.timezone.utc)
+ tuning_job.create_time = current_time
+ tuning_job.update_time = current_time
+ tuning_job.state = job_state.JobState.JOB_STATE_PENDING
+ self._tuning_jobs[resource_name] = tuning_job
+ return tuning_job
+
+ def _progress_tuning_job(self, name: str):
+ tuning_job: gca_tuning_job.TuningJob = self._tuning_jobs[name]
+ current_time = datetime.datetime.now(datetime.timezone.utc)
+ training_dataset_uri = (
+ tuning_job.supervised_tuning_spec.training_dataset_uri
+ or tuning_job.distillation_spec.training_dataset_uri
+ )
+ if tuning_job.state == job_state.JobState.JOB_STATE_PENDING:
+ if "invalid_dataset" in training_dataset_uri:
+ tuning_job.state = job_state.JobState.JOB_STATE_FAILED
+ tuning_job.error = status_pb2.Status(
+ code=400, message="Invalid dataset."
+ )
+ else:
+ tuning_job.state = job_state.JobState.JOB_STATE_RUNNING
+ tuning_job.update_time = current_time
+ elif tuning_job.state == job_state.JobState.JOB_STATE_RUNNING:
+ parent = tuning_job.name.partition("/tuningJobs/")[0]
+ tuning_job.state = job_state.JobState.JOB_STATE_SUCCEEDED
+ experiment_id = uuid.uuid4().hex
+ tuned_model_id = uuid.uuid4().hex
+ tuned_model_endpoint_id = uuid.uuid4().hex
+ tuning_job.experiment = (
+ f"{parent}/metadataStores/default/contexts/{experiment_id}"
+ )
+ tuning_job.tuned_model = gca_tuning_job.TunedModel(
+ model=f"{parent}/models/{tuned_model_id}",
+ endpoint=f"{parent}/endpoints/{tuned_model_endpoint_id}",
+ )
+ tuning_job.end_time = current_time
+ tuning_job.update_time = current_time
+ else:
+ pass
+
+ def get_tuning_job(self, *, name: str, **_) -> gca_tuning_job.TuningJob:
+ tuning_job = self._tuning_jobs[name]
+ tuning_job = copy.deepcopy(tuning_job)
+ self._progress_tuning_job(name)
+
+ return tuning_job
+
+ def list_tuning_jobs(
+ self, *, parent: str, **_
+ ) -> Iterable[gca_tuning_job.TuningJob]:
+ return [
+ tuning_job
+ for name, tuning_job in self._tuning_jobs.items()
+ if name.startswith(parent + "/")
+ ]
+
+ def cancel_tuning_job(self, *, name: str, **_) -> None:
+ tuning_job = self._tuning_jobs[name]
+ assert tuning_job.state in (
+ job_state.JobState.JOB_STATE_RUNNING,
+ job_state.JobState.JOB_STATE_PENDING,
+ )
+ tuning_job.state = job_state.JobState.JOB_STATE_CANCELLED
+
+
+class MockTuningJobClientWithOverride(aiplatform_utils.ClientWithOverride):
+ _is_temporary = False
+ _default_version = compat.V1BETA1
+ _version_map = (
+ (compat.V1, MockGenAiTuningServiceClient),
+ (compat.V1BETA1, MockGenAiTuningServiceClient),
+ )
+
+
+@pytest.fixture()
+def experiment_init_mock():
+ with patch.object(experiment_resources.Experiment, "__init__") as experiment_mock:
+ experiment_mock.return_value = None
+ yield experiment_mock
+
+
+@pytest.mark.usefixtures("google_auth_mock", "experiment_init_mock")
+class TestgenerativeModelTuning:
+ """Unit tests for generative model tuning."""
+
+ def setup_method(self):
+ importlib.reload(initializer)
+ importlib.reload(aiplatform)
+ importlib.reload(vertexai)
+
+ vertexai.init(
+ project=_TEST_PROJECT,
+ location=_TEST_LOCATION,
+ staging_bucket="gs://test-bucket",
+ )
+
+ def teardown_method(self):
+ initializer.global_pool.shutdown(wait=True)
+
+ @mock.patch.object(
+ target=tuning.TuningJob,
+ attribute="client_class",
+ new=MockTuningJobClientWithOverride,
+ )
+ @pytest.mark.parametrize(
+ "supervised_tuning",
+ [supervised_tuning, preview_supervised_tuning],
+ )
+ def test_genai_tuning_service_supervised_tuning_tune_model(
+ self, supervised_tuning: supervised_tuning
+ ):
+ sft_tuning_job = supervised_tuning.train(
+ source_model="gemini-1.0-pro-001",
+ train_dataset="gs://some-bucket/some_dataset.jsonl",
+ # Optional:
+ validation_dataset="gs://some-bucket/some_dataset.jsonl",
+ epochs=300,
+ learning_rate_multiplier=1.0,
+ adapter_size=8,
+ )
+ assert sft_tuning_job.state == job_state.JobState.JOB_STATE_PENDING
+ assert not sft_tuning_job.has_ended
+ assert not sft_tuning_job.has_succeeded
+
+ # Refreshing the job
+ sft_tuning_job.refresh()
+ assert sft_tuning_job.state == job_state.JobState.JOB_STATE_PENDING
+ assert not sft_tuning_job.has_ended
+ assert not sft_tuning_job.has_succeeded
+
+ # Refreshing the job
+ sft_tuning_job.refresh()
+ assert sft_tuning_job.state == job_state.JobState.JOB_STATE_RUNNING
+ assert not sft_tuning_job.has_ended
+ assert not sft_tuning_job.has_succeeded
+
+ # Refreshing the job
+ sft_tuning_job.refresh()
+ assert sft_tuning_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
+ assert sft_tuning_job.has_ended
+ assert sft_tuning_job.has_succeeded
+ assert sft_tuning_job._experiment_name
+ assert sft_tuning_job.tuned_model_name
+ assert sft_tuning_job.tuned_model_endpoint_name
+
+ @mock.patch.object(
+ target=tuning.TuningJob,
+ attribute="client_class",
+ new=MockTuningJobClientWithOverride,
+ )
+ @pytest.mark.parametrize(
+ "supervised_tuning",
+ [supervised_tuning, preview_supervised_tuning],
+ )
+ def test_genai_tuning_service_encryption_spec(
+ self, supervised_tuning: supervised_tuning
+ ):
+ """Test that the global encryption spec propagates to the tuning job."""
+ vertexai.init(encryption_spec_key_name="test-key")
+
+ sft_tuning_job = supervised_tuning.train(
+ source_model="gemini-1.0-pro-001",
+ train_dataset="gs://some-bucket/some_dataset.jsonl",
+ )
+ assert sft_tuning_job.encryption_spec.kms_key_name == "test-key"
+
+ @mock.patch.object(
+ target=tuning.TuningJob,
+ attribute="client_class",
+ new=MockTuningJobClientWithOverride,
+ )
+ @pytest.mark.parametrize(
+ "supervised_tuning",
+ [supervised_tuning, preview_supervised_tuning],
+ )
+ def test_genai_tuning_service_service_account(
+ self, supervised_tuning: supervised_tuning
+ ):
+ """Test that the service account propagates to the tuning job."""
+ vertexai.init(service_account="test-sa@test-project.iam.gserviceaccount.com")
+
+ sft_tuning_job = supervised_tuning.train(
+ source_model="gemini-1.0-pro-002",
+ train_dataset="gs://some-bucket/some_dataset.jsonl",
+ )
+ assert (
+ sft_tuning_job.service_account
+ == "test-sa@test-project.iam.gserviceaccount.com"
+ )
+
+ @mock.patch.object(
+ target=tuning.TuningJob,
+ attribute="client_class",
+ new=MockTuningJobClientWithOverride,
+ )
+ @mock.patch.object(
+ target=storage.Bucket,
+ attribute="exists",
+ new=lambda _: True,
+ )
+ def test_genai_tuning_service_distillation_distill_model(self):
+ distillation_train = _distillation.distill_model
+
+ tuning_job = distillation_train(
+ student_model="gemma",
+ teacher_model="gemini-1.0-pro-001",
+ training_dataset="gs://some-bucket/some_dataset.jsonl",
+ # Optional:
+ validation_dataset="gs://some-bucket/some_dataset.jsonl",
+ epoch_count=300,
+ learning_rate_multiplier=1.0,
+ )
+ assert tuning_job.state == job_state.JobState.JOB_STATE_PENDING
+ assert not tuning_job.has_ended
+ assert not tuning_job.has_succeeded
+
+ # Refreshing the job
+ tuning_job.refresh()
+ assert tuning_job.state == job_state.JobState.JOB_STATE_PENDING
+ assert not tuning_job.has_ended
+ assert not tuning_job.has_succeeded
+
+ # Refreshing the job
+ tuning_job.refresh()
+ assert tuning_job.state == job_state.JobState.JOB_STATE_RUNNING
+ assert not tuning_job.has_ended
+ assert not tuning_job.has_succeeded
+
+ # Refreshing the job
+ tuning_job.refresh()
+ assert tuning_job.state == job_state.JobState.JOB_STATE_SUCCEEDED
+ assert tuning_job.has_ended
+ assert tuning_job.has_succeeded
+ assert tuning_job.tuned_model_name
diff --git a/testbed/googleapis__python-aiplatform/vertexai/__init__.py b/testbed/googleapis__python-aiplatform/vertexai/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a6fcc8894ae1a96864b1053f923db5e09aaf4ac
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/__init__.py
@@ -0,0 +1,42 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""The vertexai module."""
+
+import importlib
+
+from google.cloud.aiplatform import version as aiplatform_version
+
+__version__ = aiplatform_version.__version__
+
+from google.cloud.aiplatform import init
+
+__all__ = [
+ "init",
+ "preview",
+]
+
+
+def __getattr__(name):
+ # Lazy importing the preview submodule
+ # See https://peps.python.org/pep-0562/
+ if name == "preview":
+ # We need to import carefully to avoid `RecursionError`.
+ # This won't work since it causes `RecursionError`:
+ # `from vertexai import preview`
+ # This won't work due to Copybara lacking a transform:
+ # `import google.cloud.aiplatform.vertexai.preview as vertexai_preview`
+ return importlib.import_module(".preview", __name__)
+
+ raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/__init__.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f71105be8bd2293260ffe2b91c692a76eac1cc79
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/__init__.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Vertex Gen AI Evaluation Service Module."""
+
+from vertexai.evaluation import _base
+from vertexai.evaluation import eval_task
+from vertexai.evaluation import metrics
+from vertexai.evaluation import prompt_template
+
+
+EvalResult = _base.EvalResult
+EvalTask = eval_task.EvalTask
+PairwiseMetric = metrics.PairwiseMetric
+PointwiseMetric = metrics.PointwiseMetric
+CustomMetric = metrics.CustomMetric
+Rouge = metrics.Rouge
+PromptTemplate = prompt_template.PromptTemplate
+PairwiseMetricPromptTemplate = metrics.PairwiseMetricPromptTemplate
+PointwiseMetricPromptTemplate = metrics.PointwiseMetricPromptTemplate
+MetricPromptTemplateExamples = metrics.MetricPromptTemplateExamples
+
+__all__ = [
+ "EvalTask",
+ "EvalResult",
+ "PairwiseMetric",
+ "PointwiseMetric",
+ "CustomMetric",
+ "Rouge",
+ "PromptTemplate",
+ "PairwiseMetricPromptTemplate",
+ "PointwiseMetricPromptTemplate",
+ "MetricPromptTemplateExamples",
+]
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/_base.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..25a1896eeca7e73b50794083ae15b7c7aa601031
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/_base.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Base classes for evaluation."""
+
+
+import dataclasses
+from typing import Dict, List, Optional, Union, TYPE_CHECKING
+
+from google.cloud.aiplatform_v1.services import (
+ evaluation_service as gapic_evaluation_services,
+)
+from vertexai.evaluation.metrics import (
+ _base as metrics_base,
+)
+
+if TYPE_CHECKING:
+ import pandas as pd
+
+
+@dataclasses.dataclass
+class EvaluationRunConfig:
+ """Evaluation Run Configurations.
+
+ Attributes:
+ dataset: The dataset to evaluate.
+ metrics: The list of metric names, or Metric instances to evaluate.
+ metric_column_mapping: An optional dictionary column mapping that
+ overrides the metric prompt template input variable names with
+ mapped the evaluation dataset column names, used during evaluation.
+ For example, if the input_variables of the metric prompt template
+ are ["context", "reference"], the metric_column_mapping can be
+ {
+ "context": "news_context",
+ "reference": "ground_truth",
+ "response": "model_1_response"
+ }
+ if the dataset has columns "news_context", "ground_truth" and
+ "model_1_response".
+ client: The evaluation service client.
+ evaluation_service_qps: The custom QPS limit for the evaluation service.
+ retry_timeout: How long to keep retrying the evaluation requests, in seconds.
+ """
+
+ dataset: "pd.DataFrame"
+ metrics: List[Union[str, metrics_base._Metric]]
+ metric_column_mapping: Dict[str, str]
+ client: gapic_evaluation_services.EvaluationServiceClient
+ evaluation_service_qps: float
+ retry_timeout: float
+
+ def validate_dataset_column(self, column_name: str) -> None:
+ """Validates that the column names in the column map are in the dataset.
+
+ Args:
+ column_name: The column name to validate.
+
+ Raises:
+ KeyError: If any of the column names are not in the dataset.
+ """
+ if (
+ self.metric_column_mapping.get(column_name, column_name)
+ not in self.dataset.columns
+ ):
+ raise KeyError(
+ "Required column"
+ f" `{self.metric_column_mapping.get(column_name, column_name)}`"
+ " not found in the evaluation dataset. The columns in the"
+ f" evaluation dataset are {list(self.dataset.columns)}."
+ )
+
+
+@dataclasses.dataclass
+class EvalResult:
+ """Evaluation result.
+
+ Attributes:
+ summary_metrics: A dictionary of summary evaluation metrics for an evaluation run.
+ metrics_table: A pandas.DataFrame table containing evaluation dataset inputs,
+ predictions, explanations, and metric results per row.
+ metadata: The metadata for the evaluation run.
+ """
+
+ summary_metrics: Dict[str, float]
+ metrics_table: Optional["pd.DataFrame"] = None
+ metadata: Optional[Dict[str, str]] = None
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/_evaluation.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..13af4f29953278ca9eae585b3c4cf8f0e0877849
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/_evaluation.py
@@ -0,0 +1,961 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Evaluation Orchestration Library."""
+
+import collections
+from concurrent import futures
+import copy
+import time
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, TYPE_CHECKING, Union
+
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform_v1beta1.types import (
+ content as gapic_content_types,
+)
+from vertexai import generative_models
+from vertexai.evaluation import _base as evaluation_base
+from vertexai.evaluation import constants
+from vertexai.evaluation import (
+ prompt_template as prompt_template_base,
+)
+from vertexai.evaluation import utils
+from vertexai.evaluation.metrics import (
+ _base as metrics_base,
+)
+from vertexai.evaluation.metrics import (
+ _instance_evaluation,
+)
+from vertexai.evaluation.metrics import (
+ metric_prompt_template_examples,
+)
+from vertexai.evaluation.metrics import pairwise_metric
+from vertexai.evaluation.metrics import pointwise_metric
+
+
+try:
+ from tqdm import tqdm
+except ImportError:
+ raise ImportError(
+ 'tqdm is not installed. Please install the SDK using "pip install'
+ ' google-cloud-aiplatform[evaluation]"'
+ )
+
+if TYPE_CHECKING:
+ import pandas as pd
+
+_LOGGER = base.Logger(__name__)
+_SUCCESSFUL_FINISH_REASONS = [
+ gapic_content_types.Candidate.FinishReason.STOP,
+ gapic_content_types.Candidate.FinishReason.MAX_TOKENS,
+ # Many responses have this finish reason
+ gapic_content_types.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED,
+]
+
+
+def _validate_metrics(metrics: List[Union[str, metrics_base._Metric]]) -> None:
+ """Validates the metrics list.
+
+ Args:
+ metrics: The list of metric names, or Metric instances to
+ evaluate.
+
+ Raises:
+ ValueError: If metric is empty or if multiple metrics of the
+ same metric name are found.
+ """
+ if not metrics:
+ raise ValueError("Metrics cannot be empty.")
+
+ seen_strings = set()
+ seen_metric_names = set()
+
+ for metric in metrics:
+ if isinstance(metric, str):
+ if metric in seen_strings:
+ raise ValueError(f"Duplicate string metric name found: '{metric}'")
+ seen_strings.add(metric)
+ elif isinstance(metric, metrics_base._Metric):
+ if metric.metric_name in seen_metric_names:
+ raise ValueError(
+ "Duplicate Metric instances of the same metric name found: "
+ f"'{metric.metric_name}'"
+ )
+ seen_metric_names.add(metric.metric_name)
+
+
+def _validate_metric_column_map(
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+):
+ """Validates the column map for metric prompt template usage."""
+ for metric in evaluation_run_config.metrics:
+ if isinstance(
+ metric, metrics_base._ModelBasedMetric # pylint: disable=protected-access
+ ):
+ for variable in prompt_template_base.PromptTemplate(
+ metric.metric_prompt_template
+ ).variables:
+ if (
+ evaluation_run_config.metric_column_mapping.get(variable, "")
+ not in evaluation_run_config.dataset.columns
+ ):
+ raise ValueError(
+ f"Cannot find the `{variable}` column in the evaluation"
+ " dataset to fill the metric prompt template for"
+ f" `{str(metric)}` metric. Please check if the column is"
+ " present in the evaluation dataset, or provide a"
+ " key-value pair in `metric_column_mapping` parameter"
+ " of `EvalTask` to map it to a different column name."
+ " The evaluation dataset columns are"
+ f" {list(evaluation_run_config.dataset.columns)}."
+ )
+
+
+def _validate_dataset(
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+) -> None:
+ """Validates the required columns exists in the dataset."""
+ _validate_response_column_required(evaluation_run_config)
+ _validate_reference_column_required(evaluation_run_config)
+ _validate_reference_or_source_column_required(evaluation_run_config)
+
+
+def _validate_response_column_required(
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+) -> None:
+ """Validates the response column exists in the dataset."""
+ for metric in evaluation_run_config.metrics:
+ if metric in constants.Metric.AUTOMATIC_METRIC_LIST or isinstance(
+ metric, metrics_base._TranslationMetric # pylint: disable=protected-access
+ ):
+ _validate_column_provided(
+ evaluation_run_config,
+ constants.Dataset.MODEL_RESPONSE_COLUMN,
+ )
+
+
+def _validate_reference_column_required(
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+) -> None:
+ """Validates the reference column exists in the dataset."""
+ if set(evaluation_run_config.metrics).intersection(
+ set(constants.Metric.AUTOMATIC_METRIC_LIST)
+ ):
+ _validate_column_provided(
+ evaluation_run_config,
+ constants.Dataset.REFERENCE_COLUMN,
+ )
+
+
+def _validate_column_provided(
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+ column_name: str,
+) -> None:
+ """Validates the required column exist in the dataset."""
+ if column_name not in evaluation_run_config.metric_column_mapping:
+ evaluation_run_config.metric_column_mapping[column_name] = column_name
+ evaluation_run_config.validate_dataset_column(column_name)
+
+
+def _validate_reference_or_source_column_required(
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+) -> None:
+ """Validates one of reference or source columns exist in the dataset."""
+ for metric in evaluation_run_config.metrics:
+ if isinstance(
+ metric, metrics_base._TranslationMetric # pylint: disable=protected-access
+ ):
+ # Validate the reference column.
+ # This is optional if source column is provided.
+ try:
+ _validate_column_provided(
+ evaluation_run_config,
+ constants.Dataset.REFERENCE_COLUMN,
+ )
+ except KeyError:
+ # Reference column is optional. Checking for source column.
+ _validate_column_provided(
+ evaluation_run_config,
+ constants.Dataset.SOURCE_COLUMN,
+ )
+
+
+def _compute_custom_metrics(
+ row_dict: Dict[str, Any],
+ custom_metrics: List[metrics_base.CustomMetric],
+ pbar: tqdm,
+ executor: futures.ThreadPoolExecutor,
+) -> Dict[str, Any]:
+ """Computes custom metrics for a row.
+
+ Args:
+ row_dict: A dictionary of an instance in the eval dataset.
+ custom_metrics: A list of CustomMetrics.
+ pbar: A tqdm progress bar.
+ executor: A thread pool executor.
+
+ Returns:
+ A dictionary of an instance containing custom metric results.
+
+ Raises:
+ KeyError: If the custom metric function does not return a valid output.
+ """
+ futures_by_metric = collections.defaultdict(list)
+ for custom_metric in custom_metrics:
+ future = executor.submit(custom_metric.metric_function, row_dict)
+ future.add_done_callback(lambda _: pbar.update(1))
+ futures_by_metric[custom_metric].append(future)
+
+ for custom_metric, futures_list in futures_by_metric.items():
+ for future in futures_list:
+ metric_output = future.result()
+ try:
+ row_dict[
+ f"{custom_metric.name}/{constants.MetricResult.SCORE_KEY}"
+ ] = metric_output[custom_metric.name]
+ except KeyError:
+ raise KeyError(
+ f"Custom metric score `{custom_metric.name}` not found in"
+ f" the metric output {metric_output}. Please make sure the"
+ " custom metric function is valid, and the output"
+ f" dictionary uses `{custom_metric.name}` as the key for"
+ " metric score."
+ )
+ # Include additional metric results like explanation.
+ for key, value in metric_output.items():
+ if key != custom_metric.name:
+ row_dict[f"{custom_metric.name}/{key}"] = value
+ return row_dict
+
+
+def _separate_custom_metrics(
+ metrics: List[Union[str, metrics_base._Metric]],
+) -> Tuple[List[Union[str, metrics_base._Metric]], List[metrics_base.CustomMetric],]:
+ """Separates the metrics list into API and custom metrics."""
+ custom_metrics = []
+ api_metrics = []
+ for metric in metrics:
+ if isinstance(metric, metrics_base.CustomMetric):
+ custom_metrics.append(metric)
+ else:
+ api_metrics.append(metric)
+ return api_metrics, custom_metrics
+
+
+def _aggregate_summary_metrics(
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+ metrics_table: "pd.DataFrame",
+) -> Dict[str, Any]:
+ """Computes summary metrics.
+
+ Args:
+ evaluation_run_config: Evaluation Run Configurations.
+ metrics_table: A dataframe containing per-instance metrics results.
+
+ Returns:
+ A dictionary containing summary metrics results and statistics.
+ """
+ summary_metrics = {}
+ summary_metrics[constants.MetricResult.ROW_COUNT_KEY] = metrics_table.shape[0]
+
+ for metric in evaluation_run_config.metrics:
+ try:
+ if isinstance(metric, pairwise_metric.PairwiseMetric):
+ summary_metrics[f"{metric.metric_name}/candidate_model_win_rate"] = (
+ metrics_table[
+ f"{metric.metric_name}/{constants.MetricResult.PAIRWISE_CHOICE_KEY}"
+ ]
+ == "CANDIDATE"
+ ).mean()
+ summary_metrics[f"{metric.metric_name}/baseline_model_win_rate"] = (
+ metrics_table[
+ f"{metric.metric_name}/{constants.MetricResult.PAIRWISE_CHOICE_KEY}"
+ ]
+ == "BASELINE"
+ ).mean()
+ else:
+ summary_metrics[f"{str(metric)}/mean"] = metrics_table.loc[
+ :, f"{str(metric)}/{constants.MetricResult.SCORE_KEY}"
+ ].mean()
+ summary_metrics[f"{str(metric)}/std"] = metrics_table.loc[
+ :, f"{str(metric)}/{constants.MetricResult.SCORE_KEY}"
+ ].std()
+ except (ValueError, KeyError) as e:
+ _LOGGER.warning(
+ f"Failed to compute metric statistics for `{metric}` metric."
+ f"{type(e).__name__}: {e}"
+ )
+ continue
+ return summary_metrics
+
+
+def _generate_content_text_response(
+ model: generative_models.GenerativeModel, prompt: str
+) -> str:
+ """Generates a text response from Gemini model from a text prompt.
+
+ Args:
+ model: The Gemini model instance.
+ prompt: The prompt to send to the model.
+
+ Returns:
+ The text response from the model.
+
+ Raises:
+ RuntimeError if the prompt or the response for the prompt is blocked for
+ safety reasons.
+ """
+ response = model.generate_content(prompt)
+ try:
+ if not response.candidates:
+ raise RuntimeError(
+ f"The model response was blocked due to"
+ f" {response._raw_response.prompt_feedback.block_reason.name}.\n"
+ f"Blocked reason message:"
+ f" {response._raw_response.prompt_feedback.block_reason_message}.\n"
+ "The input prompt may be blocked for safety reasons.",
+ f"Prompt: {prompt}.",
+ )
+ else:
+ candidate = response.candidates[0]
+ if candidate.finish_reason not in _SUCCESSFUL_FINISH_REASONS:
+ raise RuntimeError(
+ "The model response did not finish"
+ " successfully.\n"
+ f"Finish reason: {candidate.finish_reason}.\n"
+ f"Finish message: {candidate.finish_message}.\n"
+ f"Safety ratings: {candidate.safety_ratings}.\n"
+ "Please adjust the model safety_settings, or"
+ " try a different prompt."
+ )
+ return response.candidates[0].content.parts[0].text
+ except Exception:
+ raise RuntimeError(
+ f"Failed to generate response candidates from Gemini model"
+ f" {model._model_name}.\n"
+ f"Response: {response}.\n"
+ f"Prompt: {prompt}."
+ )
+
+
+def _generate_responses_from_gemini_model(
+ model: generative_models.GenerativeModel,
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+ is_baseline_model: bool = False,
+) -> None:
+ """Generates responses from Gemini model.
+
+ Args:
+ model: The Gemini model instance.
+ evaluation_run_config: Evaluation Run Configurations.
+ is_baseline_model: Whether the model is a baseline model for PairwiseMetric.
+ """
+ # Ensure thread safety and avoid race conditions.
+ df = evaluation_run_config.dataset.copy()
+
+ _LOGGER.info(
+ f"Generating a total of {evaluation_run_config.dataset.shape[0]} "
+ f"responses from Gemini model {model._model_name.split('/')[-1]}."
+ )
+ tasks = []
+ with tqdm(total=len(df)) as pbar:
+ with futures.ThreadPoolExecutor(max_workers=constants.MAX_WORKERS) as executor:
+ for _, row in df.iterrows():
+ task = executor.submit(
+ _generate_content_text_response,
+ prompt=row[constants.Dataset.PROMPT_COLUMN],
+ model=model,
+ )
+ task.add_done_callback(lambda _: pbar.update(1))
+ tasks.append(task)
+ responses = [future.result() for future in tasks]
+ if is_baseline_model:
+ evaluation_run_config.dataset = df.assign(baseline_model_response=responses)
+ else:
+ evaluation_run_config.dataset = df.assign(response=responses)
+
+ _LOGGER.info(
+ f"All {evaluation_run_config.dataset.shape[0]} responses are successfully"
+ f" generated from Gemini model {model._model_name.split('/')[-1]}."
+ )
+
+
+def _generate_response_from_custom_model_fn(
+ model_fn: Callable[[str], str],
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+ is_baseline_model: bool = False,
+) -> None:
+ """Generates responses from a custom model function.
+
+ Args:
+ model_fn: The custom model function.
+ evaluation_run_config: Evaluation Run Configurations.
+ is_baseline_model: Whether the model is a baseline model for
+ PairwiseMetric.
+ """
+ eval_dataset = evaluation_run_config.dataset.copy()
+ max_workers = 5
+
+ _LOGGER.info(
+ f"Generating a total of {evaluation_run_config.dataset.shape[0]} "
+ "responses from the custom model function."
+ )
+ tasks = []
+ try:
+ with tqdm(total=len(eval_dataset)) as pbar:
+ with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+ for _, row in eval_dataset.iterrows():
+ task = executor.submit(
+ model_fn, row[constants.Dataset.PROMPT_COLUMN]
+ )
+ task.add_done_callback(lambda _: pbar.update(1))
+ tasks.append(task)
+ except (ValueError, IndexError) as e:
+ _LOGGER.warning(f"Failed to generate response from model function: {e}")
+
+ responses = [task.result() for task in tasks]
+ if is_baseline_model:
+ evaluation_run_config.dataset = eval_dataset.assign(
+ baseline_model_response=responses
+ )
+ else:
+ evaluation_run_config.dataset = eval_dataset.assign(response=responses)
+
+ _LOGGER.info(
+ f"All {evaluation_run_config.dataset.shape[0]} responses are successfully"
+ " generated from the custom model function."
+ )
+
+
+def _run_model_inference(
+ model: Union[generative_models.GenerativeModel, Callable[[str], str]],
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+ response_column_name: str = constants.Dataset.MODEL_RESPONSE_COLUMN,
+) -> None:
+ """Runs model inference on dataset for evaluation.
+
+ Args:
+ model: The model or baseline model or a custom model function to
+ generate responses to evaluate.
+ evaluation_run_config: Evaluation Run Configurations.
+ response_column_name: Column name key in metric_column_mapping. Value is
+ constants.Dataset.MODEL_RESPONSE_COLUMN or
+ constants.Dataset.BASELINE_MODEL_RESPONSE_COLUMN.
+
+ Raises:
+ ValueError: If the model or baseline model is not supported.
+ """
+ is_baseline_model = (
+ response_column_name == constants.Dataset.BASELINE_MODEL_RESPONSE_COLUMN
+ )
+ if model:
+ if response_column_name not in evaluation_run_config.metric_column_mapping:
+ if constants.Dataset.PROMPT_COLUMN in evaluation_run_config.dataset.columns:
+ t1 = time.perf_counter()
+ if isinstance(model, generative_models.GenerativeModel):
+ _generate_responses_from_gemini_model(
+ model, evaluation_run_config, is_baseline_model
+ )
+ elif callable(model):
+ _generate_response_from_custom_model_fn(
+ model, evaluation_run_config, is_baseline_model
+ )
+ else:
+ raise ValueError(
+ f"Unsupported model or baseline model type: {type(model)}"
+ )
+ t2 = time.perf_counter()
+ _LOGGER.info(f"Multithreaded Batch Inference took: {t2 - t1} seconds.")
+ evaluation_run_config.metric_column_mapping[
+ response_column_name
+ ] = response_column_name
+ else:
+ raise ValueError(
+ "Missing required input `prompt` column to start model inference."
+ " Please provide a `prompt_template` parameter in"
+ " `EvalTask.evaluate()` function if you want to assemble a"
+ " `prompt` column with variables from the dataset, or provide a"
+ " `prompt` column in dataset to directly use as input to"
+ " the model. Mappings in `metric_column_mapping` do not"
+ " apply for model inference and are used for evaluation only."
+ )
+ else:
+ raise ValueError(
+ "The `model` parameter or `baseline_model` in pairwise metric is"
+ " specified, but the evaluation `dataset` contains model response"
+ " column or baseline model response column"
+ f" `{evaluation_run_config.metric_column_mapping[response_column_name]}`"
+ " to perform bring-your-own-response(BYOR) evaluation. If you would"
+ " like to perform evaluation using the dataset with the"
+ " existing model response column or or baseline model response column"
+ f" `{evaluation_run_config.metric_column_mapping[response_column_name]}`,"
+ " please remove `model` parameter in `EvalTask.evaluate()`"
+ " function or `baseline_model` in `PairwiseMetric`."
+ )
+
+
+def _check_variable_columns_exist(
+ dataset: "pd.DataFrame", variable_names_set: Set[str]
+) -> None:
+ """Checks if all variable names exist in the dataset columns.
+
+ Args:
+ dataset: The dataset to evaluate.
+ variable_names_set: A set of variable names.
+
+ Raises:
+ ValueError: If any variable names do not exist in the dataset columns
+ or the prompt template is invalid.
+ """
+ actual_column_names_set = set(dataset.columns)
+ if not variable_names_set.issubset(actual_column_names_set):
+ missing_columns = variable_names_set - actual_column_names_set
+ raise ValueError(
+ "Failed to assemble prompt template: The following column(s) are"
+ f" missing: {', '.join(missing_columns)}. "
+ f"Please verify prompt_template variables {variable_names_set} and "
+ f"evaluation dataset column names {actual_column_names_set}."
+ )
+
+
+def _assemble_prompt_for_dataset(
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+ prompt_template: Union[prompt_template_base.PromptTemplate, str],
+) -> None:
+ """Assembles a prompt column in metrics_table from variable columns.
+
+ Args:
+ evaluation_run_config: Evaluation Run Configurations.
+ prompt_template: A `PromptTemplate` object or a prompt template string
+ with variables that can be assembled from the evaluation dataset. The
+ variables can be represented in curly braces `{variable}`, and
+ must be included in the dataset columns if specified. The variable
+ names cannot contain spaces.
+
+ Returns:
+ The assembled prompt template string to send to the model.
+
+ Raises:
+ ValueError: If any variable names do not exist in the dataset columns
+ or the prompt template is invalid.
+ """
+ if not prompt_template:
+ raise ValueError("Prompt template cannot be an empty string.")
+
+ _LOGGER.info(
+ "Assembling prompts from the `prompt_template`. The `prompt` column in"
+ " the `EvalResult.metrics_table` has the assembled prompts used for model"
+ " response generation."
+ )
+ if isinstance(prompt_template, str):
+ prompt_template = prompt_template_base.PromptTemplate(prompt_template)
+ _check_variable_columns_exist(
+ evaluation_run_config.dataset, prompt_template.variables
+ )
+
+ try:
+ evaluation_run_config.dataset[
+ constants.Dataset.PROMPT_COLUMN
+ ] = evaluation_run_config.dataset.apply(
+ lambda row: str(
+ prompt_template.assemble(
+ **row[list(prompt_template.variables)].astype(str).to_dict(),
+ )
+ ),
+ axis=1,
+ )
+ if (
+ constants.Dataset.PROMPT_COLUMN
+ in evaluation_run_config.metric_column_mapping
+ and evaluation_run_config.metric_column_mapping[
+ constants.Dataset.PROMPT_COLUMN
+ ]
+ != constants.Dataset.PROMPT_COLUMN
+ ):
+ _LOGGER.warning(
+ "The `prompt` column mapping provided in"
+ " `metric_column_mapping` parameter is overwritten by the"
+ " assembled `prompt` column because the `prompt_template`"
+ " parameter is provided. Please verify that you want to use"
+ " the assembled `prompt` column for evaluation."
+ )
+ evaluation_run_config.metric_column_mapping[
+ constants.Dataset.PROMPT_COLUMN
+ ] = constants.Dataset.PROMPT_COLUMN
+ except Exception as e:
+ raise ValueError(
+ f"Failed to assemble prompt template: {e}. Please make sure all"
+ " variables in `prompt_template` are present in the evaluation"
+ f" dataset columns: `{list(evaluation_run_config.dataset.columns)}`."
+ ) from e
+
+
+def _set_metric_table(
+ metric_name: str,
+ metric_results: Any,
+ metrics_table: "pd.DataFrame",
+ metric_result_key: str,
+):
+ """Parses value from metric results to metrics_table."""
+ if metric_result_key == constants.MetricResult.SCORE_KEY:
+ metric_result_items = [
+ result.get(metric_result_key) if isinstance(result, dict) else None
+ for result in metric_results
+ ]
+ else:
+ metric_result_items = [
+ result.get(metric_result_key) if isinstance(result, dict) else "Error"
+ for result in metric_results
+ ]
+ metrics_table[f"{metric_name}/{metric_result_key}"] = metric_result_items
+
+
+def _parse_metric_results_to_dataframe(
+ instance_df: "pd.DataFrame", results: Dict[Union[str, metrics_base._Metric], Any]
+) -> Dict[str, Any]:
+ """Parses metric results to a pandas dataframe.
+
+ Args:
+ instance_df: A dataframe containing per-instance metrics results.
+ results: A dictionary containing metric results.
+
+ Returns:
+ A dataframe containing per-instance metrics results. Each metric result
+ can contain metric score, explanation, and confidence.
+ """
+ try:
+ import pandas as pd
+ except ImportError:
+ raise ImportError(
+ 'Pandas is not installed. Please install the SDK using "pip install'
+ ' google-cloud-aiplatform[evaluation]"'
+ )
+
+ metrics_table = pd.DataFrame(dict(zip(instance_df.columns, instance_df.values.T)))
+ for metric, metric_results in results.items():
+ if isinstance(metric, pointwise_metric.PointwiseMetric):
+ _set_metric_table(
+ metric.metric_name,
+ metric_results,
+ metrics_table,
+ constants.MetricResult.EXPLANATION_KEY,
+ )
+ _set_metric_table(
+ metric.metric_name,
+ metric_results,
+ metrics_table,
+ constants.MetricResult.SCORE_KEY,
+ )
+ elif isinstance(metric, pairwise_metric.PairwiseMetric):
+ _set_metric_table(
+ metric.metric_name,
+ metric_results,
+ metrics_table,
+ constants.MetricResult.EXPLANATION_KEY,
+ )
+ _set_metric_table(
+ metric.metric_name,
+ metric_results,
+ metrics_table,
+ constants.MetricResult.PAIRWISE_CHOICE_KEY,
+ )
+ elif str(metric) in constants.Metric.AUTOMATIC_METRIC_LIST:
+ _set_metric_table(
+ str(metric),
+ metric_results,
+ metrics_table,
+ constants.MetricResult.SCORE_KEY,
+ )
+ elif isinstance(
+ metric, metrics_base._TranslationMetric # pylint: disable=protected-access
+ ):
+ _set_metric_table(
+ str(metric),
+ metric_results,
+ metrics_table,
+ constants.MetricResult.SCORE_KEY,
+ )
+ else:
+ _LOGGER.warning(
+ f"Metric name: {str(metric)} is not supported when parsing"
+ " metric results."
+ )
+
+ return metrics_table
+
+
+def _compute_metrics(
+ evaluation_run_config: evaluation_base.EvaluationRunConfig,
+) -> Tuple[Dict[str, Any], "pd.DataFrame"]:
+ """Computes the metrics for the dataset.
+
+ Args:
+ evaluation_run_config: Evaluation Run Configurations.
+
+ Returns:
+ The evaluation results for the input metrics.
+
+ Raises:
+ RuntimeError: The number of responses does not match the number of metrics.
+ """
+ try:
+ import pandas as pd
+ except ImportError:
+ raise ImportError(
+ 'Pandas is not installed. Please install the SDK using "pip install'
+ ' google-cloud-aiplatform[evaluation]"'
+ )
+
+ api_metrics, custom_metrics = _separate_custom_metrics(
+ evaluation_run_config.metrics
+ )
+ row_count = len(evaluation_run_config.dataset)
+ api_request_count = len(api_metrics) * row_count
+ custom_metric_request_count = len(custom_metrics) * row_count
+ total_request_count = api_request_count + custom_metric_request_count
+
+ _LOGGER.info(
+ f"Computing metrics with a total of {total_request_count} Vertex Gen AI"
+ " Evaluation Service API requests."
+ )
+
+ instance_list = []
+ futures_by_metric = collections.defaultdict(list)
+ rate_limiter = utils.RateLimiter(evaluation_run_config.evaluation_service_qps)
+ with tqdm(total=total_request_count) as pbar:
+ with futures.ThreadPoolExecutor(max_workers=constants.MAX_WORKERS) as executor:
+ for idx, row in evaluation_run_config.dataset.iterrows():
+ row_dict = _compute_custom_metrics(
+ row.to_dict(), custom_metrics, pbar, executor
+ )
+ instance_list.append(row_dict)
+ for metric in api_metrics:
+ future = executor.submit(
+ _instance_evaluation.evaluate_instances,
+ client=evaluation_run_config.client,
+ request=_instance_evaluation.build_request(
+ metric=metric,
+ row_dict=row_dict,
+ evaluation_run_config=evaluation_run_config,
+ ),
+ rate_limiter=rate_limiter,
+ retry_timeout=evaluation_run_config.retry_timeout,
+ )
+ future.add_done_callback(lambda _: pbar.update(1))
+ futures_by_metric[metric].append((future, idx))
+
+ # Retrieve results from all futures and handle errors.
+ results_dict = collections.defaultdict(list)
+ error_list = []
+ for metric, futures_list in futures_by_metric.items():
+ for future, index in futures_list:
+ try:
+ response = future.result()
+ results_dict[metric].append(response)
+ except Exception as e:
+ results_dict[metric].append("Error")
+ error_list.append((metric, index, f"Error: {e}"))
+
+ for metric, responses in results_dict.items():
+ results_dict[metric] = [
+ _instance_evaluation.handle_response(response) for response in responses
+ ]
+ if error_list:
+ _LOGGER.warning(
+ f"{len(error_list)} errors encountered during evaluation. Continue to"
+ " compute summary metrics for the rest of the dataset."
+ )
+ for metric_name, index, error in error_list:
+ _LOGGER.warning(
+ f"Error encountered for metric {metric_name} at dataset index"
+ f" {index}: {error}"
+ )
+ else:
+ _LOGGER.info(
+ f"All {total_request_count} metric requests are successfully computed."
+ )
+
+ instance_df = pd.DataFrame.from_dict(instance_list)
+ metrics_table = _parse_metric_results_to_dataframe(instance_df, results_dict)
+
+ # Aggregate the summary metrics.
+ summary_metrics = _aggregate_summary_metrics(evaluation_run_config, metrics_table)
+
+ return evaluation_base.EvalResult(
+ summary_metrics=summary_metrics, metrics_table=metrics_table
+ )
+
+
+def _get_baseline_model(evaluation_run_config: evaluation_base.EvaluationRunConfig):
+ """Gets the baseline model from the pairwise metrics."""
+ pairwise_metric_instances = [
+ metric
+ for metric in evaluation_run_config.metrics
+ if isinstance(metric, pairwise_metric.PairwiseMetric)
+ ]
+ baseline_models = {
+ instance.metric_name: instance.baseline_model
+ for instance in pairwise_metric_instances
+ }
+ if len(set(baseline_models.values())) > 1:
+ raise ValueError(
+ "Not all `PairwiseMetric` instances have the same `baseline_model`. "
+ f"Here are the detected baseline models: `{baseline_models}`. "
+ "Please separate pairwise metrics with different baseline models "
+ "in different `EvalTask` or use the same baseline model for "
+ "all pairwise metrics."
+ )
+ return pairwise_metric_instances[0].baseline_model
+
+
+def _convert_metric_prompt_template_example(metrics):
+ """Converts string metric names to generic model-based metric instances."""
+ updated_metrics = []
+ for metric in metrics:
+ if metric in constants.Metric.POINTWISE_METRIC_PROMPT_TEMPLATE_EXAMPLE_LIST:
+ template = metric_prompt_template_examples.MetricPromptTemplateExamples.get_prompt_template(
+ metric
+ )
+ metric = pointwise_metric.PointwiseMetric(
+ metric=metric, metric_prompt_template=template
+ )
+ elif metric in constants.Metric.PAIRWISE_METRIC_PROMPT_TEMPLATE_EXAMPLE_LIST:
+ template = metric_prompt_template_examples.MetricPromptTemplateExamples.get_prompt_template(
+ metric
+ )
+ metric = pairwise_metric.PairwiseMetric(
+ metric=metric, metric_prompt_template=template
+ )
+ _LOGGER.info(
+ f"Pairwise metric `{metric.metric_name}` loaded from"
+ " `MetricPromptTemplateExamples` does not have `baseline_model`"
+ " specified and only supports Bring-Your-Own-Response(BYOR)"
+ " evaluation. If you would like to run inference on the baseline model,"
+ " please instantiate a `PairwiseMetric` and provide the"
+ " `baseline_model` parameter."
+ )
+ updated_metrics.append(metric)
+ return updated_metrics
+
+
+def evaluate(
+ dataset: "pd.DataFrame",
+ metrics: List[Union[str, metrics_base._Metric]],
+ *,
+ model: Optional[
+ Union[generative_models.GenerativeModel, Callable[[str], str]]
+ ] = None,
+ prompt_template: Optional[Union[str, prompt_template_base.PromptTemplate]] = None,
+ metric_column_mapping: Dict[str, str],
+ evaluation_service_qps: Optional[float] = None,
+ retry_timeout: float = 600.0,
+) -> evaluation_base.EvalResult:
+ """Runs the evaluation for metrics.
+
+ Args:
+ dataset: The dataset to evaluate.
+ metrics: The list of metric names, or Metric instances to
+ evaluate. Prompt template is required for PairwiseMetric.
+ model: The GenerativeModel instance or a custom model function to generate
+ responses to evaluate. If not provided, the evaluation is computed with
+ the `response` column in the `dataset`.
+ prompt_template: A `PromptTemplate` or a prompt template string compatible
+ with `PromptTemplate` class with variables that can be formatted with
+ dataset columns to create assembled prompts. The variables can be
+ represented in curly braces `{variable_name}`, and must be included in the
+ dataset columns if specified. The variable names cannot contain spaces.
+ metric_column_mapping: An optional dictionary column mapping that
+ overrides the metric prompt template input variable names with
+ mapped the evaluation dataset column names, used during evaluation.
+ For example, if the input_variables of the metric prompt template
+ are ["context", "reference"], the metric_column_mapping can be
+ {
+ "context": "news_context",
+ "reference": "ground_truth",
+ "response": "model_1_response"
+ }
+ if the dataset has columns "news_context", "ground_truth" and
+ "model_1_response".
+ evaluation_service_qps: The custom QPS limit for the evaluation service.
+ retry_timeout: How long to keep retrying the evaluation requests for the
+ whole evaluation dataset, in seconds.
+
+ Returns:
+ EvalResult with summary metrics and a metrics table for per-instance
+ metrics.
+
+ Raises:
+ ValueError: If the metrics list is empty, or the prompt template is not
+ provided for PairwiseMetric, or multiple baseline models are specified for
+ PairwiseMetric instances, or both model and dataset model response column
+ are present.
+ """
+ _validate_metrics(metrics)
+ metrics = _convert_metric_prompt_template_example(metrics)
+ copied_metrics = []
+ for metric in metrics:
+ if isinstance(metric, pairwise_metric.PairwiseMetric):
+ copied_metrics.append(
+ pairwise_metric.PairwiseMetric(
+ metric=metric.metric_name,
+ metric_prompt_template=metric.metric_prompt_template,
+ baseline_model=metric.baseline_model,
+ )
+ )
+ else:
+ copied_metrics.append(copy.deepcopy(metric))
+ evaluation_run_config = evaluation_base.EvaluationRunConfig(
+ dataset=dataset.copy(deep=True),
+ metrics=copied_metrics,
+ metric_column_mapping=copy.deepcopy(metric_column_mapping),
+ client=utils.create_evaluation_service_client(),
+ evaluation_service_qps=(
+ evaluation_service_qps
+ if evaluation_service_qps
+ else constants.QuotaLimit.EVAL_SERVICE_QPS
+ ),
+ retry_timeout=retry_timeout,
+ )
+
+ if prompt_template:
+ _assemble_prompt_for_dataset(evaluation_run_config, prompt_template)
+
+ _run_model_inference(
+ model=model,
+ evaluation_run_config=evaluation_run_config,
+ response_column_name=constants.Dataset.MODEL_RESPONSE_COLUMN,
+ )
+ _validate_dataset(evaluation_run_config)
+
+ pairwise_metric_exists = any(
+ isinstance(metric, pairwise_metric.PairwiseMetric)
+ for metric in evaluation_run_config.metrics
+ )
+ if pairwise_metric_exists:
+ baseline_model = _get_baseline_model(evaluation_run_config)
+ _run_model_inference(
+ model=baseline_model,
+ evaluation_run_config=evaluation_run_config,
+ response_column_name=constants.Dataset.BASELINE_MODEL_RESPONSE_COLUMN,
+ )
+
+ _validate_metric_column_map(evaluation_run_config)
+ t1 = time.perf_counter()
+ evaluation_result = _compute_metrics(evaluation_run_config)
+ t2 = time.perf_counter()
+ _LOGGER.info(f"Evaluation Took:{t2 - t1} seconds")
+
+ return evaluation_result
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/constants.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5b38f06cfa72616c28ebd98cabd0089e0d28d74
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/constants.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Constants for evaluation."""
+import dataclasses
+
+# The number of concurrent workers to use for making model inference and
+# evaluation requests.
+MAX_WORKERS = 100
+
+
+@dataclasses.dataclass(frozen=True)
+class Metric:
+ """Namespace for Metrics."""
+
+ # Model-based Pointwise Metrics.
+ COHERENCE = "coherence"
+ FLUENCY = "fluency"
+ SAFETY = "safety"
+ GROUNDEDNESS = "groundedness"
+ INSTRUCTION_FOLLOWING = "instruction_following"
+ VERBOSITY = "verbosity"
+ TEXT_QUALITY = "text_quality"
+ SUMMARIZATION_QUALITY = "summarization_quality"
+ QUESTION_ANSWERING_QUALITY = "question_answering_quality"
+ MULTI_TURN_CHAT_QUALITY = "multi_turn_chat_quality"
+ MULTI_TURN_SAFETY = "multi_turn_safety"
+
+ # Model-based Pairwise Metrics.
+ PAIRWISE_COHERENCE = "pairwise_coherence"
+ PAIRWISE_FLUENCY = "pairwise_fluency"
+ PAIRWISE_SAFETY = "pairwise_safety"
+ PAIRWISE_GROUNDEDNESS = "pairwise_groundedness"
+ PAIRWISE_INSTRUCTION_FOLLOWING = "pairwise_instruction_following"
+ PAIRWISE_VERBOSITY = "pairwise_verbosity"
+ PAIRWISE_TEXT_QUALITY = "pairwise_text_quality"
+ PAIRWISE_SUMMARIZATION_QUALITY = "pairwise_summarization_quality"
+ PAIRWISE_QUESTION_ANSWERING_QUALITY = "pairwise_question_answering_quality"
+ PAIRWISE_MULTI_TURN_CHAT_QUALITY = "pairwise_multi_turn_chat_quality"
+ PAIRWISE_MULTI_TURN_SAFETY = "pairwise_multi_turn_safety"
+
+ POINTWISE_METRIC = "pointwise_metric"
+ PAIRWISE_METRIC = "pairwise_metric"
+
+ # Model-based translation Metrics.
+ COMET = "comet"
+ METRICX = "metricx"
+
+ # Automatic Metrics.
+ EXACT_MATCH = "exact_match"
+ BLEU = "bleu"
+ ROUGE = "rouge"
+ ROUGE_1 = "rouge_1"
+ ROUGE_2 = "rouge_2"
+ ROUGE_L = "rouge_l"
+ ROUGE_L_SUM = "rouge_l_sum"
+ TOOL_CALL_VALID = "tool_call_valid"
+ TOOL_NAME_MATCH = "tool_name_match"
+ TOOL_PARAMETER_KEY_MATCH = "tool_parameter_key_match"
+ TOOL_PARAMETER_KV_MATCH = "tool_parameter_kv_match"
+
+ AUTOMATIC_METRIC_LIST = (
+ EXACT_MATCH,
+ BLEU,
+ ROUGE,
+ ROUGE_1,
+ ROUGE_2,
+ ROUGE_L,
+ ROUGE_L_SUM,
+ TOOL_CALL_VALID,
+ TOOL_NAME_MATCH,
+ TOOL_PARAMETER_KEY_MATCH,
+ TOOL_PARAMETER_KV_MATCH,
+ )
+
+ POINTWISE_METRIC_PROMPT_TEMPLATE_EXAMPLE_LIST = (
+ COHERENCE,
+ FLUENCY,
+ SAFETY,
+ GROUNDEDNESS,
+ INSTRUCTION_FOLLOWING,
+ VERBOSITY,
+ TEXT_QUALITY,
+ SUMMARIZATION_QUALITY,
+ QUESTION_ANSWERING_QUALITY,
+ MULTI_TURN_CHAT_QUALITY,
+ MULTI_TURN_SAFETY,
+ )
+
+ PAIRWISE_METRIC_PROMPT_TEMPLATE_EXAMPLE_LIST = (
+ PAIRWISE_COHERENCE,
+ PAIRWISE_FLUENCY,
+ PAIRWISE_SAFETY,
+ PAIRWISE_GROUNDEDNESS,
+ PAIRWISE_INSTRUCTION_FOLLOWING,
+ PAIRWISE_VERBOSITY,
+ PAIRWISE_TEXT_QUALITY,
+ PAIRWISE_SUMMARIZATION_QUALITY,
+ PAIRWISE_QUESTION_ANSWERING_QUALITY,
+ PAIRWISE_MULTI_TURN_CHAT_QUALITY,
+ PAIRWISE_MULTI_TURN_SAFETY,
+ )
+
+
+@dataclasses.dataclass(frozen=True)
+class MetricResult:
+ """Namespace for Metric Results."""
+
+ ROW_COUNT_KEY = "row_count"
+ SCORE_KEY = "score"
+ EXPLANATION_KEY = "explanation"
+ PAIRWISE_CHOICE_KEY = "pairwise_choice"
+
+ # Automatic Metrics.
+ EXACT_MATCH_RESULTS = "exact_match_results"
+ BLEU_RESULTS = "bleu_results"
+ ROUGE_RESULTS = "rouge_results"
+ TOOL_CALL_VALID_RESULTS = "tool_call_valid_results"
+ TOOL_NAME_MATCH_RESULTS = "tool_name_match_results"
+ TOOL_PARAMETER_KEY_MATCH_RESULTS = "tool_parameter_key_match_results"
+ TOOL_PARAMETER_KV_MATCH_RESULTS = "tool_parameter_kv_match_results"
+
+ POINTWISE_METRIC_RESULT = "pointwise_metric_result"
+ PAIRWISE_METRIC_RESULT = "pairwise_metric_result"
+
+ COMET_RESULT = "comet_result"
+ METRICX_RESULT = "metricx_result"
+
+ AUTOMATIC_METRIC_RESULTS_LIST = (
+ EXACT_MATCH_RESULTS,
+ BLEU_RESULTS,
+ ROUGE_RESULTS,
+ TOOL_CALL_VALID_RESULTS,
+ TOOL_NAME_MATCH_RESULTS,
+ TOOL_PARAMETER_KEY_MATCH_RESULTS,
+ TOOL_PARAMETER_KV_MATCH_RESULTS,
+ )
+
+
+@dataclasses.dataclass(frozen=True)
+class Dataset:
+ # Default evaluation dataset schema column names.
+ MODEL_RESPONSE_COLUMN = "response"
+ BASELINE_MODEL_RESPONSE_COLUMN = "baseline_model_response"
+ PROMPT_COLUMN = "prompt"
+ REFERENCE_COLUMN = "reference"
+ SOURCE_COLUMN = "source"
+
+
+@dataclasses.dataclass(frozen=True)
+class QuotaLimit:
+ """Generative AI on Vertex AI quota limits.
+
+ For more details about QPM quota by region for each available base model, see
+ https://cloud.google.com/vertex-ai/generative-ai/docs/quotas.
+ """
+
+ # Default Prediction Service QPS limit.
+ PREDICTION_SERVICE_QPS = 5
+
+ # Default Evaluation Service QPS limit.
+ EVAL_SERVICE_QPS = 1
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/eval_task.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/eval_task.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c94a8e0269133632284f476e67ac9749ab6c7ae
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/eval_task.py
@@ -0,0 +1,536 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from typing import Any, Callable, Dict, List, Literal, Optional, TYPE_CHECKING, Union
+import uuid
+
+from google.api_core import exceptions
+import vertexai
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform.metadata import metadata
+from vertexai import generative_models
+from vertexai.evaluation import _base as eval_base
+from vertexai.evaluation import _evaluation
+from vertexai.evaluation import constants
+from vertexai.evaluation import utils
+from vertexai.evaluation.metrics import (
+ _base as metrics_base,
+)
+from vertexai.evaluation.metrics import (
+ pairwise_metric,
+)
+from vertexai.evaluation.metrics import (
+ pointwise_metric,
+)
+import numpy as np
+
+if TYPE_CHECKING:
+ import pandas as pd
+
+# pylint: disable=g-import-not-at-top
+try:
+ from IPython import display as IPython_display
+except ImportError:
+ IPython_display = None
+
+_LOGGER = base.Logger(__name__)
+
+EvalResult = eval_base.EvalResult
+GenerativeModel = generative_models.GenerativeModel
+
+
+class EvalTask:
+ """A class representing an EvalTask.
+
+ An Evaluation Tasks is defined to measure the model's ability to perform a
+ certain task in response to specific prompts or inputs. Evaluation tasks must
+ contain an evaluation dataset, and a list of metrics to evaluate. Evaluation
+ tasks help developers compare propmpt templates, track experiments, compare
+ models and their settings, and assess the quality of the model's generated
+ text.
+
+ Dataset Details:
+
+ Default dataset column names:
+ * prompt_column_name: "prompt"
+ * reference_column_name: "reference"
+ * response_column_name: "response"
+ * baseline_model_response_column_name: "baseline_model_response"
+
+ Requirement for different use cases:
+ * Bring-your-own-response (BYOR): You already have the data that you
+ want to evaluate stored in the dataset. Response column name can be
+ customized by providing `response_column_name` parameter, or in the
+ `metric_column_mapping`. For BYOR pairwise evaluation, the baseline
+ model response column name can be customized by providing
+ `baseline_model_response_column_name` parameter, or
+ in the `metric_column_mapping`. If the `response` column or
+ `baseline_model_response` column is present while the
+ corresponding model is specified, an error will be raised.
+
+ * Perform model inference without a prompt template: You have a dataset
+ containing the input prompts to the model and want to perform model
+ inference before evaluation. A column named `prompt` is required
+ in the evaluation dataset and is used directly as input to the model.
+
+ * Perform model inference with a prompt template: You have a dataset
+ containing the input variables to the prompt template and want to
+ assemble the prompts for model inference. Evaluation dataset
+ must contain column names corresponding to the variable names in
+ the prompt template. For example, if prompt template is
+ "Instruction: {instruction}, context: {context}", the dataset must
+ contain `instruction` and `context` columns.
+
+ Metrics Details:
+
+ The supported metrics descriptions, rating rubrics, and the required
+ input variables can be found on the Vertex AI public documentation page.
+ [Evaluation methods and metrics](https://cloud.google.com/vertex-ai/generative-ai/docs/models/determine-eval).
+
+ Usage Examples:
+
+ 1. To perform bring-your-own-response(BYOR) evaluation, provide the model
+ responses in the `response` column in the dataset. If a pairwise metric is
+ used for BYOR evaluation, provide the baseline model responses in the
+ `baseline_model_response` column.
+
+ ```
+ eval_dataset = pd.DataFrame({
+ "prompt" : [...],
+ "reference": [...],
+ "response" : [...],
+ "baseline_model_response": [...],
+ })
+ eval_task = EvalTask(
+ dataset=eval_dataset,
+ metrics=[
+ "bleu",
+ "rouge_l_sum",
+ MetricPromptTemplateExamples.Pointwise.FLUENCY,
+ MetricPromptTemplateExamples.Pairwise.SAFETY
+ ],
+ experiment="my-experiment",
+ )
+ eval_result = eval_task.evaluate(experiment_run_name="eval-experiment-run")
+ ```
+
+ 2. To perform evaluation with Gemini model inference, specify the `model`
+ parameter with a `GenerativeModel` instance. The input column name to the
+ model is `prompt` and must be present in the dataset.
+
+ ```
+ eval_dataset = pd.DataFrame({
+ "reference": [...],
+ "prompt" : [...],
+ })
+ result = EvalTask(
+ dataset=eval_dataset,
+ metrics=["exact_match", "bleu", "rouge_1", "rouge_l_sum"],
+ experiment="my-experiment",
+ ).evaluate(
+ model=GenerativeModel("gemini-1.5-pro"),
+ experiment_run_name="gemini-eval-run"
+ )
+ ```
+
+ 3. If a `prompt_template` is specified, the `prompt` column is not required.
+ Prompts can be assembled from the evaluation dataset, and all prompt
+ template variable names must be present in the dataset columns.
+ ```
+ eval_dataset = pd.DataFrame({
+ "context" : [...],
+ "instruction": [...],
+ })
+ result = EvalTask(
+ dataset=eval_dataset,
+ metrics=[MetricPromptTemplateExamples.Pointwise.SUMMARIZATION_QUALITY],
+ ).evaluate(
+ model=GenerativeModel("gemini-1.5-pro"),
+ prompt_template="{instruction}. Article: {context}. Summary:",
+ )
+ ```
+
+ 4. To perform evaluation with custom model inference, specify the `model`
+ parameter with a custom inference function. The input column name to the
+ custom inference function is `prompt` and must be present in the dataset.
+
+ ```
+ from openai import OpenAI
+ client = OpenAI()
+ def custom_model_fn(input: str) -> str:
+ response = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "user", "content": input}
+ ]
+ )
+ return response.choices[0].message.content
+
+ eval_dataset = pd.DataFrame({
+ "prompt" : [...],
+ "reference": [...],
+ })
+ result = EvalTask(
+ dataset=eval_dataset,
+ metrics=[MetricPromptTemplateExamples.Pointwise.SAFETY],
+ experiment="my-experiment",
+ ).evaluate(
+ model=custom_model_fn,
+ experiment_run_name="gpt-eval-run"
+ )
+ ```
+
+ 5. To perform pairwise metric evaluation with model inference step, specify
+ the `baseline_model` input to a `PairwiseMetric` instance and the candidate
+ `model` input to the `EvalTask.evaluate()` function. The input column name
+ to both models is `prompt` and must be present in the dataset.
+
+ ```
+ baseline_model = GenerativeModel("gemini-1.0-pro")
+ candidate_model = GenerativeModel("gemini-1.5-pro")
+
+ pairwise_groundedness = PairwiseMetric(
+ metric_prompt_template=MetricPromptTemplateExamples.get_prompt_template(
+ "pairwise_groundedness"
+ ),
+ baseline_model=baseline_model,
+ )
+ eval_dataset = pd.DataFrame({
+ "prompt" : [...],
+ })
+ result = EvalTask(
+ dataset=eval_dataset,
+ metrics=[pairwise_groundedness],
+ experiment="my-pairwise-experiment",
+ ).evaluate(
+ model=candidate_model,
+ experiment_run_name="gemini-pairwise-eval-run",
+ )
+ ```
+ """
+
+ _resource_noun = "evaluationTasks"
+
+ def __init__(
+ self,
+ *,
+ dataset: Union["pd.DataFrame", str, Dict[str, Any]],
+ metrics: List[
+ Union[
+ Literal[
+ "exact_match",
+ "bleu",
+ "rouge_1",
+ "rouge_2",
+ "rouge_l",
+ "rouge_l_sum",
+ "tool_call_valid",
+ "tool_name_match",
+ "tool_parameter_key_match",
+ "tool_parameter_kv_match",
+ ],
+ metrics_base.CustomMetric,
+ metrics_base._AutomaticMetric,
+ metrics_base._TranslationMetric,
+ pointwise_metric.PointwiseMetric,
+ pairwise_metric.PairwiseMetric,
+ ]
+ ],
+ experiment: Optional[str] = None,
+ metric_column_mapping: Optional[Dict[str, str]] = None,
+ output_uri_prefix: Optional[str] = "",
+ ):
+ """Initializes an EvalTask.
+
+ Args:
+ dataset: The dataset to be evaluated.
+ Supports the following dataset formats:
+ * pandas.DataFrame: Used directly for evaluation.
+ * Dict: Converted to a pandas DataFrame before evaluation.
+ * str: Interpreted as a file path or URI. Supported formats include:
+ * Local JSONL or CSV files: Loaded from the local filesystem.
+ * GCS JSONL or CSV files: Loaded from Google Cloud Storage
+ (e.g., 'gs://bucket/data.csv').
+ * BigQuery table URI: Loaded from Google Cloud BigQuery
+ (e.g., 'bq://project-id.dataset.table_name').
+ metrics: The list of metric names, or Metric instances to evaluate.
+ Prompt template is required for PairwiseMetric.
+ experiment: The name of the experiment to log the evaluations to.
+ metric_column_mapping: An optional dictionary column mapping that
+ overrides the metric prompt template input variable names with
+ mapped the evaluation dataset column names, used during evaluation.
+ For example, if the input_variables of the metric prompt template
+ are ["context", "reference"], the metric_column_mapping can be
+ {
+ "context": "news_context",
+ "reference": "ground_truth",
+ "response": "model_1_response"
+ }
+ if the dataset has columns "news_context", "ground_truth" and
+ "model_1_response".
+ output_uri_prefix: GCS location to store the metrics_table from
+ evaluation results.
+ """
+ self._dataset = utils.load_dataset(dataset)
+ self._metrics = metrics
+ self._experiment = experiment
+ self._metric_column_mapping = utils.initialize_metric_column_mapping(
+ metric_column_mapping, self._dataset
+ )
+ self.output_uri_prefix = output_uri_prefix
+
+ @property
+ def dataset(self) -> "pd.DataFrame":
+ """Returns evaluation dataset."""
+ return self._dataset
+
+ @property
+ def metrics(self) -> List[Union[str, metrics_base.CustomMetric]]:
+ """Returns metrics."""
+ return self._metrics
+
+ @property
+ def experiment(self) -> Optional[str]:
+ """Returns experiment name."""
+ return self._experiment
+
+ def _evaluate_with_experiment(
+ self,
+ *,
+ model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
+ prompt_template: Optional[str] = None,
+ experiment_run_name: Optional[str] = None,
+ evaluation_service_qps: Optional[float] = None,
+ retry_timeout: float = 120.0,
+ ) -> EvalResult:
+ """Runs an evaluation for the EvalTask with an experiment.
+
+ Args:
+ model: A GenerativeModel instance or a custom model function to generate
+ responses to evaluate. If not provided, the evaluation is computed with
+ the `response` column in the `dataset`.
+ prompt_template: The prompt template to use for the evaluation. If not
+ set, the prompt template that was used to create the EvalTask will be
+ used.
+ experiment_run_name: The name of the experiment run to log the evaluation
+ to if an experiment is set for this EvalTask. If not provided, a random
+ unique experiment run name is used.
+ evaluation_service_qps: The custom QPS limit for the evaluation service.
+ retry_timeout: How long to keep retrying the evaluation requests for
+ the whole evaluation dataset, in seconds.
+
+ Returns:
+ The evaluation result.
+ """
+ self._validate_experiment_run()
+ with vertexai.preview.start_run(experiment_run_name):
+ self._log_eval_experiment_param(model, prompt_template)
+ eval_result = _evaluation.evaluate(
+ dataset=self._dataset,
+ metrics=self._metrics,
+ model=model,
+ prompt_template=prompt_template,
+ metric_column_mapping=self._metric_column_mapping,
+ evaluation_service_qps=evaluation_service_qps,
+ retry_timeout=retry_timeout,
+ )
+
+ eval_result.summary_metrics = {
+ k: ("NaN" if isinstance(v, float) and np.isnan(v) else v)
+ for k, v in eval_result.summary_metrics.items()
+ }
+ eval_result.metadata = {
+ "experiment": self._experiment,
+ "experiment_run": experiment_run_name,
+ }
+ try:
+ vertexai.preview.log_metrics(eval_result.summary_metrics)
+ except (TypeError, exceptions.InvalidArgument) as e:
+ _LOGGER.warning(f"Experiment metrics logging failed: {str(e)}")
+ return eval_result
+
+ def evaluate(
+ self,
+ *,
+ model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
+ prompt_template: Optional[str] = None,
+ experiment_run_name: Optional[str] = None,
+ response_column_name: Optional[str] = None,
+ baseline_model_response_column_name: Optional[str] = None,
+ evaluation_service_qps: Optional[float] = None,
+ retry_timeout: float = 120.0,
+ output_file_name: Optional[str] = None,
+ ) -> EvalResult:
+ """Runs an evaluation for the EvalTask.
+
+ Args:
+ model: A GenerativeModel instance or a custom model function to generate
+ responses to evaluate. If not provided, the evaluation can be performed
+ in the bring-your-own-response (BYOR) mode.
+ prompt_template: The prompt template to use for the evaluation. If not
+ set, the prompt template that was used to create the EvalTask will be
+ used.
+ experiment_run_name: The name of the experiment run to log the evaluation
+ to if an experiment is set for this EvalTask. If not provided, a random
+ unique experiment run name is used.
+ response_column_name: The column name of model response in the dataset. If
+ provided, this will override the `metric_column_mapping` of the `EvalTask`.
+ baseline_model_response_column_name: The column name of baseline model
+ response in the dataset for pairwise metrics. If provided, this will
+ override the `metric_column_mapping` of the `EvalTask`
+ evaluation_service_qps: The custom QPS limit for the evaluation service.
+ retry_timeout: How long to keep retrying the evaluation requests for
+ the whole evaluation dataset, in seconds.
+ output_file_name: The file name with csv suffix to store the output
+ metrics_table.
+
+ Returns:
+ The evaluation result.
+ """
+ global_experiment_name = metadata._experiment_tracker.experiment_name
+ if experiment_run_name and not self._experiment and not global_experiment_name:
+ raise ValueError(
+ "Experiment is not set. Please initialize `EvalTask` with an"
+ " experiment, or initialize a global experiment with "
+ "`vertexai.init(experiment='experiment_name')`for logging this"
+ " evaluation run."
+ )
+ self._verify_and_set_response_column_name(
+ response_column_name=response_column_name,
+ metric_column_mapping_key=constants.Dataset.MODEL_RESPONSE_COLUMN,
+ )
+ self._verify_and_set_response_column_name(
+ response_column_name=baseline_model_response_column_name,
+ metric_column_mapping_key=constants.Dataset.BASELINE_MODEL_RESPONSE_COLUMN,
+ )
+
+ experiment_run_name = experiment_run_name or f"{uuid.uuid4()}"
+ if self._experiment and global_experiment_name:
+ metadata._experiment_tracker.set_experiment(
+ experiment=self._experiment, backing_tensorboard=False
+ )
+ eval_result = self._evaluate_with_experiment(
+ model=model,
+ prompt_template=prompt_template,
+ experiment_run_name=experiment_run_name,
+ evaluation_service_qps=evaluation_service_qps,
+ retry_timeout=retry_timeout,
+ )
+ metadata._experiment_tracker.set_experiment(
+ experiment=global_experiment_name, backing_tensorboard=False
+ )
+ elif self._experiment and not global_experiment_name:
+ metadata._experiment_tracker.set_experiment(
+ experiment=self._experiment, backing_tensorboard=False
+ )
+ eval_result = self._evaluate_with_experiment(
+ model=model,
+ prompt_template=prompt_template,
+ experiment_run_name=experiment_run_name,
+ evaluation_service_qps=evaluation_service_qps,
+ retry_timeout=retry_timeout,
+ )
+ metadata._experiment_tracker.reset()
+ elif not self._experiment and global_experiment_name:
+ eval_result = self._evaluate_with_experiment(
+ model=model,
+ prompt_template=prompt_template,
+ experiment_run_name=experiment_run_name,
+ evaluation_service_qps=evaluation_service_qps,
+ retry_timeout=retry_timeout,
+ )
+ else:
+ eval_result = _evaluation.evaluate(
+ dataset=self.dataset,
+ metrics=self.metrics,
+ model=model,
+ prompt_template=prompt_template,
+ metric_column_mapping=self._metric_column_mapping,
+ evaluation_service_qps=evaluation_service_qps,
+ retry_timeout=retry_timeout,
+ )
+ utils.upload_evaluation_results(
+ eval_result.metrics_table, self.output_uri_prefix, output_file_name
+ )
+ return eval_result
+
+ def _validate_experiment_run(self) -> None:
+ """Checks if an experiment run already exists."""
+ if metadata._experiment_tracker.experiment_run:
+ raise ValueError(
+ "Experiment run already exists. Please specify the name of the"
+ " experiment run to assign current session within this evaluation."
+ )
+
+ def _log_eval_experiment_param(
+ self,
+ model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None,
+ prompt_template: Optional[str] = None,
+ ) -> None:
+ """Logs variable input parameters of an evaluation to an experiment run."""
+ model_metadata = {}
+
+ if prompt_template is not None:
+ model_metadata.update({"prompt_template": prompt_template})
+
+ if isinstance(model, GenerativeModel):
+ model_metadata.update(
+ {
+ "model_name": model._model_name,
+ }
+ )
+
+ if model._generation_config and isinstance(model._generation_config, dict):
+ model_metadata.update(**model._generation_config)
+
+ if model._safety_settings and isinstance(model._safety_settings, dict):
+ safety_settings = model._safety_settings
+ safety_settings_as_str = {
+ category.name: threshold.name
+ for category, threshold in safety_settings.items()
+ }
+ model_metadata.update(safety_settings_as_str)
+
+ if model_metadata:
+ _LOGGER.info(f"Logging Eval Experiment metadata: {model_metadata}")
+ try:
+ vertexai.preview.log_params(model_metadata)
+ except (ValueError, TypeError) as e:
+ _LOGGER.warning(f"Experiment metadata logging failed: {str(e)}")
+
+ def _verify_and_set_response_column_name(
+ self, response_column_name: str, metric_column_mapping_key: str
+ ) -> None:
+ """Verifies and sets the model response column names."""
+ if response_column_name:
+ if response_column_name in self._dataset.columns:
+ self._metric_column_mapping[
+ metric_column_mapping_key
+ ] = response_column_name
+ else:
+ raise ValueError(
+ f"(Baseline) Model response column {response_column_name} is not"
+ " found in the dataset."
+ )
+
+ def display_runs(self):
+ """Displays experiment runs associated with this EvalTask."""
+ if not self._experiment:
+ raise ValueError("Experiment is not set.")
+ elif IPython_display:
+ IPython_display.display(
+ vertexai.preview.get_experiment_df(self._experiment)
+ )
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/__init__.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..45d1f2656248e35c9803ff78215dea1f8d77123f
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/__init__.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Evaluation Metrics Module."""
+
+from vertexai.evaluation.metrics import _base
+from vertexai.evaluation.metrics import _rouge
+from vertexai.evaluation.metrics import (
+ metric_prompt_template,
+)
+from vertexai.evaluation.metrics import (
+ metric_prompt_template_examples,
+)
+from vertexai.evaluation.metrics import pairwise_metric
+from vertexai.evaluation.metrics import pointwise_metric
+
+
+PairwiseMetric = pairwise_metric.PairwiseMetric
+PointwiseMetric = pointwise_metric.PointwiseMetric
+CustomMetric = _base.CustomMetric
+PairwiseMetricPromptTemplate = metric_prompt_template.PairwiseMetricPromptTemplate
+PointwiseMetricPromptTemplate = metric_prompt_template.PointwiseMetricPromptTemplate
+MetricPromptTemplateExamples = (
+ metric_prompt_template_examples.MetricPromptTemplateExamples
+)
+Rouge = _rouge.Rouge
+
+
+__all__ = [
+ "CustomMetric",
+ "PairwiseMetric",
+ "PointwiseMetric",
+ "PairwiseMetricPromptTemplate",
+ "PointwiseMetricPromptTemplate",
+ "MetricPromptTemplateExamples",
+ "Rouge",
+]
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/_base.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..da13bf84307bffee6edc409feadcce8f84412ca5
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/_base.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Base classes for evaluation metrics."""
+
+import abc
+from typing import Any, Callable, Dict, Literal, Union
+
+from vertexai.evaluation import constants
+from vertexai.evaluation.metrics import (
+ metric_prompt_template as metric_prompt_template_base,
+)
+
+
+class _Metric(abc.ABC):
+ """The abstract class for evaluation metric."""
+
+ def __init__(self, metric: str):
+ self._metric = metric
+
+ def __str__(self):
+ return self.metric_name
+
+ @property
+ def metric_name(self) -> str:
+ return self._metric
+
+
+class _ModelBasedMetric(_Metric):
+ """A Model-based Metric.
+
+ An evaluation metric that evaluates generative AI model responses with
+ another ML model (eg. Gemini) as a rater. It can be for a single model,
+ or two models.
+
+ For more details on when to use model-based metrics, see
+ [Evaluation methods and metrics](https://cloud.google.com/vertex-ai/generative-ai/docs/models/determine-eval).
+ """
+
+ def __init__(
+ self,
+ *,
+ metric: str,
+ metric_prompt_template: Union[
+ metric_prompt_template_base.PointwiseMetricPromptTemplate,
+ metric_prompt_template_base.PairwiseMetricPromptTemplate,
+ str,
+ ],
+ ):
+ """Initializes the model-based evaluation metric.
+
+ Args:
+ metric: Generic model based metric name.
+ metric_prompt_template: A metric prompt template for performing
+ the model-based evaluation. A freeform string is also accepted.
+ """
+ super().__init__(metric=metric)
+ self.metric_prompt_template = str(metric_prompt_template)
+
+
+class CustomMetric(_Metric):
+ """The custom evaluation metric.
+
+ A fully-customized CustomMetric that can be used to evaluate a single model
+ by defining a metric function for a computation-based metric. The
+ CustomMetric is computed on the client-side using the user-defined metric
+ function in SDK only, not by the Vertex Gen AI Evaluation Service.
+
+ Attributes:
+ name: The name of the metric.
+ metric_function: The user-defined evaluation function to compute a metric
+ score. Must use the dataset row dictionary as the metric function
+ input and return per-instance metric result as a dictionary output.
+ The metric score must mapped to the name of the CustomMetric as key.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ metric_function: Callable[
+ [Dict[str, Any]],
+ Dict[str, Any],
+ ],
+ ):
+ """Initializes the evaluation metric."""
+ super().__init__(name)
+ self.name = name
+ self.metric_function = metric_function
+
+
+class _AutomaticMetric(_Metric):
+ """An automatic metric that computes deterministic score based on reference.
+
+ An lexicon-based evaluation metric that evaluate a generative model's
+ response on the given evaluation task with reference ground truth answers.
+ It is a type of pointwise evaluation metric.
+
+ For more details on when to use automatic metrics, see
+ [Evaluation methods and
+ metrics](https://cloud.google.com/vertex-ai/generative-ai/docs/models/determine-eval).
+ """
+
+ def __init__(
+ self,
+ metric: Literal[constants.Metric.ROUGE],
+ ):
+ """Initializes the automatic evaluation metric.
+
+ Args:
+ metric: The automatic evaluation metric name.
+ """
+ super().__init__(metric=metric)
+
+
+class _TranslationMetric(_Metric):
+ """A Translation Metric.
+
+ Evaluates a score for the given instance using an underlying machine
+ learning model.
+ For now, only COMET and MetricX are supported.
+
+ For more details on how to evaluate translation, see
+ [Evaluation a translation
+ model](https://cloud.google.com/vertex-ai/generative-ai/docs/models/run-evaluation#translation).
+ """
+
+ def __init__(
+ self,
+ name: str,
+ version: str,
+ source_language: str,
+ target_language: str,
+ ):
+ """Initializes the Translation metric.
+
+ Args:
+ name: The name of the metric.
+ version: The version to use for evaluation.
+ source_language: The source language of the translation.
+ target_language: The target language of the translation.
+ """
+ self._version = version
+ self._source_language = source_language
+ self._target_language = target_language
+
+ super().__init__(metric=name)
+
+ @property
+ def version(self) -> str:
+ return self._version
+
+ @property
+ def source_language(self) -> str:
+ return self._source_language
+
+ @property
+ def target_language(self) -> str:
+ return self._target_language
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/_instance_evaluation.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/_instance_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..cea00382808fe200bb63c95ada01b8a9482ae398
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/_instance_evaluation.py
@@ -0,0 +1,478 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Library for metrics computation with Gen AI Evaluation Service."""
+
+import json
+from typing import Any, Dict, Union
+
+from google import api_core
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform_v1.services import (
+ evaluation_service as gapic_evaluation_services,
+)
+from google.cloud.aiplatform_v1.types import (
+ evaluation_service as gapic_eval_service_types,
+)
+from vertexai.evaluation import _base as eval_base
+from vertexai.evaluation import constants
+from vertexai.evaluation import (
+ prompt_template as prompt_template_base,
+)
+from vertexai.evaluation import utils
+from vertexai.evaluation.metrics import (
+ _base as metrics_base,
+)
+from vertexai.evaluation.metrics import _rouge
+from vertexai.evaluation.metrics import pairwise_metric
+from vertexai.evaluation.metrics import pointwise_metric
+from google.protobuf import json_format
+
+
+_LOGGER = base.Logger(__name__)
+_METRIC_NAME_TO_METRIC_SPEC = {
+ # Automatic Metrics.
+ constants.Metric.EXACT_MATCH: (gapic_eval_service_types.ExactMatchSpec()),
+ constants.Metric.BLEU: gapic_eval_service_types.BleuSpec(),
+ constants.Metric.ROUGE: gapic_eval_service_types.RougeSpec(),
+ constants.Metric.ROUGE_1: gapic_eval_service_types.RougeSpec(rouge_type="rouge1"),
+ constants.Metric.ROUGE_2: gapic_eval_service_types.RougeSpec(rouge_type="rouge2"),
+ constants.Metric.ROUGE_L: gapic_eval_service_types.RougeSpec(rouge_type="rougeL"),
+ constants.Metric.ROUGE_L_SUM: gapic_eval_service_types.RougeSpec(
+ rouge_type="rougeLsum"
+ ),
+ constants.Metric.TOOL_CALL_VALID: (gapic_eval_service_types.ToolCallValidSpec()),
+ constants.Metric.TOOL_NAME_MATCH: (gapic_eval_service_types.ToolNameMatchSpec()),
+ constants.Metric.TOOL_PARAMETER_KV_MATCH: (
+ gapic_eval_service_types.ToolParameterKVMatchSpec()
+ ),
+ constants.Metric.TOOL_PARAMETER_KEY_MATCH: (
+ gapic_eval_service_types.ToolParameterKeyMatchSpec()
+ ),
+ # Pointwise Metrics.
+ constants.Metric.POINTWISE_METRIC: (gapic_eval_service_types.PointwiseMetricSpec()),
+ # Pairwise Metrics.
+ constants.Metric.PAIRWISE_METRIC: (gapic_eval_service_types.PairwiseMetricSpec()),
+ # Model-based Translation Metrics.
+ constants.Metric.COMET: gapic_eval_service_types.CometSpec(),
+ constants.Metric.METRICX: gapic_eval_service_types.MetricxSpec(),
+}
+
+
+def build_request(
+ metric: Union[str, metrics_base._Metric],
+ row_dict: Dict[str, Any],
+ evaluation_run_config: eval_base.EvaluationRunConfig,
+) -> gapic_eval_service_types.EvaluateInstancesRequest:
+ """Builds an EvaluateInstancesRequest for Vertex Gen AI Evaluation Service.
+
+ Args:
+ metric: The metric to be evaluated.
+ row_dict: An evaluation dataset instance as a dictionary.
+ evaluation_run_config: Evaluation run configurations.
+
+ Returns:
+ An EvaluateInstancesRequest for Vertex Gen AI Evaluation Service.
+ """
+ project = initializer.global_config.project
+ location = initializer.global_config.location
+ if not project or not location:
+ raise ValueError(
+ "No project or location specified. Please run `vertexai.init()` to"
+ " provide these parameters."
+ )
+ location_path = (
+ gapic_evaluation_services.EvaluationServiceClient.common_location_path(
+ project, location
+ )
+ )
+
+ if isinstance(metric, pointwise_metric.PointwiseMetric):
+ metric_name = constants.Metric.POINTWISE_METRIC
+ elif isinstance(metric, pairwise_metric.PairwiseMetric):
+ metric_name = constants.Metric.PAIRWISE_METRIC
+ else:
+ metric_name = str(metric)
+
+ try:
+ metric_spec = _METRIC_NAME_TO_METRIC_SPEC[metric_name]
+ except KeyError as e:
+ raise ValueError(f"Metric name: {metric_name} is not supported.") from e
+ model_based_metric_instance_input = {}
+ metric_column_mapping = evaluation_run_config.metric_column_mapping
+ if isinstance(
+ metric, metrics_base._ModelBasedMetric
+ ): # pylint: disable=protected-access
+ metric_spec.metric_prompt_template = metric.metric_prompt_template
+ for variable in prompt_template_base.PromptTemplate(
+ metric.metric_prompt_template
+ ).variables:
+ model_based_metric_instance_input[variable] = row_dict.get(
+ metric_column_mapping.get(variable),
+ "",
+ )
+ elif isinstance(metric, _rouge.Rouge):
+ metric_spec.rouge_type = metric.rouge_type
+ metric_spec.use_stemmer = metric.use_stemmer
+ metric_spec.split_summaries = metric.split_summaries
+ elif isinstance(
+ metric, metrics_base._TranslationMetric # pylint: disable=protected-access
+ ):
+ metric_spec.version = metric.version
+ metric_spec.source_language = metric.source_language
+ metric_spec.target_language = metric.target_language
+
+ response = row_dict.get(
+ metric_column_mapping.get(constants.Dataset.MODEL_RESPONSE_COLUMN), ""
+ )
+ reference = row_dict.get(
+ metric_column_mapping.get(constants.Dataset.REFERENCE_COLUMN), ""
+ )
+ source = row_dict.get(
+ metric_column_mapping.get(constants.Dataset.SOURCE_COLUMN), ""
+ )
+
+ if metric_name == constants.Metric.EXACT_MATCH:
+ instance = gapic_eval_service_types.ExactMatchInput(
+ metric_spec=metric_spec,
+ instances=[
+ gapic_eval_service_types.ExactMatchInstance(
+ prediction=response,
+ reference=reference,
+ )
+ ],
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ exact_match_input=instance,
+ )
+ elif metric_name == constants.Metric.BLEU:
+ instance = gapic_eval_service_types.BleuInput(
+ metric_spec=metric_spec,
+ instances=[
+ gapic_eval_service_types.BleuInstance(
+ prediction=response,
+ reference=reference,
+ )
+ ],
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ bleu_input=instance,
+ )
+ elif metric_name in (
+ constants.Metric.ROUGE,
+ constants.Metric.ROUGE_1,
+ constants.Metric.ROUGE_2,
+ constants.Metric.ROUGE_L,
+ constants.Metric.ROUGE_L_SUM,
+ ):
+ instance = gapic_eval_service_types.RougeInput(
+ metric_spec=metric_spec,
+ instances=[
+ gapic_eval_service_types.RougeInstance(
+ prediction=response,
+ reference=reference,
+ )
+ ],
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ rouge_input=instance,
+ )
+ elif metric_name == constants.Metric.TOOL_CALL_VALID:
+ instance = gapic_eval_service_types.ToolCallValidInput(
+ metric_spec=metric_spec,
+ instances=[
+ gapic_eval_service_types.ToolCallValidInstance(
+ prediction=response,
+ reference=reference,
+ )
+ ],
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ tool_call_valid_input=instance,
+ )
+ elif metric_name == constants.Metric.TOOL_NAME_MATCH:
+ instance = gapic_eval_service_types.ToolNameMatchInput(
+ metric_spec=metric_spec,
+ instances=[
+ gapic_eval_service_types.ToolNameMatchInstance(
+ prediction=response,
+ reference=reference,
+ )
+ ],
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ tool_name_match_input=instance,
+ )
+ elif metric_name == constants.Metric.TOOL_PARAMETER_KEY_MATCH:
+ instance = gapic_eval_service_types.ToolParameterKeyMatchInput(
+ metric_spec=metric_spec,
+ instances=[
+ gapic_eval_service_types.ToolParameterKeyMatchInstance(
+ prediction=response,
+ reference=reference,
+ )
+ ],
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ tool_parameter_key_match_input=instance,
+ )
+ elif metric_name == constants.Metric.TOOL_PARAMETER_KV_MATCH:
+ instance = gapic_eval_service_types.ToolParameterKVMatchInput(
+ metric_spec=metric_spec,
+ instances=[
+ gapic_eval_service_types.ToolParameterKVMatchInstance(
+ prediction=response,
+ reference=reference,
+ )
+ ],
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ tool_parameter_kv_match_input=instance,
+ )
+ elif metric_name == constants.Metric.POINTWISE_METRIC:
+ instance = gapic_eval_service_types.PointwiseMetricInput(
+ metric_spec=metric_spec,
+ instance=gapic_eval_service_types.PointwiseMetricInstance(
+ json_instance=json.dumps(model_based_metric_instance_input),
+ ),
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ pointwise_metric_input=instance,
+ )
+ elif metric_name == constants.Metric.PAIRWISE_METRIC:
+ instance = gapic_eval_service_types.PairwiseMetricInput(
+ metric_spec=metric_spec,
+ instance=gapic_eval_service_types.PairwiseMetricInstance(
+ json_instance=json.dumps(model_based_metric_instance_input),
+ ),
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path, pairwise_metric_input=instance
+ )
+ elif metric_name == constants.Metric.COMET:
+ instance = gapic_eval_service_types.CometInput(
+ metric_spec=metric_spec,
+ instance=gapic_eval_service_types.CometInstance(
+ prediction=response,
+ reference=reference,
+ source=source,
+ ),
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ comet_input=instance,
+ )
+ elif metric_name == constants.Metric.METRICX:
+ instance = gapic_eval_service_types.MetricxInput(
+ metric_spec=metric_spec,
+ instance=gapic_eval_service_types.MetricxInstance(
+ prediction=response,
+ reference=reference,
+ source=source,
+ ),
+ )
+ return gapic_eval_service_types.EvaluateInstancesRequest(
+ location=location_path,
+ metricx_input=instance,
+ )
+ else:
+ raise ValueError(f"Unknown metric type: {metric_name}")
+
+
+def _parse_autometric_results(
+ metric_result_dict: Dict[str, Any],
+) -> Dict[str, Any]:
+ """Parses the automatic metric results from the evaluation results.
+
+ Args:
+ metric_result_dict: The metric results dictionary.
+
+ Returns:
+ A dictionary containing metric score of the metric.
+ """
+ for value in metric_result_dict.values():
+ return {
+ constants.MetricResult.SCORE_KEY: value[0].get(
+ constants.MetricResult.SCORE_KEY
+ )
+ }
+
+
+def _parse_pointwise_results(
+ metric_result_dict: Dict[str, Any],
+) -> Dict[str, Any]:
+ """Parses the model-based pointwise metric result.
+
+ Args:
+ metric_result_dict: The metric result dictionary.
+
+ Returns:
+ A dictionary containing metric score, explanation of the pointwise
+ metric result.
+ """
+ return {
+ constants.MetricResult.SCORE_KEY: metric_result_dict.get(
+ constants.MetricResult.SCORE_KEY
+ ),
+ constants.MetricResult.EXPLANATION_KEY: metric_result_dict.get(
+ constants.MetricResult.EXPLANATION_KEY
+ ),
+ }
+
+
+def _parse_model_based_translation_results(
+ metric_result_dict: Dict[str, Any],
+) -> Dict[str, Any]:
+ """Parses the model-based pointwise translation metric result.
+
+ Args:
+ metric_result_dict: The metric result dictionary.
+
+ Returns:
+ A dictionary containing metric score.
+ """
+ return {
+ constants.MetricResult.SCORE_KEY: metric_result_dict.get(
+ constants.MetricResult.SCORE_KEY
+ ),
+ }
+
+
+def _parse_pairwise_results(
+ metric_result_dict: Dict[str, Any],
+) -> Dict[str, Any]:
+ """Parses the pairwise metric result.
+
+ Args:
+ metric_result_dict: The metric result dictionary.
+
+ Returns:
+ A dictionary containing metric score, explanation of the pairwise metric
+ result.
+ """
+ return {
+ constants.MetricResult.PAIRWISE_CHOICE_KEY: metric_result_dict.get(
+ constants.MetricResult.PAIRWISE_CHOICE_KEY,
+ ),
+ constants.MetricResult.EXPLANATION_KEY: metric_result_dict.get(
+ constants.MetricResult.EXPLANATION_KEY
+ ),
+ }
+
+
+def handle_response(
+ response: Union[str, gapic_eval_service_types.EvaluateInstancesResponse],
+) -> Union[str, Dict[str, Any]]:
+ """Handles the response from the evaluation service.
+
+ Args:
+ response: The response from the evaluation service.
+
+ Returns:
+ A parsed metric result dictionary, or an error message string.
+ """
+ if isinstance(response, str):
+ return response
+
+ metric_type = response._pb.WhichOneof("evaluation_results")
+
+ if metric_type == constants.MetricResult.EXACT_MATCH_RESULTS:
+ metric_result = response.exact_match_results
+ elif metric_type == constants.MetricResult.BLEU_RESULTS:
+ metric_result = response.bleu_results
+ elif metric_type == constants.MetricResult.ROUGE_RESULTS:
+ metric_result = response.rouge_results
+ elif metric_type == constants.MetricResult.TOOL_CALL_VALID_RESULTS:
+ metric_result = response.tool_call_valid_results
+ elif metric_type == constants.MetricResult.TOOL_NAME_MATCH_RESULTS:
+ metric_result = response.tool_name_match_results
+ elif metric_type == constants.MetricResult.TOOL_PARAMETER_KEY_MATCH_RESULTS:
+ metric_result = response.tool_parameter_key_match_results
+ elif metric_type == constants.MetricResult.TOOL_PARAMETER_KV_MATCH_RESULTS:
+ metric_result = response.tool_parameter_kv_match_results
+ elif metric_type == constants.MetricResult.POINTWISE_METRIC_RESULT:
+ metric_result = response.pointwise_metric_result
+ elif metric_type == constants.MetricResult.PAIRWISE_METRIC_RESULT:
+ metric_result = response.pairwise_metric_result
+ elif metric_type == constants.MetricResult.COMET_RESULT:
+ metric_result = response.comet_result
+ elif metric_type == constants.MetricResult.METRICX_RESULT:
+ metric_result = response.metricx_result
+ else:
+ raise ValueError(f"Unknown metric type: {metric_type}")
+
+ metric_result_dict = json_format.MessageToDict(
+ metric_result._pb, preserving_proto_field_name=True
+ )
+ if metric_type in constants.MetricResult.AUTOMATIC_METRIC_RESULTS_LIST:
+ result = _parse_autometric_results(metric_result_dict)
+ elif metric_type == constants.MetricResult.POINTWISE_METRIC_RESULT:
+ result = _parse_pointwise_results(metric_result_dict)
+ elif metric_type == constants.MetricResult.PAIRWISE_METRIC_RESULT:
+ result = _parse_pairwise_results(metric_result_dict)
+ elif metric_type in (
+ constants.MetricResult.COMET_RESULT,
+ constants.MetricResult.METRICX_RESULT,
+ ):
+ result = _parse_model_based_translation_results(metric_result_dict)
+ else:
+ raise ValueError(f"Unknown metric type: {metric_type}")
+ return result
+
+
+def evaluate_instances(
+ client: gapic_evaluation_services.EvaluationServiceClient,
+ request: gapic_eval_service_types.EvaluateInstancesRequest,
+ rate_limiter: utils.RateLimiter,
+ retry_timeout: float,
+) -> gapic_eval_service_types.EvaluateInstancesResponse:
+ """Evaluates an instance using Vertex Gen AI Evaluation Service.
+
+ Args:
+ client: The Vertex Gen AI evaluation service client for evaluation.
+ request: An EvaluateInstancesRequest.
+ rate_limiter: The rate limiter for evaluation service requests.
+ retry_timeout: How long to keep retrying the evaluation requests, in seconds.
+
+ Returns:
+ An EvaluateInstancesResponse from Vertex Gen AI Evaluation Service.
+ """
+ rate_limiter.sleep_and_advance()
+ return client.evaluate_instances(
+ request=request,
+ retry=api_core.retry.Retry(
+ initial=0.250,
+ maximum=90.0,
+ multiplier=1.45,
+ timeout=retry_timeout,
+ predicate=api_core.retry.if_exception_type(
+ api_core.exceptions.Aborted,
+ api_core.exceptions.DeadlineExceeded,
+ api_core.exceptions.ResourceExhausted,
+ api_core.exceptions.ServiceUnavailable,
+ api_core.exceptions.Cancelled,
+ ),
+ ),
+ )
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/_rouge.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/_rouge.py
new file mode 100644
index 0000000000000000000000000000000000000000..aacc1b3cb8cd6d3b5bb292a6e2135950c8cecf1b
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/_rouge.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from typing import Literal
+from vertexai.evaluation import constants
+from vertexai.evaluation.metrics import _base
+
+
+class Rouge(_base._AutomaticMetric): # pylint: disable=protected-access
+ """The ROUGE Metric.
+
+ Calculates the recall of n-grams in prediction as compared to reference and
+ returns a score ranging between 0 and 1. Supported rouge types are
+ rougen[1-9], rougeL, and rougeLsum.
+ """
+
+ _metric_name = constants.Metric.ROUGE
+
+ def __init__(
+ self,
+ *,
+ rouge_type: Literal[
+ "rouge1",
+ "rouge2",
+ "rouge3",
+ "rouge4",
+ "rouge5",
+ "rouge6",
+ "rouge7",
+ "rouge8",
+ "rouge9",
+ "rougeL",
+ "rougeLsum",
+ ],
+ use_stemmer: bool = False,
+ split_summaries: bool = False
+ ):
+ """Initializes the ROUGE metric.
+
+ Args:
+ rouge_type: Supported rouge types are rougen[1-9], rougeL, and rougeLsum.
+ use_stemmer: Whether to use stemmer to compute rouge score.
+ split_summaries: Whether to split summaries while using 'rougeLsum' to
+ compute rouge score.
+ """
+ self._rouge_type = rouge_type
+ self._use_stemmer = use_stemmer
+ self._split_summaries = split_summaries
+
+ super().__init__(
+ metric=Rouge._metric_name,
+ )
+
+ @property
+ def rouge_type(self) -> str:
+ return self._rouge_type
+
+ @property
+ def use_stemmer(self) -> bool:
+ return self._use_stemmer
+
+ @property
+ def split_summaries(self) -> bool:
+ return self._split_summaries
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/metric_prompt_template.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/metric_prompt_template.py
new file mode 100644
index 0000000000000000000000000000000000000000..75256d3ac3e4ad00eda89abc0620e407e05d852c
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/metric_prompt_template.py
@@ -0,0 +1,395 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Metric prompt template classes for model-based metrics evaluation."""
+
+from typing import Dict, List, Optional
+
+from google.cloud.aiplatform import base
+from vertexai.evaluation import (
+ prompt_template,
+)
+
+
+_LOGGER = base.Logger(__name__)
+_NEWLINE = "\n"
+
+
+def serialize_dict_in_order(elements: Optional[Dict[str, str]]):
+ """Serializes dictionary to ordered string value without brackets."""
+ if elements is None:
+ return ""
+ return _NEWLINE.join(f"{key}: {value}" for key, value in sorted(elements.items()))
+
+
+class _MetricPromptTemplate(prompt_template.PromptTemplate):
+ """Metric prompt template for generic model-based metrics evaluation."""
+
+ def __init__(
+ self,
+ *,
+ criteria: Dict[str, str],
+ rating_rubric: Dict[str, str],
+ input_variables: List[str],
+ instruction: Optional[str] = None,
+ evaluation_steps: Optional[Dict[str, str]] = None,
+ metric_definition: Optional[str] = None,
+ few_shot_examples: Optional[List[str]] = None,
+ ):
+ """Initializes a metric prompt template."""
+
+ self._input_variables = input_variables
+ self._instruction = instruction
+ self._metric_definition = metric_definition
+ self._criteria = criteria
+ self._rating_rubric = rating_rubric
+ self._evaluation_steps = evaluation_steps
+ self._few_shot_examples = few_shot_examples
+
+ self.template = self.__str__()
+
+ @property
+ def prompt_data(self) -> str:
+ return self.template
+
+
+class PointwiseMetricPromptTemplate(_MetricPromptTemplate):
+ """Pointwise metric prompt template for pointwise model-based metrics."""
+
+ def __init__(
+ self,
+ *,
+ criteria: Dict[str, str],
+ rating_rubric: Dict[str, str],
+ input_variables: Optional[List[str]] = None,
+ instruction: Optional[str] = None,
+ metric_definition: Optional[str] = None,
+ evaluation_steps: Optional[Dict[str, str]] = None,
+ few_shot_examples: Optional[List[str]] = None,
+ ):
+ """Initializes a pointwise metric prompt template.
+
+ Args:
+ criteria: The standards and measures used to evaluate the model
+ responses. It is a dictionary of criterion names and criterion
+ definitions.
+ rating_rubric: A dictionary mapping of rating name and rating
+ definition, used to assign ratings or scores based on specific
+ criteria.
+ input_variables: An optional list of input fields to use in the metric
+ prompt template for generating model-based evaluation results. Model
+ "response" column is included by default. If metric_column_mapping is
+ provided, the mapping values of the input fields will be used to
+ retrieve data from the evaluation dataset.
+ instruction: The general instruction to the model that performs the
+ evaluation. If not provided, a default pointwise metric instruction
+ will be used.
+ metric_definition: The optional metric definition. It is a string
+ describing the metric to be evaluated at a high level. If not
+ provided, this field will not be included in the prompt template.
+ evaluation_steps: The optional gudelines of evaluation steps. A
+ dictionary of evaluation step name and evaluation step definition. If
+ not provided, a default pointwise metric evaluation steps will be
+ used.
+ few_shot_examples: The optional list of few-shot examples to be used in
+ the prompt, to provide the model with demonstrations of how to perform
+ the evaluation, and improve the evaluation accuracy. If not provided,
+ this field will not be included in the prompt template.
+ """
+ if not input_variables:
+ input_variables = []
+ _LOGGER.info(
+ "The `input_variables` parameter is empty. Only the `response`"
+ " column is used for computing this model-based metric."
+ )
+ input_variables = list(set(input_variables + ["response"]))
+
+ instruction = instruction or self.get_default_pointwise_instruction()
+
+ evaluation_steps = (
+ evaluation_steps or self.get_default_pointwise_evaluation_steps()
+ )
+
+ super().__init__(
+ input_variables=input_variables,
+ criteria=criteria,
+ rating_rubric=rating_rubric,
+ instruction=instruction,
+ metric_definition=metric_definition,
+ evaluation_steps=evaluation_steps,
+ few_shot_examples=few_shot_examples,
+ )
+
+ def get_default_pointwise_instruction(self) -> str:
+ """Returns the default instruction for the metric prompt template."""
+
+ return (
+ "You are an expert evaluator. Your task is to evaluate the quality of"
+ " the responses generated by AI models. We will provide you with the"
+ " user prompt and an AI-generated responses.\nYou should first read"
+ " the user input carefully for analyzing the task, and then evaluate"
+ " the quality of the responses based on the Criteria provided in the"
+ " Evaluation section below.\nYou will assign the response a rating"
+ " following the Rating Rubric and Evaluation Steps. Give step by step"
+ " explanations for your rating, and only choose ratings from the Rating"
+ " Rubric."
+ )
+
+ def get_default_pointwise_evaluation_steps(self) -> Dict[str, str]:
+ """Returns the default evaluation steps for the metric prompt template."""
+ return {
+ "Step 1": (
+ "Assess the response in aspects of all criteria provided. Provide"
+ " assessment according to each criterion."
+ ),
+ "Step 2": (
+ "Score based on the rating rubric. Give a brief rationale to"
+ " explain your evaluation considering each individual criterion."
+ ),
+ }
+
+ def __str__(self):
+ """Serializes the pointwise metric prompt template to a string."""
+ metric_prompt_template_str = [
+ "# Instruction",
+ f"{self._instruction}",
+ _NEWLINE,
+ "# Evaluation",
+ ]
+ if self._metric_definition:
+ metric_prompt_template_str.extend(
+ [
+ "## Metric Definition",
+ f"{self._metric_definition}\n",
+ ]
+ )
+ metric_prompt_template_str.extend(
+ [
+ "## Criteria",
+ f"{serialize_dict_in_order(self._criteria)}\n",
+ "## Rating Rubric",
+ f"{serialize_dict_in_order(self._rating_rubric)}\n",
+ ]
+ )
+ if self._evaluation_steps:
+ metric_prompt_template_str.extend(
+ [
+ "## Evaluation Steps",
+ f"{serialize_dict_in_order(self._evaluation_steps)}\n",
+ ]
+ )
+ if self._few_shot_examples:
+ metric_prompt_template_str.extend(
+ [
+ "## Evaluation Examples",
+ f"{_NEWLINE.join(self._few_shot_examples)}\n",
+ ]
+ )
+ metric_prompt_template_str.extend(
+ ["\n# User Inputs and AI-generated Response", "## User Inputs"]
+ )
+ for input_variable in self._input_variables:
+ if input_variable == "response":
+ continue
+ metric_prompt_template_str.extend(
+ [
+ f"### {input_variable}",
+ f"{{{input_variable}}}\n",
+ ]
+ )
+ metric_prompt_template_str.extend(
+ [
+ _NEWLINE,
+ "\n## AI-generated Response",
+ "{response}",
+ ]
+ )
+ return _NEWLINE.join(metric_prompt_template_str)
+
+ def __repr__(self):
+ return (
+ f"PointwiseMetricPromptTemplate(prompt_data={self.prompt_data},"
+ f" variables={self.variables})"
+ )
+
+
+class PairwiseMetricPromptTemplate(_MetricPromptTemplate):
+ """Pairwise metric prompt template for pairwise model-based metrics."""
+
+ def __init__(
+ self,
+ *,
+ criteria: Dict[str, str],
+ rating_rubric: Dict[str, str],
+ input_variables: Optional[List[str]] = None,
+ instruction: Optional[str] = None,
+ metric_definition: Optional[str] = None,
+ evaluation_steps: Optional[Dict[str, str]] = None,
+ few_shot_examples: Optional[List[str]] = None,
+ ):
+ """Initializes a pairwise metric prompt template.
+
+ Args:
+ criteria: The standards and measures used to evaluate the model
+ responses. It is a dictionary of criterion names and criterion
+ definitions.
+ rating_rubric: A dictionary mapping of rating name and rating
+ definition, used to assign ratings or scores based on specific
+ criteria.
+ input_variables: An optional list of input fields to use in the metric
+ prompt template for generating model-based evaluation results.
+ Candidate model "response" column and "baseline_model_response" column
+ are included by default. If metric_column_mapping is provided, the
+ mapping values of the input fields will be used to retrieve data from
+ the evaluation dataset.
+ instruction: The general instruction to the model that performs the
+ evaluation. If not provided, a default pairwise metric instruction
+ will be used.
+ metric_definition: The optional metric definition. It is a string
+ describing the metric to be evaluated at a high level. If not
+ provided, this field will not be included in the prompt template.
+ evaluation_steps: The optional gudelines of evaluation steps. A
+ dictionary of evaluation step name and evaluation step definition. If
+ not provided, a default pairwise metric evaluation steps will be used.
+ few_shot_examples: The optional list of few-shot examples to be used in
+ the prompt, to provide the model with demonstrations of how to perform
+ the evaluation, and improve the evaluation accuracy. If not provided,
+ this field will not be included in the prompt template.
+ """
+ if not input_variables:
+ input_variables = []
+ _LOGGER.info(
+ "The `input_variables` parameter is empty. Only the `response`"
+ " and `baseline_model_response` columns are used for computing"
+ " this model-based metric."
+ )
+ input_variables = list(
+ set(input_variables + ["response", "baseline_model_response"])
+ )
+
+ instruction = instruction or self.get_default_pairwise_instruction()
+
+ evaluation_steps = (
+ evaluation_steps or self.get_default_pairwise_evaluation_steps()
+ )
+
+ super().__init__(
+ input_variables=input_variables,
+ criteria=criteria,
+ rating_rubric=rating_rubric,
+ instruction=instruction,
+ metric_definition=metric_definition,
+ evaluation_steps=evaluation_steps,
+ few_shot_examples=few_shot_examples,
+ )
+
+ def get_default_pairwise_instruction(self) -> str:
+ """Returns the default instruction for the metric prompt template."""
+
+ return (
+ "You are an expert evaluator. Your task is to evaluate the quality of"
+ " the responses generated by two AI models. We will provide you with"
+ " the user input and a pair of AI-generated responses (Response A and"
+ " Response B).\nYou should first read the user input carefully for"
+ " analyzing the task, and then evaluate the quality of the responses"
+ " based on based on the Criteria provided in the Evaluation section"
+ " below.\nYou will first judge responses individually, following the"
+ " Rating Rubric and Evaluation Steps. Then you will give step by step"
+ " explanations for your judgement, compare results to declare the"
+ " winner based on the Rating Rubric and Evaluation Steps."
+ )
+
+ def get_default_pairwise_evaluation_steps(self) -> Dict[str, str]:
+ """Returns the default evaluation steps for the metric prompt template."""
+ return {
+ "Step 1": "Analyze Response A based on all the Criteria.",
+ "Step 2": "Analyze Response B based on all the Criteria.",
+ "Step 3": (
+ "Compare the overall performance of Response A and Response B based"
+ " on your analyses and assessment."
+ ),
+ "Step 4": (
+ 'Output your preference of "A", "SAME" or "B" to the'
+ " pairwise_choice field according to the Rating Rubrics."
+ ),
+ "Step 5": "Output your assessment reasoning in the explanation field",
+ }
+
+ def __str__(self):
+ """Serializes the pairwise metric prompt template to a string."""
+ metric_prompt_template_str = [
+ "# Instruction",
+ f"{self._instruction}",
+ _NEWLINE,
+ "# Evaluation",
+ ]
+ if self._metric_definition:
+ metric_prompt_template_str.extend(
+ [
+ "## Metric Definition",
+ f"{self._metric_definition}\n",
+ ]
+ )
+ metric_prompt_template_str.extend(
+ [
+ "## Criteria",
+ f"{serialize_dict_in_order(self._criteria)}\n",
+ "## Rating Rubric",
+ f"{serialize_dict_in_order(self._rating_rubric)}\n",
+ ]
+ )
+ if self._evaluation_steps:
+ metric_prompt_template_str.extend(
+ [
+ "## Evaluation Steps",
+ f"{serialize_dict_in_order(self._evaluation_steps)}\n",
+ ]
+ )
+ if self._few_shot_examples:
+ metric_prompt_template_str.extend(
+ [
+ "## Evaluation Examples",
+ f"{_NEWLINE.join(self._few_shot_examples)}\n",
+ ]
+ )
+ metric_prompt_template_str.extend(
+ ["\n# User Inputs and AI-generated Responses", "## User Inputs"]
+ )
+ for input_variable in self._input_variables:
+ if input_variable in ["response", "baseline_model_response"]:
+ continue
+ metric_prompt_template_str.extend(
+ [
+ f"### {input_variable}",
+ f"{{{input_variable}}}\n",
+ ]
+ )
+ metric_prompt_template_str.extend(
+ [
+ "\n## AI-generated Responses",
+ "### Response A",
+ "{baseline_model_response}\n",
+ "### Response B",
+ "{response}",
+ ]
+ )
+ return _NEWLINE.join(metric_prompt_template_str)
+
+ def __repr__(self):
+ return (
+ f"PairwiseMetricPromptTemplate(prompt_data={self.prompt_data},"
+ f" variables={self.variables})"
+ )
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/pointwise_metric.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/pointwise_metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..15109e39d88ae7164a258dec097aa970b02171d5
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/metrics/pointwise_metric.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Model-based Pointwise Metric."""
+
+from typing import Union
+
+from vertexai.evaluation import constants
+from vertexai.evaluation.metrics import _base
+from vertexai.evaluation.metrics import (
+ metric_prompt_template as metric_prompt_template_base,
+)
+
+
+class PointwiseMetric(_base._ModelBasedMetric): # pylint: disable=protected-access
+ """A Model-based Pointwise Metric.
+
+ A model-based evaluation metric that evaluate a single generative model's
+ response.
+
+ For more details on when to use model-based pointwise metrics, see
+ [Evaluation methods and metrics](https://cloud.google.com/vertex-ai/generative-ai/docs/models/determine-eval).
+
+ Usage Examples:
+
+ ```
+ candidate_model = GenerativeModel("gemini-1.5-pro")
+ eval_dataset = pd.DataFrame({
+ "prompt" : [...],
+ })
+ fluency_metric = PointwiseMetric(
+ metric="fluency",
+ metric_prompt_template=MetricPromptTemplateExamples.get_prompt_template('fluency'),
+ )
+ pointwise_eval_task = EvalTask(
+ dataset=eval_dataset,
+ metrics=[
+ fluency_metric,
+ MetricPromptTemplateExamples.Pointwise.GROUNDEDNESS,
+ ],
+ )
+ pointwise_result = pointwise_eval_task.evaluate(
+ model=candidate_model,
+ )
+ ```
+ """
+
+ def __init__(
+ self,
+ *,
+ metric: str,
+ metric_prompt_template: Union[
+ metric_prompt_template_base.PointwiseMetricPromptTemplate, str
+ ],
+ ):
+ """Initializes a pointwise evaluation metric.
+
+ Args:
+ metric: The pointwise evaluation metric name.
+ metric_prompt_template: Pointwise metric prompt template for performing
+ the model-based evaluation. A freeform string is also accepted.
+ """
+ super().__init__(
+ metric_prompt_template=metric_prompt_template,
+ metric=metric,
+ )
+
+
+class Comet(_base._TranslationMetric): # pylint: disable=protected-access
+ """A COMET metric.
+
+ Evaluates a score for the given instance using
+ https://huggingface.co/Unbabel/wmt22-comet-da
+ """
+
+ _metric_name = constants.Metric.COMET
+
+ def __init__(
+ self,
+ *,
+ version: str = "COMET_22_SRC_REF",
+ source_language: str = None,
+ target_language: str = None,
+ ):
+ """Initializes the COMET metric.
+
+ Args:
+ version: The COMET version to use for evaluation eg.
+ "COMET_22_SRC_REF".
+ source_language: Optional. The source language of the translation.
+ target_language: Optional. The target language of the translation.
+ """
+
+ super().__init__(
+ name=Comet._metric_name,
+ version=version,
+ source_language=source_language,
+ target_language=target_language,
+ )
+
+
+class MetricX(_base._TranslationMetric): # pylint: disable=protected-access
+ """A MetricX metric.
+
+ Evaluates a score for the given instance using
+ https://github.com/google-research/metricx
+ """
+
+ _metric_name = constants.Metric.METRICX
+
+ def __init__(
+ self,
+ *,
+ version: str = "METRICX_24_SRC_REF",
+ source_language: str = None,
+ target_language: str = None,
+ ):
+ """Initializes the MetricX metric.
+
+ Args:
+ version: The MetricX version to use for evaluation. Can be one of
+ "METRICX_24_SRC_REF", "METRICX_24_SRC", or "METRICX_24_REF".
+ source_language: Optional. The source language of the translation.
+ target_language: Optional. The target language of the translation.
+ """
+
+ super().__init__(
+ name=MetricX._metric_name,
+ version=version,
+ source_language=source_language,
+ target_language=target_language,
+ )
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/prompt_template.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/prompt_template.py
new file mode 100644
index 0000000000000000000000000000000000000000..813fe0cd445ca503e438b42d3dc38251da04e7da
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/prompt_template.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import string
+from typing import Set
+
+
+class PromptTemplate:
+ """A prompt template for creating prompts with variables.
+
+ The `PromptTemplate` class allows users to define a template string with
+ variables represented in curly braces `{variable}`. The variable
+ names cannot contain spaces. These variables can be replaced with specific
+ values using the `assemble` method, providing flexibility in generating
+ dynamic prompts.
+
+ Usage:
+
+ ```
+ template_str = "Hello, {name}! Today is {day}. How are you?"
+ prompt_template = PromptTemplate(template_str)
+ completed_prompt = prompt_template.assemble(name="John", day="Monday")
+ print(completed_prompt)
+ ```
+ """
+
+ def __init__(self, template: str):
+ """Initializes the PromptTemplate with a given template.
+
+ Args:
+ template: The template string with variables. Variables should be
+ represented in curly braces `{variable}`.
+ """
+ self.template = str(template)
+ self.variables = self._get_variables()
+
+ def _get_variables(self) -> Set[str]:
+ """Extracts and return a set of variable names from the template."""
+ return set(
+ field_name
+ for _, field_name, _, _ in string.Formatter().parse(self.template)
+ if field_name is not None
+ )
+
+ def assemble(self, **kwargs) -> "PromptTemplate":
+ """Replaces only the provided variables in the template with specific values.
+
+ Args:
+ **kwargs: Keyword arguments where keys are placeholder names and values
+ are the replacements.
+
+ Returns:
+ A new PromptTemplate instance with the updated template string.
+ """
+ replaced_values = {
+ key: kwargs.get(key, "{" + key + "}") for key in self.variables
+ }
+ new_template = self.template.format(**replaced_values)
+ return PromptTemplate(new_template)
+
+ def __str__(self) -> str:
+ """Returns the template string."""
+ return self.template
+
+ def __repr__(self) -> str:
+ """Returns a string representation of the PromptTemplate."""
+ return f"PromptTemplate('{self.template}')"
diff --git a/testbed/googleapis__python-aiplatform/vertexai/evaluation/utils.py b/testbed/googleapis__python-aiplatform/vertexai/evaluation/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8ea27c183dedb5f95bfb5b7d0a88fd53cf31504
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/evaluation/utils.py
@@ -0,0 +1,330 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import functools
+import io
+import os
+import tempfile
+import threading
+import time
+from typing import Any, Dict, Optional, TYPE_CHECKING, Union, Callable, Literal
+
+from google.cloud import bigquery
+from google.cloud import storage
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import compat
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform_v1.services import (
+ evaluation_service as gapic_evaluation_services,
+)
+
+
+if TYPE_CHECKING:
+ import pandas as pd
+
+_BQ_PREFIX = "bq://"
+_GCS_PREFIX = "gs://"
+_LOGGER = base.Logger(__name__)
+
+
+class _EvaluationServiceClientWithOverride(utils.ClientWithOverride):
+ _is_temporary = False
+ _default_version = compat.V1
+ _version_map = (
+ (
+ compat.V1,
+ gapic_evaluation_services.EvaluationServiceClient,
+ ),
+ )
+
+
+class RateLimiter:
+ """Helper class for rate-limiting requests to Vertex AI to improve QoS.
+
+ Attributes:
+ seconds_per_event: The time interval (in seconds) between events to
+ maintain the desired rate.
+ last: The timestamp of the last event.
+ _lock: A lock to ensure thread safety.
+ """
+
+ def __init__(self, rate: Optional[float] = None):
+ """Initializes the rate limiter.
+
+ A simple rate limiter for controlling the frequency of API calls. This class
+ implements a token bucket algorithm to limit the rate at which events
+ can occur. It's designed for cases where the batch size (number of events
+ per call) is always 1 for traffic shaping and rate limiting.
+
+ Args:
+ rate: The number of queries allowed per second.
+ Raises:
+ ValueError: If the rate is not positive.
+ """
+ if not rate or rate <= 0:
+ raise ValueError("Rate must be a positive number")
+ self.seconds_per_event = 1.0 / rate
+ self.last = time.time() - self.seconds_per_event
+ self._lock = threading.Lock()
+
+ def _admit(self) -> float:
+ """Checks if an event can be admitted or calculates the remaining delay."""
+ now = time.time()
+ time_since_last = now - self.last
+ if time_since_last >= self.seconds_per_event:
+ self.last = now
+ return 0
+ else:
+ return self.seconds_per_event - time_since_last
+
+ def sleep_and_advance(self):
+ """Blocks the current thread until the next event can be admitted."""
+ with self._lock:
+ delay = self._admit()
+ if delay > 0:
+ time.sleep(delay)
+ self.last = time.time()
+
+
+def rate_limit(rate: Optional[float] = None) -> Callable[[Any], Any]:
+ """Decorator version of rate limiter."""
+
+ def _rate_limit(method):
+ limiter = RateLimiter(rate)
+
+ @functools.wraps(method)
+ def wrapper(*args, **kwargs):
+ limiter.sleep_and_advance()
+ return method(*args, **kwargs)
+
+ return wrapper
+
+ return _rate_limit
+
+
+def create_evaluation_service_client(
+ api_base_path_override: Optional[str] = None,
+) -> _EvaluationServiceClientWithOverride:
+ """Creates a client for the evaluation service.
+
+ Args:
+ api_base_path_override: Optional. Override default api base path.
+
+ Returns:
+ Instantiated Vertex AI EvaluationServiceClient with optional
+ overrides.
+ """
+ return initializer.global_config.create_client(
+ client_class=_EvaluationServiceClientWithOverride,
+ location_override=initializer.global_config.location,
+ api_base_path_override=api_base_path_override,
+ )
+
+
+def load_dataset(
+ source: Union[str, "pd.DataFrame", Dict[str, Any]],
+) -> "pd.DataFrame":
+ """Loads dataset from various sources into a DataFrame.
+
+ Args:
+ source: The dataset source. Supports the following dataset formats:
+ * pandas.DataFrame: Used directly for evaluation.
+ * Dict: Converted to a pandas DataFrame before evaluation.
+ * str: Interpreted as a file path or URI. Supported formats include:
+ * Local JSONL or CSV files: Loaded from the local filesystem.
+ * GCS JSONL or CSV files: Loaded from Google Cloud Storage
+ (e.g., 'gs://bucket/data.csv').
+ * BigQuery table URI: Loaded from Google Cloud BigQuery
+ (e.g., 'bq://project-id.dataset.table_name').
+
+ Returns:
+ The dataset in pandas DataFrame format.
+ """
+ try:
+ import pandas as pd
+ except ImportError:
+ raise ImportError(
+ 'Pandas is not installed. Please install the SDK using "pip install'
+ ' google-cloud-aiplatform[evaluation]"'
+ )
+ if isinstance(source, pd.DataFrame):
+ return source.copy()
+ elif isinstance(source, dict):
+ return pd.DataFrame(source)
+ elif isinstance(source, str):
+ if source.startswith(_BQ_PREFIX):
+ return _load_bigquery(source[len(_BQ_PREFIX) :])
+
+ _, extension = os.path.splitext(source)
+ file_type = extension.lower()[1:]
+
+ if file_type == "jsonl":
+ return _load_jsonl(source)
+ elif file_type == "csv":
+ return _load_csv(source)
+ else:
+ raise ValueError(
+ f"Unsupported file type: {file_type} from {source}. Please"
+ " provide a valid GCS path with `jsonl` or `csv` suffix or a valid"
+ " BigQuery table URI."
+ )
+ else:
+ raise TypeError(
+ "Unsupported dataset type. Must be a `pd.DataFrame`, Python dictionary,"
+ " valid GCS path with `jsonl` or `csv` suffix or a valid BigQuery table URI."
+ )
+
+
+def _load_jsonl(filepath: str) -> "pd.DataFrame":
+ """Loads data from a JSONL file into a DataFrame."""
+ try:
+ import pandas as pd
+ except ImportError:
+ raise ImportError(
+ 'Pandas is not installed. Please install the SDK using "pip install'
+ ' google-cloud-aiplatform[evaluation]"'
+ )
+ if filepath.startswith(_GCS_PREFIX):
+ file_contents = _read_gcs_file_contents(filepath)
+ return pd.read_json(file_contents, lines=True)
+ else:
+ with open(filepath, "r") as f:
+ return pd.read_json(f, lines=True)
+
+
+def _load_csv(filepath: str) -> "pd.DataFrame":
+ """Loads data from a CSV file into a DataFrame."""
+ try:
+ import pandas as pd
+ except ImportError:
+ raise ImportError(
+ 'Pandas is not installed. Please install the SDK using "pip install'
+ ' google-cloud-aiplatform[evaluation]"'
+ )
+ if filepath.startswith(_GCS_PREFIX):
+ file_contents = _read_gcs_file_contents(filepath)
+ return pd.read_csv(io.StringIO(file_contents), encoding="utf-8")
+ else:
+ return pd.read_csv(filepath, encoding="utf-8")
+
+
+def _load_bigquery(table_id: str) -> "pd.DataFrame":
+ """Loads data from a BigQuery table into a DataFrame."""
+
+ bigquery_client = bigquery.Client(project=initializer.global_config.project)
+ table = bigquery_client.get_table(table_id)
+ return bigquery_client.list_rows(table).to_dataframe()
+
+
+def _read_gcs_file_contents(filepath: str) -> str:
+ """Reads the contents of a file from Google Cloud Storage.
+
+ Args:
+ filepath: The GCS file path (e.g., 'gs://bucket_name/file.csv')
+
+ Returns:
+ str: The contents of the file.
+ """
+
+ storage_client = storage.Client(
+ project=initializer.global_config.project,
+ credentials=initializer.global_config.credentials,
+ )
+ bucket_name, blob_path = filepath[len(_GCS_PREFIX) :].split("/", 1)
+ bucket = storage_client.get_bucket(bucket_name)
+ blob = bucket.blob(blob_path)
+ return blob.download_as_string().decode("utf-8")
+
+
+def _upload_pandas_df_to_gcs(
+ df: "pd.DataFrame", upload_gcs_path: str, file_type: Literal["csv", "jsonl"]
+) -> None:
+ """Uploads the provided Pandas DataFrame to a GCS bucket.
+
+ Args:
+ df: The Pandas DataFrame to upload.
+ upload_gcs_path: The GCS path to upload the data file.
+ file_type: The file type of the data file.
+ """
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ if file_type == "csv":
+ local_dataset_path = os.path.join(temp_dir, "metrics_table.csv")
+ df.to_csv(path_or_buf=local_dataset_path)
+ elif file_type == "jsonl":
+ local_dataset_path = os.path.join(temp_dir, "metrics_table.jsonl")
+ df.to_json(path_or_buf=local_dataset_path, orient="records", lines=True)
+ else:
+ raise ValueError(
+ f"Unsupported file type: {file_type} from {upload_gcs_path}."
+ " Please provide a valid GCS path with `jsonl` or `csv` suffix."
+ )
+
+ storage_client = storage.Client(
+ project=initializer.global_config.project,
+ credentials=initializer.global_config.credentials,
+ )
+ storage.Blob.from_string(
+ uri=upload_gcs_path, client=storage_client
+ ).upload_from_filename(filename=local_dataset_path)
+
+
+def upload_evaluation_results(
+ dataset: "pd.DataFrame", destination_uri_prefix: str, file_name: str
+) -> None:
+ """Uploads eval results to GCS destination.
+
+ Args:
+ dataset: Pandas dataframe to upload.
+ destination_uri_prefix: GCS folder to store the data.
+ file_name: File name to store the data.
+ """
+ if not destination_uri_prefix:
+ return
+ if destination_uri_prefix.startswith(_GCS_PREFIX):
+ _, extension = os.path.splitext(file_name)
+ file_type = extension.lower()[1:]
+ output_path = destination_uri_prefix + "/" + file_name
+ _upload_pandas_df_to_gcs(dataset, output_path, file_type)
+ else:
+ raise ValueError(
+ f"Unsupported destination URI: {destination_uri_prefix}."
+ f" Please provide a valid GCS bucket URI prefix starting with"
+ f" {_GCS_PREFIX}."
+ )
+
+
+def initialize_metric_column_mapping(
+ metric_column_mapping: Optional[Dict[str, str]], dataset: "pd.DataFrame"
+):
+ """Initializes metric column mapping with dataset columns."""
+ initialized_metric_column_mapping = {}
+ for column in dataset.columns:
+ initialized_metric_column_mapping[column] = column
+ if metric_column_mapping:
+ for key, value in metric_column_mapping.items():
+ if key in initialized_metric_column_mapping:
+ _LOGGER.warning(
+ f"Cannot override `{key}` column with `{key}:{value}` mapping"
+ f" because `{key}` column is present in the evaluation"
+ " dataset. `metric_column_mapping` cannot override keys"
+ " that are already in evaluation dataset columns."
+ )
+ else:
+ initialized_metric_column_mapping[key] = value
+ return initialized_metric_column_mapping
diff --git a/testbed/googleapis__python-aiplatform/vertexai/prompts/_prompt_management.py b/testbed/googleapis__python-aiplatform/vertexai/prompts/_prompt_management.py
new file mode 100644
index 0000000000000000000000000000000000000000..88488f53bc9a1210d5aec38b5746813c2388ee72
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/prompts/_prompt_management.py
@@ -0,0 +1,739 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import annotations
+
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer as aiplatform_initializer
+from google.cloud.aiplatform.compat.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import (
+ dataset_version as gca_dataset_version,
+)
+from google.cloud.aiplatform_v1beta1.types import (
+ prediction_service as gapic_prediction_service_types,
+)
+from vertexai.generative_models import (
+ Part,
+ Image,
+ GenerativeModel,
+ Tool,
+ ToolConfig,
+)
+from vertexai.generative_models._generative_models import (
+ _proto_to_dict,
+ _dict_to_proto,
+ _tool_types_to_gapic_tools,
+ PartsType,
+)
+from vertexai.prompts._prompts import Prompt
+from google.protobuf import field_mask_pb2 as field_mask
+
+import dataclasses
+from typing import (
+ Any,
+ Dict,
+ Optional,
+)
+
+_LOGGER = base.Logger(__name__)
+_dataset_client_value = None
+
+DEFAULT_API_SCHEMA_VERSION = "1.0.0"
+PROMPT_SCHEMA_URI = (
+ "gs://google-cloud-aiplatform/schema/dataset/metadata/text_prompt_1.0.0.yaml"
+)
+
+
+def _format_function_declaration_parameters(obj: Any):
+ """Recursively replaces type_ and format_ fields in-place."""
+ if isinstance(obj, (str, int, float)):
+ return obj
+ if isinstance(obj, dict):
+ new = obj.__class__()
+ for key, value in obj.items():
+ key = key.replace("type_", "type")
+ key = key.replace("format_", "format")
+ new[key] = _format_function_declaration_parameters(value)
+ elif isinstance(obj, (list, set, tuple)):
+ new = obj.__class__(
+ _format_function_declaration_parameters(value) for value in obj
+ )
+ else:
+ return obj
+ return new
+
+
+@dataclasses.dataclass
+class Arguments:
+ """Arguments. Child of Execution.
+
+ Attributes:
+ variables: The arguments of the execution.
+ """
+
+ variables: dict[str, list[Part]]
+
+ def to_dict(self) -> Dict[str, Any]:
+ dct = {}
+ for variable_name in self.variables:
+ dct[variable_name] = {
+ "partList": {
+ "parts": [part.to_dict() for part in self.variables[variable_name]]
+ }
+ }
+ return dct
+
+ @classmethod
+ def from_dict(cls, dct: Dict[str, Any]) -> "Arguments":
+ variables = {}
+ for variable_name in dct:
+ variables[variable_name] = [
+ Part.from_dict(part) for part in dct[variable_name]["partList"]["parts"]
+ ]
+ arguments = cls(variables=variables)
+ return arguments
+
+
+@dataclasses.dataclass
+class Execution:
+ """Execution. Child of MultimodalPrompt.
+
+ Attributes:
+ arguments: The arguments of the execution.
+ """
+
+ arguments: Arguments
+
+ def __init__(self, arguments: dict[str, list[Part]]):
+ self.arguments = Arguments(variables=arguments)
+
+ def to_dict(self) -> Dict[str, Any]:
+ dct = {}
+ dct["arguments"] = self.arguments.to_dict()
+ return dct
+
+ @classmethod
+ def from_dict(cls, dct: Dict[str, Any]) -> "Execution":
+ arguments = dct.get("arguments", None)
+ execution = cls(arguments=arguments)
+ return execution
+
+
+@dataclasses.dataclass
+class MultimodalPrompt:
+ """MultimodalPrompt. Child of PromptDatasetMetadata.
+
+ Attributes:
+ prompt_message: The schema for the prompt. A subset of the GenerateContentRequest schema.
+ api_schema_version: The api schema version of the prompt when it was last modified.
+ executions: Contains data related to an execution of a prompt (ex. variables)
+ """
+
+ prompt_message: gapic_prediction_service_types.GenerateContentRequest
+ api_schema_version: Optional[str] = DEFAULT_API_SCHEMA_VERSION
+ executions: Optional[list[Execution]] = None
+
+ def to_dict(self) -> Dict[str, Any]:
+ dct = {"multimodalPrompt": {}}
+ dct["apiSchemaVersion"] = self.api_schema_version
+ dct["multimodalPrompt"]["promptMessage"] = _proto_to_dict(self.prompt_message)
+
+ # Fix type_ and format_ fields
+ if dct["multimodalPrompt"]["promptMessage"].get("tools", None):
+ tools = dct["multimodalPrompt"]["promptMessage"]["tools"]
+ for tool in tools:
+ for function_declaration in tool.get("function_declarations", []):
+ function_declaration[
+ "parameters"
+ ] = _format_function_declaration_parameters(
+ function_declaration["parameters"]
+ )
+
+ if self.executions and self.executions[0]:
+ # Only add variable sets if they are non empty.
+ execution_dcts = []
+ for execution in self.executions:
+ exeuction_dct = execution.to_dict()
+ if exeuction_dct and exeuction_dct["arguments"]:
+ execution_dcts.append(exeuction_dct)
+ if execution_dcts:
+ dct["executions"] = execution_dcts
+ return dct
+
+ @classmethod
+ def from_dict(cls, dct: Dict[str, Any]) -> "MultimodalPrompt":
+ api_schema_version = dct.get("apiSchemaVersion", DEFAULT_API_SCHEMA_VERSION)
+ if int(api_schema_version.split(".")[0]) > int(
+ DEFAULT_API_SCHEMA_VERSION.split(".")[0]
+ ):
+ # Disallow loading prompts with higher major schema version
+ raise ValueError(
+ "This prompt was saved with a newer schema version and cannot be loaded."
+ )
+ prompt_message_dct = dct.get("multimodalPrompt", {}).get("promptMessage", None)
+ if not prompt_message_dct:
+ raise ValueError("This prompt is not supported in the SDK.")
+ # Tool function declaration will fail the proto conversion
+ tools = prompt_message_dct.get("tools", None)
+ if tools:
+ tools = [Tool.from_dict(tool) for tool in tools]
+ prompt_message_dct.pop("tools")
+ prompt_message = _dict_to_proto(
+ gapic_prediction_service_types.GenerateContentRequest, prompt_message_dct
+ )
+ if tools:
+ # Convert Tools to gapic to store in the prompt_message
+ prompt_message.tools = _tool_types_to_gapic_tools(tools)
+ executions_dct = dct.get("executions", [])
+ executions = [Execution.from_dict(execution) for execution in executions_dct]
+ if not executions:
+ executions = None
+ multimodal_prompt = cls(
+ prompt_message=prompt_message,
+ api_schema_version=api_schema_version,
+ executions=executions,
+ )
+ return multimodal_prompt
+
+
+@dataclasses.dataclass
+class PromptDatasetMetadata:
+ """PromptDatasetMetadata.
+
+ Attributes:
+ prompt_type: Required. SDK only supports "freeform" or "multimodal_freeform"
+ prompt_api_schema: Required. SDK only supports multimodalPrompt
+ """
+
+ prompt_type: str
+ prompt_api_schema: MultimodalPrompt
+
+ def to_dict(self) -> Dict[str, Any]:
+ dct = {}
+ dct["promptType"] = self.prompt_type
+ dct["promptApiSchema"] = self.prompt_api_schema.to_dict()
+ return dct
+
+ @classmethod
+ def from_dict(cls, dct: Dict[str, Any]) -> "PromptDatasetMetadata":
+ metadata = cls(
+ prompt_type=dct.get("promptType", None),
+ prompt_api_schema=MultimodalPrompt.from_dict(
+ dct.get("promptApiSchema", None)
+ ),
+ )
+ return metadata
+
+
+@dataclasses.dataclass
+class PromptMetadata:
+ """Metadata containing the display name and prompt id of a prompt.
+
+ Returned by the `list_prompts` method.
+
+ Attributes:
+ display_name: The display name of the prompt version.
+ prompt_id: The id of the prompt.
+ """
+
+ display_name: str
+ prompt_id: str
+
+
+@dataclasses.dataclass
+class PromptVersionMetadata:
+ """Metadata containing the display name, prompt id, and version id of a prompt version.
+
+ Returned by the `list_prompt_versions` method.
+
+ Attributes:
+ display_name: The display name of the prompt version.
+ prompt_id: The id of the prompt.
+ version_id: The version id of the prompt.
+ """
+
+ display_name: str
+ prompt_id: str
+ version_id: str
+
+
+def create_version(
+ prompt: Prompt,
+ prompt_id: Optional[str] = None,
+ version_name: Optional[str] = None,
+) -> Prompt:
+ """Creates a Prompt or Prompt Version in the online prompt store
+
+ Args:
+ prompt: The Prompt object to create a new version of.
+ prompt_id: The id of the prompt resource to create a new version under.
+ If it is not provided and the prompt has no prompt resource
+ associated with it, a new prompt resource will be created.
+ version_name: Optional display name of the new prompt version.
+ If not specified, a default name including a timestamp will be used.
+
+ Returns:
+ A new Prompt object with a reference to the newly created or updated
+ prompt resource. This new Prompt object is nearly identical to the
+ original Prompt object, except it has references to the new
+ prompt version.
+ """
+ if not (prompt_id or prompt._dataset):
+ # Case 1: Neither prompt id nor prompt._dataset exists, so we
+ # create a new prompt resource
+ return _create_prompt_resource(prompt=prompt, version_name=version_name)
+
+ # Case 2: No prompt_id override is given, so we update the existing prompt resource
+ if not prompt_id:
+ return _create_prompt_version_resource(prompt=prompt, version_name=version_name)
+
+ # Case 3: Save a new version to the prompt_id provided as an arg
+ # prompt_id is guaranteed to exist due to Cases 1 & 2 being handled before
+
+ # Store the original prompt resource name, if it exists
+ original_prompt_resource = None if not prompt._dataset else prompt._dataset.name
+
+ # Create a gapic dataset object if it doesn't exist
+ if not prompt._dataset:
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+ name = f"projects/{project}/locations/{location}/datasets/{prompt_id}"
+ dataset_metadata = _format_dataset_metadata_dict(prompt=prompt)
+
+ prompt._dataset = gca_dataset.Dataset(
+ name=name,
+ display_name=prompt.prompt_name or "Untitled Prompt",
+ metadata_schema_uri=PROMPT_SCHEMA_URI,
+ metadata=dataset_metadata,
+ model_reference=prompt.model_name,
+ )
+
+ # Override the dataset prompt id with the new prompt id
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+ prompt._dataset.name = (
+ f"projects/{project}/locations/{location}/datasets/{prompt_id}"
+ )
+ result = _create_prompt_version_resource(prompt=prompt, version_name=version_name)
+
+ # Restore the original prompt resource name. This is a no-op if there
+ # was no original prompt resource name.
+ prompt._dataset.name = original_prompt_resource
+ return result
+
+
+def _check_multimodal_contents(prompt_data: PartsType):
+ if isinstance(prompt_data, Image):
+ return "multimodal_freeform"
+ elif isinstance(prompt_data, list):
+ for part in prompt_data:
+ check = _check_multimodal_contents(part)
+ if check == "multimodal_freeform":
+ return "multimodal_freeform"
+ elif isinstance(prompt_data, Part):
+ if "text" not in prompt_data._raw_part:
+ return "multimodal_freeform"
+ return "freeform"
+
+
+def _format_dataset_metadata_dict(prompt: Prompt) -> dict[str, Any]:
+ """Helper function to convert the configs and prompt data stored in the Prompt object to a dataset metadata dict."""
+ model = GenerativeModel(model_name=prompt.model_name)
+ prompt_message = model._prepare_request(
+ contents=prompt.prompt_data or "temporary data",
+ model=prompt.model_name,
+ system_instruction=prompt.system_instruction,
+ tools=prompt.tools,
+ tool_config=prompt.tool_config,
+ safety_settings=prompt.safety_settings,
+ generation_config=prompt.generation_config,
+ )
+ # Remove temporary contents
+ if not prompt.prompt_data:
+ prompt_message.contents = None
+
+ # Stopgap solution to check for multimodal contents to set flag for UI
+ if prompt.prompt_data:
+ prompt_type = _check_multimodal_contents(prompt.prompt_data)
+ else:
+ prompt_type = "freeform"
+
+ return PromptDatasetMetadata(
+ prompt_type=prompt_type,
+ prompt_api_schema=MultimodalPrompt(
+ prompt_message=prompt_message,
+ executions=[Execution(variable_set) for variable_set in prompt.variables],
+ ),
+ ).to_dict()
+
+
+def _create_dataset(prompt: Prompt, parent: str) -> gca_dataset.Dataset:
+ dataset_metadata = _format_dataset_metadata_dict(prompt=prompt)
+ dataset = gca_dataset.Dataset(
+ name=parent,
+ display_name=prompt.prompt_name or "Untitled Prompt",
+ metadata_schema_uri=PROMPT_SCHEMA_URI,
+ metadata=dataset_metadata,
+ model_reference=prompt.model_name,
+ )
+ operation = prompt._dataset_client.create_dataset(
+ parent=parent,
+ dataset=dataset,
+ )
+ dataset = operation.result()
+
+ # Purge labels
+ dataset.labels = None
+ return dataset
+
+
+def _create_dataset_version(
+ prompt: Prompt, parent: str, version_name: Optional[str] = None
+):
+ dataset_version = gca_dataset_version.DatasetVersion(
+ display_name=version_name,
+ )
+
+ dataset_version = prompt._dataset_client.create_dataset_version(
+ parent=parent,
+ dataset_version=dataset_version,
+ )
+ return dataset_version.result()
+
+
+def _update_dataset(
+ prompt: Prompt,
+ dataset: gca_dataset.Dataset,
+) -> gca_dataset_version.DatasetVersion:
+ dataset.metadata = _format_dataset_metadata_dict(prompt=prompt)
+
+ mask_paths = ["modelReference", "metadata"]
+ if dataset.display_name != "Untitled Prompt":
+ mask_paths.append("displayName")
+
+ updated_dataset = prompt._dataset_client.update_dataset(
+ dataset=dataset,
+ update_mask=field_mask.FieldMask(paths=mask_paths),
+ )
+ # Remove etag to avoid error for repeated dataset updates
+ updated_dataset.etag = None
+ return updated_dataset
+
+
+def _create_prompt_resource(
+ prompt: Prompt, version_name: Optional[str] = None
+) -> Prompt:
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+
+ # Step 1: Create prompt dataset API call
+ parent = f"projects/{project}/locations/{location}"
+ dataset = _create_dataset(prompt=prompt, parent=parent)
+
+ # Step 2: Create prompt version API call
+ dataset_version = _create_dataset_version(
+ prompt=prompt,
+ parent=dataset.name,
+ version_name=version_name,
+ )
+
+ # Step 3: Create new Prompt object to return
+ new_prompt = Prompt._clone(prompt=prompt)
+ new_prompt._dataset = dataset
+ new_prompt._version_id = dataset_version.name.split("/")[-1]
+ new_prompt._version_name = dataset_version.display_name
+ prompt_id = new_prompt._dataset.name.split("/")[5]
+
+ _LOGGER.info(
+ f"Created prompt resource with id {prompt_id} with version number {new_prompt._version_id}"
+ )
+ return new_prompt
+
+
+def _create_prompt_version_resource(
+ prompt: Prompt,
+ version_name: Optional[str] = None,
+) -> Prompt:
+ # Step 1: Update prompt API call
+ updated_dataset = _update_dataset(prompt=prompt, dataset=prompt._dataset)
+
+ # Step 2: Create prompt version API call
+ dataset_version = _create_dataset_version(
+ prompt=prompt,
+ parent=updated_dataset.name,
+ version_name=version_name,
+ )
+
+ # Step 3: Create new Prompt object to return
+ new_prompt = Prompt._clone(prompt=prompt)
+ new_prompt._dataset = updated_dataset
+ new_prompt._version_id = dataset_version.name.split("/")[-1]
+ new_prompt._version_name = dataset_version.display_name
+ prompt_id = prompt._dataset.name.split("/")[5]
+
+ _LOGGER.info(
+ f"Updated prompt resource with id {prompt_id} as version number {new_prompt._version_id}"
+ )
+ return new_prompt
+
+
+def _get_prompt_resource(prompt: Prompt, prompt_id: str) -> gca_dataset.Dataset:
+ """Helper function to get a prompt resource from a prompt id."""
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+ name = f"projects/{project}/locations/{location}/datasets/{prompt_id}"
+ dataset = prompt._dataset_client.get_dataset(name=name)
+ return dataset
+
+
+def _get_prompt_resource_from_version(
+ prompt: Prompt, prompt_id: str, version_id: str
+) -> gca_dataset.Dataset:
+ """Helper function to get a prompt resource from a prompt version id."""
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+ name = f"projects/{project}/locations/{location}/datasets/{prompt_id}/datasetVersions/{version_id}"
+
+ # Step 1: Get dataset version object
+ dataset_version = prompt._dataset_client.get_dataset_version(name=name)
+ prompt._version_name = dataset_version.display_name
+
+ # Step 2: Fetch dataset object to get the dataset display name
+ name = f"projects/{project}/locations/{location}/datasets/{prompt_id}"
+ dataset = prompt._dataset_client.get_dataset(name=name)
+
+ # Step 3: Convert to DatasetVersion object to Dataset object
+ dataset = gca_dataset.Dataset(
+ name=name,
+ display_name=dataset.display_name,
+ metadata_schema_uri=PROMPT_SCHEMA_URI,
+ metadata=dataset_version.metadata,
+ model_reference=dataset_version.model_reference,
+ )
+ return dataset
+
+
+def restore_version(prompt_id: str, version_id: str) -> PromptVersionMetadata:
+ """Restores a previous version of the prompt resource and
+ loads that version into the current Prompt object.
+
+ Args:
+ prompt_id: The id of the prompt resource to restore a version of.
+ version_id: The version id of the online prompt resource.
+ """
+
+ # Step 1: Make restore dataset version API call
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+ name = f"projects/{project}/locations/{location}/datasets/{prompt_id}/datasetVersions/{version_id}"
+
+ # Create a temporary Prompt object for a dataset client
+ temp_prompt = Prompt()
+ operation = temp_prompt._dataset_client.restore_dataset_version(name=name)
+ result = operation.result()
+ new_version_id = result.name.split("/")[-1]
+ prompt_id = result.name.split("/")[5]
+
+ _LOGGER.info(
+ f"Restored prompt version {version_id} under prompt id {prompt_id} as version number {new_version_id}"
+ )
+
+ # Step 2: Create PromptVersionMetadata object to return
+ return PromptVersionMetadata(
+ display_name=result.display_name,
+ prompt_id=result.name.split("/")[5],
+ version_id=new_version_id,
+ )
+
+
+def get(prompt_id: str, version_id: Optional[str] = None) -> Prompt:
+ """Creates a Prompt object from an online resource.
+
+ Args:
+ prompt_id: The id of the prompt resource.
+ version_id: Optional version id of the prompt resource.
+ If not specified, the latest version will be used.
+
+ Returns:
+ A prompt loaded from the online resource as a `Prompt` object.
+ """
+ prompt = Prompt()
+ if version_id:
+ dataset = _get_prompt_resource_from_version(
+ prompt=prompt,
+ prompt_id=prompt_id,
+ version_id=version_id,
+ )
+ else:
+ dataset = _get_prompt_resource(prompt=prompt, prompt_id=prompt_id)
+
+ # Remove etag to avoid error for repeated dataset updates
+ dataset.etag = None
+
+ prompt._dataset = dataset
+ prompt._version_id = version_id
+
+ dataset_dict = _proto_to_dict(dataset)
+
+ metadata = PromptDatasetMetadata.from_dict(dataset_dict["metadata"])
+ _populate_fields_from_metadata(prompt=prompt, metadata=metadata)
+ return prompt
+
+
+def _populate_fields_from_metadata(
+ prompt: Prompt, metadata: PromptDatasetMetadata
+) -> None:
+ """Helper function to populate Promptfields from metadata object"""
+ # Populate model_name (Required, raw deserialized type is str)
+ prompt.model_name = metadata.prompt_api_schema.prompt_message.model
+
+ # Populate prompt_data (raw deserialized type is list[Content])
+ contents = metadata.prompt_api_schema.prompt_message.contents
+ if contents:
+ if len(contents) > 1:
+ raise ValueError("Multi-turn prompts are not supported yet.")
+ prompt_data = [Part._from_gapic(part) for part in list(contents[0].parts)]
+
+ # Unwrap single text part into str
+ if len(prompt_data) == 1 and "text" in prompt_data[0]._raw_part:
+ prompt.prompt_data = prompt_data[0].text
+ else:
+ prompt.prompt_data = prompt_data
+
+ # Populate system_instruction (raw deserialized type is single Content)
+ system_instruction = metadata.prompt_api_schema.prompt_message.system_instruction
+ if system_instruction:
+ system_instruction_parts = [
+ Part._from_gapic(part) for part in list(system_instruction.parts)
+ ]
+ # Unwrap single text part into str
+ if len(system_instruction_parts) == 1 and system_instruction_parts[0].text:
+ prompt.system_instruction = system_instruction_parts[0].text
+ else:
+ prompt.system_instruction = system_instruction_parts
+
+ # Populate variables
+ executions = metadata.prompt_api_schema.executions
+ variables = []
+ if executions:
+ for execution in executions:
+ serialized_variable_set = execution.arguments
+ variable_set = {}
+ if serialized_variable_set:
+ for name, value in serialized_variable_set.variables.items():
+ # Parts are dicts, not gapic objects for variables
+ variable_set[name] = [
+ Part.from_dict(part)
+ for part in list(value["partList"]["parts"])
+ ]
+ variables.append(variable_set)
+
+ # Unwrap variable single text part into str
+ for variable_set in variables:
+ for name, value in variable_set.items():
+ if len(value) == 1 and "text" in value[0]._raw_part:
+ variable_set[name] = value[0].text
+ prompt.variables = variables
+
+ # Populate generation_config (raw deserialized type is GenerationConfig)
+ generation_config = metadata.prompt_api_schema.prompt_message.generation_config
+ if generation_config:
+ prompt.generation_config = generation_config
+
+ # Populate safety_settings (raw deserialized type is RepeatedComposite of SafetySetting)
+ safety_settings = metadata.prompt_api_schema.prompt_message.safety_settings
+ if safety_settings:
+ prompt.safety_settings = list(safety_settings)
+
+ # Populate tools (raw deserialized type is RepeatedComposite of Tool)
+ tools = metadata.prompt_api_schema.prompt_message.tools
+ if tools:
+ prompt.tools = list(tools)
+
+ # Populate tool_config (raw deserialized type is ToolConfig)
+ tool_config = metadata.prompt_api_schema.prompt_message.tool_config
+ if tool_config:
+ prompt.tool_config = ToolConfig._from_gapic(tool_config)
+
+
+def list_prompts() -> list[PromptMetadata]:
+ """Lists all prompt resources in the online prompt store associated with the project."""
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+ parent = f"projects/{project}/locations/{location}"
+
+ # Create a temporary Prompt object for a dataset client
+ temp_prompt = Prompt()
+ prompts_pager = temp_prompt._dataset_client.list_datasets(
+ parent=parent,
+ )
+ prompts_list = []
+ for prompt in prompts_pager:
+ prompts_list.append(
+ PromptMetadata(
+ display_name=prompt.display_name,
+ prompt_id=prompt.name.split("/")[5],
+ )
+ )
+ return prompts_list
+
+
+def list_versions(prompt_id: str) -> list[PromptVersionMetadata]:
+ """Returns a list of PromptVersionMetadata objects for the prompt resource.
+
+ Args:
+ prompt_id: The id of the prompt resource to list versions of.
+
+ Returns:
+ A list of PromptVersionMetadata objects for the prompt resource.
+ """
+ # Create a temporary Prompt object for a dataset client
+ temp_prompt = Prompt()
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+ parent = f"projects/{project}/locations/{location}/datasets/{prompt_id}"
+
+ versions_pager = temp_prompt._dataset_client.list_dataset_versions(
+ parent=parent,
+ )
+ version_history = []
+ for version in versions_pager:
+ version_history.append(
+ PromptVersionMetadata(
+ display_name=version.display_name,
+ prompt_id=version.name.split("/")[5],
+ version_id=version.name.split("/")[-1],
+ )
+ )
+ return version_history
+
+
+def delete(prompt_id: str) -> None:
+ """Deletes the online prompt resource associated with the prompt id."""
+
+ # Create a temporary Prompt object for a dataset client
+ temp_prompt = Prompt()
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+ name = f"projects/{project}/locations/{location}/datasets/{prompt_id}"
+
+ operation = temp_prompt._dataset_client.delete_dataset(
+ name=name,
+ )
+ operation.result()
+
+ _LOGGER.info(f"Deleted prompt resource with id {prompt_id}.")
diff --git a/testbed/googleapis__python-aiplatform/vertexai/prompts/_prompts.py b/testbed/googleapis__python-aiplatform/vertexai/prompts/_prompts.py
new file mode 100644
index 0000000000000000000000000000000000000000..03c172d2c10cf51228632c3c4282a840799587f6
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/prompts/_prompts.py
@@ -0,0 +1,687 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from copy import deepcopy
+
+from google.cloud.aiplatform import base
+from google.cloud.aiplatform import initializer as aiplatform_initializer
+from google.cloud.aiplatform.compat.services import dataset_service_client
+from vertexai.generative_models import (
+ Content,
+ Image,
+ Part,
+ GenerativeModel,
+ GenerationConfig,
+ SafetySetting,
+ Tool,
+ ToolConfig,
+)
+from vertexai.generative_models._generative_models import (
+ _to_content,
+ _validate_generate_content_parameters,
+ _reconcile_model_name,
+ _get_resource_name_from_model_name,
+ ContentsType,
+ GenerationConfigType,
+ GenerationResponse,
+ PartsType,
+ SafetySettingsType,
+)
+
+import re
+from typing import (
+ Any,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Union,
+)
+
+_LOGGER = base.Logger(__name__)
+
+DEFAULT_MODEL_NAME = "gemini-1.5-flash-002"
+VARIABLE_NAME_REGEX = r"(\{[^\W0-9]\w*\})"
+
+
+class Prompt:
+ """A prompt which may be a template with variables.
+
+ The `Prompt` class allows users to define a template string with
+ variables represented in curly braces `{variable}`. The variable
+ name must be a valid Python variable name (no spaces, must start with a
+ letter). These placeholders can be replaced with specific values using the
+ `assemble_contents` method, providing flexibility in generating dynamic prompts.
+
+ Usage:
+ Generate content from a single set of variables:
+ ```
+ prompt = Prompt(
+ prompt_data="Hello, {name}! Today is {day}. How are you?",
+ variables=[{"name": "Alice", "day": "Monday"}]
+ generation_config=GenerationConfig(
+ temperature=0.1,
+ top_p=0.95,
+ top_k=20,
+ candidate_count=1,
+ max_output_tokens=100,
+ ),
+ model_name="gemini-1.0-pro-002",
+ safety_settings=[SafetySetting(
+ category=SafetySetting.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold=SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+ method=SafetySetting.HarmBlockMethod.SEVERITY,
+ )],
+ system_instruction="Please answer in a short sentence.",
+ )
+
+ # Generate content using the assembled prompt.
+ prompt.generate_content(
+ contents=prompt.assemble_contents(**prompt.variables)
+ )
+ ```
+ Generate content with multiple sets of variables:
+ ```
+ prompt = Prompt(
+ prompt_data="Hello, {name}! Today is {day}. How are you?",
+ variables=[
+ {"name": "Alice", "day": "Monday"},
+ {"name": "Bob", "day": "Tuesday"},
+ ],
+ generation_config=GenerationConfig(
+ temperature=0.1,
+ top_p=0.95,
+ top_k=20,
+ candidate_count=1,
+ max_output_tokens=100,
+ ),
+ model_name="gemini-1.0-pro-002",
+ safety_settings=[SafetySetting(
+ category=SafetySetting.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold=SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+ method=SafetySetting.HarmBlockMethod.SEVERITY,
+ )],
+ system_instruction="Please answer in a short sentence.",
+ )
+
+ # Generate content using the assembled prompt for each variable set.
+ for i in range(len(prompt.variables)):
+ prompt.generate_content(
+ contents=prompt.assemble_contents(**prompt.variables[i])
+ )
+ ```
+ """
+
+ def __init__(
+ self,
+ prompt_data: Optional[PartsType] = None,
+ *,
+ variables: Optional[List[Dict[str, PartsType]]] = None,
+ prompt_name: Optional[str] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ model_name: Optional[str] = None,
+ safety_settings: Optional[SafetySetting] = None,
+ system_instruction: Optional[PartsType] = None,
+ tools: Optional[List[Tool]] = None,
+ tool_config: Optional[ToolConfig] = None,
+ ):
+ """Initializes the Prompt with a given prompt, and variables.
+
+ Args:
+ prompt: A PartsType prompt which may be a template with variables or a prompt with no variables.
+ variables: A list of dictionaries containing the variable names and values.
+ prompt_name: The display name of the prompt, if stored in an online resource.
+ generation_config: A GenerationConfig object containing parameters for generation.
+ model_name: Model Garden model resource name.
+ Alternatively, a tuned model endpoint resource name can be provided.
+ If no model is provided, the default latest model will be used.
+ safety_settings: A SafetySetting object containing safety settings for generation.
+ system_instruction: A PartsType object representing the system instruction.
+ tools: A list of Tool objects for function calling.
+ tool_config: A ToolConfig object for function calling.
+ """
+ self._prompt_data = None
+ self._variables = None
+ self._model_name = None
+ self._generation_config = None
+ self._safety_settings = None
+ self._system_instruction = None
+ self._tools = None
+ self._tool_config = None
+
+ # Prompt Management
+ self._dataset_client_value = None
+ self._dataset = None
+ self._prompt_name = None
+ self._version_id = None
+ self._version_name = None
+
+ self.prompt_data = prompt_data
+ self.variables = variables if variables else [{}]
+ self.prompt_name = prompt_name
+ self.model_name = model_name
+ self.generation_config = generation_config
+ self.safety_settings = safety_settings
+ self.system_instruction = system_instruction
+ self.tools = tools
+ self.tool_config = tool_config
+
+ @property
+ def prompt_data(self) -> Optional[PartsType]:
+ return self._prompt_data
+
+ @property
+ def variables(self) -> Optional[List[Dict[str, PartsType]]]:
+ return self._variables
+
+ @property
+ def prompt_name(self) -> Optional[str]:
+ return self._prompt_name
+
+ @property
+ def generation_config(self) -> Optional[GenerationConfig]:
+ return self._generation_config
+
+ @property
+ def model_name(self) -> Optional[str]:
+ if self._model_name:
+ return self._model_name
+ else:
+ return Prompt._format_model_resource_name(DEFAULT_MODEL_NAME)
+
+ @property
+ def safety_settings(self) -> Optional[List[SafetySetting]]:
+ return self._safety_settings
+
+ @property
+ def system_instruction(self) -> Optional[PartsType]:
+ return self._system_instruction
+
+ @property
+ def tools(self) -> Optional[List[Tool]]:
+ return self._tools
+
+ @property
+ def tool_config(self) -> Optional[ToolConfig]:
+ return self._tool_config
+
+ @property
+ def prompt_id(self) -> Optional[str]:
+ if self._dataset:
+ return self._dataset.name.split("/")[-1]
+ return None
+
+ @property
+ def version_id(self) -> Optional[str]:
+ return self._version_id
+
+ @property
+ def version_name(self) -> Optional[str]:
+ return self._version_name
+
+ @prompt_data.setter
+ def prompt_data(self, prompt_data: Optional[PartsType]) -> None:
+ """Overwrites the existing saved local prompt_data.
+
+ Args:
+ prompt_data: A PartsType prompt.
+ """
+ if prompt_data is not None:
+ self._validate_parts_type_data(prompt_data)
+ self._prompt_data = prompt_data
+
+ @variables.setter
+ def variables(self, variables: List[Dict[str, PartsType]]) -> None:
+ """Overwrites the existing saved local variables.
+
+ Args:
+ variables: A list of dictionaries containing the variable names and values.
+ """
+ if isinstance(variables, list):
+ for i in range(len(variables)):
+ variables[i] = variables[i].copy()
+ Prompt._format_variable_value_to_parts(variables[i])
+ self._variables = variables
+ else:
+ raise TypeError(
+ f"Variables must be a list of dictionaries, not {type(variables)}"
+ )
+
+ @prompt_name.setter
+ def prompt_name(self, prompt_name: Optional[str]) -> None:
+ """Overwrites the existing saved local prompt_name."""
+ if prompt_name:
+ self._prompt_name = prompt_name
+ else:
+ self._prompt_name = None
+
+ @model_name.setter
+ def model_name(self, model_name: Optional[str]) -> None:
+ """Overwrites the existing saved local model_name."""
+ if model_name:
+ self._model_name = Prompt._format_model_resource_name(model_name)
+ else:
+ self._model_name = None
+
+ def _format_model_resource_name(model_name: Optional[str]) -> str:
+ """Formats the model resource name."""
+ project = aiplatform_initializer.global_config.project
+ location = aiplatform_initializer.global_config.location
+ model_name = _reconcile_model_name(model_name, project, location)
+
+ prediction_resource_name = _get_resource_name_from_model_name(
+ model_name, project, location
+ )
+ return prediction_resource_name
+
+ def _validate_configs(
+ self,
+ generation_config: Optional[GenerationConfig] = None,
+ safety_settings: Optional[SafetySetting] = None,
+ system_instruction: Optional[PartsType] = None,
+ tools: Optional[List[Tool]] = None,
+ tool_config: Optional[ToolConfig] = None,
+ ):
+ generation_config = generation_config or self._generation_config
+ safety_settings = safety_settings or self._safety_settings
+ tools = tools or self._tools
+ tool_config = tool_config or self._tool_config
+ system_instruction = system_instruction or self._system_instruction
+ return _validate_generate_content_parameters(
+ contents="test",
+ generation_config=generation_config,
+ safety_settings=safety_settings,
+ system_instruction=system_instruction,
+ tools=tools,
+ tool_config=tool_config,
+ )
+
+ @generation_config.setter
+ def generation_config(self, generation_config: Optional[GenerationConfig]) -> None:
+ """Overwrites the existing saved local generation_config.
+
+ Args:
+ generation_config: A GenerationConfig object containing parameters for generation.
+ """
+ self._validate_configs(generation_config=generation_config)
+ self._generation_config = generation_config
+
+ @safety_settings.setter
+ def safety_settings(self, safety_settings: Optional[SafetySetting]) -> None:
+ """Overwrites the existing saved local safety_settings.
+
+ Args:
+ safety_settings: A SafetySetting object containing safety settings for generation.
+ """
+ self._validate_configs(safety_settings=safety_settings)
+ self._safety_settings = safety_settings
+
+ @system_instruction.setter
+ def system_instruction(self, system_instruction: Optional[PartsType]) -> None:
+ """Overwrites the existing saved local system_instruction.
+
+ Args:
+ system_instruction: A PartsType object representing the system instruction.
+ """
+ if system_instruction:
+ self._validate_parts_type_data(system_instruction)
+ self._system_instruction = system_instruction
+
+ @tools.setter
+ def tools(self, tools: Optional[List[Tool]]) -> None:
+ """Overwrites the existing saved local tools.
+
+ Args:
+ tools: A list of Tool objects for function calling.
+ """
+ self._validate_configs(tools=tools)
+ self._tools = tools
+
+ @tool_config.setter
+ def tool_config(self, tool_config: Optional[ToolConfig] = None) -> None:
+ """Overwrites the existing saved local tool_config.
+
+ Args:
+ tool_config: A ToolConfig object for function calling.
+ """
+ self._validate_configs(tool_config=tool_config)
+ self._tool_config = tool_config
+
+ def _format_variable_value_to_parts(variables_dict: Dict[str, PartsType]) -> None:
+ """Formats the variables values to be List[Part].
+
+ Args:
+ variables_dict: A single dictionary containing the variable names and values.
+
+ Raises:
+ TypeError: If a variable value is not a PartsType Object.
+ """
+ for key in variables_dict.keys():
+ # Disallow Content as variable value.
+ if isinstance(variables_dict[key], Content):
+ raise TypeError(
+ "Variable values must be a PartsType object, not Content"
+ )
+
+ # Rely on type checks in _to_content for validation.
+ content = Content._from_gapic(_to_content(value=variables_dict[key]))
+ variables_dict[key] = content.parts
+
+ def _validate_parts_type_data(self, data: Any) -> None:
+ """
+ Args:
+ prompt_data: The prompt input to validate
+
+ Raises:
+ TypeError: If prompt_data is not a PartsType Object.
+ """
+ # Disallow Content as prompt_data.
+ if isinstance(data, Content):
+ raise TypeError("Prompt data must be a PartsType object, not Content")
+
+ # Rely on type checks in _to_content.
+ _to_content(value=data)
+
+ def assemble_contents(self, **variables_dict: PartsType) -> List[Content]:
+ """Returns the prompt data, as a List[Content], assembled with variables if applicable.
+ Can be ingested into model.generate_content to make API calls.
+
+ Returns:
+ A List[Content] prompt.
+ Usage:
+ ```
+ prompt = Prompt(
+ prompt_data="Hello, {name}! Today is {day}. How are you?",
+ )
+
+ model.generate_content(
+ contents=prompt.assemble_contents(name="Alice", day="Monday")
+ )
+ ```
+ """
+ # If prompt_data is None, throw an error.
+ if self.prompt_data is None:
+ raise ValueError("prompt_data must not be empty.")
+
+ variables_dict = variables_dict.copy()
+
+ # If there are no variables, return the prompt_data as a Content object.
+ if not variables_dict:
+ return [Content._from_gapic(_to_content(value=self.prompt_data))]
+
+ # Step 1) Convert the variables values to List[Part].
+ Prompt._format_variable_value_to_parts(variables_dict)
+
+ # Step 2) Assemble the prompt.
+ # prompt_data must have been previously validated using _validate_parts_type_data.
+ assembled_prompt = []
+ assembled_variables_cnt = {}
+ if isinstance(self.prompt_data, list):
+ # User inputted a List of Parts as prompt_data.
+ for part in self.prompt_data:
+ assembled_prompt.extend(
+ self._assemble_singular_part(
+ part, variables_dict, assembled_variables_cnt
+ )
+ )
+ else:
+ # User inputted a single str, Image, or Part as prompt_data.
+ assembled_prompt.extend(
+ self._assemble_singular_part(
+ self.prompt_data, variables_dict, assembled_variables_cnt
+ )
+ )
+
+ # Step 3) Simplify adjacent string Parts
+ simplified_assembled_prompt = [assembled_prompt[0]]
+ for i in range(1, len(assembled_prompt)):
+ # If the previous Part and current Part is a string, concatenate them.
+ try:
+ prev_text = simplified_assembled_prompt[-1].text
+ curr_text = assembled_prompt[i].text
+ if isinstance(prev_text, str) and isinstance(curr_text, str):
+ simplified_assembled_prompt[-1] = Part.from_text(
+ prev_text + curr_text
+ )
+ else:
+ simplified_assembled_prompt.append(assembled_prompt[i])
+ except AttributeError:
+ simplified_assembled_prompt.append(assembled_prompt[i])
+ continue
+
+ # Step 4) Validate that all variables were used, if specified.
+ for key in variables_dict:
+ if key not in assembled_variables_cnt:
+ raise ValueError(f"Variable {key} is not present in prompt_data.")
+
+ assemble_cnt_msg = "Assembled prompt replacing: "
+ for key in assembled_variables_cnt:
+ assemble_cnt_msg += (
+ f"{assembled_variables_cnt[key]} instances of variable {key}, "
+ )
+ if assemble_cnt_msg[-2:] == ", ":
+ assemble_cnt_msg = assemble_cnt_msg[:-2]
+ _LOGGER.info(assemble_cnt_msg)
+
+ # Step 5) Wrap List[Part] as a single Content object.
+ return [
+ Content(
+ parts=simplified_assembled_prompt,
+ role="user",
+ )
+ ]
+
+ def _assemble_singular_part(
+ self,
+ prompt_data_part: Union[str, Image, Part],
+ formatted_variables_set: Dict[str, List[Part]],
+ assembled_variables_cnt: Dict[str, int],
+ ) -> List[Part]:
+ """Assemble a str, Image, or Part."""
+ if isinstance(prompt_data_part, Image):
+ # Templating is not supported for Image prompt_data.
+ return [Part.from_image(prompt_data_part)]
+ elif isinstance(prompt_data_part, str):
+ # Assemble a single string
+ return self._assemble_single_str(
+ prompt_data_part, formatted_variables_set, assembled_variables_cnt
+ )
+ elif isinstance(prompt_data_part, Part):
+ # If the Part is a text Part, assemble it.
+ try:
+ text = prompt_data_part.text
+ except AttributeError:
+ return [prompt_data_part]
+ return self._assemble_single_str(
+ text, formatted_variables_set, assembled_variables_cnt
+ )
+
+ def _assemble_single_str(
+ self,
+ prompt_data_str: str,
+ formatted_variables_set: Dict[str, List[Part]],
+ assembled_variables_cnt: Dict[str, int],
+ ) -> List[Part]:
+ """Assemble a single string with 0 or more variables within the string."""
+ # Step 1) Find and isolate variables as their own string.
+ prompt_data_str_split = re.split(VARIABLE_NAME_REGEX, prompt_data_str)
+
+ assembled_data = []
+ # Step 2) Assemble variables with their values, creating a list of Parts.
+ for s in prompt_data_str_split:
+ if not s:
+ continue
+ variable_name = s[1:-1]
+ if (
+ re.match(VARIABLE_NAME_REGEX, s)
+ and variable_name in formatted_variables_set
+ ):
+ assembled_data.extend(formatted_variables_set[variable_name])
+ assembled_variables_cnt[variable_name] = (
+ assembled_variables_cnt.get(variable_name, 0) + 1
+ )
+ else:
+ assembled_data.append(Part.from_text(s))
+
+ return assembled_data
+
+ def generate_content(
+ self,
+ contents: ContentsType,
+ *,
+ generation_config: Optional[GenerationConfigType] = None,
+ safety_settings: Optional[SafetySettingsType] = None,
+ model_name: Optional[str] = None,
+ tools: Optional[List["Tool"]] = None,
+ tool_config: Optional["ToolConfig"] = None,
+ stream: bool = False,
+ system_instruction: Optional[PartsType] = None,
+ ) -> Union["GenerationResponse", Iterable["GenerationResponse"],]:
+ """Generates content using the saved Prompt configs.
+
+ Args:
+ contents: Contents to send to the model.
+ Supports either a list of Content objects (passing a multi-turn conversation)
+ or a value that can be converted to a single Content object (passing a single message).
+ Supports
+ * str, Image, Part,
+ * List[Union[str, Image, Part]],
+ * List[Content]
+ generation_config: Parameters for the generation.
+ model_name: Prediction model resource name.
+ safety_settings: Safety settings as a mapping from HarmCategory to HarmBlockThreshold.
+ tools: A list of tools (functions) that the model can try calling.
+ tool_config: Config shared for all tools provided in the request.
+ stream: Whether to stream the response.
+ system_instruction: System instruction to pass to the model.
+
+ Returns:
+ A single GenerationResponse object if stream == False
+ A stream of GenerationResponse objects if stream == True
+
+ Usage:
+ ```
+ prompt = Prompt(
+ prompt_data="Hello, {name}! Today is {day}. How are you?",
+ variables={"name": "Alice", "day": "Monday"},
+ generation_config=GenerationConfig(temperature=0.1,),
+ system_instruction="Please answer in a short sentence.",
+ model_name="gemini-1.0-pro-002",
+ )
+
+ prompt.generate_content(
+ contents=prompt.assemble_contents(**prompt.variables)
+ )
+ ```
+ """
+ if not (model_name or self._model_name):
+ _LOGGER.info(
+ "No model name specified, falling back to default model: %s",
+ self.model_name,
+ )
+ model_name = model_name or self.model_name
+
+ generation_config = generation_config or self.generation_config
+ safety_settings = safety_settings or self.safety_settings
+ tools = tools or self.tools
+ tool_config = tool_config or self.tool_config
+ system_instruction = system_instruction or self.system_instruction
+
+ if not model_name:
+ raise ValueError(
+ "Model name must be specified to use Prompt.generate_content()"
+ )
+ model_name = Prompt._format_model_resource_name(model_name)
+
+ model = GenerativeModel(
+ model_name=model_name, system_instruction=system_instruction
+ )
+ return model.generate_content(
+ contents=contents,
+ generation_config=generation_config,
+ safety_settings=safety_settings,
+ tools=tools,
+ tool_config=tool_config,
+ stream=stream,
+ )
+
+ @property
+ def _dataset_client(self) -> dataset_service_client.DatasetServiceClient:
+ if not getattr(self, "_dataset_client_value", None):
+ self._dataset_client_value = (
+ aiplatform_initializer.global_config.create_client(
+ client_class=dataset_service_client.DatasetServiceClient,
+ )
+ )
+ return self._dataset_client_value
+
+ @classmethod
+ def _clone(cls, prompt: "Prompt") -> "Prompt":
+ """Returns a copy of the Prompt."""
+ return Prompt(
+ prompt_data=deepcopy(prompt.prompt_data),
+ variables=deepcopy(prompt.variables),
+ generation_config=deepcopy(prompt.generation_config),
+ safety_settings=deepcopy(prompt.safety_settings),
+ tools=deepcopy(prompt.tools),
+ tool_config=deepcopy(prompt.tool_config),
+ system_instruction=deepcopy(prompt.system_instruction),
+ model_name=prompt.model_name,
+ )
+
+ def get_unassembled_prompt_data(self) -> PartsType:
+ """Returns the prompt data, without any variables replaced."""
+ return self.prompt_data
+
+ def __str__(self) -> str:
+ """Returns the prompt data as a string, without any variables replaced."""
+ return str(self.prompt_data or "")
+
+ def __repr__(self) -> str:
+ """Returns a string representation of the unassembled prompt."""
+ result = "Prompt("
+ if self.prompt_data:
+ result += f"prompt_data='{self.prompt_data}', "
+ if self.variables and self.variables[0]:
+ result += f"variables={self.variables}), "
+ if self.system_instruction:
+ result += f"system_instruction={self.system_instruction}), "
+ if self._model_name:
+ # Don't display default model in repr
+ result += f"model_name={self._model_name}), "
+ if self.generation_config:
+ result += f"generation_config={self.generation_config}), "
+ if self.safety_settings:
+ result += f"safety_settings={self.safety_settings}), "
+ if self.tools:
+ result += f"tools={self.tools}), "
+ if self.tool_config:
+ result += f"tool_config={self.tool_config}, "
+ if self.prompt_id:
+ result += f"prompt_id={self.prompt_id}, "
+ if self.version_id:
+ result += f"version_id={self.version_id}, "
+ if self.prompt_name:
+ result += f"prompt_name={self.prompt_name}, "
+ if self.version_name:
+ result += f"version_name={self.version_name}, "
+
+ # Remove trailing ", "
+ if result[-2:] == ", ":
+ result = result[:-2]
+ result += ")"
+ return result
diff --git a/testbed/googleapis__python-aiplatform/vertexai/rag/__init__.py b/testbed/googleapis__python-aiplatform/vertexai/rag/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f460654c6cb6abdb16c654774674080de96cacb
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/rag/__init__.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from vertexai.rag.rag_data import (
+ create_corpus,
+ update_corpus,
+ list_corpora,
+ get_corpus,
+ delete_corpus,
+ upload_file,
+ import_files,
+ import_files_async,
+ get_file,
+ list_files,
+ delete_file,
+)
+
+from vertexai.rag.rag_retrieval import (
+ retrieval_query,
+)
+
+from vertexai.rag.rag_store import (
+ Retrieval,
+ VertexRagStore,
+)
+from vertexai.rag.utils.resources import (
+ ChunkingConfig,
+ EmbeddingModelConfig,
+ Filter,
+ JiraQuery,
+ JiraSource,
+ Pinecone,
+ RagCorpus,
+ RagEmbeddingModelConfig,
+ RagFile,
+ RagManagedDb,
+ RagResource,
+ RagRetrievalConfig,
+ RagVectorDbConfig,
+ SharePointSource,
+ SharePointSources,
+ SlackChannel,
+ SlackChannelsSource,
+ TransformationConfig,
+ VertexPredictionEndpoint,
+ VertexVectorSearch,
+)
+
+
+__all__ = (
+ "ChunkingConfig",
+ "EmbeddingModelConfig",
+ "Filter",
+ "JiraQuery",
+ "JiraSource",
+ "Pinecone",
+ "RagCorpus",
+ "RagEmbeddingModelConfig",
+ "RagFile",
+ "RagManagedDb",
+ "RagResource",
+ "RagRetrievalConfig",
+ "RagVectorDbConfig",
+ "Retrieval",
+ "SharePointSource",
+ "SharePointSources",
+ "SlackChannel",
+ "SlackChannelsSource",
+ "TransformationConfig",
+ "VertexRagStore",
+ "VertexPredictionEndpoint",
+ "VertexVectorSearch",
+ "create_corpus",
+ "delete_corpus",
+ "delete_file",
+ "get_corpus",
+ "get_file",
+ "import_files",
+ "import_files_async",
+ "list_corpora",
+ "list_files",
+ "retrieval_query",
+ "upload_file",
+ "update_corpus",
+)
diff --git a/testbed/googleapis__python-aiplatform/vertexai/rag/rag_data.py b/testbed/googleapis__python-aiplatform/vertexai/rag/rag_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd6f39f4c0b324455d2be197f20913f50eb828ec
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/rag/rag_data.py
@@ -0,0 +1,779 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""RAG data management SDK."""
+
+from typing import Optional, Sequence, Union
+from google import auth
+from google.api_core import operation_async
+from google.auth.transport import requests as google_auth_requests
+from google.cloud import aiplatform
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform import utils
+from google.cloud.aiplatform_v1 import (
+ CreateRagCorpusRequest,
+ DeleteRagCorpusRequest,
+ DeleteRagFileRequest,
+ GetRagCorpusRequest,
+ GetRagFileRequest,
+ ImportRagFilesResponse,
+ ListRagCorporaRequest,
+ ListRagFilesRequest,
+ RagCorpus as GapicRagCorpus,
+ UpdateRagCorpusRequest,
+)
+from google.cloud.aiplatform_v1.services.vertex_rag_data_service.pagers import (
+ ListRagCorporaPager,
+ ListRagFilesPager,
+)
+from vertexai.rag.utils import (
+ _gapic_utils,
+)
+from vertexai.rag.utils.resources import (
+ JiraSource,
+ RagCorpus,
+ RagFile,
+ RagVectorDbConfig,
+ SharePointSources,
+ SlackChannelsSource,
+ TransformationConfig,
+)
+
+
+def create_corpus(
+ display_name: Optional[str] = None,
+ description: Optional[str] = None,
+ backend_config: Optional[
+ Union[
+ RagVectorDbConfig,
+ None,
+ ]
+ ] = None,
+) -> RagCorpus:
+ """Creates a new RagCorpus resource.
+
+ Example usage:
+ ```
+ import vertexai
+ from vertexai import rag
+
+ vertexai.init(project="my-project")
+
+ rag_corpus = rag.create_corpus(
+ display_name="my-corpus-1",
+ )
+ ```
+
+ Args:
+ display_name: If not provided, SDK will create one. The display name of
+ the RagCorpus. The name can be up to 128 characters long and can
+ consist of any UTF-8 characters.
+ description: The description of the RagCorpus.
+ backend_config: The backend config of the RagCorpus, specifying a
+ data store and/or embedding model.
+ Returns:
+ RagCorpus.
+ Raises:
+ RuntimeError: Failed in RagCorpus creation due to exception.
+ RuntimeError: Failed in RagCorpus creation due to operation error.
+ """
+ if not display_name:
+ display_name = "vertex-" + utils.timestamped_unique_name()
+ parent = initializer.global_config.common_location_path(project=None, location=None)
+
+ rag_corpus = GapicRagCorpus(display_name=display_name, description=description)
+ _gapic_utils.set_backend_config(
+ backend_config=backend_config,
+ rag_corpus=rag_corpus,
+ )
+
+ request = CreateRagCorpusRequest(
+ parent=parent,
+ rag_corpus=rag_corpus,
+ )
+ client = _gapic_utils.create_rag_data_service_client()
+
+ try:
+ response = client.create_rag_corpus(request=request)
+ except Exception as e:
+ raise RuntimeError("Failed in RagCorpus creation due to: ", e) from e
+ return _gapic_utils.convert_gapic_to_rag_corpus(response.result(timeout=600))
+
+
+def update_corpus(
+ corpus_name: str,
+ display_name: Optional[str] = None,
+ description: Optional[str] = None,
+ backend_config: Optional[
+ Union[
+ RagVectorDbConfig,
+ None,
+ ]
+ ] = None,
+) -> RagCorpus:
+ """Updates a RagCorpus resource.
+
+ Example usage:
+ ```
+ import vertexai
+ from vertexai import rag
+
+ vertexai.init(project="my-project")
+
+ rag_corpus = rag.update_corpus(
+ corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1",
+ display_name="my-corpus-1",
+ )
+ ```
+
+ Args:
+ corpus_name: The name of the RagCorpus resource to update. Format:
+ ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` or
+ ``{rag_corpus}``.
+ display_name: If not provided, the display name will not be updated. The
+ display name of the RagCorpus. The name can be up to 128 characters long
+ and can consist of any UTF-8 characters.
+ description: The description of the RagCorpus. If not provided, the
+ description will not be updated.
+ backend_config: The backend config of the RagCorpus, specifying a
+ data store and/or embedding model.
+
+ Returns:
+ RagCorpus.
+ Raises:
+ RuntimeError: Failed in RagCorpus update due to exception.
+ RuntimeError: Failed in RagCorpus update due to operation error.
+ """
+ corpus_name = _gapic_utils.get_corpus_name(corpus_name)
+ if display_name and description:
+ rag_corpus = GapicRagCorpus(
+ name=corpus_name, display_name=display_name, description=description
+ )
+ elif display_name:
+ rag_corpus = GapicRagCorpus(name=corpus_name, display_name=display_name)
+ elif description:
+ rag_corpus = GapicRagCorpus(name=corpus_name, description=description)
+ else:
+ rag_corpus = GapicRagCorpus(name=corpus_name)
+
+ _gapic_utils.set_backend_config(
+ backend_config=backend_config,
+ rag_corpus=rag_corpus,
+ )
+
+ request = UpdateRagCorpusRequest(
+ rag_corpus=rag_corpus,
+ )
+ client = _gapic_utils.create_rag_data_service_client()
+
+ try:
+ response = client.update_rag_corpus(request=request)
+ except Exception as e:
+ raise RuntimeError("Failed in RagCorpus update due to: ", e) from e
+ return _gapic_utils.convert_gapic_to_rag_corpus_no_embedding_model_config(
+ response.result(timeout=600)
+ )
+
+
+def get_corpus(name: str) -> RagCorpus:
+ """
+ Get an existing RagCorpus.
+
+ Args:
+ name: An existing RagCorpus resource name. Format:
+ ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}``
+ or ``{rag_corpus}``.
+ Returns:
+ RagCorpus.
+ """
+ corpus_name = _gapic_utils.get_corpus_name(name)
+ request = GetRagCorpusRequest(name=corpus_name)
+ client = _gapic_utils.create_rag_data_service_client()
+ try:
+ response = client.get_rag_corpus(request=request)
+ except Exception as e:
+ raise RuntimeError("Failed in getting the RagCorpus due to: ", e) from e
+ return _gapic_utils.convert_gapic_to_rag_corpus(response)
+
+
+def list_corpora(
+ page_size: Optional[int] = None, page_token: Optional[str] = None
+) -> ListRagCorporaPager:
+ """
+ List all RagCorpora in the same project and location.
+
+ Example usage:
+ ```
+ import vertexai
+ from vertexai import rag
+
+ vertexai.init(project="my-project")
+
+ # List all corpora.
+ rag_corpora = list(rag.list_corpora())
+
+ # Alternatively, return a ListRagCorporaPager.
+ pager_1 = rag.list_corpora(page_size=10)
+ # Then get the next page, use the generated next_page_token from the last pager.
+ pager_2 = rag.list_corpora(page_size=10, page_token=pager_1.next_page_token)
+
+ ```
+ Args:
+ page_size: The standard list page size. Leaving out the page_size
+ causes all of the results to be returned.
+ page_token: The standard list page token.
+
+ Returns:
+ ListRagCorporaPager.
+ """
+ parent = initializer.global_config.common_location_path(project=None, location=None)
+ request = ListRagCorporaRequest(
+ parent=parent,
+ page_size=page_size,
+ page_token=page_token,
+ )
+ client = _gapic_utils.create_rag_data_service_client()
+ try:
+ pager = client.list_rag_corpora(request=request)
+ except Exception as e:
+ raise RuntimeError("Failed in listing the RagCorpora due to: ", e) from e
+
+ return pager
+
+
+def delete_corpus(name: str) -> None:
+ """
+ Delete an existing RagCorpus.
+
+ Args:
+ name: An existing RagCorpus resource name. Format:
+ ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}``
+ or ``{rag_corpus}``.
+ """
+ corpus_name = _gapic_utils.get_corpus_name(name)
+ request = DeleteRagCorpusRequest(name=corpus_name)
+
+ client = _gapic_utils.create_rag_data_service_client()
+ try:
+ client.delete_rag_corpus(request=request)
+ print("Successfully deleted the RagCorpus.")
+ except Exception as e:
+ raise RuntimeError("Failed in RagCorpus deletion due to: ", e) from e
+ return None
+
+
+def upload_file(
+ corpus_name: str,
+ path: Union[str, Sequence[str]],
+ display_name: Optional[str] = None,
+ description: Optional[str] = None,
+ transformation_config: Optional[TransformationConfig] = None,
+) -> RagFile:
+ """
+ Synchronous file upload to an existing RagCorpus.
+
+ Example usage:
+
+ ```
+ import vertexai
+ from vertexai import rag
+
+ vertexai.init(project="my-project")
+
+ // Optional.
+ transformation_config = TransformationConfig(
+ chunking_config=ChunkingConfig(
+ chunk_size=1024,
+ chunk_overlap=200,
+ ),
+ )
+
+ rag_file = rag.upload_file(
+ corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1",
+ display_name="my_file.txt",
+ path="usr/home/my_file.txt",
+ transformation_config=transformation_config,
+ )
+ ```
+
+ Args:
+ corpus_name: The name of the RagCorpus resource into which to upload the file.
+ Format: ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}``
+ or ``{rag_corpus}``.
+ path: A local file path. For example,
+ "usr/home/my_file.txt".
+ display_name: The display name of the data file.
+ description: The description of the RagFile.
+ transformation_config: The config for transforming the RagFile, like chunking.
+
+ Returns:
+ RagFile.
+ Raises:
+ RuntimeError: Failed in RagFile upload.
+ ValueError: RagCorpus is not found.
+ RuntimeError: Failed in indexing the RagFile.
+ """
+ corpus_name = _gapic_utils.get_corpus_name(corpus_name)
+ location = initializer.global_config.location
+ # GAPIC doesn't expose a path (scotty). Use requests API instead
+ if display_name is None:
+ display_name = "vertex-" + utils.timestamped_unique_name()
+ headers = {"X-Goog-Upload-Protocol": "multipart"}
+ if not initializer.global_config.api_endpoint:
+ request_endpoint = "{}-{}".format(
+ location, aiplatform.constants.base.API_BASE_PATH
+ )
+ else:
+ request_endpoint = initializer.global_config.api_endpoint
+ upload_request_uri = "https://{}/upload/v1/{}/ragFiles:upload".format(
+ request_endpoint,
+ corpus_name,
+ )
+ js_rag_file = {"rag_file": {"display_name": display_name}}
+
+ if description:
+ js_rag_file["rag_file"]["description"] = description
+
+ if transformation_config and transformation_config.chunking_config:
+ chunk_size = transformation_config.chunking_config.chunk_size
+ chunk_overlap = transformation_config.chunking_config.chunk_overlap
+ js_rag_file["upload_rag_file_config"] = {
+ "rag_file_transformation_config": {
+ "rag_file_chunking_config": {
+ "fixed_length_chunking": {
+ "chunk_size": chunk_size,
+ "chunk_overlap": chunk_overlap,
+ }
+ }
+ }
+ }
+
+ files = {
+ "metadata": (None, str(js_rag_file)),
+ "file": open(path, "rb"),
+ }
+ credentials, _ = auth.default()
+ authorized_session = google_auth_requests.AuthorizedSession(credentials=credentials)
+ try:
+ response = authorized_session.post(
+ url=upload_request_uri,
+ files=files,
+ headers=headers,
+ )
+ except Exception as e:
+ raise RuntimeError("Failed in uploading the RagFile due to: ", e) from e
+
+ if response.status_code == 404:
+ raise ValueError(
+ "RagCorpus '%s' is not found: %s", corpus_name, upload_request_uri
+ )
+ if response.json().get("error"):
+ raise RuntimeError(
+ "Failed in indexing the RagFile due to: ", response.json().get("error")
+ )
+ return _gapic_utils.convert_json_to_rag_file(response.json())
+
+
+def import_files(
+ corpus_name: str,
+ paths: Optional[Sequence[str]] = None,
+ source: Optional[Union[SlackChannelsSource, JiraSource, SharePointSources]] = None,
+ transformation_config: Optional[TransformationConfig] = None,
+ timeout: int = 600,
+ max_embedding_requests_per_min: int = 1000,
+ partial_failures_sink: Optional[str] = None,
+) -> ImportRagFilesResponse:
+ """
+ Import files to an existing RagCorpus, wait until completion.
+
+ Example usage:
+
+ ```
+ import vertexai
+ from vertexai import rag
+ from google.protobuf import timestamp_pb2
+
+ vertexai.init(project="my-project")
+ # Google Drive example
+ paths = [
+ "https://drive.google.com/file/d/123",
+ "https://drive.google.com/drive/folders/456"
+ ]
+ # Google Cloud Storage example
+ paths = ["gs://my_bucket/my_files_dir", ...]
+
+ transformation_config = TransformationConfig(
+ chunking_config=ChunkingConfig(
+ chunk_size=1024,
+ chunk_overlap=200,
+ ),
+ )
+
+ response = rag.import_files(
+ corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1",
+ paths=paths,
+ transformation_config=transformation_config,
+ )
+
+ # Slack example
+ start_time = timestamp_pb2.Timestamp()
+ start_time.FromJsonString('2020-12-31T21:33:44Z')
+ end_time = timestamp_pb2.Timestamp()
+ end_time.GetCurrentTime()
+ source = rag.SlackChannelsSource(
+ channels = [
+ SlackChannel("channel1", "api_key1"),
+ SlackChannel("channel2", "api_key2", start_time, end_time)
+ ],
+ )
+ # Jira Example
+ jira_query = rag.JiraQuery(
+ email="xxx@yyy.com",
+ jira_projects=["project1", "project2"],
+ custom_queries=["query1", "query2"],
+ api_key="api_key",
+ server_uri="server.atlassian.net"
+ )
+ source = rag.JiraSource(
+ queries=[jira_query],
+ )
+
+ response = rag.import_files(
+ corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1",
+ source=source,
+ transformation_config=transformation_config,
+ )
+
+ # SharePoint Example.
+ sharepoint_query = rag.SharePointSource(
+ sharepoint_folder_path="https://my-sharepoint-site.com/my-folder",
+ sharepoint_site_name="my-sharepoint-site.com",
+ client_id="my-client-id",
+ client_secret="my-client-secret",
+ tenant_id="my-tenant-id",
+ drive_id="my-drive-id",
+ )
+ source = rag.SharePointSources(
+ share_point_sources=[sharepoint_query],
+ )
+
+ # Return the number of imported RagFiles after completion.
+ print(response.imported_rag_files_count)
+
+ ```
+ Args:
+ corpus_name: The name of the RagCorpus resource into which to import files.
+ Format: ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}``
+ or ``{rag_corpus}``.
+ paths: A list of uris. Eligible uris will be Google Cloud Storage
+ directory ("gs://my-bucket/my_dir") or a Google Drive url for file
+ (https://drive.google.com/file/... or folder
+ "https://drive.google.com/corp/drive/folders/...").
+ source: The source of the Slack or Jira import.
+ Must be either a SlackChannelsSource or JiraSource.
+ transformation_config: The config for transforming the imported
+ RagFiles.
+ max_embedding_requests_per_min:
+ Optional. The max number of queries per
+ minute that this job is allowed to make to the
+ embedding model specified on the corpus. This
+ value is specific to this job and not shared
+ across other import jobs. Consult the Quotas
+ page on the project to set an appropriate value
+ here. If unspecified, a default value of 1,000
+ QPM would be used.
+ timeout: Default is 600 seconds.
+ partial_failures_sink: Either a GCS path to store partial failures or a
+ BigQuery table to store partial failures. The format is
+ "gs://my-bucket/my/object.ndjson" for GCS or
+ "bq://my-project.my-dataset.my-table" for BigQuery. An existing GCS
+ object cannot be used. However, the BigQuery table may or may not
+ exist - if it does not exist, it will be created. If it does exist,
+ the schema will be checked and the partial failures will be appended
+ to the table.
+ Returns:
+ ImportRagFilesResponse.
+ """
+ if source is not None and paths is not None:
+ raise ValueError("Only one of source or paths must be passed in at a time")
+ if source is None and paths is None:
+ raise ValueError("One of source or paths must be passed in")
+ corpus_name = _gapic_utils.get_corpus_name(corpus_name)
+ request = _gapic_utils.prepare_import_files_request(
+ corpus_name=corpus_name,
+ paths=paths,
+ source=source,
+ transformation_config=transformation_config,
+ max_embedding_requests_per_min=max_embedding_requests_per_min,
+ partial_failures_sink=partial_failures_sink,
+ )
+ client = _gapic_utils.create_rag_data_service_client()
+ try:
+ response = client.import_rag_files(request=request)
+ except Exception as e:
+ raise RuntimeError("Failed in importing the RagFiles due to: ", e) from e
+
+ return response.result(timeout=timeout)
+
+
+async def import_files_async(
+ corpus_name: str,
+ paths: Optional[Sequence[str]] = None,
+ source: Optional[Union[SlackChannelsSource, JiraSource, SharePointSources]] = None,
+ transformation_config: Optional[TransformationConfig] = None,
+ max_embedding_requests_per_min: int = 1000,
+ partial_failures_sink: Optional[str] = None,
+) -> operation_async.AsyncOperation:
+ """
+ Import files to an existing RagCorpus asynchronously.
+
+ Example usage:
+
+ ```
+ import vertexai
+ from vertexai import rag
+ from google.protobuf import timestamp_pb2
+
+ vertexai.init(project="my-project")
+
+ # Google Drive example
+ paths = [
+ "https://drive.google.com/file/d/123",
+ "https://drive.google.com/drive/folders/456"
+ ]
+ # Google Cloud Storage example
+ paths = ["gs://my_bucket/my_files_dir", ...]
+
+ transformation_config = TransformationConfig(
+ chunking_config=ChunkingConfig(
+ chunk_size=1024,
+ chunk_overlap=200,
+ ),
+ )
+
+ response = await rag.import_files_async(
+ corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1",
+ paths=paths,
+ transformation_config=transformation_config,
+ )
+
+ # Slack example
+ start_time = timestamp_pb2.Timestamp()
+ start_time.FromJsonString('2020-12-31T21:33:44Z')
+ end_time = timestamp_pb2.Timestamp()
+ end_time.GetCurrentTime()
+ source = rag.SlackChannelsSource(
+ channels = [
+ SlackChannel("channel1", "api_key1"),
+ SlackChannel("channel2", "api_key2", start_time, end_time)
+ ],
+ )
+ # Jira Example
+ jira_query = rag.JiraQuery(
+ email="xxx@yyy.com",
+ jira_projects=["project1", "project2"],
+ custom_queries=["query1", "query2"],
+ api_key="api_key",
+ server_uri="server.atlassian.net"
+ )
+ source = rag.JiraSource(
+ queries=[jira_query],
+ )
+
+ response = await rag.import_files_async(
+ corpus_name="projects/my-project/locations/us-central1/ragCorpora/my-corpus-1",
+ source=source,
+ transformation_config=transformation_config,
+ )
+
+ # SharePoint Example.
+ sharepoint_query = rag.SharePointSource(
+ sharepoint_folder_path="https://my-sharepoint-site.com/my-folder",
+ sharepoint_site_name="my-sharepoint-site.com",
+ client_id="my-client-id",
+ client_secret="my-client-secret",
+ tenant_id="my-tenant-id",
+ drive_id="my-drive-id",
+ )
+ source = rag.SharePointSources(
+ share_point_sources=[sharepoint_query],
+ )
+
+ # Get the result.
+ await response.result()
+
+ ```
+ Args:
+ corpus_name: The name of the RagCorpus resource into which to import files.
+ Format: ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}``
+ or ``{rag_corpus}``.
+ paths: A list of uris. Eligible uris will be Google Cloud Storage
+ directory ("gs://my-bucket/my_dir") or a Google Drive url for file
+ (https://drive.google.com/file/... or folder
+ "https://drive.google.com/corp/drive/folders/...").
+ source: The source of the Slack or Jira import.
+ Must be either a SlackChannelsSource or JiraSource.
+ transformation_config: The config for transforming the imported
+ RagFiles.
+ max_embedding_requests_per_min:
+ Optional. The max number of queries per
+ minute that this job is allowed to make to the
+ embedding model specified on the corpus. This
+ value is specific to this job and not shared
+ across other import jobs. Consult the Quotas
+ page on the project to set an appropriate value
+ here. If unspecified, a default value of 1,000
+ QPM would be used.
+ partial_failures_sink: Either a GCS path to store partial failures or a
+ BigQuery table to store partial failures. The format is
+ "gs://my-bucket/my/object.ndjson" for GCS or
+ "bq://my-project.my-dataset.my-table" for BigQuery. An existing GCS
+ object cannot be used. However, the BigQuery table may or may not
+ exist - if it does not exist, it will be created. If it does exist,
+ the schema will be checked and the partial failures will be appended
+ to the table.
+ Returns:
+ operation_async.AsyncOperation.
+ """
+ if source is not None and paths is not None:
+ raise ValueError("Only one of source or paths must be passed in at a time")
+ if source is None and paths is None:
+ raise ValueError("One of source or paths must be passed in")
+ corpus_name = _gapic_utils.get_corpus_name(corpus_name)
+ request = _gapic_utils.prepare_import_files_request(
+ corpus_name=corpus_name,
+ paths=paths,
+ source=source,
+ transformation_config=transformation_config,
+ max_embedding_requests_per_min=max_embedding_requests_per_min,
+ partial_failures_sink=partial_failures_sink,
+ )
+ async_client = _gapic_utils.create_rag_data_service_async_client()
+ try:
+ response = await async_client.import_rag_files(request=request)
+ except Exception as e:
+ raise RuntimeError("Failed in importing the RagFiles due to: ", e) from e
+ return response
+
+
+def get_file(name: str, corpus_name: Optional[str] = None) -> RagFile:
+ """
+ Get an existing RagFile.
+
+ Args:
+ name: Either a full RagFile resource name must be provided, or a RagCorpus
+ name and a RagFile name must be provided. Format:
+ ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}``
+ or ``{rag_file}``.
+ corpus_name: If `name` is not a full resource name, an existing RagCorpus
+ name must be provided. Format:
+ ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}``
+ or ``{rag_corpus}``.
+ Returns:
+ RagFile.
+ """
+ corpus_name = _gapic_utils.get_corpus_name(corpus_name)
+ name = _gapic_utils.get_file_name(name, corpus_name)
+ request = GetRagFileRequest(name=name)
+ client = _gapic_utils.create_rag_data_service_client()
+ try:
+ response = client.get_rag_file(request=request)
+ except Exception as e:
+ raise RuntimeError("Failed in getting the RagFile due to: ", e) from e
+ return _gapic_utils.convert_gapic_to_rag_file(response)
+
+
+def list_files(
+ corpus_name: str, page_size: Optional[int] = None, page_token: Optional[str] = None
+) -> ListRagFilesPager:
+ """
+ List all RagFiles in an existing RagCorpus.
+
+ Example usage:
+ ```
+ import vertexai
+
+ vertexai.init(project="my-project")
+ # List all corpora.
+ rag_corpora = list(rag.list_corpora())
+
+ # List all files of the first corpus.
+ rag_files = list(rag.list_files(corpus_name=rag_corpora[0].name))
+
+ # Alternatively, return a ListRagFilesPager.
+ pager_1 = rag.list_files(
+ corpus_name=rag_corpora[0].name,
+ page_size=10
+ )
+ # Then get the next page, use the generated next_page_token from the last pager.
+ pager_2 = rag.list_files(
+ corpus_name=rag_corpora[0].name,
+ page_size=10,
+ page_token=pager_1.next_page_token
+ )
+
+ ```
+
+ Args:
+ corpus_name: An existing RagCorpus name. Format:
+ ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}``
+ or ``{rag_corpus}``.
+ page_size: The standard list page size. Leaving out the page_size
+ causes all of the results to be returned.
+ page_token: The standard list page token.
+ Returns:
+ ListRagFilesPager.
+ """
+ corpus_name = _gapic_utils.get_corpus_name(corpus_name)
+ request = ListRagFilesRequest(
+ parent=corpus_name,
+ page_size=page_size,
+ page_token=page_token,
+ )
+ client = _gapic_utils.create_rag_data_service_client()
+ try:
+ pager = client.list_rag_files(request=request)
+ except Exception as e:
+ raise RuntimeError("Failed in listing the RagFiles due to: ", e) from e
+
+ return pager
+
+
+def delete_file(name: str, corpus_name: Optional[str] = None) -> None:
+ """
+ Delete RagFile from an existing RagCorpus.
+
+ Args:
+ name: Either a full RagFile resource name must be provided, or a RagCorpus
+ name and a RagFile name must be provided. Format:
+ ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}``
+ or ``{rag_file}``.
+ corpus_name: If `name` is not a full resource name, an existing RagCorpus
+ name must be provided. Format:
+ ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}``
+ or ``{rag_corpus}``.
+ """
+ corpus_name = _gapic_utils.get_corpus_name(corpus_name)
+ name = _gapic_utils.get_file_name(name, corpus_name)
+ request = DeleteRagFileRequest(name=name)
+
+ client = _gapic_utils.create_rag_data_service_client()
+ try:
+ client.delete_rag_file(request=request)
+ print("Successfully deleted the RagFile.")
+ except Exception as e:
+ raise RuntimeError("Failed in RagFile deletion due to: ", e) from e
+ return None
diff --git a/testbed/googleapis__python-aiplatform/vertexai/rag/rag_retrieval.py b/testbed/googleapis__python-aiplatform/vertexai/rag/rag_retrieval.py
new file mode 100644
index 0000000000000000000000000000000000000000..35869cf45227206c627d8aad07e9fe6c4b74a482
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/rag/rag_retrieval.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Retrieval query to get relevant contexts."""
+
+import re
+from typing import List, Optional
+
+from google.cloud import aiplatform_v1
+from google.cloud.aiplatform import initializer
+from vertexai.rag.utils import _gapic_utils
+from vertexai.rag.utils import resources
+
+
+def retrieval_query(
+ text: str,
+ rag_resources: Optional[List[resources.RagResource]] = None,
+ rag_retrieval_config: Optional[resources.RagRetrievalConfig] = None,
+) -> aiplatform_v1.RetrieveContextsResponse:
+ """Retrieve top k relevant docs/chunks.
+
+ Example usage:
+ ```
+ import vertexai
+
+ vertexai.init(project="my-project")
+
+ config = vertexai.rag.rag_retrieval_config(
+ top_k=2,
+ filter=vertexai.rag.rag_retrieval_config.filter(
+ vector_distance_threshold=0.5
+ ),
+ )
+
+ results = vertexai.rag.retrieval_query(
+ text="Why is the sky blue?",
+ rag_resources=[vertexai.rag.RagResource(
+ rag_corpus="projects/my-project/locations/us-central1/ragCorpora/rag-corpus-1",
+ rag_file_ids=["rag-file-1", "rag-file-2", ...],
+ )],
+ rag_retrieval_config=config,
+ )
+ ```
+
+ Args:
+ text: The query in text format to get relevant contexts.
+ rag_resources: A list of RagResource. It can be used to specify corpus
+ only or ragfiles. Currently only support one corpus or multiple files
+ from one corpus. In the future we may open up multiple corpora support.
+ rag_retrieval_config: Optional. The config containing the retrieval
+ parameters, including similarity_top_k and vector_distance_threshold
+
+ Returns:
+ RetrieveContextsResonse.
+ """
+ parent = initializer.global_config.common_location_path()
+
+ client = _gapic_utils.create_rag_service_client()
+
+ if rag_resources:
+ if len(rag_resources) > 1:
+ raise ValueError("Currently only support 1 RagResource.")
+ name = rag_resources[0].rag_corpus
+ else:
+ raise ValueError("rag_resources must be specified.")
+
+ data_client = _gapic_utils.create_rag_data_service_client()
+ if data_client.parse_rag_corpus_path(name):
+ rag_corpus_name = name
+ elif re.match("^{}$".format(_gapic_utils._VALID_RESOURCE_NAME_REGEX), name):
+ rag_corpus_name = parent + "/ragCorpora/" + name
+ else:
+ raise ValueError(
+ f"Invalid RagCorpus name: {name}. Proper format should be:"
+ " projects/{project}/locations/{location}/ragCorpora/{rag_corpus_id}"
+ )
+
+ if rag_resources:
+ gapic_rag_resource = (
+ aiplatform_v1.RetrieveContextsRequest.VertexRagStore.RagResource(
+ rag_corpus=rag_corpus_name,
+ rag_file_ids=rag_resources[0].rag_file_ids,
+ )
+ )
+ vertex_rag_store = aiplatform_v1.RetrieveContextsRequest.VertexRagStore(
+ rag_resources=[gapic_rag_resource],
+ )
+ else:
+ vertex_rag_store = aiplatform_v1.RetrieveContextsRequest.VertexRagStore(
+ rag_corpora=[rag_corpus_name],
+ )
+
+ # If rag_retrieval_config is not specified, set it to default values.
+ if not rag_retrieval_config:
+ api_retrival_config = aiplatform_v1.RagRetrievalConfig()
+ else:
+ # If rag_retrieval_config is specified, check for missing parameters.
+ api_retrival_config = aiplatform_v1.RagRetrievalConfig()
+ api_retrival_config.top_k = rag_retrieval_config.top_k
+ # Set vector_distance_threshold to config value if specified
+ if rag_retrieval_config.filter:
+ # Check if both vector_distance_threshold and vector_similarity_threshold
+ # are specified.
+ if (
+ rag_retrieval_config.filter
+ and rag_retrieval_config.filter.vector_distance_threshold
+ and rag_retrieval_config.filter.vector_similarity_threshold
+ ):
+ raise ValueError(
+ "Only one of vector_distance_threshold or"
+ " vector_similarity_threshold can be specified at a time"
+ " in rag_retrieval_config."
+ )
+ api_retrival_config.filter.vector_distance_threshold = (
+ rag_retrieval_config.filter.vector_distance_threshold
+ )
+ api_retrival_config.filter.vector_similarity_threshold = (
+ rag_retrieval_config.filter.vector_similarity_threshold
+ )
+
+ query = aiplatform_v1.RagQuery(
+ text=text,
+ rag_retrieval_config=api_retrival_config,
+ )
+ request = aiplatform_v1.RetrieveContextsRequest(
+ vertex_rag_store=vertex_rag_store,
+ parent=parent,
+ query=query,
+ )
+ try:
+ response = client.retrieve_contexts(request=request)
+ except Exception as e:
+ raise RuntimeError("Failed in retrieving contexts due to: ", e) from e
+
+ return response
diff --git a/testbed/googleapis__python-aiplatform/vertexai/rag/rag_store.py b/testbed/googleapis__python-aiplatform/vertexai/rag/rag_store.py
new file mode 100644
index 0000000000000000000000000000000000000000..57cef287b39b6c816131519aa009725a12a0cb91
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/rag/rag_store.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""RAG retrieval tool for content generation."""
+
+import re
+from typing import List, Optional, Union
+
+from google.cloud import aiplatform_v1beta1
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform_v1beta1.types import tool as gapic_tool_types
+from vertexai import generative_models
+from vertexai.rag.utils import _gapic_utils
+from vertexai.rag.utils import resources
+
+
+class Retrieval(generative_models.grounding.Retrieval):
+ """Defines a retrieval tool that a model can call to access external knowledge."""
+
+ def __init__(
+ self,
+ source: Union["VertexRagStore"],
+ disable_attribution: Optional[bool] = False,
+ ):
+ self._raw_retrieval = gapic_tool_types.Retrieval(
+ vertex_rag_store=source._raw_vertex_rag_store,
+ disable_attribution=disable_attribution,
+ )
+
+
+class VertexRagStore:
+ """Retrieve from Vertex RAG Store."""
+
+ def __init__(
+ self,
+ rag_resources: Optional[List[resources.RagResource]] = None,
+ rag_retrieval_config: Optional[resources.RagRetrievalConfig] = None,
+ ):
+ """Initializes a Vertex RAG store tool.
+
+ Example usage:
+ ```
+ import vertexai
+
+ vertexai.init(project="my-project")
+
+ config = vertexai.rag.RagRetrievalConfig(
+ top_k=2,
+ filter=vertexai.rag.RagRetrievalConfig.Filter(
+ vector_distance_threshold=0.5
+ ),
+ )
+
+ tool = Tool.from_retrieval(
+ retrieval=vertexai.rag.Retrieval(
+ source=vertexai.rag.VertexRagStore(
+ rag_corpora=["projects/my-project/locations/us-central1/ragCorpora/rag-corpus-1"],
+ rag_retrieval_config=config,
+ ),
+ )
+ )
+ ```
+
+ Args:
+ rag_resources: List of RagResource to retrieve from. It can be used
+ to specify corpus only or ragfiles. Currently only support one
+ corpus or multiple files from one corpus. In the future we
+ may open up multiple corpora support.
+ rag_retrieval_config: Optional. The config containing the retrieval
+ parameters, including similarity_top_k and vector_distance_threshold.
+ """
+
+ if rag_resources:
+ if len(rag_resources) > 1:
+ raise ValueError("Currently only support 1 RagResource.")
+ name = rag_resources[0].rag_corpus
+ else:
+ raise ValueError("rag_resources must be specified.")
+
+ data_client = _gapic_utils.create_rag_data_service_client()
+ if data_client.parse_rag_corpus_path(name):
+ rag_corpus_name = name
+ elif re.match("^{}$".format(_gapic_utils._VALID_RESOURCE_NAME_REGEX), name):
+ parent = initializer.global_config.common_location_path()
+ rag_corpus_name = parent + "/ragCorpora/" + name
+ else:
+ raise ValueError(
+ f"Invalid RagCorpus name: {name}. Proper format should be:"
+ " projects/{project}/locations/{location}/ragCorpora/{rag_corpus_id}"
+ )
+
+ # If rag_retrieval_config is not specified, set it to default values.
+ api_retrieval_config = aiplatform_v1beta1.RagRetrievalConfig()
+ # If rag_retrieval_config is specified, populate the default config.
+ if rag_retrieval_config:
+ api_retrieval_config.top_k = rag_retrieval_config.top_k
+ # Set vector_distance_threshold to config value if specified
+ if rag_retrieval_config.filter:
+ # Check if both vector_distance_threshold and
+ # vector_similarity_threshold are specified.
+ if (
+ rag_retrieval_config.filter
+ and rag_retrieval_config.filter.vector_distance_threshold
+ and rag_retrieval_config.filter.vector_similarity_threshold
+ ):
+ raise ValueError(
+ "Only one of vector_distance_threshold or"
+ " vector_similarity_threshold can be specified at a time"
+ " in rag_retrieval_config."
+ )
+ api_retrieval_config.filter.vector_distance_threshold = (
+ rag_retrieval_config.filter.vector_distance_threshold
+ )
+ api_retrieval_config.filter.vector_similarity_threshold = (
+ rag_retrieval_config.filter.vector_similarity_threshold
+ )
+
+ gapic_rag_resource = gapic_tool_types.VertexRagStore.RagResource(
+ rag_corpus=rag_corpus_name,
+ rag_file_ids=rag_resources[0].rag_file_ids,
+ )
+ self._raw_vertex_rag_store = gapic_tool_types.VertexRagStore(
+ rag_resources=[gapic_rag_resource],
+ rag_retrieval_config=api_retrieval_config,
+ )
diff --git a/testbed/googleapis__python-aiplatform/vertexai/rag/utils/_gapic_utils.py b/testbed/googleapis__python-aiplatform/vertexai/rag/utils/_gapic_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..31636491b337742bcca4d92189a919f6771e06d7
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/rag/utils/_gapic_utils.py
@@ -0,0 +1,571 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import re
+from typing import Any, Dict, Optional, Sequence, Union
+from google.cloud.aiplatform_v1.types import api_auth
+from google.cloud.aiplatform_v1 import (
+ RagEmbeddingModelConfig as GapicRagEmbeddingModelConfig,
+ GoogleDriveSource,
+ ImportRagFilesConfig,
+ ImportRagFilesRequest,
+ RagFileChunkingConfig,
+ RagFileTransformationConfig,
+ RagCorpus as GapicRagCorpus,
+ RagFile as GapicRagFile,
+ SharePointSources as GapicSharePointSources,
+ SlackSource as GapicSlackSource,
+ JiraSource as GapicJiraSource,
+ RagVectorDbConfig as GapicRagVectorDbConfig,
+)
+from google.cloud.aiplatform import initializer
+from google.cloud.aiplatform.utils import (
+ VertexRagDataAsyncClientWithOverride,
+ VertexRagDataClientWithOverride,
+ VertexRagClientWithOverride,
+)
+from vertexai.rag.utils.resources import (
+ Pinecone,
+ RagCorpus,
+ RagEmbeddingModelConfig,
+ RagFile,
+ RagManagedDb,
+ RagVectorDbConfig,
+ SharePointSources,
+ SlackChannelsSource,
+ TransformationConfig,
+ JiraSource,
+ VertexVectorSearch,
+ VertexPredictionEndpoint,
+)
+
+
+_VALID_RESOURCE_NAME_REGEX = "[a-z][a-zA-Z0-9._-]{0,127}"
+
+
+def create_rag_data_service_client():
+ return initializer.global_config.create_client(
+ client_class=VertexRagDataClientWithOverride,
+ ).select_version("v1")
+
+
+def create_rag_data_service_async_client():
+ return initializer.global_config.create_client(
+ client_class=VertexRagDataAsyncClientWithOverride,
+ ).select_version("v1")
+
+
+def create_rag_service_client():
+ return initializer.global_config.create_client(
+ client_class=VertexRagClientWithOverride,
+ ).select_version("v1")
+
+
+def convert_gapic_to_rag_embedding_model_config(
+ gapic_embedding_model_config: GapicRagEmbeddingModelConfig,
+) -> RagEmbeddingModelConfig:
+ """Convert GapicRagEmbeddingModelConfig to RagEmbeddingModelConfig."""
+ embedding_model_config = RagEmbeddingModelConfig()
+ path = gapic_embedding_model_config.vertex_prediction_endpoint.endpoint
+ publisher_model = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/publishers/google/models/(?P.+?)$",
+ path,
+ )
+ endpoint = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$",
+ path,
+ )
+ if publisher_model:
+ embedding_model_config.vertex_prediction_endpoint = VertexPredictionEndpoint(
+ publisher_model=path
+ )
+ if endpoint:
+ embedding_model_config.vertex_prediction_endpoint = VertexPredictionEndpoint(
+ endpoint=path,
+ model=gapic_embedding_model_config.vertex_prediction_endpoint.model,
+ model_version_id=gapic_embedding_model_config.vertex_prediction_endpoint.model_version_id,
+ )
+ return embedding_model_config
+
+
+def _check_weaviate(gapic_vector_db: GapicRagVectorDbConfig) -> bool:
+ try:
+ return gapic_vector_db.__contains__("weaviate")
+ except AttributeError:
+ return gapic_vector_db.weaviate.ByteSize() > 0
+
+
+def _check_rag_managed_db(gapic_vector_db: GapicRagVectorDbConfig) -> bool:
+ try:
+ return gapic_vector_db.__contains__("rag_managed_db")
+ except AttributeError:
+ return gapic_vector_db.rag_managed_db.ByteSize() > 0
+
+
+def _check_vertex_feature_store(gapic_vector_db: GapicRagVectorDbConfig) -> bool:
+ try:
+ return gapic_vector_db.__contains__("vertex_feature_store")
+ except AttributeError:
+ return gapic_vector_db.vertex_feature_store.ByteSize() > 0
+
+
+def _check_pinecone(gapic_vector_db: GapicRagVectorDbConfig) -> bool:
+ try:
+ return gapic_vector_db.__contains__("pinecone")
+ except AttributeError:
+ return gapic_vector_db.pinecone.ByteSize() > 0
+
+
+def _check_vertex_vector_search(gapic_vector_db: GapicRagVectorDbConfig) -> bool:
+ try:
+ return gapic_vector_db.__contains__("vertex_vector_search")
+ except AttributeError:
+ return gapic_vector_db.vertex_vector_search.ByteSize() > 0
+
+
+def _check_rag_embedding_model_config(
+ gapic_vector_db: GapicRagVectorDbConfig,
+) -> bool:
+ try:
+ return gapic_vector_db.__contains__("rag_embedding_model_config")
+ except AttributeError:
+ return gapic_vector_db.rag_embedding_model_config.ByteSize() > 0
+
+
+def convert_gapic_to_backend_config(
+ gapic_vector_db: GapicRagVectorDbConfig,
+) -> RagVectorDbConfig:
+ """Convert Gapic RagVectorDbConfig to VertexVectorSearch, Pinecone, or RagManagedDb."""
+ vector_config = RagVectorDbConfig()
+ if _check_pinecone(gapic_vector_db):
+ vector_config.vector_db = Pinecone(
+ index_name=gapic_vector_db.pinecone.index_name,
+ api_key=gapic_vector_db.api_auth.api_key_config.api_key_secret_version,
+ )
+ elif _check_vertex_vector_search(gapic_vector_db):
+ vector_config.vector_db = VertexVectorSearch(
+ index_endpoint=gapic_vector_db.vertex_vector_search.index_endpoint,
+ index=gapic_vector_db.vertex_vector_search.index,
+ )
+ elif _check_rag_managed_db(gapic_vector_db):
+ vector_config.vector_db = RagManagedDb()
+ if _check_rag_embedding_model_config(gapic_vector_db):
+ vector_config.rag_embedding_model_config = (
+ convert_gapic_to_rag_embedding_model_config(
+ gapic_vector_db.rag_embedding_model_config
+ )
+ )
+ return vector_config
+
+
+def convert_gapic_to_rag_corpus(gapic_rag_corpus: GapicRagCorpus) -> RagCorpus:
+ """Convert GapicRagCorpus to RagCorpus."""
+ rag_corpus = RagCorpus(
+ name=gapic_rag_corpus.name,
+ display_name=gapic_rag_corpus.display_name,
+ description=gapic_rag_corpus.description,
+ backend_config=convert_gapic_to_backend_config(
+ gapic_rag_corpus.vector_db_config
+ ),
+ )
+ return rag_corpus
+
+
+def convert_gapic_to_rag_corpus_no_embedding_model_config(
+ gapic_rag_corpus: GapicRagCorpus,
+) -> RagCorpus:
+ """Convert GapicRagCorpus without embedding model config (for UpdateRagCorpus) to RagCorpus."""
+ rag_vector_db_config_no_embedding_model_config = gapic_rag_corpus.vector_db_config
+ rag_vector_db_config_no_embedding_model_config.rag_embedding_model_config = None
+ rag_corpus = RagCorpus(
+ name=gapic_rag_corpus.name,
+ display_name=gapic_rag_corpus.display_name,
+ description=gapic_rag_corpus.description,
+ backend_config=convert_gapic_to_backend_config(
+ rag_vector_db_config_no_embedding_model_config
+ ),
+ )
+ return rag_corpus
+
+
+def convert_gapic_to_rag_file(gapic_rag_file: GapicRagFile) -> RagFile:
+ """Convert GapicRagFile to RagFile."""
+ rag_file = RagFile(
+ name=gapic_rag_file.name,
+ display_name=gapic_rag_file.display_name,
+ description=gapic_rag_file.description,
+ )
+ return rag_file
+
+
+def convert_json_to_rag_file(upload_rag_file_response: Dict[str, Any]) -> RagFile:
+ """Converts a JSON response to a RagFile."""
+ rag_file = RagFile(
+ name=upload_rag_file_response.get("ragFile").get("name"),
+ display_name=upload_rag_file_response.get("ragFile").get("displayName"),
+ description=upload_rag_file_response.get("ragFile").get("description"),
+ )
+ return rag_file
+
+
+def convert_path_to_resource_id(
+ path: str,
+) -> Union[str, GoogleDriveSource.ResourceId]:
+ """Converts a path to a Google Cloud storage uri or GoogleDriveSource.ResourceId."""
+ if path.startswith("gs://"):
+ # Google Cloud Storage source
+ return path
+ elif path.startswith("https://drive.google.com/"):
+ # Google Drive source
+ path_list = path.split("/")
+ if "file" in path_list:
+ index = path_list.index("file") + 2
+ resource_id = path_list[index].split("?")[0]
+ resource_type = GoogleDriveSource.ResourceId.ResourceType.RESOURCE_TYPE_FILE
+ elif "folders" in path_list:
+ index = path_list.index("folders") + 1
+ resource_id = path_list[index].split("?")[0]
+ resource_type = (
+ GoogleDriveSource.ResourceId.ResourceType.RESOURCE_TYPE_FOLDER
+ )
+ else:
+ raise ValueError("path %s is not a valid Google Drive url.", path)
+
+ return GoogleDriveSource.ResourceId(
+ resource_id=resource_id,
+ resource_type=resource_type,
+ )
+ else:
+ raise ValueError(
+ "path must be a Google Cloud Storage uri or a Google Drive url."
+ )
+
+
+def convert_source_for_rag_import(
+ source: Union[SlackChannelsSource, JiraSource, SharePointSources]
+) -> Union[GapicSlackSource, GapicJiraSource]:
+ """Converts a SlackChannelsSource or JiraSource to a GapicSlackSource or GapicJiraSource."""
+ if isinstance(source, SlackChannelsSource):
+ result_source_channels = []
+ for channel in source.channels:
+ api_key = channel.api_key
+ cid = channel.channel_id
+ start_time = channel.start_time
+ end_time = channel.end_time
+ result_channels = GapicSlackSource.SlackChannels(
+ channels=[
+ GapicSlackSource.SlackChannels.SlackChannel(
+ channel_id=cid,
+ start_time=start_time,
+ end_time=end_time,
+ )
+ ],
+ api_key_config=api_auth.ApiAuth.ApiKeyConfig(
+ api_key_secret_version=api_key
+ ),
+ )
+ result_source_channels.append(result_channels)
+ return GapicSlackSource(
+ channels=result_source_channels,
+ )
+ elif isinstance(source, JiraSource):
+ result_source_queries = []
+ for query in source.queries:
+ api_key = query.api_key
+ custom_queries = query.custom_queries
+ projects = query.jira_projects
+ email = query.email
+ server_uri = query.server_uri
+ result_query = GapicJiraSource.JiraQueries(
+ custom_queries=custom_queries,
+ projects=projects,
+ email=email,
+ server_uri=server_uri,
+ api_key_config=api_auth.ApiAuth.ApiKeyConfig(
+ api_key_secret_version=api_key
+ ),
+ )
+ result_source_queries.append(result_query)
+ return GapicJiraSource(
+ jira_queries=result_source_queries,
+ )
+ elif isinstance(source, SharePointSources):
+ result_source_share_point_sources = []
+ for share_point_source in source.share_point_sources:
+ sharepoint_folder_path = share_point_source.sharepoint_folder_path
+ sharepoint_folder_id = share_point_source.sharepoint_folder_id
+ drive_name = share_point_source.drive_name
+ drive_id = share_point_source.drive_id
+ client_id = share_point_source.client_id
+ client_secret = share_point_source.client_secret
+ tenant_id = share_point_source.tenant_id
+ sharepoint_site_name = share_point_source.sharepoint_site_name
+ result_share_point_source = GapicSharePointSources.SharePointSource(
+ client_id=client_id,
+ client_secret=api_auth.ApiAuth.ApiKeyConfig(
+ api_key_secret_version=client_secret
+ ),
+ tenant_id=tenant_id,
+ sharepoint_site_name=sharepoint_site_name,
+ )
+ if sharepoint_folder_path is not None and sharepoint_folder_id is not None:
+ raise ValueError(
+ "sharepoint_folder_path and sharepoint_folder_id cannot both be set."
+ )
+ elif sharepoint_folder_path is not None:
+ result_share_point_source.sharepoint_folder_path = (
+ sharepoint_folder_path
+ )
+ elif sharepoint_folder_id is not None:
+ result_share_point_source.sharepoint_folder_id = sharepoint_folder_id
+ if drive_name is not None and drive_id is not None:
+ raise ValueError("drive_name and drive_id cannot both be set.")
+ elif drive_name is not None:
+ result_share_point_source.drive_name = drive_name
+ elif drive_id is not None:
+ result_share_point_source.drive_id = drive_id
+ else:
+ raise ValueError("Either drive_name and drive_id must be set.")
+ result_source_share_point_sources.append(result_share_point_source)
+ return GapicSharePointSources(
+ share_point_sources=result_source_share_point_sources,
+ )
+ else:
+ raise TypeError(
+ "source must be a SlackChannelsSource or JiraSource or SharePointSources."
+ )
+
+
+def prepare_import_files_request(
+ corpus_name: str,
+ paths: Optional[Sequence[str]] = None,
+ source: Optional[Union[SlackChannelsSource, JiraSource, SharePointSources]] = None,
+ transformation_config: Optional[TransformationConfig] = None,
+ max_embedding_requests_per_min: int = 1000,
+ partial_failures_sink: Optional[str] = None,
+) -> ImportRagFilesRequest:
+ if len(corpus_name.split("/")) != 6:
+ raise ValueError(
+ "corpus_name must be of the format `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`"
+ )
+
+ chunk_size = 1024
+ chunk_overlap = 200
+ if transformation_config and transformation_config.chunking_config:
+ chunk_size = transformation_config.chunking_config.chunk_size
+ chunk_overlap = transformation_config.chunking_config.chunk_overlap
+
+ rag_file_transformation_config = RagFileTransformationConfig(
+ rag_file_chunking_config=RagFileChunkingConfig(
+ fixed_length_chunking=RagFileChunkingConfig.FixedLengthChunking(
+ chunk_size=chunk_size,
+ chunk_overlap=chunk_overlap,
+ ),
+ ),
+ )
+
+ import_rag_files_config = ImportRagFilesConfig(
+ rag_file_transformation_config=rag_file_transformation_config,
+ max_embedding_requests_per_min=max_embedding_requests_per_min,
+ )
+
+ if source is not None:
+ gapic_source = convert_source_for_rag_import(source)
+ if isinstance(gapic_source, GapicSlackSource):
+ import_rag_files_config.slack_source = gapic_source
+ if isinstance(gapic_source, GapicJiraSource):
+ import_rag_files_config.jira_source = gapic_source
+ if isinstance(gapic_source, GapicSharePointSources):
+ import_rag_files_config.share_point_sources = gapic_source
+ else:
+ uris = []
+ resource_ids = []
+ for p in paths:
+ output = convert_path_to_resource_id(p)
+ if isinstance(output, str):
+ uris.append(p)
+ else:
+ resource_ids.append(output)
+ if uris:
+ import_rag_files_config.gcs_source.uris = uris
+ if resource_ids:
+ google_drive_source = GoogleDriveSource(
+ resource_ids=resource_ids,
+ )
+ import_rag_files_config.google_drive_source = google_drive_source
+
+ request = ImportRagFilesRequest(
+ parent=corpus_name, import_rag_files_config=import_rag_files_config
+ )
+ return request
+
+
+def get_corpus_name(
+ name: str,
+) -> str:
+ if name:
+ client = create_rag_data_service_client()
+ if client.parse_rag_corpus_path(name):
+ return name
+ elif re.match("^{}$".format(_VALID_RESOURCE_NAME_REGEX), name):
+ return client.rag_corpus_path(
+ project=initializer.global_config.project,
+ location=initializer.global_config.location,
+ rag_corpus=name,
+ )
+ else:
+ raise ValueError(
+ "name must be of the format `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` or `{rag_corpus}`"
+ )
+ return name
+
+
+def get_file_name(
+ name: str,
+ corpus_name: str,
+) -> str:
+ client = create_rag_data_service_client()
+ if client.parse_rag_file_path(name):
+ return name
+ elif re.match("^{}$".format(_VALID_RESOURCE_NAME_REGEX), name):
+ if not corpus_name:
+ raise ValueError(
+ "corpus_name must be provided if name is a `{rag_file}`, not a "
+ "full resource name (`projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`). "
+ )
+ return client.rag_file_path(
+ project=initializer.global_config.project,
+ location=initializer.global_config.location,
+ rag_corpus=get_corpus_name(corpus_name),
+ rag_file=name,
+ )
+ else:
+ raise ValueError(
+ "name must be of the format `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}` or `{rag_file}`"
+ )
+
+
+def set_embedding_model_config(
+ embedding_model_config: RagEmbeddingModelConfig,
+ rag_corpus: GapicRagCorpus,
+) -> None:
+ if embedding_model_config.vertex_prediction_endpoint is None:
+ return
+ if (
+ embedding_model_config.vertex_prediction_endpoint.publisher_model
+ and embedding_model_config.vertex_prediction_endpoint.endpoint
+ ):
+ raise ValueError("publisher_model and endpoint cannot be set at the same time.")
+ if (
+ not embedding_model_config.vertex_prediction_endpoint.publisher_model
+ and not embedding_model_config.vertex_prediction_endpoint.endpoint
+ ):
+ raise ValueError("At least one of publisher_model and endpoint must be set.")
+ parent = initializer.global_config.common_location_path(project=None, location=None)
+
+ if embedding_model_config.vertex_prediction_endpoint.publisher_model:
+ publisher_model = (
+ embedding_model_config.vertex_prediction_endpoint.publisher_model
+ )
+ full_resource_name = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/publishers/google/models/(?P.+?)$",
+ publisher_model,
+ )
+ resource_name = re.match(
+ r"^publishers/google/models/(?P.+?)$",
+ publisher_model,
+ )
+ if full_resource_name:
+ rag_corpus.vector_db_config.rag_embedding_model_config.vertex_prediction_endpoint.endpoint = (
+ publisher_model
+ )
+ elif resource_name:
+ rag_corpus.vector_db_config.rag_embedding_model_config.vertex_prediction_endpoint.endpoint = (
+ parent + "/" + publisher_model
+ )
+ else:
+ raise ValueError(
+ "publisher_model must be of the format `projects/{project}/locations/{location}/publishers/google/models/{model_id}` or `publishers/google/models/{model_id}`"
+ )
+
+ if embedding_model_config.vertex_prediction_endpoint.endpoint:
+ endpoint = embedding_model_config.vertex_prediction_endpoint.endpoint
+ full_resource_name = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$",
+ endpoint,
+ )
+ resource_name = re.match(
+ r"^endpoints/(?P.+?)$",
+ endpoint,
+ )
+ if full_resource_name:
+ rag_corpus.vector_db_config.rag_embedding_model_config.vertex_prediction_endpoint.endpoint = (
+ endpoint
+ )
+ elif resource_name:
+ rag_corpus.vector_db_config.rag_embedding_model_config.vertex_prediction_endpoint.endpoint = (
+ parent + "/" + endpoint
+ )
+ else:
+ raise ValueError(
+ "endpoint must be of the format `projects/{project}/locations/{location}/endpoints/{endpoint}` or `endpoints/{endpoint}`"
+ )
+
+
+def set_backend_config(
+ backend_config: Optional[
+ Union[
+ RagVectorDbConfig,
+ None,
+ ]
+ ],
+ rag_corpus: GapicRagCorpus,
+) -> None:
+ """Sets the vector db configuration for the rag corpus."""
+ if backend_config is None:
+ return
+
+ if backend_config.vector_db is not None:
+ vector_config = backend_config.vector_db
+ if vector_config is None or isinstance(vector_config, RagManagedDb):
+ rag_corpus.vector_db_config.rag_managed_db.CopyFrom(
+ GapicRagVectorDbConfig.RagManagedDb()
+ )
+ elif isinstance(vector_config, VertexVectorSearch):
+ index_endpoint = vector_config.index_endpoint
+ index = vector_config.index
+
+ rag_corpus.vector_db_config.vertex_vector_search.index_endpoint = (
+ index_endpoint
+ )
+ rag_corpus.vector_db_config.vertex_vector_search.index = index
+ elif isinstance(vector_config, Pinecone):
+ index_name = vector_config.index_name
+ api_key = vector_config.api_key
+
+ rag_corpus.vector_db_config.pinecone.index_name = index_name
+ rag_corpus.vector_db_config.api_auth.api_key_config.api_key_secret_version = (
+ api_key
+ )
+ else:
+ raise TypeError(
+ "backend_config must be a VertexFeatureStore,"
+ "RagManagedDb, or Pinecone."
+ )
+ if backend_config.rag_embedding_model_config:
+ set_embedding_model_config(
+ backend_config.rag_embedding_model_config, rag_corpus
+ )
diff --git a/testbed/googleapis__python-aiplatform/vertexai/tokenization/_tokenizer_loading.py b/testbed/googleapis__python-aiplatform/vertexai/tokenization/_tokenizer_loading.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8167b2b47ce23b68b078cf211780cdaaaf78292
--- /dev/null
+++ b/testbed/googleapis__python-aiplatform/vertexai/tokenization/_tokenizer_loading.py
@@ -0,0 +1,187 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import requests
+import uuid
+import os
+import tempfile
+import hashlib
+import dataclasses
+
+import sentencepiece as spm
+import functools
+from sentencepiece import sentencepiece_model_pb2
+
+
+@dataclasses.dataclass(frozen=True)
+class _TokenizerConfig:
+ model_url: str
+ model_hash: str
+
+
+_GEMMA_TOKENIZER = "google/gemma"
+
+# SoT: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
+_GEMINI_MODEL_NAMES = ["gemini-1.0-pro", "gemini-1.5-pro", "gemini-1.5-flash"]
+_GEMINI_STABLE_MODEL_NAMES = [
+ "gemini-1.0-pro-001",
+ "gemini-1.0-pro-002",
+ "gemini-1.5-pro-001",
+ "gemini-1.5-flash-001",
+ "gemini-1.5-flash-002",
+ "gemini-1.5-pro-002",
+]
+
+_TOKENIZERS = {
+ _GEMMA_TOKENIZER: _TokenizerConfig(
+ model_url="https://raw.githubusercontent.com/google/gemma_pytorch/33b652c465537c6158f9a472ea5700e5e770ad3f/tokenizer/tokenizer.model",
+ model_hash="61a7b147390c64585d6c3543dd6fc636906c9af3865a5548f27f31aee1d4c8e2",
+ )
+}
+
+
+def _load_file(file_url_path: str) -> bytes:
+ """Loads file bytes from the given file url path."""
+ resp = requests.get(file_url_path)
+ resp.raise_for_status()
+ return resp.content
+
+
+def _is_valid_model(*, model_data: bytes, expected_hash: str) -> bool:
+ """Returns true if the content is valid by checking the hash."""
+ if not expected_hash:
+ raise ValueError("expected_hash is required")
+ return hashlib.sha256(model_data).hexdigest() == expected_hash
+
+
+def _maybe_remove_file(file_path: str) -> None:
+ """Removes the file if exists."""
+ if not os.path.exists(file_path):
+ return
+ try:
+ os.remove(file_path)
+ except OSError:
+ # Don't raise if we cannot remove file.
+ pass
+
+
+def _maybe_load_from_cache(*, file_path: str, expected_hash: str) -> bytes:
+ """Loads the content from the cache path."""
+ if not os.path.exists(file_path):
+ return
+ with open(file_path, "rb") as f:
+ content = f.read()
+ if _is_valid_model(model_data=content, expected_hash=expected_hash):
+ return content
+
+ # Cached file corrupted.
+ _maybe_remove_file(file_path)
+
+
+def _maybe_save_to_cache(*, cache_dir: str, cache_path: str, content: bytes) -> None:
+ """Saves the content to the cache path."""
+ try:
+ os.makedirs(cache_dir, exist_ok=True)
+ tmp_path = cache_dir + "." + str(uuid.uuid4()) + ".tmp"
+ with open(tmp_path, "wb") as f:
+ f.write(content)
+ os.rename(tmp_path, cache_path)
+ except OSError:
+ # Don't raise if we cannot write file.
+ pass
+
+
+def _load_from_url(*, file_url: str, expected_hash: str) -> bytes:
+ """Loads model bytes from the given file url."""
+ content = _load_file(file_url)
+ if not _is_valid_model(model_data=content, expected_hash=expected_hash):
+ actual_hash = hashlib.sha256(content).hexdigest()
+ raise ValueError(
+ f"Downloaded model file is corrupted."
+ f" Expected hash {expected_hash}. Got file hash {actual_hash}."
+ )
+ return content
+
+
+def _load(*, file_url: str, expected_hash: str) -> bytes:
+ """Loads model bytes from the given file url.
+
+ 1. If the find local cached file for the given url and the cached file hash
+ matches the expected hash, the cached file is returned.
+ 2. If local cached file is not found or the hash does not match, the file is
+ downloaded from the given url. And write to local cache and return the
+ file bytes.
+ 3. If the file downloaded from the given url does not match the expected
+ hash, raise ValueError.
+
+ Args:
+ file_url: The url of the file to load.
+ expected_hash: The expected hash of the file.
+
+ Returns:
+ The file bytes.
+ """
+ model_dir = os.path.join(tempfile.gettempdir(), "vertexai_tokenizer_model")
+ filename = hashlib.sha1(file_url.encode()).hexdigest()
+ model_path = os.path.join(model_dir, filename)
+
+ model_data = _maybe_load_from_cache(
+ file_path=model_path, expected_hash=expected_hash
+ )
+ if not model_data:
+ model_data = _load_from_url(file_url=file_url, expected_hash=expected_hash)
+
+ _maybe_save_to_cache(cache_dir=model_dir, cache_path=model_path, content=model_data)
+ return model_data
+
+
+def _load_model_proto_bytes(tokenizer_name: str) -> bytes:
+ """Loads model proto bytes from the given tokenizer name."""
+ if tokenizer_name not in _TOKENIZERS:
+ raise ValueError(
+ f"Tokenizer {tokenizer_name} is not supported."
+ f"Supported tokenizers: {list(_TOKENIZERS.keys())}"
+ )
+ return _load(
+ file_url=_TOKENIZERS[tokenizer_name].model_url,
+ expected_hash=_TOKENIZERS[tokenizer_name].model_hash,
+ )
+
+
+@functools.lru_cache()
+def load_model_proto(tokenizer_name) -> sentencepiece_model_pb2.ModelProto:
+ """Loads model proto from the given tokenizer name."""
+ model_proto = sentencepiece_model_pb2.ModelProto()
+ model_proto.ParseFromString(_load_model_proto_bytes(tokenizer_name))
+ return model_proto
+
+
+def get_tokenizer_name(model_name: str):
+ """Gets the tokenizer name for the given model name."""
+ if model_name in _GEMINI_MODEL_NAMES:
+ return _GEMMA_TOKENIZER
+ if model_name in _GEMINI_STABLE_MODEL_NAMES:
+ return _GEMMA_TOKENIZER
+ raise ValueError(
+ f"Model {model_name} is not supported. Supported models: {', '.join(_GEMINI_STABLE_MODEL_NAMES)}.\n" # pylint: disable=line-too-long
+ )
+
+
+@functools.lru_cache()
+def get_sentencepiece(tokenizer_name: str) -> spm.SentencePieceProcessor:
+ """Loads sentencepiece tokenizer from the given tokenizer name."""
+ processor = spm.SentencePieceProcessor()
+ processor.LoadFromSerializedProto(_load_model_proto_bytes(tokenizer_name))
+ return processor
diff --git a/testbed/googleapis__python-bigquery/.coveragerc b/testbed/googleapis__python-bigquery/.coveragerc
new file mode 100644
index 0000000000000000000000000000000000000000..04092257a47fe58436a915d39581bda255a01772
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.coveragerc
@@ -0,0 +1,14 @@
+[run]
+branch = True
+
+[report]
+fail_under = 100
+show_missing = True
+omit =
+ google/cloud/bigquery/__init__.py
+ google/cloud/bigquery_v2/* # Legacy proto-based types.
+exclude_lines =
+ # Re-enable the standard pragma
+ pragma: NO COVER
+ # Ignore debug-only repr
+ def __repr__
diff --git a/testbed/googleapis__python-bigquery/.flake8 b/testbed/googleapis__python-bigquery/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..32986c79287ade6066b1ddc4ff8e0fc690110744
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.flake8
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+[flake8]
+ignore = E203, E231, E266, E501, W503
+exclude =
+ # Exclude generated code.
+ **/proto/**
+ **/gapic/**
+ **/services/**
+ **/types/**
+ *_pb2.py
+
+ # Standard linting exemptions.
+ **/.nox/**
+ __pycache__,
+ .git,
+ *.pyc,
+ conf.py
diff --git a/testbed/googleapis__python-bigquery/.github/.OwlBot.lock.yaml b/testbed/googleapis__python-bigquery/.github/.OwlBot.lock.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..597e0c3261ca293796f6fb409db0d88f170d3897
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/.OwlBot.lock.yaml
@@ -0,0 +1,17 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+docker:
+ image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
+ digest: sha256:e8dcfd7cbfd8beac3a3ff8d3f3185287ea0625d859168cc80faccfc9a7a00455
+# created: 2024-09-16T21:04:09.091105552Z
diff --git a/testbed/googleapis__python-bigquery/.github/.OwlBot.yaml b/testbed/googleapis__python-bigquery/.github/.OwlBot.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8b142686cf8942ce4949d4a2762dc8013760cd00
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/.OwlBot.yaml
@@ -0,0 +1,22 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker:
+ image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
+
+deep-remove-regex:
+ - /owl-bot-staging
+
+begin-after-commit-hash: f2de93abafa306b2ebadf1d10d947db8bcf2bf15
+
diff --git a/testbed/googleapis__python-bigquery/.github/CODEOWNERS b/testbed/googleapis__python-bigquery/.github/CODEOWNERS
new file mode 100644
index 0000000000000000000000000000000000000000..6763f258cdb5dee9a4e38fc426603e93ab54b31a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/CODEOWNERS
@@ -0,0 +1,11 @@
+# Code owners file.
+# This file controls who is tagged for review for any given pull request.
+#
+# For syntax help see:
+# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax
+
+# The @googleapis/api-bigquery is the default owner for changes in this repo
+* @googleapis/api-bigquery @googleapis/yoshi-python
+
+# The python-samples-reviewers team is the default owner for samples changes
+/samples/ @googleapis/api-bigquery @googleapis/python-samples-owners @googleapis/yoshi-python
diff --git a/testbed/googleapis__python-bigquery/.github/CONTRIBUTING.md b/testbed/googleapis__python-bigquery/.github/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..939e5341e74dc2371c8b47f0e27b50581bed5f63
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/CONTRIBUTING.md
@@ -0,0 +1,28 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## Community Guidelines
+
+This project follows [Google's Open Source Community
+Guidelines](https://opensource.google.com/conduct/).
diff --git a/testbed/googleapis__python-bigquery/.github/ISSUE_TEMPLATE/bug_report.md b/testbed/googleapis__python-bigquery/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000000000000000000000000000000000..5b5339350a60846ae3a832140e19dc40f077f169
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,43 @@
+---
+name: Bug report
+about: Create a report to help us improve
+
+---
+
+Thanks for stopping by to let us know something could be better!
+
+**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response.
+
+Please run down the following list and make sure you've tried the usual "quick fixes":
+
+ - Search the issues already opened: https://github.com/googleapis/python-bigquery/issues
+ - Search StackOverflow: https://stackoverflow.com/questions/tagged/google-cloud-platform+python
+
+If you are still having issues, please be sure to include as much information as possible:
+
+#### Environment details
+
+ - OS type and version:
+ - Python version: `python --version`
+ - pip version: `pip --version`
+ - `google-cloud-bigquery` version: `pip show google-cloud-bigquery`
+
+#### Steps to reproduce
+
+ 1. ?
+ 2. ?
+
+#### Code example
+
+```python
+# example
+```
+
+#### Stack trace
+```
+# example
+```
+
+Making sure to follow these steps will guarantee the quickest resolution possible.
+
+Thanks!
diff --git a/testbed/googleapis__python-bigquery/.github/ISSUE_TEMPLATE/feature_request.md b/testbed/googleapis__python-bigquery/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000000000000000000000000000000000..6365857f33c6c4c98eb292cf87dcfa000b301fb9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,18 @@
+---
+name: Feature request
+about: Suggest an idea for this library
+
+---
+
+Thanks for stopping by to let us know something could be better!
+
+**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response.
+
+ **Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+ **Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+ **Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+ **Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/testbed/googleapis__python-bigquery/.github/ISSUE_TEMPLATE/support_request.md b/testbed/googleapis__python-bigquery/.github/ISSUE_TEMPLATE/support_request.md
new file mode 100644
index 0000000000000000000000000000000000000000..995869032125fb2e0d6830a2898ea07e0c061499
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/ISSUE_TEMPLATE/support_request.md
@@ -0,0 +1,7 @@
+---
+name: Support request
+about: If you have a support contract with Google, please create an issue in the Google Cloud Support console.
+
+---
+
+**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response.
diff --git a/testbed/googleapis__python-bigquery/.github/PULL_REQUEST_TEMPLATE.md b/testbed/googleapis__python-bigquery/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000000000000000000000000000000000000..65ceeeb5e4904afb48a75df5c91c7985626a9885
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,7 @@
+Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly:
+- [ ] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-bigquery/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea
+- [ ] Ensure the tests and linter pass
+- [ ] Code coverage does not decrease (if any source code was changed)
+- [ ] Appropriate docs were updated (if necessary)
+
+Fixes # 🦕
diff --git a/testbed/googleapis__python-bigquery/.github/auto-approve.yml b/testbed/googleapis__python-bigquery/.github/auto-approve.yml
new file mode 100644
index 0000000000000000000000000000000000000000..311ebbb853a9397483eb1681527bfdac28b4c935
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/auto-approve.yml
@@ -0,0 +1,3 @@
+# https://github.com/googleapis/repo-automation-bots/tree/main/packages/auto-approve
+processes:
+ - "OwlBotTemplateChanges"
diff --git a/testbed/googleapis__python-bigquery/.github/auto-label.yaml b/testbed/googleapis__python-bigquery/.github/auto-label.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..21786a4eb08509738cd546ab3fe2bf7e029eec24
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/auto-label.yaml
@@ -0,0 +1,20 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+requestsize:
+ enabled: true
+
+path:
+ pullrequest: true
+ paths:
+ samples: "samples"
diff --git a/testbed/googleapis__python-bigquery/.github/blunderbuss.yml b/testbed/googleapis__python-bigquery/.github/blunderbuss.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5b7383dc76656d3e1555e83aeeb971ffa3bf993c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/blunderbuss.yml
@@ -0,0 +1,17 @@
+# Blunderbuss config
+#
+# This file controls who is assigned for pull requests and issues.
+# Note: This file is autogenerated. To make changes to the assignee
+# team, please update `codeowner_team` in `.repo-metadata.json`.
+assign_issues:
+ - googleapis/api-bigquery
+
+assign_issues_by:
+ - labels:
+ - "samples"
+ to:
+ - googleapis/python-samples-reviewers
+ - googleapis/api-bigquery
+
+assign_prs:
+ - googleapis/api-bigquery
diff --git a/testbed/googleapis__python-bigquery/.github/header-checker-lint.yml b/testbed/googleapis__python-bigquery/.github/header-checker-lint.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6fe78aa7987a6961a8593f4d4235e84845a7764b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/header-checker-lint.yml
@@ -0,0 +1,15 @@
+{"allowedCopyrightHolders": ["Google LLC"],
+ "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"],
+ "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt", "**/__init__.py", "samples/**/constraints.txt", "samples/**/constraints-test.txt"],
+ "sourceFileExtensions": [
+ "ts",
+ "js",
+ "java",
+ "sh",
+ "Dockerfile",
+ "yaml",
+ "py",
+ "html",
+ "txt"
+ ]
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.github/release-please.yml b/testbed/googleapis__python-bigquery/.github/release-please.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5161ab347cdf7cd1865a6afa20f31b81809b3f17
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/release-please.yml
@@ -0,0 +1,14 @@
+releaseType: python
+handleGHRelease: true
+# NOTE: this section is generated by synthtool.languages.python
+# See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py
+branches:
+- branch: v2
+ handleGHRelease: true
+ releaseType: python
+- branch: v1
+ handleGHRelease: true
+ releaseType: python
+- branch: v0
+ handleGHRelease: true
+ releaseType: python
diff --git a/testbed/googleapis__python-bigquery/.github/release-trigger.yml b/testbed/googleapis__python-bigquery/.github/release-trigger.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d4ca94189e160a70677ece805ad0cadcaf7edfd5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/release-trigger.yml
@@ -0,0 +1 @@
+enabled: true
diff --git a/testbed/googleapis__python-bigquery/.github/snippet-bot.yml b/testbed/googleapis__python-bigquery/.github/snippet-bot.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/googleapis__python-bigquery/.github/sync-repo-settings.yaml b/testbed/googleapis__python-bigquery/.github/sync-repo-settings.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6543d52850a9a5838d41ed7fd9da7c370d29984d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.github/sync-repo-settings.yaml
@@ -0,0 +1,35 @@
+# https://github.com/googleapis/repo-automation-bots/tree/main/packages/sync-repo-settings
+mergeCommitAllowed: false
+# Rules for main branch protection
+branchProtectionRules:
+# Identifies the protection rule pattern. Name of the branch to be protected.
+# Defaults to `main`
+- pattern: main
+ requiresLinearHistory: true
+ requiresCodeOwnerReviews: true
+ requiresStrictStatusChecks: true
+ requiredStatusCheckContexts:
+ - 'Kokoro'
+ - 'Kokoro snippets-3.8'
+ - 'Kokoro snippets-3.12'
+ - 'Kokoro system-3.8'
+ - 'Kokoro system-3.12'
+ - 'cla/google'
+ - 'Samples - Lint'
+ - 'Samples - Python 3.7'
+ - 'Samples - Python 3.8'
+ - 'Samples - Python 3.9'
+ - 'Samples - Python 3.10'
+ - 'Samples - Python 3.11'
+ - 'Samples - Python 3.12'
+- pattern: v2
+ requiresLinearHistory: true
+ requiresCodeOwnerReviews: true
+ requiresStrictStatusChecks: true
+ requiredStatusCheckContexts:
+ - 'Kokoro'
+ - 'Kokoro snippets-3.8'
+ - 'cla/google'
+ - 'Samples - Lint'
+ - 'Samples - Python 3.7'
+ - 'Samples - Python 3.8'
diff --git a/testbed/googleapis__python-bigquery/.gitignore b/testbed/googleapis__python-bigquery/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..168b201f66f41cd99f06d7c1b8052c822992dc14
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.gitignore
@@ -0,0 +1,65 @@
+*.py[cod]
+*.sw[op]
+
+# C extensions
+*.so
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+.eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+__pycache__
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.nox
+.cache
+.pytest_cache
+.pytype
+
+
+# Mac
+.DS_Store
+
+# JetBrains
+.idea
+
+# VS Code
+.vscode
+
+# emacs
+*~
+
+# Built documentation
+docs/_build
+bigquery/docs/generated
+docs.metadata
+
+# Virtual environment
+env/
+venv/
+
+# Test logs
+coverage.xml
+*sponge_log.xml
+
+# System test environment variables.
+system_tests/local_test_setup
+
+# Make sure a generated file isn't accidentally committed.
+pylintrc
+pylintrc.test
diff --git a/testbed/googleapis__python-bigquery/.kokoro/build.sh b/testbed/googleapis__python-bigquery/.kokoro/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e4da2e2a76bc7f720df31482a057748670284033
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/build.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+if [[ -z "${PROJECT_ROOT:-}" ]]; then
+ PROJECT_ROOT="github/python-bigquery"
+fi
+
+cd "${PROJECT_ROOT}"
+
+# Disable buffering, so that the logs stream through.
+export PYTHONUNBUFFERED=1
+
+# Debug: show build environment
+env | grep KOKORO
+
+# Setup service account credentials.
+export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json
+
+# Setup project id.
+export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
+
+# If this is a continuous build, send the test log to the FlakyBot.
+# See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot.
+if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then
+ cleanup() {
+ chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ }
+ trap cleanup EXIT HUP
+fi
+
+# If NOX_SESSION is set, it only runs the specified session,
+# otherwise run all the sessions.
+if [[ -n "${NOX_SESSION:-}" ]]; then
+ python3 -m nox -s ${NOX_SESSION:-}
+else
+ python3 -m nox
+fi
diff --git a/testbed/googleapis__python-bigquery/.kokoro/continuous/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/continuous/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..1f46f62708d921f754fa602da224099c8123d002
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/continuous/common.cfg
@@ -0,0 +1,27 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Download resources for system tests (service account key, etc.)
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline.sh"
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-multi"
+}
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/build.sh"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/continuous/continuous.cfg b/testbed/googleapis__python-bigquery/.kokoro/continuous/continuous.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..8f43917d92fe947f29fccb20f0cf8aee25a87fd8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/continuous/continuous.cfg
@@ -0,0 +1 @@
+# Format: //devtools/kokoro/config/proto/build.proto
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/continuous/prerelease-deps-3.12.cfg b/testbed/googleapis__python-bigquery/.kokoro/continuous/prerelease-deps-3.12.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..ece962a1703e08a204251be5e6f76bf901cb3d1a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/continuous/prerelease-deps-3.12.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "prerelease_deps-3.12"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/continuous/unit-tests-misc.cfg b/testbed/googleapis__python-bigquery/.kokoro/continuous/unit-tests-misc.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..6598baee77e10ced18aea18c908f8eaa38f1c5d0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/continuous/unit-tests-misc.cfg
@@ -0,0 +1,9 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run these nox sessions.
+# A subset based on Python versions that are neither our newest OR oldest
+# supported versions of Python
+env_vars: {
+ key: "NOX_SESSION"
+ value: "unit_noextras-3.9 unit_noextras-3.10 unit_noextras-3.11 unit-3.9 unit-3.10 unit-3.11"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/docker/docs/Dockerfile b/testbed/googleapis__python-bigquery/.kokoro/docker/docs/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..e5410e296bd854b5936c08b904522e03149c3af8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/docker/docs/Dockerfile
@@ -0,0 +1,89 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ubuntu:24.04
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Ensure local Python is preferred over distribution Python.
+ENV PATH /usr/local/bin:$PATH
+
+# Install dependencies.
+RUN apt-get update \
+ && apt-get install -y --no-install-recommends \
+ apt-transport-https \
+ build-essential \
+ ca-certificates \
+ curl \
+ dirmngr \
+ git \
+ gpg-agent \
+ graphviz \
+ libbz2-dev \
+ libdb5.3-dev \
+ libexpat1-dev \
+ libffi-dev \
+ liblzma-dev \
+ libreadline-dev \
+ libsnappy-dev \
+ libssl-dev \
+ libsqlite3-dev \
+ portaudio19-dev \
+ redis-server \
+ software-properties-common \
+ ssh \
+ sudo \
+ tcl \
+ tcl-dev \
+ tk \
+ tk-dev \
+ uuid-dev \
+ wget \
+ zlib1g-dev \
+ && add-apt-repository universe \
+ && apt-get update \
+ && apt-get -y install jq \
+ && apt-get clean autoclean \
+ && apt-get autoremove -y \
+ && rm -rf /var/lib/apt/lists/* \
+ && rm -f /var/cache/apt/archives/*.deb
+
+
+###################### Install python 3.10.14 for docs/docfx session
+
+# Download python 3.10.14
+RUN wget https://www.python.org/ftp/python/3.10.14/Python-3.10.14.tgz
+
+# Extract files
+RUN tar -xvf Python-3.10.14.tgz
+
+# Install python 3.10.14
+RUN ./Python-3.10.14/configure --enable-optimizations
+RUN make altinstall
+
+ENV PATH /usr/local/bin/python3.10:$PATH
+
+###################### Install pip
+RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \
+ && python3.10 /tmp/get-pip.py \
+ && rm /tmp/get-pip.py
+
+# Test pip
+RUN python3.10 -m pip
+
+# Install build requirements
+COPY requirements.txt /requirements.txt
+RUN python3.10 -m pip install --require-hashes -r requirements.txt
+
+CMD ["python3.10"]
diff --git a/testbed/googleapis__python-bigquery/.kokoro/docker/docs/fetch_gpg_keys.sh b/testbed/googleapis__python-bigquery/.kokoro/docker/docs/fetch_gpg_keys.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d653dd868e4b8ea6f95d9cfbd35f8a050298a183
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/docker/docs/fetch_gpg_keys.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A script to fetch gpg keys with retry.
+# Avoid jinja parsing the file.
+#
+
+function retry {
+ if [[ "${#}" -le 1 ]]; then
+ echo "Usage: ${0} retry_count commands.."
+ exit 1
+ fi
+ local retries=${1}
+ local command="${@:2}"
+ until [[ "${retries}" -le 0 ]]; do
+ $command && return 0
+ if [[ $? -ne 0 ]]; then
+ echo "command failed, retrying"
+ ((retries--))
+ fi
+ done
+ return 1
+}
+
+# 3.6.9, 3.7.5 (Ned Deily)
+retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \
+ 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D
+
+# 3.8.0 (Łukasz Langa)
+retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \
+ E3FF2839C048B25C084DEBE9B26995E310250568
+
+#
diff --git a/testbed/googleapis__python-bigquery/.kokoro/docker/docs/requirements.in b/testbed/googleapis__python-bigquery/.kokoro/docker/docs/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..816817c672a14585906032152367b57710381b47
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/docker/docs/requirements.in
@@ -0,0 +1 @@
+nox
diff --git a/testbed/googleapis__python-bigquery/.kokoro/docker/docs/requirements.txt b/testbed/googleapis__python-bigquery/.kokoro/docker/docs/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7129c771559448803415d37f2c2d35228a69b7de
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/docker/docs/requirements.txt
@@ -0,0 +1,42 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --allow-unsafe --generate-hashes requirements.in
+#
+argcomplete==3.4.0 \
+ --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \
+ --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f
+ # via nox
+colorlog==6.8.2 \
+ --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \
+ --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33
+ # via nox
+distlib==0.3.8 \
+ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \
+ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64
+ # via virtualenv
+filelock==3.15.4 \
+ --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \
+ --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7
+ # via virtualenv
+nox==2024.4.15 \
+ --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \
+ --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f
+ # via -r requirements.in
+packaging==24.1 \
+ --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \
+ --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124
+ # via nox
+platformdirs==4.2.2 \
+ --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \
+ --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3
+ # via virtualenv
+tomli==2.0.1 \
+ --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \
+ --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f
+ # via nox
+virtualenv==20.26.3 \
+ --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \
+ --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589
+ # via nox
diff --git a/testbed/googleapis__python-bigquery/.kokoro/docs/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/docs/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..41b86fc29f1590fd4c581bbfa3e86dd52b3ff1a4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/docs/common.cfg
@@ -0,0 +1,66 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline_v2.sh"
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs"
+}
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/publish-docs.sh"
+}
+
+env_vars: {
+ key: "STAGING_BUCKET"
+ value: "docs-staging"
+}
+
+env_vars: {
+ key: "V2_STAGING_BUCKET"
+ # Push google cloud library docs to the Cloud RAD bucket `docs-staging-v2`
+ value: "docs-staging-v2"
+}
+
+# It will upload the docker image after successful builds.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE_UPLOAD"
+ value: "true"
+}
+
+# It will always build the docker image.
+env_vars: {
+ key: "TRAMPOLINE_DOCKERFILE"
+ value: ".kokoro/docker/docs/Dockerfile"
+}
+
+# Fetch the token needed for reporting release status to GitHub
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "yoshi-automation-github-key"
+ }
+ }
+}
+
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "docuploader_service_account"
+ }
+ }
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/docs/docs-presubmit.cfg b/testbed/googleapis__python-bigquery/.kokoro/docs/docs-presubmit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..08adb2e28baf86e258fedb19cc3366605359a962
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/docs/docs-presubmit.cfg
@@ -0,0 +1,28 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "STAGING_BUCKET"
+ value: "gcloud-python-test"
+}
+
+env_vars: {
+ key: "V2_STAGING_BUCKET"
+ value: "gcloud-python-test"
+}
+
+# We only upload the image in the main `docs` build.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE_UPLOAD"
+ value: "false"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/build.sh"
+}
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "docs docfx"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/docs/docs.cfg b/testbed/googleapis__python-bigquery/.kokoro/docs/docs.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..8f43917d92fe947f29fccb20f0cf8aee25a87fd8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/docs/docs.cfg
@@ -0,0 +1 @@
+# Format: //devtools/kokoro/config/proto/build.proto
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/populate-secrets.sh b/testbed/googleapis__python-bigquery/.kokoro/populate-secrets.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c435402f473e484860fbd87518b365b561c5e8ba
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/populate-secrets.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2024 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;}
+function msg { println "$*" >&2 ;}
+function println { printf '%s\n' "$(now) $*" ;}
+
+
+# Populates requested secrets set in SECRET_MANAGER_KEYS from service account:
+# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com
+SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager"
+msg "Creating folder on disk for secrets: ${SECRET_LOCATION}"
+mkdir -p ${SECRET_LOCATION}
+for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g")
+do
+ msg "Retrieving secret ${key}"
+ docker run --entrypoint=gcloud \
+ --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \
+ gcr.io/google.com/cloudsdktool/cloud-sdk \
+ secrets versions access latest \
+ --project cloud-devrel-kokoro-resources \
+ --secret ${key} > \
+ "${SECRET_LOCATION}/${key}"
+ if [[ $? == 0 ]]; then
+ msg "Secret written to ${SECRET_LOCATION}/${key}"
+ else
+ msg "Error retrieving secret ${key}"
+ fi
+done
diff --git a/testbed/googleapis__python-bigquery/.kokoro/presubmit/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/presubmit/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..1f46f62708d921f754fa602da224099c8123d002
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/presubmit/common.cfg
@@ -0,0 +1,27 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Download resources for system tests (service account key, etc.)
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline.sh"
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-multi"
+}
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/build.sh"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/presubmit/linting-typing.cfg b/testbed/googleapis__python-bigquery/.kokoro/presubmit/linting-typing.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..b1a7406c2a29e1ea5ebf9659065ba2d923c20486
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/presubmit/linting-typing.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run these nox sessions.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "lint lint_setup_py blacken mypy mypy_samples pytype"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/presubmit/prerelease-deps.cfg b/testbed/googleapis__python-bigquery/.kokoro/presubmit/prerelease-deps.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..3595fb43f5c0b44f434d4b803476faa881eb063b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/presubmit/prerelease-deps.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "prerelease_deps"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/presubmit/presubmit.cfg b/testbed/googleapis__python-bigquery/.kokoro/presubmit/presubmit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..ac4cc5847eeb0a7bb29cfb85c0ac0eff5379d1b0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/presubmit/presubmit.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Disable system tests.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "unit_noextras unit cover docs docfx"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/presubmit/snippets-3.12.cfg b/testbed/googleapis__python-bigquery/.kokoro/presubmit/snippets-3.12.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..1381e832349b216529603b8b0cd4970d7e6d76d1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/presubmit/snippets-3.12.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "snippets-3.12"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/presubmit/snippets-3.8.cfg b/testbed/googleapis__python-bigquery/.kokoro/presubmit/snippets-3.8.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..840d9e7166503eceb9110b12d8900bdbbd9f22d1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/presubmit/snippets-3.8.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "snippets-3.8"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/presubmit/system-3.12.cfg b/testbed/googleapis__python-bigquery/.kokoro/presubmit/system-3.12.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..789455bd69731a300a234e411357d88006bfb69d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/presubmit/system-3.12.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "system-3.12"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/presubmit/system-3.8.cfg b/testbed/googleapis__python-bigquery/.kokoro/presubmit/system-3.8.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..f4bcee3db0f0cea2b2042c82a700434b7a90d800
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/presubmit/system-3.8.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "system-3.8"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/publish-docs.sh b/testbed/googleapis__python-bigquery/.kokoro/publish-docs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..233205d580e98f945fadeb341188d34fa12fccba
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/publish-docs.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+# Disable buffering, so that the logs stream through.
+export PYTHONUNBUFFERED=1
+
+export PATH="${HOME}/.local/bin:${PATH}"
+
+# Install nox
+python3.10 -m pip install --require-hashes -r .kokoro/requirements.txt
+python3.10 -m nox --version
+
+# build docs
+nox -s docs
+
+# create metadata
+python3.10 -m docuploader create-metadata \
+ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \
+ --version=$(python3.10 setup.py --version) \
+ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \
+ --distribution-name=$(python3.10 setup.py --name) \
+ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \
+ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \
+ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
+
+cat docs.metadata
+
+# upload docs
+python3.10 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}"
+
+
+# docfx yaml files
+nox -s docfx
+
+# create metadata.
+python3.10 -m docuploader create-metadata \
+ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \
+ --version=$(python3.10 setup.py --version) \
+ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \
+ --distribution-name=$(python3.10 setup.py --name) \
+ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \
+ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \
+ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
+
+cat docs.metadata
+
+# upload docs
+python3.10 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}"
diff --git a/testbed/googleapis__python-bigquery/.kokoro/release.sh b/testbed/googleapis__python-bigquery/.kokoro/release.sh
new file mode 100644
index 0000000000000000000000000000000000000000..453d6f702c285695eb869455b2449fbf16c34c14
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/release.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+# Start the releasetool reporter
+python3 -m pip install --require-hashes -r github/python-bigquery/.kokoro/requirements.txt
+python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script
+
+# Disable buffering, so that the logs stream through.
+export PYTHONUNBUFFERED=1
+
+# Move into the package, build the distribution and upload.
+TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-2")
+cd github/python-bigquery
+python3 setup.py sdist bdist_wheel
+twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/*
diff --git a/testbed/googleapis__python-bigquery/.kokoro/release/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/release/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..43b5a1f2785e976b4f40c80741cf2d92af04ffbc
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/release/common.cfg
@@ -0,0 +1,49 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline.sh"
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-multi"
+}
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/release.sh"
+}
+
+# Fetch PyPI password
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "google-cloud-pypi-token-keystore-2"
+ }
+ }
+}
+
+# Tokens needed to report release status back to GitHub
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem"
+}
+
+# Store the packages we uploaded to PyPI. That way, we have a record of exactly
+# what we published, which we can use to generate SBOMs and attestations.
+action {
+ define_artifacts {
+ regex: "github/python-bigquery/**/*.tar.gz"
+ strip_prefix: "github/python-bigquery"
+ }
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/release/release.cfg b/testbed/googleapis__python-bigquery/.kokoro/release/release.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..8f43917d92fe947f29fccb20f0cf8aee25a87fd8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/release/release.cfg
@@ -0,0 +1 @@
+# Format: //devtools/kokoro/config/proto/build.proto
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/requirements.in b/testbed/googleapis__python-bigquery/.kokoro/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..fff4d9ce0d0a646881c8b6abdc6cf60a5748fa53
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/requirements.in
@@ -0,0 +1,11 @@
+gcp-docuploader
+gcp-releasetool>=2 # required for compatibility with cryptography>=42.x
+importlib-metadata
+typing-extensions
+twine
+wheel
+setuptools
+nox>=2022.11.21 # required to remove dependency on py
+charset-normalizer<3
+click<8.1.0
+cryptography>=42.0.5
diff --git a/testbed/googleapis__python-bigquery/.kokoro/requirements.txt b/testbed/googleapis__python-bigquery/.kokoro/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9622baf0ba38760091df7ebc983010732956f6d0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/requirements.txt
@@ -0,0 +1,537 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --allow-unsafe --generate-hashes requirements.in
+#
+argcomplete==3.4.0 \
+ --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \
+ --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f
+ # via nox
+attrs==23.2.0 \
+ --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \
+ --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1
+ # via gcp-releasetool
+backports-tarfile==1.2.0 \
+ --hash=sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34 \
+ --hash=sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991
+ # via jaraco-context
+cachetools==5.3.3 \
+ --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \
+ --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105
+ # via google-auth
+certifi==2024.7.4 \
+ --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \
+ --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90
+ # via requests
+cffi==1.16.0 \
+ --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \
+ --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \
+ --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \
+ --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \
+ --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \
+ --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \
+ --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \
+ --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \
+ --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \
+ --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \
+ --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \
+ --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \
+ --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \
+ --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \
+ --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \
+ --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \
+ --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \
+ --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \
+ --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \
+ --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \
+ --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \
+ --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \
+ --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \
+ --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \
+ --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \
+ --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \
+ --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \
+ --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \
+ --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \
+ --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \
+ --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \
+ --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \
+ --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \
+ --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \
+ --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \
+ --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \
+ --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \
+ --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \
+ --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \
+ --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \
+ --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \
+ --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \
+ --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \
+ --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \
+ --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \
+ --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \
+ --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \
+ --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \
+ --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \
+ --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \
+ --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \
+ --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357
+ # via cryptography
+charset-normalizer==2.1.1 \
+ --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \
+ --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f
+ # via
+ # -r requirements.in
+ # requests
+click==8.0.4 \
+ --hash=sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1 \
+ --hash=sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb
+ # via
+ # -r requirements.in
+ # gcp-docuploader
+ # gcp-releasetool
+colorlog==6.8.2 \
+ --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \
+ --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33
+ # via
+ # gcp-docuploader
+ # nox
+cryptography==42.0.8 \
+ --hash=sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad \
+ --hash=sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583 \
+ --hash=sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b \
+ --hash=sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c \
+ --hash=sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1 \
+ --hash=sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648 \
+ --hash=sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949 \
+ --hash=sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba \
+ --hash=sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c \
+ --hash=sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9 \
+ --hash=sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d \
+ --hash=sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c \
+ --hash=sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e \
+ --hash=sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2 \
+ --hash=sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d \
+ --hash=sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7 \
+ --hash=sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70 \
+ --hash=sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2 \
+ --hash=sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7 \
+ --hash=sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14 \
+ --hash=sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe \
+ --hash=sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e \
+ --hash=sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71 \
+ --hash=sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961 \
+ --hash=sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7 \
+ --hash=sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c \
+ --hash=sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28 \
+ --hash=sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842 \
+ --hash=sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902 \
+ --hash=sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801 \
+ --hash=sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a \
+ --hash=sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e
+ # via
+ # -r requirements.in
+ # gcp-releasetool
+ # secretstorage
+distlib==0.3.8 \
+ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \
+ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64
+ # via virtualenv
+docutils==0.21.2 \
+ --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \
+ --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2
+ # via readme-renderer
+filelock==3.15.4 \
+ --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \
+ --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7
+ # via virtualenv
+gcp-docuploader==0.6.5 \
+ --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \
+ --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea
+ # via -r requirements.in
+gcp-releasetool==2.0.1 \
+ --hash=sha256:34314a910c08e8911d9c965bd44f8f2185c4f556e737d719c33a41f6a610de96 \
+ --hash=sha256:b0d5863c6a070702b10883d37c4bdfd74bf930fe417f36c0c965d3b7c779ae62
+ # via -r requirements.in
+google-api-core==2.19.1 \
+ --hash=sha256:f12a9b8309b5e21d92483bbd47ce2c445861ec7d269ef6784ecc0ea8c1fa6125 \
+ --hash=sha256:f4695f1e3650b316a795108a76a1c416e6afb036199d1c1f1f110916df479ffd
+ # via
+ # google-cloud-core
+ # google-cloud-storage
+google-auth==2.31.0 \
+ --hash=sha256:042c4702efa9f7d3c48d3a69341c209381b125faa6dbf3ebe56bc7e40ae05c23 \
+ --hash=sha256:87805c36970047247c8afe614d4e3af8eceafc1ebba0c679fe75ddd1d575e871
+ # via
+ # gcp-releasetool
+ # google-api-core
+ # google-cloud-core
+ # google-cloud-storage
+google-cloud-core==2.4.1 \
+ --hash=sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073 \
+ --hash=sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61
+ # via google-cloud-storage
+google-cloud-storage==2.17.0 \
+ --hash=sha256:49378abff54ef656b52dca5ef0f2eba9aa83dc2b2c72c78714b03a1a95fe9388 \
+ --hash=sha256:5b393bc766b7a3bc6f5407b9e665b2450d36282614b7945e570b3480a456d1e1
+ # via gcp-docuploader
+google-crc32c==1.5.0 \
+ --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \
+ --hash=sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876 \
+ --hash=sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c \
+ --hash=sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289 \
+ --hash=sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298 \
+ --hash=sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02 \
+ --hash=sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f \
+ --hash=sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2 \
+ --hash=sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a \
+ --hash=sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb \
+ --hash=sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210 \
+ --hash=sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5 \
+ --hash=sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee \
+ --hash=sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c \
+ --hash=sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a \
+ --hash=sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314 \
+ --hash=sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd \
+ --hash=sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65 \
+ --hash=sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37 \
+ --hash=sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4 \
+ --hash=sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13 \
+ --hash=sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894 \
+ --hash=sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31 \
+ --hash=sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e \
+ --hash=sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709 \
+ --hash=sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740 \
+ --hash=sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc \
+ --hash=sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d \
+ --hash=sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c \
+ --hash=sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c \
+ --hash=sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d \
+ --hash=sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906 \
+ --hash=sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61 \
+ --hash=sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57 \
+ --hash=sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c \
+ --hash=sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a \
+ --hash=sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438 \
+ --hash=sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946 \
+ --hash=sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7 \
+ --hash=sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96 \
+ --hash=sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091 \
+ --hash=sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae \
+ --hash=sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d \
+ --hash=sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88 \
+ --hash=sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2 \
+ --hash=sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd \
+ --hash=sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541 \
+ --hash=sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728 \
+ --hash=sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178 \
+ --hash=sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968 \
+ --hash=sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346 \
+ --hash=sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8 \
+ --hash=sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93 \
+ --hash=sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7 \
+ --hash=sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273 \
+ --hash=sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462 \
+ --hash=sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94 \
+ --hash=sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd \
+ --hash=sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e \
+ --hash=sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57 \
+ --hash=sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b \
+ --hash=sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9 \
+ --hash=sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a \
+ --hash=sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100 \
+ --hash=sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325 \
+ --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \
+ --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \
+ --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4
+ # via
+ # google-cloud-storage
+ # google-resumable-media
+google-resumable-media==2.7.1 \
+ --hash=sha256:103ebc4ba331ab1bfdac0250f8033627a2cd7cde09e7ccff9181e31ba4315b2c \
+ --hash=sha256:eae451a7b2e2cdbaaa0fd2eb00cc8a1ee5e95e16b55597359cbc3d27d7d90e33
+ # via google-cloud-storage
+googleapis-common-protos==1.63.2 \
+ --hash=sha256:27a2499c7e8aff199665b22741997e485eccc8645aa9176c7c988e6fae507945 \
+ --hash=sha256:27c5abdffc4911f28101e635de1533fb4cfd2c37fbaa9174587c799fac90aa87
+ # via google-api-core
+idna==3.7 \
+ --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \
+ --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0
+ # via requests
+importlib-metadata==8.0.0 \
+ --hash=sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f \
+ --hash=sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812
+ # via
+ # -r requirements.in
+ # keyring
+ # twine
+jaraco-classes==3.4.0 \
+ --hash=sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd \
+ --hash=sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790
+ # via keyring
+jaraco-context==5.3.0 \
+ --hash=sha256:3e16388f7da43d384a1a7cd3452e72e14732ac9fe459678773a3608a812bf266 \
+ --hash=sha256:c2f67165ce1f9be20f32f650f25d8edfc1646a8aeee48ae06fb35f90763576d2
+ # via keyring
+jaraco-functools==4.0.1 \
+ --hash=sha256:3b24ccb921d6b593bdceb56ce14799204f473976e2a9d4b15b04d0f2c2326664 \
+ --hash=sha256:d33fa765374c0611b52f8b3a795f8900869aa88c84769d4d1746cd68fb28c3e8
+ # via keyring
+jeepney==0.8.0 \
+ --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \
+ --hash=sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755
+ # via
+ # keyring
+ # secretstorage
+jinja2==3.1.4 \
+ --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \
+ --hash=sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d
+ # via gcp-releasetool
+keyring==25.2.1 \
+ --hash=sha256:2458681cdefc0dbc0b7eb6cf75d0b98e59f9ad9b2d4edd319d18f68bdca95e50 \
+ --hash=sha256:daaffd42dbda25ddafb1ad5fec4024e5bbcfe424597ca1ca452b299861e49f1b
+ # via
+ # gcp-releasetool
+ # twine
+markdown-it-py==3.0.0 \
+ --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \
+ --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb
+ # via rich
+markupsafe==2.1.5 \
+ --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \
+ --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \
+ --hash=sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f \
+ --hash=sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3 \
+ --hash=sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532 \
+ --hash=sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f \
+ --hash=sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617 \
+ --hash=sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df \
+ --hash=sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4 \
+ --hash=sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906 \
+ --hash=sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f \
+ --hash=sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4 \
+ --hash=sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8 \
+ --hash=sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371 \
+ --hash=sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2 \
+ --hash=sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465 \
+ --hash=sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52 \
+ --hash=sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6 \
+ --hash=sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169 \
+ --hash=sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad \
+ --hash=sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2 \
+ --hash=sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0 \
+ --hash=sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029 \
+ --hash=sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f \
+ --hash=sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a \
+ --hash=sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced \
+ --hash=sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5 \
+ --hash=sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c \
+ --hash=sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf \
+ --hash=sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9 \
+ --hash=sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb \
+ --hash=sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad \
+ --hash=sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3 \
+ --hash=sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1 \
+ --hash=sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46 \
+ --hash=sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc \
+ --hash=sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a \
+ --hash=sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee \
+ --hash=sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900 \
+ --hash=sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5 \
+ --hash=sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea \
+ --hash=sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f \
+ --hash=sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5 \
+ --hash=sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e \
+ --hash=sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a \
+ --hash=sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f \
+ --hash=sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50 \
+ --hash=sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a \
+ --hash=sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b \
+ --hash=sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4 \
+ --hash=sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff \
+ --hash=sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2 \
+ --hash=sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46 \
+ --hash=sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b \
+ --hash=sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf \
+ --hash=sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5 \
+ --hash=sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5 \
+ --hash=sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab \
+ --hash=sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd \
+ --hash=sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68
+ # via jinja2
+mdurl==0.1.2 \
+ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
+ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
+ # via markdown-it-py
+more-itertools==10.3.0 \
+ --hash=sha256:e5d93ef411224fbcef366a6e8ddc4c5781bc6359d43412a65dd5964e46111463 \
+ --hash=sha256:ea6a02e24a9161e51faad17a8782b92a0df82c12c1c8886fec7f0c3fa1a1b320
+ # via
+ # jaraco-classes
+ # jaraco-functools
+nh3==0.2.18 \
+ --hash=sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164 \
+ --hash=sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86 \
+ --hash=sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b \
+ --hash=sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad \
+ --hash=sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204 \
+ --hash=sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a \
+ --hash=sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200 \
+ --hash=sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189 \
+ --hash=sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f \
+ --hash=sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811 \
+ --hash=sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844 \
+ --hash=sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4 \
+ --hash=sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be \
+ --hash=sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50 \
+ --hash=sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307 \
+ --hash=sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe
+ # via readme-renderer
+nox==2024.4.15 \
+ --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \
+ --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f
+ # via -r requirements.in
+packaging==24.1 \
+ --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \
+ --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124
+ # via
+ # gcp-releasetool
+ # nox
+pkginfo==1.10.0 \
+ --hash=sha256:5df73835398d10db79f8eecd5cd86b1f6d29317589ea70796994d49399af6297 \
+ --hash=sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097
+ # via twine
+platformdirs==4.2.2 \
+ --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \
+ --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3
+ # via virtualenv
+proto-plus==1.24.0 \
+ --hash=sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445 \
+ --hash=sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12
+ # via google-api-core
+protobuf==5.27.2 \
+ --hash=sha256:0e341109c609749d501986b835f667c6e1e24531096cff9d34ae411595e26505 \
+ --hash=sha256:176c12b1f1c880bf7a76d9f7c75822b6a2bc3db2d28baa4d300e8ce4cde7409b \
+ --hash=sha256:354d84fac2b0d76062e9b3221f4abbbacdfd2a4d8af36bab0474f3a0bb30ab38 \
+ --hash=sha256:4fadd8d83e1992eed0248bc50a4a6361dc31bcccc84388c54c86e530b7f58863 \
+ --hash=sha256:54330f07e4949d09614707c48b06d1a22f8ffb5763c159efd5c0928326a91470 \
+ --hash=sha256:610e700f02469c4a997e58e328cac6f305f649826853813177e6290416e846c6 \
+ --hash=sha256:7fc3add9e6003e026da5fc9e59b131b8f22b428b991ccd53e2af8071687b4fce \
+ --hash=sha256:9e8f199bf7f97bd7ecebffcae45ebf9527603549b2b562df0fbc6d4d688f14ca \
+ --hash=sha256:a109916aaac42bff84702fb5187f3edadbc7c97fc2c99c5ff81dd15dcce0d1e5 \
+ --hash=sha256:b848dbe1d57ed7c191dfc4ea64b8b004a3f9ece4bf4d0d80a367b76df20bf36e \
+ --hash=sha256:f3ecdef226b9af856075f28227ff2c90ce3a594d092c39bee5513573f25e2714
+ # via
+ # gcp-docuploader
+ # gcp-releasetool
+ # google-api-core
+ # googleapis-common-protos
+ # proto-plus
+pyasn1==0.6.0 \
+ --hash=sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c \
+ --hash=sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473
+ # via
+ # pyasn1-modules
+ # rsa
+pyasn1-modules==0.4.0 \
+ --hash=sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6 \
+ --hash=sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b
+ # via google-auth
+pycparser==2.22 \
+ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \
+ --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc
+ # via cffi
+pygments==2.18.0 \
+ --hash=sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199 \
+ --hash=sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a
+ # via
+ # readme-renderer
+ # rich
+pyjwt==2.8.0 \
+ --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \
+ --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320
+ # via gcp-releasetool
+pyperclip==1.9.0 \
+ --hash=sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310
+ # via gcp-releasetool
+python-dateutil==2.9.0.post0 \
+ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \
+ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427
+ # via gcp-releasetool
+readme-renderer==44.0 \
+ --hash=sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151 \
+ --hash=sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1
+ # via twine
+requests==2.32.3 \
+ --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \
+ --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6
+ # via
+ # gcp-releasetool
+ # google-api-core
+ # google-cloud-storage
+ # requests-toolbelt
+ # twine
+requests-toolbelt==1.0.0 \
+ --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \
+ --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06
+ # via twine
+rfc3986==2.0.0 \
+ --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \
+ --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c
+ # via twine
+rich==13.7.1 \
+ --hash=sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222 \
+ --hash=sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432
+ # via twine
+rsa==4.9 \
+ --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \
+ --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21
+ # via google-auth
+secretstorage==3.3.3 \
+ --hash=sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77 \
+ --hash=sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99
+ # via keyring
+six==1.16.0 \
+ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+ # via
+ # gcp-docuploader
+ # python-dateutil
+tomli==2.0.1 \
+ --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \
+ --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f
+ # via nox
+twine==5.1.1 \
+ --hash=sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997 \
+ --hash=sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db
+ # via -r requirements.in
+typing-extensions==4.12.2 \
+ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \
+ --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8
+ # via -r requirements.in
+urllib3==2.2.2 \
+ --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \
+ --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168
+ # via
+ # requests
+ # twine
+virtualenv==20.26.3 \
+ --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \
+ --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589
+ # via nox
+wheel==0.43.0 \
+ --hash=sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85 \
+ --hash=sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81
+ # via -r requirements.in
+zipp==3.19.2 \
+ --hash=sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19 \
+ --hash=sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c
+ # via importlib-metadata
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==70.2.0 \
+ --hash=sha256:b8b8060bb426838fbe942479c90296ce976249451118ef566a5a0b7d8b78fb05 \
+ --hash=sha256:bd63e505105011b25c3c11f753f7e3b8465ea739efddaccef8f0efac2137bac1
+ # via -r requirements.in
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/lint/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/lint/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..153746cccae755885dff4411f7f8865829af4f3c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/lint/common.cfg
@@ -0,0 +1,34 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "lint"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/lint/continuous.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/lint/continuous.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/lint/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/lint/periodic.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/lint/periodic.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..50fec964973212c5b45fe3dd407b44f8b1f707ad
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/lint/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/lint/presubmit.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/lint/presubmit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/lint/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..da4003d76d91dd9d3b800a9052eb0c88dd4c2123
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.10"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-310"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/continuous.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/continuous.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/periodic-head.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/periodic-head.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..5aa01bab5bf33a147bb3ec37eaa7c8a528c1a711
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples-against-head.sh"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/periodic.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/periodic.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..71cd1e597e385db30b49639bbcdf0a74e5c5efb8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/presubmit.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/presubmit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.10/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..f5adc870378fc841b02dad5d49ba202cec2671eb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.11"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-311"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/continuous.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/continuous.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/periodic-head.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/periodic-head.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..5aa01bab5bf33a147bb3ec37eaa7c8a528c1a711
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples-against-head.sh"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/periodic.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/periodic.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..71cd1e597e385db30b49639bbcdf0a74e5c5efb8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/presubmit.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/presubmit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.11/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..6eb699edd456540267efdf56dd90a424b2631ecd
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.12"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-312"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/continuous.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/continuous.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/periodic-head.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/periodic-head.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..5aa01bab5bf33a147bb3ec37eaa7c8a528c1a711
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples-against-head.sh"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/periodic.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/periodic.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..71cd1e597e385db30b49639bbcdf0a74e5c5efb8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/presubmit.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/presubmit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.12/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..d30dc6018ebc1d4af2a0d93227a2d25ae26912fb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.7"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py37"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/continuous.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/continuous.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/periodic-head.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/periodic-head.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..5aa01bab5bf33a147bb3ec37eaa7c8a528c1a711
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples-against-head.sh"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/periodic.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/periodic.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..71cd1e597e385db30b49639bbcdf0a74e5c5efb8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/presubmit.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/presubmit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.7/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..46759c6d61bf02193d967c800b85feaec212244d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.8"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py38"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/continuous.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/continuous.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/periodic-head.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/periodic-head.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..5aa01bab5bf33a147bb3ec37eaa7c8a528c1a711
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples-against-head.sh"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/periodic.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/periodic.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..71cd1e597e385db30b49639bbcdf0a74e5c5efb8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/presubmit.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/presubmit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.8/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/common.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/common.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..58d56ce743c954742d665cd6ffd40c852c7e1860
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.9"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py39"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigquery/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/continuous.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/continuous.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/periodic-head.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/periodic-head.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..5aa01bab5bf33a147bb3ec37eaa7c8a528c1a711
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigquery/.kokoro/test-samples-against-head.sh"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/periodic.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/periodic.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..71cd1e597e385db30b49639bbcdf0a74e5c5efb8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/presubmit.cfg b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/presubmit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a1c8d9759c8822b35fc3914b2f9dd9a91e2e43be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/samples/python3.9/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/test-samples-against-head.sh b/testbed/googleapis__python-bigquery/.kokoro/test-samples-against-head.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e9d8bd79a644f2a103c0d352ef7c172897b0fdaa
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/test-samples-against-head.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A customized test runner for samples.
+#
+# For periodic builds, you can specify this file for testing against head.
+
+# `-e` enables the script to automatically fail when a command fails
+# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero
+set -eo pipefail
+# Enables `**` to include files nested inside sub-folders
+shopt -s globstar
+
+exec .kokoro/test-samples-impl.sh
diff --git a/testbed/googleapis__python-bigquery/.kokoro/test-samples-impl.sh b/testbed/googleapis__python-bigquery/.kokoro/test-samples-impl.sh
new file mode 100644
index 0000000000000000000000000000000000000000..55910c8ba1783e425c8b2ddff190c35a105a768c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/test-samples-impl.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# `-e` enables the script to automatically fail when a command fails
+# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero
+set -eo pipefail
+# Enables `**` to include files nested inside sub-folders
+shopt -s globstar
+
+# Exit early if samples don't exist
+if ! find samples -name 'requirements.txt' | grep -q .; then
+ echo "No tests run. './samples/**/requirements.txt' not found"
+ exit 0
+fi
+
+# Disable buffering, so that the logs stream through.
+export PYTHONUNBUFFERED=1
+
+# Debug: show build environment
+env | grep KOKORO
+
+# Install nox
+python3.9 -m pip install --upgrade --quiet nox
+
+# Use secrets acessor service account to get secrets
+if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then
+ gcloud auth activate-service-account \
+ --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \
+ --project="cloud-devrel-kokoro-resources"
+fi
+
+# This script will create 3 files:
+# - testing/test-env.sh
+# - testing/service-account.json
+# - testing/client-secrets.json
+./scripts/decrypt-secrets.sh
+
+source ./testing/test-env.sh
+export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json
+
+# For cloud-run session, we activate the service account for gcloud sdk.
+gcloud auth activate-service-account \
+ --key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
+
+export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json
+
+echo -e "\n******************** TESTING PROJECTS ********************"
+
+# Switch to 'fail at end' to allow all tests to complete before exiting.
+set +e
+# Use RTN to return a non-zero value if the test fails.
+RTN=0
+ROOT=$(pwd)
+# Find all requirements.txt in the samples directory (may break on whitespace).
+for file in samples/**/requirements.txt; do
+ cd "$ROOT"
+ # Navigate to the project folder.
+ file=$(dirname "$file")
+ cd "$file"
+
+ echo "------------------------------------------------------------"
+ echo "- testing $file"
+ echo "------------------------------------------------------------"
+
+ # Use nox to execute the tests for the project.
+ python3.9 -m nox -s "$RUN_TESTS_SESSION"
+ EXIT=$?
+
+ # If this is a periodic build, send the test log to the FlakyBot.
+ # See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot.
+ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
+ chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ fi
+
+ if [[ $EXIT -ne 0 ]]; then
+ RTN=1
+ echo -e "\n Testing failed: Nox returned a non-zero exit code. \n"
+ else
+ echo -e "\n Testing completed.\n"
+ fi
+
+done
+cd "$ROOT"
+
+# Workaround for Kokoro permissions issue: delete secrets
+rm testing/{test-env.sh,client-secrets.json,service-account.json}
+
+exit "$RTN"
diff --git a/testbed/googleapis__python-bigquery/.kokoro/test-samples.sh b/testbed/googleapis__python-bigquery/.kokoro/test-samples.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7933d820149a3ddbe8764fd15f4c2403f405b427
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/test-samples.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The default test runner for samples.
+#
+# For periodic builds, we rewinds the repo to the latest release, and
+# run test-samples-impl.sh.
+
+# `-e` enables the script to automatically fail when a command fails
+# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero
+set -eo pipefail
+# Enables `**` to include files nested inside sub-folders
+shopt -s globstar
+
+# Run periodic samples tests at latest release
+if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
+ # preserving the test runner implementation.
+ cp .kokoro/test-samples-impl.sh "${TMPDIR}/test-samples-impl.sh"
+ echo "--- IMPORTANT IMPORTANT IMPORTANT ---"
+ echo "Now we rewind the repo back to the latest release..."
+ LATEST_RELEASE=$(git describe --abbrev=0 --tags)
+ git checkout $LATEST_RELEASE
+ echo "The current head is: "
+ echo $(git rev-parse --verify HEAD)
+ echo "--- IMPORTANT IMPORTANT IMPORTANT ---"
+ # move back the test runner implementation if there's no file.
+ if [ ! -f .kokoro/test-samples-impl.sh ]; then
+ cp "${TMPDIR}/test-samples-impl.sh" .kokoro/test-samples-impl.sh
+ fi
+fi
+
+exec .kokoro/test-samples-impl.sh
diff --git a/testbed/googleapis__python-bigquery/.kokoro/trampoline.sh b/testbed/googleapis__python-bigquery/.kokoro/trampoline.sh
new file mode 100644
index 0000000000000000000000000000000000000000..48f79699706e1ea990af459b1f33cea134830589
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/trampoline.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+# Always run the cleanup script, regardless of the success of bouncing into
+# the container.
+function cleanup() {
+ chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ echo "cleanup";
+}
+trap cleanup EXIT
+
+$(dirname $0)/populate-secrets.sh # Secret Manager secrets.
+python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py"
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/.kokoro/trampoline_v2.sh b/testbed/googleapis__python-bigquery/.kokoro/trampoline_v2.sh
new file mode 100644
index 0000000000000000000000000000000000000000..35fa529231dc6df2cd4b4f92ee34fdfe1ca3e470
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.kokoro/trampoline_v2.sh
@@ -0,0 +1,487 @@
+#!/usr/bin/env bash
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# trampoline_v2.sh
+#
+# This script does 3 things.
+#
+# 1. Prepare the Docker image for the test
+# 2. Run the Docker with appropriate flags to run the test
+# 3. Upload the newly built Docker image
+#
+# in a way that is somewhat compatible with trampoline_v1.
+#
+# To run this script, first download few files from gcs to /dev/shm.
+# (/dev/shm is passed into the container as KOKORO_GFILE_DIR).
+#
+# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm
+# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm
+#
+# Then run the script.
+# .kokoro/trampoline_v2.sh
+#
+# These environment variables are required:
+# TRAMPOLINE_IMAGE: The docker image to use.
+# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile.
+#
+# You can optionally change these environment variables:
+# TRAMPOLINE_IMAGE_UPLOAD:
+# (true|false): Whether to upload the Docker image after the
+# successful builds.
+# TRAMPOLINE_BUILD_FILE: The script to run in the docker container.
+# TRAMPOLINE_WORKSPACE: The workspace path in the docker container.
+# Defaults to /workspace.
+# Potentially there are some repo specific envvars in .trampolinerc in
+# the project root.
+
+
+set -euo pipefail
+
+TRAMPOLINE_VERSION="2.0.5"
+
+if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then
+ readonly IO_COLOR_RED="$(tput setaf 1)"
+ readonly IO_COLOR_GREEN="$(tput setaf 2)"
+ readonly IO_COLOR_YELLOW="$(tput setaf 3)"
+ readonly IO_COLOR_RESET="$(tput sgr0)"
+else
+ readonly IO_COLOR_RED=""
+ readonly IO_COLOR_GREEN=""
+ readonly IO_COLOR_YELLOW=""
+ readonly IO_COLOR_RESET=""
+fi
+
+function function_exists {
+ [ $(LC_ALL=C type -t $1)"" == "function" ]
+}
+
+# Logs a message using the given color. The first argument must be one
+# of the IO_COLOR_* variables defined above, such as
+# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the
+# given color. The log message will also have an RFC-3339 timestamp
+# prepended (in UTC). You can disable the color output by setting
+# TERM=vt100.
+function log_impl() {
+ local color="$1"
+ shift
+ local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")"
+ echo "================================================================"
+ echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}"
+ echo "================================================================"
+}
+
+# Logs the given message with normal coloring and a timestamp.
+function log() {
+ log_impl "${IO_COLOR_RESET}" "$@"
+}
+
+# Logs the given message in green with a timestamp.
+function log_green() {
+ log_impl "${IO_COLOR_GREEN}" "$@"
+}
+
+# Logs the given message in yellow with a timestamp.
+function log_yellow() {
+ log_impl "${IO_COLOR_YELLOW}" "$@"
+}
+
+# Logs the given message in red with a timestamp.
+function log_red() {
+ log_impl "${IO_COLOR_RED}" "$@"
+}
+
+readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX)
+readonly tmphome="${tmpdir}/h"
+mkdir -p "${tmphome}"
+
+function cleanup() {
+ rm -rf "${tmpdir}"
+}
+trap cleanup EXIT
+
+RUNNING_IN_CI="${RUNNING_IN_CI:-false}"
+
+# The workspace in the container, defaults to /workspace.
+TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}"
+
+pass_down_envvars=(
+ # TRAMPOLINE_V2 variables.
+ # Tells scripts whether they are running as part of CI or not.
+ "RUNNING_IN_CI"
+ # Indicates which CI system we're in.
+ "TRAMPOLINE_CI"
+ # Indicates the version of the script.
+ "TRAMPOLINE_VERSION"
+)
+
+log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}"
+
+# Detect which CI systems we're in. If we're in any of the CI systems
+# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be
+# the name of the CI system. Both envvars will be passing down to the
+# container for telling which CI system we're in.
+if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then
+ # descriptive env var for indicating it's on CI.
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="kokoro"
+ if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then
+ if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then
+ log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting."
+ exit 1
+ fi
+ # This service account will be activated later.
+ TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json"
+ else
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ gcloud auth list
+ fi
+ log_yellow "Configuring Container Registry access"
+ gcloud auth configure-docker --quiet
+ fi
+ pass_down_envvars+=(
+ # KOKORO dynamic variables.
+ "KOKORO_BUILD_NUMBER"
+ "KOKORO_BUILD_ID"
+ "KOKORO_JOB_NAME"
+ "KOKORO_GIT_COMMIT"
+ "KOKORO_GITHUB_COMMIT"
+ "KOKORO_GITHUB_PULL_REQUEST_NUMBER"
+ "KOKORO_GITHUB_PULL_REQUEST_COMMIT"
+ # For FlakyBot
+ "KOKORO_GITHUB_COMMIT_URL"
+ "KOKORO_GITHUB_PULL_REQUEST_URL"
+ )
+elif [[ "${TRAVIS:-}" == "true" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="travis"
+ pass_down_envvars+=(
+ "TRAVIS_BRANCH"
+ "TRAVIS_BUILD_ID"
+ "TRAVIS_BUILD_NUMBER"
+ "TRAVIS_BUILD_WEB_URL"
+ "TRAVIS_COMMIT"
+ "TRAVIS_COMMIT_MESSAGE"
+ "TRAVIS_COMMIT_RANGE"
+ "TRAVIS_JOB_NAME"
+ "TRAVIS_JOB_NUMBER"
+ "TRAVIS_JOB_WEB_URL"
+ "TRAVIS_PULL_REQUEST"
+ "TRAVIS_PULL_REQUEST_BRANCH"
+ "TRAVIS_PULL_REQUEST_SHA"
+ "TRAVIS_PULL_REQUEST_SLUG"
+ "TRAVIS_REPO_SLUG"
+ "TRAVIS_SECURE_ENV_VARS"
+ "TRAVIS_TAG"
+ )
+elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="github-workflow"
+ pass_down_envvars+=(
+ "GITHUB_WORKFLOW"
+ "GITHUB_RUN_ID"
+ "GITHUB_RUN_NUMBER"
+ "GITHUB_ACTION"
+ "GITHUB_ACTIONS"
+ "GITHUB_ACTOR"
+ "GITHUB_REPOSITORY"
+ "GITHUB_EVENT_NAME"
+ "GITHUB_EVENT_PATH"
+ "GITHUB_SHA"
+ "GITHUB_REF"
+ "GITHUB_HEAD_REF"
+ "GITHUB_BASE_REF"
+ )
+elif [[ "${CIRCLECI:-}" == "true" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="circleci"
+ pass_down_envvars+=(
+ "CIRCLE_BRANCH"
+ "CIRCLE_BUILD_NUM"
+ "CIRCLE_BUILD_URL"
+ "CIRCLE_COMPARE_URL"
+ "CIRCLE_JOB"
+ "CIRCLE_NODE_INDEX"
+ "CIRCLE_NODE_TOTAL"
+ "CIRCLE_PREVIOUS_BUILD_NUM"
+ "CIRCLE_PROJECT_REPONAME"
+ "CIRCLE_PROJECT_USERNAME"
+ "CIRCLE_REPOSITORY_URL"
+ "CIRCLE_SHA1"
+ "CIRCLE_STAGE"
+ "CIRCLE_USERNAME"
+ "CIRCLE_WORKFLOW_ID"
+ "CIRCLE_WORKFLOW_JOB_ID"
+ "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS"
+ "CIRCLE_WORKFLOW_WORKSPACE_ID"
+ )
+fi
+
+# Configure the service account for pulling the docker image.
+function repo_root() {
+ local dir="$1"
+ while [[ ! -d "${dir}/.git" ]]; do
+ dir="$(dirname "$dir")"
+ done
+ echo "${dir}"
+}
+
+# Detect the project root. In CI builds, we assume the script is in
+# the git tree and traverse from there, otherwise, traverse from `pwd`
+# to find `.git` directory.
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ PROGRAM_PATH="$(realpath "$0")"
+ PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")"
+ PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")"
+else
+ PROJECT_ROOT="$(repo_root $(pwd))"
+fi
+
+log_yellow "Changing to the project root: ${PROJECT_ROOT}."
+cd "${PROJECT_ROOT}"
+
+# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need
+# to use this environment variable in `PROJECT_ROOT`.
+if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then
+
+ mkdir -p "${tmpdir}/gcloud"
+ gcloud_config_dir="${tmpdir}/gcloud"
+
+ log_yellow "Using isolated gcloud config: ${gcloud_config_dir}."
+ export CLOUDSDK_CONFIG="${gcloud_config_dir}"
+
+ log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication."
+ gcloud auth activate-service-account \
+ --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}"
+ log_yellow "Configuring Container Registry access"
+ gcloud auth configure-docker --quiet
+fi
+
+required_envvars=(
+ # The basic trampoline configurations.
+ "TRAMPOLINE_IMAGE"
+ "TRAMPOLINE_BUILD_FILE"
+)
+
+if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then
+ source "${PROJECT_ROOT}/.trampolinerc"
+fi
+
+log_yellow "Checking environment variables."
+for e in "${required_envvars[@]}"
+do
+ if [[ -z "${!e:-}" ]]; then
+ log "Missing ${e} env var. Aborting."
+ exit 1
+ fi
+done
+
+# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1
+# script: e.g. "github/repo-name/.kokoro/run_tests.sh"
+TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}"
+log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}"
+
+# ignore error on docker operations and test execution
+set +e
+
+log_yellow "Preparing Docker image."
+# We only download the docker image in CI builds.
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ # Download the docker image specified by `TRAMPOLINE_IMAGE`
+
+ # We may want to add --max-concurrent-downloads flag.
+
+ log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ if docker pull "${TRAMPOLINE_IMAGE}"; then
+ log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ has_image="true"
+ else
+ log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ has_image="false"
+ fi
+else
+ # For local run, check if we have the image.
+ if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then
+ has_image="true"
+ else
+ has_image="false"
+ fi
+fi
+
+
+# The default user for a Docker container has uid 0 (root). To avoid
+# creating root-owned files in the build directory we tell docker to
+# use the current user ID.
+user_uid="$(id -u)"
+user_gid="$(id -g)"
+user_name="$(id -un)"
+
+# To allow docker in docker, we add the user to the docker group in
+# the host os.
+docker_gid=$(cut -d: -f3 < <(getent group docker))
+
+update_cache="false"
+if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then
+ # Build the Docker image from the source.
+ context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}")
+ docker_build_flags=(
+ "-f" "${TRAMPOLINE_DOCKERFILE}"
+ "-t" "${TRAMPOLINE_IMAGE}"
+ "--build-arg" "UID=${user_uid}"
+ "--build-arg" "USERNAME=${user_name}"
+ )
+ if [[ "${has_image}" == "true" ]]; then
+ docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}")
+ fi
+
+ log_yellow "Start building the docker image."
+ if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then
+ echo "docker build" "${docker_build_flags[@]}" "${context_dir}"
+ fi
+
+ # ON CI systems, we want to suppress docker build logs, only
+ # output the logs when it fails.
+ if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ if docker build "${docker_build_flags[@]}" "${context_dir}" \
+ > "${tmpdir}/docker_build.log" 2>&1; then
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ cat "${tmpdir}/docker_build.log"
+ fi
+
+ log_green "Finished building the docker image."
+ update_cache="true"
+ else
+ log_red "Failed to build the Docker image, aborting."
+ log_yellow "Dumping the build logs:"
+ cat "${tmpdir}/docker_build.log"
+ exit 1
+ fi
+ else
+ if docker build "${docker_build_flags[@]}" "${context_dir}"; then
+ log_green "Finished building the docker image."
+ update_cache="true"
+ else
+ log_red "Failed to build the Docker image, aborting."
+ exit 1
+ fi
+ fi
+else
+ if [[ "${has_image}" != "true" ]]; then
+ log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting."
+ exit 1
+ fi
+fi
+
+# We use an array for the flags so they are easier to document.
+docker_flags=(
+ # Remove the container after it exists.
+ "--rm"
+
+ # Use the host network.
+ "--network=host"
+
+ # Run in priviledged mode. We are not using docker for sandboxing or
+ # isolation, just for packaging our dev tools.
+ "--privileged"
+
+ # Run the docker script with the user id. Because the docker image gets to
+ # write in ${PWD} you typically want this to be your user id.
+ # To allow docker in docker, we need to use docker gid on the host.
+ "--user" "${user_uid}:${docker_gid}"
+
+ # Pass down the USER.
+ "--env" "USER=${user_name}"
+
+ # Mount the project directory inside the Docker container.
+ "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}"
+ "--workdir" "${TRAMPOLINE_WORKSPACE}"
+ "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}"
+
+ # Mount the temporary home directory.
+ "--volume" "${tmphome}:/h"
+ "--env" "HOME=/h"
+
+ # Allow docker in docker.
+ "--volume" "/var/run/docker.sock:/var/run/docker.sock"
+
+ # Mount the /tmp so that docker in docker can mount the files
+ # there correctly.
+ "--volume" "/tmp:/tmp"
+ # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR
+ # TODO(tmatsuo): This part is not portable.
+ "--env" "TRAMPOLINE_SECRET_DIR=/secrets"
+ "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile"
+ "--env" "KOKORO_GFILE_DIR=/secrets/gfile"
+ "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore"
+ "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore"
+)
+
+# Add an option for nicer output if the build gets a tty.
+if [[ -t 0 ]]; then
+ docker_flags+=("-it")
+fi
+
+# Passing down env vars
+for e in "${pass_down_envvars[@]}"
+do
+ if [[ -n "${!e:-}" ]]; then
+ docker_flags+=("--env" "${e}=${!e}")
+ fi
+done
+
+# If arguments are given, all arguments will become the commands run
+# in the container, otherwise run TRAMPOLINE_BUILD_FILE.
+if [[ $# -ge 1 ]]; then
+ log_yellow "Running the given commands '" "${@:1}" "' in the container."
+ readonly commands=("${@:1}")
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}"
+ fi
+ docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}"
+else
+ log_yellow "Running the tests in a Docker container."
+ docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}")
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}"
+ fi
+ docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}"
+fi
+
+
+test_retval=$?
+
+if [[ ${test_retval} -eq 0 ]]; then
+ log_green "Build finished with ${test_retval}"
+else
+ log_red "Build finished with ${test_retval}"
+fi
+
+# Only upload it when the test passes.
+if [[ "${update_cache}" == "true" ]] && \
+ [[ $test_retval == 0 ]] && \
+ [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then
+ log_yellow "Uploading the Docker image."
+ if docker push "${TRAMPOLINE_IMAGE}"; then
+ log_green "Finished uploading the Docker image."
+ else
+ log_red "Failed uploading the Docker image."
+ fi
+ # Call trampoline_after_upload_hook if it's defined.
+ if function_exists trampoline_after_upload_hook; then
+ trampoline_after_upload_hook
+ fi
+
+fi
+
+exit "${test_retval}"
diff --git a/testbed/googleapis__python-bigquery/.pre-commit-config.yaml b/testbed/googleapis__python-bigquery/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1d74695f70b61c333a8a4cf1ff26658a88283734
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.pre-commit-config.yaml
@@ -0,0 +1,31 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-yaml
+- repo: https://github.com/psf/black
+ rev: 23.7.0
+ hooks:
+ - id: black
+- repo: https://github.com/pycqa/flake8
+ rev: 6.1.0
+ hooks:
+ - id: flake8
diff --git a/testbed/googleapis__python-bigquery/.repo-metadata.json b/testbed/googleapis__python-bigquery/.repo-metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..d1be7ec4d9d2aea21f4e2f95abc9dd48ce4ca932
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.repo-metadata.json
@@ -0,0 +1,18 @@
+{
+ "name": "bigquery",
+ "name_pretty": "Google Cloud BigQuery",
+ "product_documentation": "https://cloud.google.com/bigquery",
+ "client_documentation": "https://cloud.google.com/python/docs/reference/bigquery/latest",
+ "issue_tracker": "https://issuetracker.google.com/savedsearches/559654",
+ "release_level": "stable",
+ "language": "python",
+ "library_type": "GAPIC_COMBO",
+ "repo": "googleapis/python-bigquery",
+ "distribution_name": "google-cloud-bigquery",
+ "api_id": "bigquery.googleapis.com",
+ "requires_billing": false,
+ "default_version": "v2",
+ "codeowner_team": "@googleapis/api-bigquery",
+ "api_shortname": "bigquery",
+ "api_description": "is a fully managed, NoOps, low cost data analytics service.\nData can be streamed into BigQuery at millions of rows per second to enable real-time analysis.\nWith BigQuery you can easily deploy Petabyte-scale Databases."
+}
diff --git a/testbed/googleapis__python-bigquery/.trampolinerc b/testbed/googleapis__python-bigquery/.trampolinerc
new file mode 100644
index 0000000000000000000000000000000000000000..0080152373d5d919b6d310d21d1be23bcf81a920
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/.trampolinerc
@@ -0,0 +1,61 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Add required env vars here.
+required_envvars+=(
+)
+
+# Add env vars which are passed down into the container here.
+pass_down_envvars+=(
+ "NOX_SESSION"
+ ###############
+ # Docs builds
+ ###############
+ "STAGING_BUCKET"
+ "V2_STAGING_BUCKET"
+ ##################
+ # Samples builds
+ ##################
+ "INSTALL_LIBRARY_FROM_SOURCE"
+ "RUN_TESTS_SESSION"
+ "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ # Target directories.
+ "RUN_TESTS_DIRS"
+ # The nox session to run.
+ "RUN_TESTS_SESSION"
+)
+
+# Prevent unintentional override on the default image.
+if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \
+ [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image."
+ exit 1
+fi
+
+# Define the default value if it makes sense.
+if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then
+ TRAMPOLINE_IMAGE_UPLOAD=""
+fi
+
+if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ TRAMPOLINE_IMAGE=""
+fi
+
+if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then
+ TRAMPOLINE_DOCKERFILE=""
+fi
+
+if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then
+ TRAMPOLINE_BUILD_FILE=""
+fi
diff --git a/testbed/googleapis__python-bigquery/CHANGELOG.md b/testbed/googleapis__python-bigquery/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..5de99a6ca2b7f286c12c93d8cf326f93a859af42
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/CHANGELOG.md
@@ -0,0 +1,2440 @@
+# Changelog
+
+[PyPI History][1]
+
+[1]: https://pypi.org/project/google-cloud-bigquery/#history
+
+
+## [3.26.0](https://github.com/googleapis/python-bigquery/compare/v3.25.0...v3.26.0) (2024-09-25)
+
+
+### Features
+
+* Include LegacyPandasError in init imports ([#2014](https://github.com/googleapis/python-bigquery/issues/2014)) ([3ab5e95](https://github.com/googleapis/python-bigquery/commit/3ab5e95984ad521027a4e1efd9f16767403e668d))
+* Use `bigquery-magics` package for the `%%bigquery` magic ([#1965](https://github.com/googleapis/python-bigquery/issues/1965)) ([60128a5](https://github.com/googleapis/python-bigquery/commit/60128a522375823422f238312521a2ce356d9177))
+
+
+### Bug Fixes
+
+* Add docfx to the presubmit configuration and delete docs-presubmit ([#1995](https://github.com/googleapis/python-bigquery/issues/1995)) ([bd83cfd](https://github.com/googleapis/python-bigquery/commit/bd83cfd2eb25cec58d59af8048f5188d748b083d))
+* Add warning when encountering unknown field types ([#1989](https://github.com/googleapis/python-bigquery/issues/1989)) ([8f5a41d](https://github.com/googleapis/python-bigquery/commit/8f5a41d283a965ca161019588d3a3b2947b04b5b))
+* Allow protobuf 5.x; require protobuf >=3.20.2; proto-plus >=1.22.3 ([#1976](https://github.com/googleapis/python-bigquery/issues/1976)) ([57bf873](https://github.com/googleapis/python-bigquery/commit/57bf873474382cc2cb34243b704bc928fa1b64c6))
+* Do not set job timeout extra property if None ([#1987](https://github.com/googleapis/python-bigquery/issues/1987)) ([edcb79c](https://github.com/googleapis/python-bigquery/commit/edcb79ca69dba30d8102abebb9d53bc76e4882ee))
+* Set pyarrow field nullable to False for a BigQuery field in REPEATED mode ([#1999](https://github.com/googleapis/python-bigquery/issues/1999)) ([5352870](https://github.com/googleapis/python-bigquery/commit/5352870283ca7d4652aefc73f12645bcf6e1363c))
+
+
+### Dependencies
+
+* Bump min version of google-api-core and google-cloud-core to 2.x ([#1972](https://github.com/googleapis/python-bigquery/issues/1972)) ([a958732](https://github.com/googleapis/python-bigquery/commit/a958732aed7d9bd51ffde3dc0e6cae9ad7455b54))
+
+
+### Documentation
+
+* Add short mode query sample & test ([#1978](https://github.com/googleapis/python-bigquery/issues/1978)) ([ba61a8a](https://github.com/googleapis/python-bigquery/commit/ba61a8ab0da541ba1940211875d7ea2e9e17dfa8))
+* Improve QueryJobConfig.destination docstring ([#2016](https://github.com/googleapis/python-bigquery/issues/2016)) ([1b4cca0](https://github.com/googleapis/python-bigquery/commit/1b4cca0a3cc788a4570705572d5f04172f6b4b24))
+
+## [3.25.0](https://github.com/googleapis/python-bigquery/compare/v3.24.0...v3.25.0) (2024-06-17)
+
+
+### Features
+
+* Add prefer_bqstorage_client option for Connection ([#1945](https://github.com/googleapis/python-bigquery/issues/1945)) ([bfdeb3f](https://github.com/googleapis/python-bigquery/commit/bfdeb3fdbc1d5b26fcd3d1433abfb0be49d12018))
+* Support load job option ColumnNameCharacterMap ([#1952](https://github.com/googleapis/python-bigquery/issues/1952)) ([7e522ee](https://github.com/googleapis/python-bigquery/commit/7e522eea776cd9a74f8078c4236f63d5ff11f20e))
+
+
+### Bug Fixes
+
+* Do not overwrite page_size with max_results when start_index is set ([#1956](https://github.com/googleapis/python-bigquery/issues/1956)) ([7d0fcee](https://github.com/googleapis/python-bigquery/commit/7d0fceefdf28278c1f2cdaab571de9b235320998))
+
+## [3.24.0](https://github.com/googleapis/python-bigquery/compare/v3.23.1...v3.24.0) (2024-06-04)
+
+
+### Features
+
+* Add default timeout for Client.get_job() ([#1935](https://github.com/googleapis/python-bigquery/issues/1935)) ([9fbad76](https://github.com/googleapis/python-bigquery/commit/9fbad767cc228e02040436742d0cb6743d370b90))
+* Add support for map target type in Parquet options ([#1919](https://github.com/googleapis/python-bigquery/issues/1919)) ([c3f7b23](https://github.com/googleapis/python-bigquery/commit/c3f7b237383d4705ed6e720544728c4db61f6c83))
+
+
+### Bug Fixes
+
+* Create query job in job.result() if doesn't exist ([#1944](https://github.com/googleapis/python-bigquery/issues/1944)) ([8f5b4b7](https://github.com/googleapis/python-bigquery/commit/8f5b4b70423c277ffd559d2034bc0b2b5fb93169))
+* Retry `is_job_done` on `ConnectionError` ([#1930](https://github.com/googleapis/python-bigquery/issues/1930)) ([4f72723](https://github.com/googleapis/python-bigquery/commit/4f72723f539d35977bc52c5950f6e00889b5c7be))
+
+
+### Performance Improvements
+
+* If `page_size` or `max_results` is set on `QueryJob.result()`, use to download first page of results ([#1942](https://github.com/googleapis/python-bigquery/issues/1942)) ([3e7a48d](https://github.com/googleapis/python-bigquery/commit/3e7a48d36e3c7bf6abe1b5550097178f6ca6e174))
+
+## [3.23.1](https://github.com/googleapis/python-bigquery/compare/v3.23.0...v3.23.1) (2024-05-21)
+
+
+### Performance Improvements
+
+* Decrease the threshold in which we use the BQ Storage Read API ([#1925](https://github.com/googleapis/python-bigquery/issues/1925)) ([eaa1a52](https://github.com/googleapis/python-bigquery/commit/eaa1a52b360646909c14ca7194b8c6b17fefdd79))
+
+## [3.23.0](https://github.com/googleapis/python-bigquery/compare/v3.22.0...v3.23.0) (2024-05-16)
+
+
+### Features
+
+* Adds timer decorator to facilitate debugging ([#1917](https://github.com/googleapis/python-bigquery/issues/1917)) ([ea750e0](https://github.com/googleapis/python-bigquery/commit/ea750e0248473b6207b8517aa7ea1cf4e19bccf2))
+* Support insertAll for range ([#1909](https://github.com/googleapis/python-bigquery/issues/1909)) ([74e75e8](https://github.com/googleapis/python-bigquery/commit/74e75e89ce3a5ac18112b2c1c33248445ff072e4))
+
+
+### Bug Fixes
+
+* Add pyarrow version check for range support ([#1914](https://github.com/googleapis/python-bigquery/issues/1914)) ([a86d7b9](https://github.com/googleapis/python-bigquery/commit/a86d7b96813f67fea28b46c5252416222edca9a6))
+* Edit presubmit for to simplify configuration ([#1915](https://github.com/googleapis/python-bigquery/issues/1915)) ([b739596](https://github.com/googleapis/python-bigquery/commit/b739596f37b8c00b375cc811c316b618097d761a))
+
+## [3.22.0](https://github.com/googleapis/python-bigquery/compare/v3.21.0...v3.22.0) (2024-04-19)
+
+
+### Features
+
+* Support RANGE in queries Part 2: Arrow ([#1868](https://github.com/googleapis/python-bigquery/issues/1868)) ([5251b5d](https://github.com/googleapis/python-bigquery/commit/5251b5dbb254732ea730bab664ad319bd5be47e7))
+
+## [3.21.0](https://github.com/googleapis/python-bigquery/compare/v3.20.1...v3.21.0) (2024-04-18)
+
+
+### Features
+
+* Add compression option ZSTD. ([#1890](https://github.com/googleapis/python-bigquery/issues/1890)) ([5ed9cce](https://github.com/googleapis/python-bigquery/commit/5ed9ccee204b7cf8e96cb0e050f6830c05f3b4fd))
+* Adds billing to opentel ([#1889](https://github.com/googleapis/python-bigquery/issues/1889)) ([38697fb](https://github.com/googleapis/python-bigquery/commit/38697fb942516fc2f6f5e21e19a11811fbaeb1f4))
+* Support RANGE in queries Part 1: JSON ([#1884](https://github.com/googleapis/python-bigquery/issues/1884)) ([3634405](https://github.com/googleapis/python-bigquery/commit/3634405fa1b40ae5f69b06d7c7f8de4e3d246d92))
+
+
+### Bug Fixes
+
+* Add types to DatasetReference constructor ([#1601](https://github.com/googleapis/python-bigquery/issues/1601)) ([bf8861c](https://github.com/googleapis/python-bigquery/commit/bf8861c3473a1af978db7a06463ddc0bad86f326))
+* Creates linting-typing.cfg in presubmit ([#1881](https://github.com/googleapis/python-bigquery/issues/1881)) ([c852c15](https://github.com/googleapis/python-bigquery/commit/c852c153c55025ba1187d61e313ead2308616c55))
+* Remove duplicate key time_partitioning from Table._PROPERTY_TO_A… ([#1898](https://github.com/googleapis/python-bigquery/issues/1898)) ([82ae908](https://github.com/googleapis/python-bigquery/commit/82ae908fbf3b2361343fff1859d3533383dc50ec))
+* Retry query jobs that fail even with ambiguous `jobs.getQueryResults` REST errors ([#1903](https://github.com/googleapis/python-bigquery/issues/1903), [#1900](https://github.com/googleapis/python-bigquery/issues/1900)) ([1367b58](https://github.com/googleapis/python-bigquery/commit/1367b584b68d917ec325ce4383a0e9a36205b894))
+
+
+### Performance Improvements
+
+* Avoid unnecessary API call in `QueryJob.result()` when job is already finished ([#1900](https://github.com/googleapis/python-bigquery/issues/1900)) ([1367b58](https://github.com/googleapis/python-bigquery/commit/1367b584b68d917ec325ce4383a0e9a36205b894))
+
+## [3.20.1](https://github.com/googleapis/python-bigquery/compare/v3.20.0...v3.20.1) (2024-04-01)
+
+
+### Bug Fixes
+
+* Make `pyarrow` an optional dependency post-3.20.0 yanked release ([#1879](https://github.com/googleapis/python-bigquery/issues/1879)) ([21714e1](https://github.com/googleapis/python-bigquery/commit/21714e18bad8d8d89ed5642dbdb61d14e97d5f33))
+
+## [3.20.0](https://github.com/googleapis/python-bigquery/compare/v3.19.0...v3.20.0) (2024-03-27)
+
+
+### Features
+
+* Add `fields` parameter to `set_iam_policy` for consistency with update methods ([#1872](https://github.com/googleapis/python-bigquery/issues/1872)) ([08b1e6f](https://github.com/googleapis/python-bigquery/commit/08b1e6f9c41121907c345daedbae40ece18e8b6a))
+
+
+### Bug Fixes
+
+* Correct type checking ([#1848](https://github.com/googleapis/python-bigquery/issues/1848)) ([2660dbd](https://github.com/googleapis/python-bigquery/commit/2660dbd4821a89a1e20e3e1541504a409f1979aa))
+* Update error logging when converting to pyarrow column fails ([#1836](https://github.com/googleapis/python-bigquery/issues/1836)) ([0ac6e9b](https://github.com/googleapis/python-bigquery/commit/0ac6e9bf186945832f5dcdf5a4d95667b4da223e))
+* Updates a number of optional dependencies ([#1864](https://github.com/googleapis/python-bigquery/issues/1864)) ([c2496a1](https://github.com/googleapis/python-bigquery/commit/c2496a1014a7d99e805b3d0a66e4517165bd7e01))
+* Use an allowlist instead of denylist to determine when `query_and_wait` uses `jobs.query` API ([#1869](https://github.com/googleapis/python-bigquery/issues/1869)) ([e265db6](https://github.com/googleapis/python-bigquery/commit/e265db6a6a37d13056dcaac240c2cf3975dfd644))
+
+## [3.19.0](https://github.com/googleapis/python-bigquery/compare/v3.18.0...v3.19.0) (2024-03-11)
+
+
+### Features
+
+* Support RANGE query parameters ([#1827](https://github.com/googleapis/python-bigquery/issues/1827)) ([b359a9a](https://github.com/googleapis/python-bigquery/commit/b359a9a55936a759a36aa69c5e5b014685e1fca6))
+* Support range sql ([#1807](https://github.com/googleapis/python-bigquery/issues/1807)) ([86a45c9](https://github.com/googleapis/python-bigquery/commit/86a45c989836b34dca456bac014352e55d6f86c0))
+
+
+### Bug Fixes
+
+* Add google-auth as a direct dependency ([713ce2c](https://github.com/googleapis/python-bigquery/commit/713ce2c2f6ce9931f67cbbcd63ad436ad336ad26))
+* Augment universe_domain handling ([#1837](https://github.com/googleapis/python-bigquery/issues/1837)) ([53c2cbf](https://github.com/googleapis/python-bigquery/commit/53c2cbf98d2961f553747514de273bcd5c117f0e))
+* **deps:** Require google-api-core>=1.34.1, >=2.11.0 ([713ce2c](https://github.com/googleapis/python-bigquery/commit/713ce2c2f6ce9931f67cbbcd63ad436ad336ad26))
+* Supplementary fix to env-based universe resolution ([#1844](https://github.com/googleapis/python-bigquery/issues/1844)) ([b818992](https://github.com/googleapis/python-bigquery/commit/b8189929b6008f7780214822062f8ed05d8d2a01))
+* Supplementary fix to env-based universe resolution ([#1847](https://github.com/googleapis/python-bigquery/issues/1847)) ([6dff50f](https://github.com/googleapis/python-bigquery/commit/6dff50f4fbc5aeb644383a4050dd5ffc05015ffe))
+
+## [3.18.0](https://github.com/googleapis/python-bigquery/compare/v3.17.2...v3.18.0) (2024-02-29)
+
+
+### Features
+
+* Support nullable boolean and Int64 dtypes in `insert_rows_from_dataframe` ([#1816](https://github.com/googleapis/python-bigquery/issues/1816)) ([ab0cf4c](https://github.com/googleapis/python-bigquery/commit/ab0cf4cc03292f62b56a8813cfb7681daa87f872))
+* Support slot_ms in QueryPlanEntry ([#1831](https://github.com/googleapis/python-bigquery/issues/1831)) ([d62cabb](https://github.com/googleapis/python-bigquery/commit/d62cabbf115637ecbaf8cc378f39329a5ae74c26))
+
+
+### Bug Fixes
+
+* Keyword rendering and docstring improvements ([#1829](https://github.com/googleapis/python-bigquery/issues/1829)) ([4dfb920](https://github.com/googleapis/python-bigquery/commit/4dfb920b106784e98f343b3e3fc8e8ff70c50560))
+
+
+### Documentation
+
+* **samples:** Updates to urllib3 constraint for Python 3.7 ([#1834](https://github.com/googleapis/python-bigquery/issues/1834)) ([b099c32](https://github.com/googleapis/python-bigquery/commit/b099c32a83946a347560f6a71d08c3f263e56cb6))
+* Update `client_query_w_named_params.py` to use `query_and_wait` API ([#1782](https://github.com/googleapis/python-bigquery/issues/1782)) ([89dfcb6](https://github.com/googleapis/python-bigquery/commit/89dfcb6469d22e78003a70371a0938a6856e033c))
+
+## [3.17.2](https://github.com/googleapis/python-bigquery/compare/v3.17.1...v3.17.2) (2024-01-30)
+
+
+### Bug Fixes
+
+* Change load_table_from_json autodetect logic ([#1804](https://github.com/googleapis/python-bigquery/issues/1804)) ([6249032](https://github.com/googleapis/python-bigquery/commit/62490325f64e5d66303d9218992e28ac5f21cb3f))
+
+
+### Documentation
+
+* Update to use API ([#1781](https://github.com/googleapis/python-bigquery/issues/1781)) ([81563b0](https://github.com/googleapis/python-bigquery/commit/81563b06298fe3a64be6a89b583c3d64758ca12a))
+* Update `client_query_destination_table.py` sample to use `query_and_wait` ([#1783](https://github.com/googleapis/python-bigquery/issues/1783)) ([68ebbe1](https://github.com/googleapis/python-bigquery/commit/68ebbe12d455ce8e9b1784fb11787c2fb842ef22))
+* Update query_external_sheets_permanent_table.py to use query_and_wait API ([#1778](https://github.com/googleapis/python-bigquery/issues/1778)) ([a7be88a](https://github.com/googleapis/python-bigquery/commit/a7be88adf8a480ee61aa79789cb53df1b79bb091))
+* Update sample for query_to_arrow to use query_and_wait API ([#1776](https://github.com/googleapis/python-bigquery/issues/1776)) ([dbf10de](https://github.com/googleapis/python-bigquery/commit/dbf10dee51a7635e9b98658f205ded2de087a06f))
+* Update the query destination table legacy file to use query_and_wait API ([#1775](https://github.com/googleapis/python-bigquery/issues/1775)) ([ef89f9e](https://github.com/googleapis/python-bigquery/commit/ef89f9e58c22b3af5a7757b69daa030116012350))
+* Update to use `query_and_wait` in `client_query_w_positional_params.py` ([#1786](https://github.com/googleapis/python-bigquery/issues/1786)) ([410f71e](https://github.com/googleapis/python-bigquery/commit/410f71e6b6e755928e363ed89c1044e14b0db9cc))
+* Update to use `query_and_wait` in `samples/client_query_w_timestamp_params.py` ([#1785](https://github.com/googleapis/python-bigquery/issues/1785)) ([ba36948](https://github.com/googleapis/python-bigquery/commit/ba3694852c13c8a29fe0f9d923353e82acfd4278))
+* Update to_geodataframe to use query_and_wait functionality ([#1800](https://github.com/googleapis/python-bigquery/issues/1800)) ([1298594](https://github.com/googleapis/python-bigquery/commit/12985942942b8f205ecd261fcdf620df9a640460))
+
+## [3.17.1](https://github.com/googleapis/python-bigquery/compare/v3.17.0...v3.17.1) (2024-01-24)
+
+
+### Bug Fixes
+
+* Add pyarrow.large_strign to the _ARROW_SCALAR_IDS_TO_BQ map ([#1796](https://github.com/googleapis/python-bigquery/issues/1796)) ([b402a6d](https://github.com/googleapis/python-bigquery/commit/b402a6df92e656aee10dd2c11c48f6ed93c74fd7))
+* Retry 'job exceeded rate limits' for DDL queries ([#1794](https://github.com/googleapis/python-bigquery/issues/1794)) ([39f33b2](https://github.com/googleapis/python-bigquery/commit/39f33b210ecbe9c2fd390825d29393c2d80257f5))
+
+## [3.17.0](https://github.com/googleapis/python-bigquery/compare/v3.16.0...v3.17.0) (2024-01-24)
+
+
+### Features
+
+* Support universe resolution ([#1774](https://github.com/googleapis/python-bigquery/issues/1774)) ([0b5c1d5](https://github.com/googleapis/python-bigquery/commit/0b5c1d597cdec3a05a16fb935595f773c5840bd4))
+
+
+### Bug Fixes
+
+* `query_and_wait` now retains unknown query configuration `_properties` ([#1793](https://github.com/googleapis/python-bigquery/issues/1793)) ([4ba4342](https://github.com/googleapis/python-bigquery/commit/4ba434287a0a25f027e3b63a80f8881a9b16723e))
+* Raise `ValueError` in `query_and_wait` with wrong `job_config` type ([4ba4342](https://github.com/googleapis/python-bigquery/commit/4ba434287a0a25f027e3b63a80f8881a9b16723e))
+
+
+### Documentation
+
+* Remove unused query code sample ([#1769](https://github.com/googleapis/python-bigquery/issues/1769)) ([1f96439](https://github.com/googleapis/python-bigquery/commit/1f96439b3dbd27f11be5e2af84f290ec6094d0a4))
+* Update `snippets.py` to use `query_and_wait` ([#1773](https://github.com/googleapis/python-bigquery/issues/1773)) ([d90602d](https://github.com/googleapis/python-bigquery/commit/d90602de87e58b665cb974401a327a640805822f))
+* Update multiple samples to change query to query_and_wait ([#1784](https://github.com/googleapis/python-bigquery/issues/1784)) ([d1161dd](https://github.com/googleapis/python-bigquery/commit/d1161dddde41a7d35b30033ccbf6984a5de640bd))
+* Update the query with no cache sample to use query_and_wait API ([#1770](https://github.com/googleapis/python-bigquery/issues/1770)) ([955a4cd](https://github.com/googleapis/python-bigquery/commit/955a4cd99e21cbca1b2f9c1dc6aa3fd8070cd61f))
+* Updates `query` to `query and wait` in samples/desktopapp/user_credentials.py ([#1787](https://github.com/googleapis/python-bigquery/issues/1787)) ([89f1299](https://github.com/googleapis/python-bigquery/commit/89f1299b3164b51fb0f29bc600a34ded59c10682))
+
+## [3.16.0](https://github.com/googleapis/python-bigquery/compare/v3.15.0...v3.16.0) (2024-01-12)
+
+
+### Features
+
+* Add `table_constraints` field to Table model ([#1755](https://github.com/googleapis/python-bigquery/issues/1755)) ([a167f9a](https://github.com/googleapis/python-bigquery/commit/a167f9a95f0a8fbf0bdb4943d06f07c03768c132))
+* Support jsonExtension in LoadJobConfig ([#1751](https://github.com/googleapis/python-bigquery/issues/1751)) ([0fd7347](https://github.com/googleapis/python-bigquery/commit/0fd7347ddb4ae1993f02b3bc109f64297437b3e2))
+
+
+### Bug Fixes
+
+* Add detailed message in job error ([#1762](https://github.com/googleapis/python-bigquery/issues/1762)) ([08483fb](https://github.com/googleapis/python-bigquery/commit/08483fba675f3b87571787e1e4420134a8fc8177))
+
+## [3.15.0](https://github.com/googleapis/python-bigquery/compare/v3.14.1...v3.15.0) (2024-01-09)
+
+
+### Features
+
+* Support JSON type in `insert_rows` and as a scalar query parameter ([#1757](https://github.com/googleapis/python-bigquery/issues/1757)) ([02a7d12](https://github.com/googleapis/python-bigquery/commit/02a7d129776b7da7da844ffa9c5cdf21811cd3af))
+* Support RANGE in schema ([#1746](https://github.com/googleapis/python-bigquery/issues/1746)) ([8585747](https://github.com/googleapis/python-bigquery/commit/8585747058e6db49a8078ae44d8e10735cdc27f9))
+
+
+### Bug Fixes
+
+* Deserializing JSON subfields within structs fails ([#1742](https://github.com/googleapis/python-bigquery/issues/1742)) ([0d93073](https://github.com/googleapis/python-bigquery/commit/0d930739c78b557db6cd48b38fe16eba93719c40))
+* Due to upstream change in dataset, updates expected results ([#1761](https://github.com/googleapis/python-bigquery/issues/1761)) ([132c14b](https://github.com/googleapis/python-bigquery/commit/132c14bbddfb61ea8bc408bef5e958e21b5b819c))
+* Load_table_from_dataframe for higher scale decimal ([#1703](https://github.com/googleapis/python-bigquery/issues/1703)) ([b9c8be0](https://github.com/googleapis/python-bigquery/commit/b9c8be0982c76187444300c414e0dda8b0ad105b))
+* Updates types-protobuf version for mypy-samples nox session ([#1764](https://github.com/googleapis/python-bigquery/issues/1764)) ([c0de695](https://github.com/googleapis/python-bigquery/commit/c0de6958e5761ad6ff532dd933b0f4387e18f1b9))
+
+
+### Performance Improvements
+
+* DB-API uses more efficient `query_and_wait` when no job ID is provided ([#1747](https://github.com/googleapis/python-bigquery/issues/1747)) ([d225a94](https://github.com/googleapis/python-bigquery/commit/d225a94e718a85877c495fbd32eca607b8919ac6))
+
+## [3.14.1](https://github.com/googleapis/python-bigquery/compare/v3.14.0...v3.14.1) (2023-12-13)
+
+
+### Bug Fixes
+
+* Add missing handler for deserializing json value ([#1587](https://github.com/googleapis/python-bigquery/issues/1587)) ([09017a9](https://github.com/googleapis/python-bigquery/commit/09017a997010f78bb6e34238fab15247ed14ea7e))
+
+## [3.14.0](https://github.com/googleapis/python-bigquery/compare/v3.13.0...v3.14.0) (2023-12-08)
+
+
+### Features
+
+* Add `Client.query_and_wait` which directly returns a `RowIterator` of results ([#1722](https://github.com/googleapis/python-bigquery/issues/1722)) ([89a647e](https://github.com/googleapis/python-bigquery/commit/89a647e19fe5d7302c0a39bba77a155635c5c29d))
+* Add `job_id`, `location`, `project`, and `query_id` properties on `RowIterator` ([#1733](https://github.com/googleapis/python-bigquery/issues/1733)) ([494f275](https://github.com/googleapis/python-bigquery/commit/494f275ab2493dc7904f685c4d12e60bef51ab21))
+* Add `job_timeout_ms` to job configuration classes ([#1675](https://github.com/googleapis/python-bigquery/issues/1675)) ([84d64cd](https://github.com/googleapis/python-bigquery/commit/84d64cdd157afef4a7bf7807e557d59452133434))
+* Add support dataset.max_time_travel_hours ([#1683](https://github.com/googleapis/python-bigquery/issues/1683)) ([f22eff2](https://github.com/googleapis/python-bigquery/commit/f22eff25f116f1c4973ac2b8b03bc8a4ae1f3f42))
+* Add support for Dataset.isCaseInsensitive ([#1671](https://github.com/googleapis/python-bigquery/issues/1671)) ([386fa86](https://github.com/googleapis/python-bigquery/commit/386fa86c89b8cff69fc02213254a1c53c02fee42))
+* Add support for Python 3.12 ([#1736](https://github.com/googleapis/python-bigquery/issues/1736)) ([3c0976a](https://github.com/googleapis/python-bigquery/commit/3c0976aecb0f917477feef4e9ed865997c2bb106))
+* Removed pkg_resources from all test files and moved importlib into pandas extra ([#1726](https://github.com/googleapis/python-bigquery/issues/1726)) ([1f4ebb1](https://github.com/googleapis/python-bigquery/commit/1f4ebb1eca4f9380a31172fc8cb2fae125f8c5a2))
+* Support data_governance_type ([#1708](https://github.com/googleapis/python-bigquery/issues/1708)) ([eff365d](https://github.com/googleapis/python-bigquery/commit/eff365dc17755d0855338e2f273428ffe2056f67))
+
+
+### Bug Fixes
+
+* `load_table_from_dataframe` now assumes there may be local null values ([#1735](https://github.com/googleapis/python-bigquery/issues/1735)) ([f05dc69](https://github.com/googleapis/python-bigquery/commit/f05dc69a1f8c65ac32085bfcc6950c2c83f8a843))
+* Ensure query job retry has longer deadline than API request deadline ([#1734](https://github.com/googleapis/python-bigquery/issues/1734)) ([5573579](https://github.com/googleapis/python-bigquery/commit/55735791122f97b7f67cb962b489fd1f12210af5))
+* Keep `RowIterator.total_rows` populated after iteration ([#1748](https://github.com/googleapis/python-bigquery/issues/1748)) ([8482f47](https://github.com/googleapis/python-bigquery/commit/8482f4759ce3c4b00fa06a7f306a2ac4d4ee8eb7))
+* Move grpc, proto-plus and protobuf packages to extras ([#1721](https://github.com/googleapis/python-bigquery/issues/1721)) ([5ce4d13](https://github.com/googleapis/python-bigquery/commit/5ce4d136af97b91fbe1cc56bba1021e50a9c8476))
+
+
+### Performance Improvements
+
+* Use the first page a results when `query(api_method="QUERY")` ([#1723](https://github.com/googleapis/python-bigquery/issues/1723)) ([6290517](https://github.com/googleapis/python-bigquery/commit/6290517d6b153a31f20098f75aee580b7915aca9))
+
+## [3.13.0](https://github.com/googleapis/python-bigquery/compare/v3.12.0...v3.13.0) (2023-10-30)
+
+
+### Features
+
+* Add `Model.transform_columns` property ([#1661](https://github.com/googleapis/python-bigquery/issues/1661)) ([5ceed05](https://github.com/googleapis/python-bigquery/commit/5ceed056482f6d1f2fc45e7e6b84382de45c85ed))
+* Add support for dataset.default_rounding_mode ([#1688](https://github.com/googleapis/python-bigquery/issues/1688)) ([83bc768](https://github.com/googleapis/python-bigquery/commit/83bc768b90a852d258a4805603020a296e02d2f9))
+
+
+### Bug Fixes
+
+* AccessEntry API representation parsing ([#1682](https://github.com/googleapis/python-bigquery/issues/1682)) ([a40d7ae](https://github.com/googleapis/python-bigquery/commit/a40d7ae03149708fc34c962b43a6ac198780b6aa))
+
+
+### Documentation
+
+* Remove redundant `bigquery_update_table_expiration` code sample ([#1673](https://github.com/googleapis/python-bigquery/issues/1673)) ([2dded33](https://github.com/googleapis/python-bigquery/commit/2dded33626b3de6c4ab5e1229eb4c85786b2ff53))
+* Revised `create_partitioned_table` sample ([#1447](https://github.com/googleapis/python-bigquery/issues/1447)) ([40ba859](https://github.com/googleapis/python-bigquery/commit/40ba859059c3e463e17ea7781bc5a9aff8244c5d))
+* Revised relax column mode sample ([#1467](https://github.com/googleapis/python-bigquery/issues/1467)) ([b8c9276](https://github.com/googleapis/python-bigquery/commit/b8c9276be011d971b941b583fd3d4417d438067f))
+
+## [3.12.0](https://github.com/googleapis/python-bigquery/compare/v3.11.4...v3.12.0) (2023-10-02)
+
+
+### Features
+
+* Add `Dataset.storage_billing_model` setter, use `client.update_dataset(ds, fields=["storage_billing_model"])` to update ([#1643](https://github.com/googleapis/python-bigquery/issues/1643)) ([5deba50](https://github.com/googleapis/python-bigquery/commit/5deba50b8c2d91d08bd5f5fb68742268c494b4a9))
+* Search statistics ([#1616](https://github.com/googleapis/python-bigquery/issues/1616)) ([b930e46](https://github.com/googleapis/python-bigquery/commit/b930e4673b0d1cceb53f683e47578d87af9361f3))
+* Widen retry predicate to include ServiceUnavailable ([#1641](https://github.com/googleapis/python-bigquery/issues/1641)) ([3e021a4](https://github.com/googleapis/python-bigquery/commit/3e021a46d387a0e3cb69913a281062fc221bb926))
+
+
+### Bug Fixes
+
+* Allow `storage_billing_model` to be explicitly set to `None` to use project default value ([#1665](https://github.com/googleapis/python-bigquery/issues/1665)) ([514d3e1](https://github.com/googleapis/python-bigquery/commit/514d3e12e5131bd589dff08893fd89bf40338ba3))
+* Relax timeout expectations ([#1645](https://github.com/googleapis/python-bigquery/issues/1645)) ([1760e94](https://github.com/googleapis/python-bigquery/commit/1760e945d16163980027fecf21113cd77ddc35a1))
+* Use isinstance() per E721, unpin flake8 ([#1659](https://github.com/googleapis/python-bigquery/issues/1659)) ([54a7769](https://github.com/googleapis/python-bigquery/commit/54a77694afcd80be4ba469c6ebb7ca8be112b04e))
+
+
+### Documentation
+
+* Revise update_table_expiration sample ([#1457](https://github.com/googleapis/python-bigquery/issues/1457)) ([03194e0](https://github.com/googleapis/python-bigquery/commit/03194e0156ed9201cb36301967c5af117d7ef29c))
+
+## [3.11.4](https://github.com/googleapis/python-bigquery/compare/v3.11.3...v3.11.4) (2023-07-19)
+
+
+### Bug Fixes
+
+* Updates typing in function definitions ([#1613](https://github.com/googleapis/python-bigquery/issues/1613)) ([db755ce](https://github.com/googleapis/python-bigquery/commit/db755ce5d2ae21e458f33f02cf63d2e5fbc45cf5))
+
+## [3.11.3](https://github.com/googleapis/python-bigquery/compare/v3.11.2...v3.11.3) (2023-06-27)
+
+
+### Bug Fixes
+
+* Type annotations include Optional when None is accepted ([#1554](https://github.com/googleapis/python-bigquery/issues/1554)) ([6c1ab80](https://github.com/googleapis/python-bigquery/commit/6c1ab802b09124ba837d6d5358962e3fce2d4a2c))
+
+## [3.11.2](https://github.com/googleapis/python-bigquery/compare/v3.11.1...v3.11.2) (2023-06-21)
+
+
+### Bug Fixes
+
+* Updates tests based on revised hacker_news tables ([#1591](https://github.com/googleapis/python-bigquery/issues/1591)) ([d73cf49](https://github.com/googleapis/python-bigquery/commit/d73cf495b8dfa032a43dc1d58599d0691aaa0efb))
+
+## [3.11.1](https://github.com/googleapis/python-bigquery/compare/v3.11.0...v3.11.1) (2023-06-09)
+
+
+### Documentation
+
+* Add/reformat return types for cloud RAD docs ([#1582](https://github.com/googleapis/python-bigquery/issues/1582)) ([6efdce1](https://github.com/googleapis/python-bigquery/commit/6efdce13cc3b25d37d22a856f2308daed569e637))
+
+## [3.11.0](https://github.com/googleapis/python-bigquery/compare/v3.10.0...v3.11.0) (2023-06-01)
+
+
+### Features
+
+* Add remote function options to routines ([#1558](https://github.com/googleapis/python-bigquery/issues/1558)) ([84ad11d](https://github.com/googleapis/python-bigquery/commit/84ad11d00d99d279e4e6e0fa4ca60e59575b1dad))
+
+
+### Bug Fixes
+
+* Filter None values from OpenTelemetry attributes ([#1567](https://github.com/googleapis/python-bigquery/issues/1567)) ([9ea2e21](https://github.com/googleapis/python-bigquery/commit/9ea2e21c35783782993d1ad2d3b910bbe9981ce2))
+* Handle case when expirationMs is None ([#1553](https://github.com/googleapis/python-bigquery/issues/1553)) ([fa6e13d](https://github.com/googleapis/python-bigquery/commit/fa6e13d5006caadb36899b4e2a24ca82b7f11b17))
+* Raise most recent exception when not able to fetch query job after starting the job ([#1362](https://github.com/googleapis/python-bigquery/issues/1362)) ([09cc1df](https://github.com/googleapis/python-bigquery/commit/09cc1df6babaf90ea0b0a6fd926f8013822a31ed))
+
+## [3.10.0](https://github.com/googleapis/python-bigquery/compare/v3.9.0...v3.10.0) (2023-04-18)
+
+
+### Features
+
+* Add date, datetime, time, timestamp dtype to to_dataframe ([#1547](https://github.com/googleapis/python-bigquery/issues/1547)) ([64e913d](https://github.com/googleapis/python-bigquery/commit/64e913d73832f6363466cbea5ace2337c86fa58b))
+
+## [3.9.0](https://github.com/googleapis/python-bigquery/compare/v3.8.0...v3.9.0) (2023-03-28)
+
+
+### Features
+
+* Expose query job on dbapi cursor ([#1520](https://github.com/googleapis/python-bigquery/issues/1520)) ([339eb0e](https://github.com/googleapis/python-bigquery/commit/339eb0e86040a7c30d140800f34810ffc6a7c76b))
+
+
+### Bug Fixes
+
+* Keyerror when the load_table_from_dataframe accesses a unmapped dtype dataframe index ([#1535](https://github.com/googleapis/python-bigquery/issues/1535)) ([a69348a](https://github.com/googleapis/python-bigquery/commit/a69348a558f48cfc61d03d3e8bb7f9aee48bea86))
+
+## [3.8.0](https://github.com/googleapis/python-bigquery/compare/v3.7.0...v3.8.0) (2023-03-24)
+
+
+### Features
+
+* Add bool, int, float, string dtype to to_dataframe ([#1529](https://github.com/googleapis/python-bigquery/issues/1529)) ([5e4465d](https://github.com/googleapis/python-bigquery/commit/5e4465d0975f54e8da885006686d9431ff9c5653))
+* Add default LoadJobConfig to Client ([#1526](https://github.com/googleapis/python-bigquery/issues/1526)) ([a2520ca](https://github.com/googleapis/python-bigquery/commit/a2520cabf7ec6bcb923c21e338188f1c10dc4d5d))
+* Expose configuration property on CopyJob, ExtractJob, LoadJob, QueryJob ([#1521](https://github.com/googleapis/python-bigquery/issues/1521)) ([8270a10](https://github.com/googleapis/python-bigquery/commit/8270a10df8f40750a7ac541a1781a71d7e79ce67))
+
+
+### Bug Fixes
+
+* Loosen ipywidgets restrictions further to address ipython compatibility issues ([#1531](https://github.com/googleapis/python-bigquery/issues/1531)) ([50e5026](https://github.com/googleapis/python-bigquery/commit/50e502674807b9771d7e26c0e784539bed8f9da6))
+
+## [3.7.0](https://github.com/googleapis/python-bigquery/compare/v3.6.0...v3.7.0) (2023-03-06)
+
+
+### Features
+
+* Add `connection_properties` and `create_session` to `LoadJobConfig` ([#1509](https://github.com/googleapis/python-bigquery/issues/1509)) ([cd0aaa1](https://github.com/googleapis/python-bigquery/commit/cd0aaa15960e9ca7a0aaf411c8e4990f95421816))
+* Add default_query_job_config property and property setter to BQ client ([#1511](https://github.com/googleapis/python-bigquery/issues/1511)) ([a23092c](https://github.com/googleapis/python-bigquery/commit/a23092cad834c6a016f455d46fefa13bb6cdbf0f))
+
+
+### Documentation
+
+* Remove < 3.11 reference from README ([#1502](https://github.com/googleapis/python-bigquery/issues/1502)) ([c7417f4](https://github.com/googleapis/python-bigquery/commit/c7417f43563e20a3e6f1a57f46925fb274b28b07))
+
+## [3.6.0](https://github.com/googleapis/python-bigquery/compare/v3.5.0...v3.6.0) (2023-02-22)
+
+
+### Features
+
+* Adding preserveAsciiControlCharacter to CSVOptions ([#1491](https://github.com/googleapis/python-bigquery/issues/1491)) ([f832e7a](https://github.com/googleapis/python-bigquery/commit/f832e7a0b79f3567a0773ff11630e2f48bed60db))
+
+
+### Bug Fixes
+
+* Annotate optional integer parameters with optional type ([#1487](https://github.com/googleapis/python-bigquery/issues/1487)) ([a190aaa](https://github.com/googleapis/python-bigquery/commit/a190aaa09ae73e8b6a83b7b213247f95fde57615))
+* Loosen ipywidget dependency ([#1504](https://github.com/googleapis/python-bigquery/issues/1504)) ([20d3276](https://github.com/googleapis/python-bigquery/commit/20d3276cc29e9467eef9476d5fd572099d9a3f6f))
+* Removes scope to avoid unnecessary duplication ([#1503](https://github.com/googleapis/python-bigquery/issues/1503)) ([665d7ba](https://github.com/googleapis/python-bigquery/commit/665d7ba74a1b45de1ef51cc75b6860125afc5fe6))
+
+
+### Dependencies
+
+* Update minimum google-cloud-core to 1.6.0 ([a190aaa](https://github.com/googleapis/python-bigquery/commit/a190aaa09ae73e8b6a83b7b213247f95fde57615))
+
+## [3.5.0](https://github.com/googleapis/python-bigquery/compare/v3.4.2...v3.5.0) (2023-01-31)
+
+
+### Features
+
+* Add __str__ method to DatasetReference ([#1477](https://github.com/googleapis/python-bigquery/issues/1477)) ([f32df1f](https://github.com/googleapis/python-bigquery/commit/f32df1fb74e4aea24cd8a4099040ad2f7436e54d))
+* Add preserveAsciiControlCharacter to LoadJobConfig ([#1484](https://github.com/googleapis/python-bigquery/issues/1484)) ([bd1da9a](https://github.com/googleapis/python-bigquery/commit/bd1da9aa0a40b02b7d5409a0b094d8380e255c91))
+
+
+### Documentation
+
+* Adds snippet for creating table with external data config ([#1420](https://github.com/googleapis/python-bigquery/issues/1420)) ([f0ace2a](https://github.com/googleapis/python-bigquery/commit/f0ace2ac2307ef359511a235f80f5ce9e46264c1))
+* Revise delete label table code sample, add TODO to clean up sni… ([#1466](https://github.com/googleapis/python-bigquery/issues/1466)) ([0dab7d2](https://github.com/googleapis/python-bigquery/commit/0dab7d25ace4b63d2984485e7b0c5bb38f20476f))
+* **samples:** Table variable fix ([#1287](https://github.com/googleapis/python-bigquery/issues/1287)) ([a71888a](https://github.com/googleapis/python-bigquery/commit/a71888a60d1e5e5815ab459fe24368ad5b0d032a))
+
+## [3.4.2](https://github.com/googleapis/python-bigquery/compare/v3.4.1...v3.4.2) (2023-01-13)
+
+
+### Bug Fixes
+
+* Add support for python 3.11 ([#1463](https://github.com/googleapis/python-bigquery/issues/1463)) ([730a1de](https://github.com/googleapis/python-bigquery/commit/730a1dec8be49df26a3d805ebd4ad185ba72170d))
+* Require grpcio >= 1.49.1 for python 3.11 ([72b25c5](https://github.com/googleapis/python-bigquery/commit/72b25c52bc4b9a92c4cb187b6230b280d4af905c))
+
+
+### Dependencies
+
+* Remove upper bound on packaging dependency ([#1440](https://github.com/googleapis/python-bigquery/issues/1440)) ([6088129](https://github.com/googleapis/python-bigquery/commit/60881296a35067e7aa025d92b2425572f10fd4ec))
+
+
+### Documentation
+
+* Create sample to write schema file from table ([#1439](https://github.com/googleapis/python-bigquery/issues/1439)) ([093cc68](https://github.com/googleapis/python-bigquery/commit/093cc6852ada29898c4a4d047fd216544ef15bba))
+* Created samples for load table and create table from schema file ([#1436](https://github.com/googleapis/python-bigquery/issues/1436)) ([8ad2e5b](https://github.com/googleapis/python-bigquery/commit/8ad2e5bc1c04bf16fffe4c8773e722b68117c916))
+* Revise create table cmek sample ([#1452](https://github.com/googleapis/python-bigquery/issues/1452)) ([57740e4](https://github.com/googleapis/python-bigquery/commit/57740e49af7418449aec73a6fdd307fcb588c655))
+* Revise get table labels code sample, add TODO to clean up snipp… ([#1464](https://github.com/googleapis/python-bigquery/issues/1464)) ([b5ccbfe](https://github.com/googleapis/python-bigquery/commit/b5ccbfe4eee91d7f481d9708084cd29d0c85e666))
+* Revise label table code samples ([#1451](https://github.com/googleapis/python-bigquery/issues/1451)) ([14ae1f2](https://github.com/googleapis/python-bigquery/commit/14ae1f20538ea00829a1325f91f5e8524234bd0c))
+* Revise sample for nested schema ([#1446](https://github.com/googleapis/python-bigquery/issues/1446)) ([a097631](https://github.com/googleapis/python-bigquery/commit/a0976318fc5ad1620a68250c3e059e2a51d4946d))
+
+## [3.4.1](https://github.com/googleapis/python-bigquery/compare/v3.4.0...v3.4.1) (2022-12-09)
+
+
+### Documentation
+
+* Add info about streaming quota limits to `insert_rows*` methods ([#1409](https://github.com/googleapis/python-bigquery/issues/1409)) ([0f08e9a](https://github.com/googleapis/python-bigquery/commit/0f08e9a8ff638e78006d71acd974de2dff89b5d9))
+
+
+### Dependencies
+
+* make pyarrow and BQ Storage optional dependencies ([e1aa921](https://github.com/googleapis/python-bigquery/commit/e1aa9218ad22f85c9a6cab8b61d013779376a582))
+
+## [3.4.0](https://github.com/googleapis/python-bigquery/compare/v3.3.6...v3.4.0) (2022-11-17)
+
+
+### Features
+
+* Add `reference_file_schema_uri` to LoadJobConfig, ExternalConfig ([#1399](https://github.com/googleapis/python-bigquery/issues/1399)) ([931285f](https://github.com/googleapis/python-bigquery/commit/931285ff85842ab07a0ef2ff9db808181ea3c5e4))
+* Add default value expression ([#1408](https://github.com/googleapis/python-bigquery/issues/1408)) ([207aa50](https://github.com/googleapis/python-bigquery/commit/207aa506ab634bdb13256fa5bd8745ec9de23290))
+* Add More Specific Type Annotations for Row Dictionaries ([#1295](https://github.com/googleapis/python-bigquery/issues/1295)) ([eb49873](https://github.com/googleapis/python-bigquery/commit/eb49873176dee478617eb50472d44703abca53b5))
+
+## [3.3.6](https://github.com/googleapis/python-bigquery/compare/v3.3.4...v3.3.6) (2022-11-02)
+
+
+### Features
+
+* Reconfigure tqdm progress bar in %%bigquery magic ([#1355](https://github.com/googleapis/python-bigquery/issues/1355)) ([506f781](https://github.com/googleapis/python-bigquery/commit/506f781c2dd775193336ab9432f32148250ed81d))
+
+
+### Bug Fixes
+
+* Corrects test for non-existent attribute ([#1395](https://github.com/googleapis/python-bigquery/issues/1395)) ([a80f436](https://github.com/googleapis/python-bigquery/commit/a80f436f2e75a8fb680316f17a22eecb31a7101d))
+* **deps:** Allow protobuf 3.19.5 ([#1379](https://github.com/googleapis/python-bigquery/issues/1379)) ([3e4a074](https://github.com/googleapis/python-bigquery/commit/3e4a074a981eb2920c5f9a711c253565d4844858))
+* **deps:** Allow pyarrow < 11 ([#1393](https://github.com/googleapis/python-bigquery/issues/1393)) ([c898546](https://github.com/googleapis/python-bigquery/commit/c898546d3292f9ec1ba6120cd3f9e2805aa087bb))
+* **deps:** Require requests>=2.21.0 ([#1388](https://github.com/googleapis/python-bigquery/issues/1388)) ([e398336](https://github.com/googleapis/python-bigquery/commit/e39833673582e4a7a34103cfc45603932c9c33b3))
+* Refactor to adapt to changes to shapely dependency ([#1376](https://github.com/googleapis/python-bigquery/issues/1376)) ([2afd278](https://github.com/googleapis/python-bigquery/commit/2afd278febe1eb247adc6278ab59903962a5bb6c))
+
+
+### Documentation
+
+* Fix typos ([#1372](https://github.com/googleapis/python-bigquery/issues/1372)) ([21cc525](https://github.com/googleapis/python-bigquery/commit/21cc525a86a06acfe73e5c5a74ec5f0b61e410f2))
+
+
+### Miscellaneous Chores
+
+* release 3.3.6 ([4fce1d9](https://github.com/googleapis/python-bigquery/commit/4fce1d93b1763703b115a0480a2b97021786aff7))
+
+## [3.3.4](https://github.com/googleapis/python-bigquery/compare/v3.3.3...v3.3.4) (2022-09-29)
+
+
+### Bug Fixes
+
+* **deps:** Require protobuf >= 3.20.2 ([#1369](https://github.com/googleapis/python-bigquery/issues/1369)) ([f13383a](https://github.com/googleapis/python-bigquery/commit/f13383a22d7b1a0a714dc1b1210ad970146bd094))
+
+## [3.3.3](https://github.com/googleapis/python-bigquery/compare/v3.3.2...v3.3.3) (2022-09-28)
+
+
+### Bug Fixes
+
+* Refactors code to account for a tdqm code deprecation ([#1357](https://github.com/googleapis/python-bigquery/issues/1357)) ([1369a9d](https://github.com/googleapis/python-bigquery/commit/1369a9d937b85d6a2a6bf9a672c71620648b1e3e))
+* Validate opentelemetry span job attributes have values ([#1327](https://github.com/googleapis/python-bigquery/issues/1327)) ([8287af1](https://github.com/googleapis/python-bigquery/commit/8287af1299169546f847126f03ae04e48890139e))
+
+
+### Documentation
+
+* **samples:** uses function (create_job) more appropriate to the described sample intent ([5aeedaa](https://github.com/googleapis/python-bigquery/commit/5aeedaa2f4e6a0200d50521dfd90f39f9a24d0cc))
+
+## [3.3.2](https://github.com/googleapis/python-bigquery/compare/v3.3.1...v3.3.2) (2022-08-16)
+
+
+### Bug Fixes
+
+* **deps:** require proto-plus >= 1.22.0 ([1de7a52](https://github.com/googleapis/python-bigquery/commit/1de7a52cb85d4876e4aa87346aff5725c8294c4e))
+* **deps:** require protobuf >=3.19, < 5.0.0 ([#1311](https://github.com/googleapis/python-bigquery/issues/1311)) ([1de7a52](https://github.com/googleapis/python-bigquery/commit/1de7a52cb85d4876e4aa87346aff5725c8294c4e))
+
+## [3.3.1](https://github.com/googleapis/python-bigquery/compare/v3.3.0...v3.3.1) (2022-08-09)
+
+
+### Bug Fixes
+
+* **deps:** allow pyarrow < 10 ([#1304](https://github.com/googleapis/python-bigquery/issues/1304)) ([13616a9](https://github.com/googleapis/python-bigquery/commit/13616a910ba2e9b7bc3595847229b56e70c99f84))
+
+## [3.3.0](https://github.com/googleapis/python-bigquery/compare/v3.2.0...v3.3.0) (2022-07-25)
+
+
+### Features
+
+* add destination_expiration_time property to copy job ([#1277](https://github.com/googleapis/python-bigquery/issues/1277)) ([728b07c](https://github.com/googleapis/python-bigquery/commit/728b07c9177532bbbbfd1890f23e98950aea3f02))
+
+
+### Bug Fixes
+
+* require python 3.7+ ([#1284](https://github.com/googleapis/python-bigquery/issues/1284)) ([52d9f14](https://github.com/googleapis/python-bigquery/commit/52d9f14fb1d183f64a62fee1fddc0bf576a0a3e9))
+
+
+### Documentation
+
+* **samples:** add table snapshot sample ([#1274](https://github.com/googleapis/python-bigquery/issues/1274)) ([e760d1b](https://github.com/googleapis/python-bigquery/commit/e760d1bcb76561b4247adde2fd06ae0b686befb9))
+* **samples:** explicitly add bq to samples reqs, upgrade grpc to fix bug on m1 ([#1290](https://github.com/googleapis/python-bigquery/issues/1290)) ([9b7e3e4](https://github.com/googleapis/python-bigquery/commit/9b7e3e424cbd08af8b08c91e6397a3f1b7811064))
+
+## [3.2.0](https://github.com/googleapis/python-bigquery/compare/v3.1.0...v3.2.0) (2022-06-06)
+
+
+### Features
+
+* add support for table clones ([#1235](https://github.com/googleapis/python-bigquery/issues/1235)) ([176fb2a](https://github.com/googleapis/python-bigquery/commit/176fb2afc9888c6b0cd74d590065b3002bdbf533))
+
+
+### Bug Fixes
+
+* **deps:** proto-plus >= 1.15.0, <2.0.0dev ([ba58d3a](https://github.com/googleapis/python-bigquery/commit/ba58d3af80ca796be09c813529d3aadb79e0413c))
+* **deps:** require packaging >= 14.3, <22.0.0dev ([ba58d3a](https://github.com/googleapis/python-bigquery/commit/ba58d3af80ca796be09c813529d3aadb79e0413c))
+* **deps:** require protobuf>= 3.12.0, <4.0.0dev ([#1263](https://github.com/googleapis/python-bigquery/issues/1263)) ([ba58d3a](https://github.com/googleapis/python-bigquery/commit/ba58d3af80ca796be09c813529d3aadb79e0413c))
+
+
+### Documentation
+
+* fix changelog header to consistent size ([#1268](https://github.com/googleapis/python-bigquery/issues/1268)) ([d03e2a2](https://github.com/googleapis/python-bigquery/commit/d03e2a29ecfa5d2ccd5599f5c0faac55286e52e7))
+
+## [3.1.0](https://github.com/googleapis/python-bigquery/compare/v3.0.1...v3.1.0) (2022-05-09)
+
+
+### Features
+
+* add str method to table ([#1199](https://github.com/googleapis/python-bigquery/issues/1199)) ([8da4fa9](https://github.com/googleapis/python-bigquery/commit/8da4fa9e77bcfd2b68818b5d65b38ccc59899a01))
+* refactor AccessEntry to use _properties pattern ([#1125](https://github.com/googleapis/python-bigquery/issues/1125)) ([acd5612](https://github.com/googleapis/python-bigquery/commit/acd5612d2fc469633936dbc463ce4d70951e7fdd))
+* support using BIGQUERY_EMULATOR_HOST environment variable ([#1222](https://github.com/googleapis/python-bigquery/issues/1222)) ([39294b4](https://github.com/googleapis/python-bigquery/commit/39294b4950896b084573bedb4c5adc2b8d371eac))
+
+
+### Bug Fixes
+
+* **deps:** allow pyarrow v8 ([#1245](https://github.com/googleapis/python-bigquery/issues/1245)) ([d258690](https://github.com/googleapis/python-bigquery/commit/d258690dbf01108e1426f0e28d792c418a88bce0))
+* export bigquery.HivePartitioningOptions ([#1217](https://github.com/googleapis/python-bigquery/issues/1217)) ([8eb757b](https://github.com/googleapis/python-bigquery/commit/8eb757bcded7a3ef3b2264f47ec080c0a8fca579))
+* Skip geography_as_object conversion for REPEATED fields ([#1220](https://github.com/googleapis/python-bigquery/issues/1220)) ([4d3d6ec](https://github.com/googleapis/python-bigquery/commit/4d3d6ec9e667a781f8cb4a3aee0376c6179d5ce1))
+
+
+### Documentation
+
+* updated variable typo in comment in code sample ([#1239](https://github.com/googleapis/python-bigquery/issues/1239)) ([e420112](https://github.com/googleapis/python-bigquery/commit/e4201128bdb7f49cb732e12609448bbdbc122736))
+
+## [3.0.1](https://github.com/googleapis/python-bigquery/compare/v3.0.0...v3.0.1) (2022-03-30)
+
+
+### Bug Fixes
+
+* **deps:** raise exception when pandas is installed but db-dtypes is not ([#1191](https://github.com/googleapis/python-bigquery/issues/1191)) ([4333910](https://github.com/googleapis/python-bigquery/commit/433391097bae57dd12a93db18fc2bab573d8f128))
+* **deps:** restore dependency on python-dateutil ([#1187](https://github.com/googleapis/python-bigquery/issues/1187)) ([212d7ec](https://github.com/googleapis/python-bigquery/commit/212d7ec1f0740d04c26fb3ceffc9a4dd9eed6756))
+
+## [3.0.0](https://github.com/googleapis/python-bigquery/compare/v2.34.3...v3.0.0) (2022-03-29)
+
+
+### ⚠ BREAKING CHANGES
+
+* BigQuery Storage and pyarrow are required dependencies (#776)
+* use nullable `Int64` and `boolean` dtypes in `to_dataframe` (#786)
+* destination tables are no-longer removed by `create_job` (#891)
+* In `to_dataframe`, use `dbdate` and `dbtime` dtypes from db-dtypes package for BigQuery DATE and TIME columns (#972)
+* automatically convert out-of-bounds dates in `to_dataframe`, remove `date_as_object` argument (#972)
+* mark the package as type-checked (#1058)
+* default to DATETIME type when loading timezone-naive datetimes from Pandas (#1061)
+* remove out-of-date BigQuery ML protocol buffers (#1178)
+
+### Features
+
+* add `api_method` parameter to `Client.query` to select `INSERT` or `QUERY` API ([#967](https://github.com/googleapis/python-bigquery/issues/967)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* default to DATETIME type when loading timezone-naive datetimes from Pandas ([#1061](https://github.com/googleapis/python-bigquery/issues/1061)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* destination tables are no-longer removed by `create_job` ([#891](https://github.com/googleapis/python-bigquery/issues/891)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* In `to_dataframe`, use `dbdate` and `dbtime` dtypes from db-dtypes package for BigQuery DATE and TIME columns ([#972](https://github.com/googleapis/python-bigquery/issues/972)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* mark the package as type-checked ([#1058](https://github.com/googleapis/python-bigquery/issues/1058)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* use `StandardSqlField` class for `Model.feature_columns` and `Model.label_columns` ([#1117](https://github.com/googleapis/python-bigquery/issues/1117)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+
+
+### Bug Fixes
+
+* automatically convert out-of-bounds dates in `to_dataframe`, remove `date_as_object` argument ([#972](https://github.com/googleapis/python-bigquery/issues/972)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* improve type annotations for mypy validation ([#1081](https://github.com/googleapis/python-bigquery/issues/1081)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* remove out-of-date BigQuery ML protocol buffers ([#1178](https://github.com/googleapis/python-bigquery/issues/1178)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* use nullable `Int64` and `boolean` dtypes in `to_dataframe` ([#786](https://github.com/googleapis/python-bigquery/issues/786)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+
+
+### Documentation
+
+* Add migration guide from version 2.x to 3.x ([#1027](https://github.com/googleapis/python-bigquery/issues/1027)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+
+
+### Dependencies
+
+* BigQuery Storage and pyarrow are required dependencies ([#776](https://github.com/googleapis/python-bigquery/issues/776)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+
+## [2.34.3](https://github.com/googleapis/python-bigquery/compare/v2.34.2...v2.34.3) (2022-03-29)
+
+
+### Bug Fixes
+
+* update content-type header ([#1171](https://github.com/googleapis/python-bigquery/issues/1171)) ([921b440](https://github.com/googleapis/python-bigquery/commit/921b440fdd151e88ee5b3e0d9fb90177877dc11a))
+
+## [2.34.2](https://github.com/googleapis/python-bigquery/compare/v2.34.1...v2.34.2) (2022-03-05)
+
+
+### Bug Fixes
+
+* **deps:** require google-api-core>=1.31.5, >=2.3.2 ([#1157](https://github.com/googleapis/python-bigquery/issues/1157)) ([0c15790](https://github.com/googleapis/python-bigquery/commit/0c15790720ff573a501cfe760dd74ee166e1a353))
+* **deps:** require proto-plus>=1.15.0 ([0c15790](https://github.com/googleapis/python-bigquery/commit/0c15790720ff573a501cfe760dd74ee166e1a353))
+
+## [2.34.1](https://github.com/googleapis/python-bigquery/compare/v2.34.0...v2.34.1) (2022-03-02)
+
+
+### Dependencies
+
+* add "extra" for IPython, exclude bad IPython release ([#1151](https://github.com/googleapis/python-bigquery/issues/1151)) ([0fbe12d](https://github.com/googleapis/python-bigquery/commit/0fbe12d1ababa45aa774b026a93c0af9e8f343d9))
+* allow pyarrow 7.0 ([#1112](https://github.com/googleapis/python-bigquery/issues/1112)) ([57f8ea9](https://github.com/googleapis/python-bigquery/commit/57f8ea95e152dc238e7a4941f96c54be53f7eaf3))
+
+## [2.34.0](https://github.com/googleapis/python-bigquery/compare/v2.33.0...v2.34.0) (2022-02-18)
+
+
+### Features
+
+* support BI Engine statistics in query job ([#1144](https://github.com/googleapis/python-bigquery/issues/1144)) ([7482549](https://github.com/googleapis/python-bigquery/commit/7482549cb42ed5302634ab4fb7b4efcd97b35c68))
+
+## [2.33.0](https://github.com/googleapis/python-bigquery/compare/v2.32.0...v2.33.0) (2022-02-16)
+
+
+### Features
+
+* add `--no_query_cache` option to `%%bigquery` magics to disable query cache ([#1141](https://github.com/googleapis/python-bigquery/issues/1141)) ([7dd30af](https://github.com/googleapis/python-bigquery/commit/7dd30af41b8a595b96176c964ba14aa41645ef0d))
+
+
+### Bug Fixes
+
+* return 403 when VPC-SC violation happens ([#1131](https://github.com/googleapis/python-bigquery/issues/1131)) ([f5daa9b](https://github.com/googleapis/python-bigquery/commit/f5daa9b41377a58cb3220bb2ab7c72adc6462196))
+
+
+### Documentation
+
+* reference BigQuery REST API defaults in `LoadJobConfig` descrip… ([#1132](https://github.com/googleapis/python-bigquery/issues/1132)) ([18d9580](https://github.com/googleapis/python-bigquery/commit/18d958062721d6be81e7bd7a5bd66f277344a864))
+* show common job properties in `get_job` and `cancel_job` samples ([#1137](https://github.com/googleapis/python-bigquery/issues/1137)) ([8edc10d](https://github.com/googleapis/python-bigquery/commit/8edc10d019bd96defebc4f92a47774901e9b956f))
+
+## [2.32.0](https://github.com/googleapis/python-bigquery/compare/v2.31.0...v2.32.0) (2022-01-12)
+
+
+### Features
+
+* support authorized dataset entity ([#1075](https://github.com/googleapis/python-bigquery/issues/1075)) ([c098cd0](https://github.com/googleapis/python-bigquery/commit/c098cd01c755633bfaba7193dd5c044a489a5b61))
+
+
+### Bug Fixes
+
+* remove query text from exception message, use `exception.debug_message` instead ([#1105](https://github.com/googleapis/python-bigquery/issues/1105)) ([e23114c](https://github.com/googleapis/python-bigquery/commit/e23114ce362e09ac72f733a640e53a561cc9ce69))
+
+## [2.31.0](https://www.github.com/googleapis/python-bigquery/compare/v2.30.1...v2.31.0) (2021-11-24)
+
+
+### Features
+
+* allow cell magic body to be a $variable ([#1053](https://www.github.com/googleapis/python-bigquery/issues/1053)) ([3a681e0](https://www.github.com/googleapis/python-bigquery/commit/3a681e046819df18118aa0b2b5733416d004c9b3))
+* promote `RowIterator.to_arrow_iterable` to public method ([#1073](https://www.github.com/googleapis/python-bigquery/issues/1073)) ([21cd710](https://www.github.com/googleapis/python-bigquery/commit/21cd71022d60c32104f8f90ee2ca445fbb43f7f3))
+
+
+### Bug Fixes
+
+* apply timeout to all resumable upload requests ([#1070](https://www.github.com/googleapis/python-bigquery/issues/1070)) ([3314dfb](https://www.github.com/googleapis/python-bigquery/commit/3314dfbed62488503dc41b11e403a672fcf71048))
+
+
+### Dependencies
+
+* support OpenTelemetry >= 1.1.0 ([#1050](https://www.github.com/googleapis/python-bigquery/issues/1050)) ([4616cd5](https://www.github.com/googleapis/python-bigquery/commit/4616cd58d3c6da641fb881ce99a87dcdedc20ba2))
+
+## [2.30.1](https://www.github.com/googleapis/python-bigquery/compare/v2.30.0...v2.30.1) (2021-11-04)
+
+
+### Bug Fixes
+
+* error if eval()-ing repr(SchemaField) ([#1046](https://www.github.com/googleapis/python-bigquery/issues/1046)) ([13ac860](https://www.github.com/googleapis/python-bigquery/commit/13ac860de689ea13b35932c67042bc35e388cb30))
+
+
+### Documentation
+
+* show gcloud command to authorize against sheets ([#1045](https://www.github.com/googleapis/python-bigquery/issues/1045)) ([20c9024](https://www.github.com/googleapis/python-bigquery/commit/20c9024b5760f7ae41301f4da54568496922cbe2))
+* use stable URL for pandas intersphinx links ([#1048](https://www.github.com/googleapis/python-bigquery/issues/1048)) ([73312f8](https://www.github.com/googleapis/python-bigquery/commit/73312f8f0f22ff9175a4f5f7db9bb438a496c164))
+
+## [2.30.0](https://www.github.com/googleapis/python-bigquery/compare/v2.29.0...v2.30.0) (2021-11-03)
+
+
+### Features
+
+* accept TableListItem where TableReference is accepted ([#1016](https://www.github.com/googleapis/python-bigquery/issues/1016)) ([fe16adc](https://www.github.com/googleapis/python-bigquery/commit/fe16adc86a170d0992c32091b349b036f8b43884))
+* support Python 3.10 ([#1043](https://www.github.com/googleapis/python-bigquery/issues/1043)) ([5bbb832](https://www.github.com/googleapis/python-bigquery/commit/5bbb832a83ebb66db4b5ee740cdfc53f4df8430b))
+
+
+### Documentation
+
+* add code samples for Jupyter/IPython magics ([#1013](https://www.github.com/googleapis/python-bigquery/issues/1013)) ([61141ee](https://www.github.com/googleapis/python-bigquery/commit/61141ee0634024ad261d1595c95cd14a896fb87e))
+* **samples:** add create external table with hive partitioning ([#1033](https://www.github.com/googleapis/python-bigquery/issues/1033)) ([d64f5b6](https://www.github.com/googleapis/python-bigquery/commit/d64f5b682854a2293244426316890df4ab1e079e))
+
+## [2.29.0](https://www.github.com/googleapis/python-bigquery/compare/v2.28.1...v2.29.0) (2021-10-27)
+
+
+### Features
+
+* add `QueryJob.schema` property for dry run queries ([#1014](https://www.github.com/googleapis/python-bigquery/issues/1014)) ([2937fa1](https://www.github.com/googleapis/python-bigquery/commit/2937fa1386898766c561579fd39d42958182d260))
+* add session and connection properties to QueryJobConfig ([#1024](https://www.github.com/googleapis/python-bigquery/issues/1024)) ([e4c94f4](https://www.github.com/googleapis/python-bigquery/commit/e4c94f446c27eb474f30b033c1b62d11bd0acd98))
+* add support for INTERVAL data type to `list_rows` ([#840](https://www.github.com/googleapis/python-bigquery/issues/840)) ([e37380a](https://www.github.com/googleapis/python-bigquery/commit/e37380a959cbd5bb9cbbf6807f0a8ea147e0a713))
+* allow queryJob.result() to be called on a dryRun ([#1015](https://www.github.com/googleapis/python-bigquery/issues/1015)) ([685f06a](https://www.github.com/googleapis/python-bigquery/commit/685f06a5e7b5df17a53e9eb340ff04ecd1e51d1d))
+
+
+### Documentation
+
+* document ScriptStatistics and other missing resource classes ([#1023](https://www.github.com/googleapis/python-bigquery/issues/1023)) ([6679109](https://www.github.com/googleapis/python-bigquery/commit/66791093c61f262ea063d2a7950fc643915ee693))
+* fix formatting of generated client docstrings ([#1009](https://www.github.com/googleapis/python-bigquery/issues/1009)) ([f7b0ee4](https://www.github.com/googleapis/python-bigquery/commit/f7b0ee45a664295ccc9f209eeeac122af8de3c80))
+
+
+### Dependencies
+
+* allow pyarrow 6.x ([#1031](https://www.github.com/googleapis/python-bigquery/issues/1031)) ([1c2de74](https://www.github.com/googleapis/python-bigquery/commit/1c2de74a55046a343bcf9474f67100a82fb05401))
+
+## [2.28.1](https://www.github.com/googleapis/python-bigquery/compare/v2.28.0...v2.28.1) (2021-10-07)
+
+
+### Bug Fixes
+
+* support ARRAY data type when loading from DataFrame with Parquet ([#980](https://www.github.com/googleapis/python-bigquery/issues/980)) ([1e59083](https://www.github.com/googleapis/python-bigquery/commit/1e5908302d36e15442013af6f46b1c20af28255e))
+
+## [2.28.0](https://www.github.com/googleapis/python-bigquery/compare/v2.27.1...v2.28.0) (2021-09-30)
+
+
+### Features
+
+* add `AvroOptions` to configure AVRO external data ([#994](https://www.github.com/googleapis/python-bigquery/issues/994)) ([1a9431d](https://www.github.com/googleapis/python-bigquery/commit/1a9431d9e02eeb99e4712b61c623f9cca80134a6))
+
+
+### Documentation
+
+* link to stable pandas docs ([#990](https://www.github.com/googleapis/python-bigquery/issues/990)) ([ea50e80](https://www.github.com/googleapis/python-bigquery/commit/ea50e8031fc035b3772a338bc00982de263cefad))
+
+## [2.27.1](https://www.github.com/googleapis/python-bigquery/compare/v2.27.0...v2.27.1) (2021-09-27)
+
+
+### Bug Fixes
+
+* remove py.typed since package fails mypy check ([#988](https://www.github.com/googleapis/python-bigquery/issues/988)) ([39030f2](https://www.github.com/googleapis/python-bigquery/commit/39030f26ce081cfacd456b84694c68e3f04ed48d))
+
+## [2.27.0](https://www.github.com/googleapis/python-bigquery/compare/v2.26.0...v2.27.0) (2021-09-24)
+
+
+### Features
+
+* Add py.typed for PEP 561 compliance ([#976](https://www.github.com/googleapis/python-bigquery/issues/976)) ([96e6bee](https://www.github.com/googleapis/python-bigquery/commit/96e6beef3c63b663b7e5879b1458f4dd1a47a5b5))
+* include key metadata in Job representation ([#964](https://www.github.com/googleapis/python-bigquery/issues/964)) ([acca1cb](https://www.github.com/googleapis/python-bigquery/commit/acca1cb7baaa3b00508246c994ade40314d421c3))
+
+
+### Bug Fixes
+
+* Arrow extension-type metadata was not set when calling the REST API or when there are no rows ([#946](https://www.github.com/googleapis/python-bigquery/issues/946)) ([864383b](https://www.github.com/googleapis/python-bigquery/commit/864383bc01636b3774f7da194587b8b7edd0383d))
+* disambiguate missing policy tags from explicitly unset policy tags ([#983](https://www.github.com/googleapis/python-bigquery/issues/983)) ([f83c00a](https://www.github.com/googleapis/python-bigquery/commit/f83c00acead70fc0ce9959eefb133a672d816277))
+* remove default timeout ([#974](https://www.github.com/googleapis/python-bigquery/issues/974)) ([1cef0d4](https://www.github.com/googleapis/python-bigquery/commit/1cef0d4664bf448168b26487a71795144b7f4d6b))
+
+
+### Documentation
+
+* simplify destination table sample with f-strings ([#966](https://www.github.com/googleapis/python-bigquery/issues/966)) ([ab6e76f](https://www.github.com/googleapis/python-bigquery/commit/ab6e76f9489262fd9c1876a1c4f93d7e139aa999))
+
+## [2.26.0](https://www.github.com/googleapis/python-bigquery/compare/v2.25.2...v2.26.0) (2021-09-01)
+
+
+### Features
+
+* set the X-Server-Timeout header when timeout is set ([#927](https://www.github.com/googleapis/python-bigquery/issues/927)) ([ba02f24](https://www.github.com/googleapis/python-bigquery/commit/ba02f248ba9c449c34859579a4011f4bfd2f4a93))
+
+
+### Bug Fixes
+
+* guard imports against unsupported pyarrow versions ([#934](https://www.github.com/googleapis/python-bigquery/issues/934)) ([b289076](https://www.github.com/googleapis/python-bigquery/commit/b28907693bbe889becc1b9c8963f0a7e1ee6c35a))
+
+## [2.25.2](https://www.github.com/googleapis/python-bigquery/compare/v2.25.1...v2.25.2) (2021-08-31)
+
+
+### Bug Fixes
+
+* error inserting DataFrame with REPEATED field ([#925](https://www.github.com/googleapis/python-bigquery/issues/925)) ([656d2fa](https://www.github.com/googleapis/python-bigquery/commit/656d2fa6f870573a21235c83463752a2d084caba))
+* underscores weren't allowed in struct field names when passing parameters to the DB API ([#930](https://www.github.com/googleapis/python-bigquery/issues/930)) ([fcb0bc6](https://www.github.com/googleapis/python-bigquery/commit/fcb0bc68c972c2c98bb8542f54e9228308177ecb))
+
+
+### Documentation
+
+* update docstring for bigquery_create_routine sample ([#883](https://www.github.com/googleapis/python-bigquery/issues/883)) ([#917](https://www.github.com/googleapis/python-bigquery/issues/917)) ([e2d12b7](https://www.github.com/googleapis/python-bigquery/commit/e2d12b795ef2dc51b0ee36f1b3000edb1e64ce05))
+
+## [2.25.1](https://www.github.com/googleapis/python-bigquery/compare/v2.25.0...v2.25.1) (2021-08-25)
+
+
+### Bug Fixes
+
+* populate default `timeout` and retry after client-side timeout ([#896](https://www.github.com/googleapis/python-bigquery/issues/896)) ([b508809](https://www.github.com/googleapis/python-bigquery/commit/b508809c0f887575274309a463e763c56ddd017d))
+* use REST API in cell magic when requested ([#892](https://www.github.com/googleapis/python-bigquery/issues/892)) ([1cb3e55](https://www.github.com/googleapis/python-bigquery/commit/1cb3e55253e824e3a1da5201f6ec09065fb6b627))
+
+## [2.25.0](https://www.github.com/googleapis/python-bigquery/compare/v2.24.1...v2.25.0) (2021-08-24)
+
+
+### Features
+
+* Support using GeoPandas for GEOGRAPHY columns ([#848](https://www.github.com/googleapis/python-bigquery/issues/848)) ([16f65e6](https://www.github.com/googleapis/python-bigquery/commit/16f65e6ae15979217ceea6c6d398c9057a363a13))
+
+## [2.24.1](https://www.github.com/googleapis/python-bigquery/compare/v2.24.0...v2.24.1) (2021-08-13)
+
+
+### Bug Fixes
+
+* remove pytz dependency and require pyarrow>=3.0.0 ([#875](https://www.github.com/googleapis/python-bigquery/issues/875)) ([2cb3563](https://www.github.com/googleapis/python-bigquery/commit/2cb3563ee863edef7eaf5d04d739bcfe7bc6438e))
+
+## [2.24.0](https://www.github.com/googleapis/python-bigquery/compare/v2.23.3...v2.24.0) (2021-08-11)
+
+
+### Features
+
+* add support for transaction statistics ([#849](https://www.github.com/googleapis/python-bigquery/issues/849)) ([7f7b1a8](https://www.github.com/googleapis/python-bigquery/commit/7f7b1a808d50558772a0deb534ca654da65d629e))
+* make the same `Table*` instances equal to each other ([#867](https://www.github.com/googleapis/python-bigquery/issues/867)) ([c1a3d44](https://www.github.com/googleapis/python-bigquery/commit/c1a3d4435739a21d25aa154145e36d3a7c42eeb6))
+* retry failed query jobs in `result()` ([#837](https://www.github.com/googleapis/python-bigquery/issues/837)) ([519d99c](https://www.github.com/googleapis/python-bigquery/commit/519d99c20e7d1101f76981f3de036fdf3c7a4ecc))
+* support `ScalarQueryParameterType` for `type_` argument in `ScalarQueryParameter` constructor ([#850](https://www.github.com/googleapis/python-bigquery/issues/850)) ([93d15e2](https://www.github.com/googleapis/python-bigquery/commit/93d15e2e5405c2cc6d158c4e5737361344193dbc))
+
+
+### Bug Fixes
+
+* make unicode characters working well in load_table_from_json ([#865](https://www.github.com/googleapis/python-bigquery/issues/865)) ([ad9c802](https://www.github.com/googleapis/python-bigquery/commit/ad9c8026f0e667f13dd754279f9dc40d06f4fa78))
+
+## [2.23.3](https://www.github.com/googleapis/python-bigquery/compare/v2.23.2...v2.23.3) (2021-08-06)
+
+
+### Bug Fixes
+
+* increase default retry deadline to 10 minutes ([#859](https://www.github.com/googleapis/python-bigquery/issues/859)) ([30770fd](https://www.github.com/googleapis/python-bigquery/commit/30770fd0575fbd5aaa70c14196a4cc54627aecd2))
+
+## [2.23.2](https://www.github.com/googleapis/python-bigquery/compare/v2.23.1...v2.23.2) (2021-07-29)
+
+
+### Dependencies
+
+* expand pyarrow pins to support 5.x releases ([#833](https://www.github.com/googleapis/python-bigquery/issues/833)) ([80e3a61](https://www.github.com/googleapis/python-bigquery/commit/80e3a61c60419fb19b70b664c6415cd01ba82f5b))
+
+## [2.23.1](https://www.github.com/googleapis/python-bigquery/compare/v2.23.0...v2.23.1) (2021-07-28)
+
+
+### Bug Fixes
+
+* `insert_rows()` accepts float column values as strings again ([#824](https://www.github.com/googleapis/python-bigquery/issues/824)) ([d9378af](https://www.github.com/googleapis/python-bigquery/commit/d9378af13add879118a1d004529b811f72c325d6))
+
+## [2.23.0](https://www.github.com/googleapis/python-bigquery/compare/v2.22.1...v2.23.0) (2021-07-27)
+
+
+### Features
+
+* Update proto definitions for bigquery/v2 to support new proto fields for BQML. ([#817](https://www.github.com/googleapis/python-bigquery/issues/817)) ([fe7a902](https://www.github.com/googleapis/python-bigquery/commit/fe7a902e8b3e723ace335c9b499aea6d180a025b))
+
+
+### Bug Fixes
+
+* no longer raise a warning in `to_dataframe` if `max_results` set ([#815](https://www.github.com/googleapis/python-bigquery/issues/815)) ([3c1be14](https://www.github.com/googleapis/python-bigquery/commit/3c1be149e76b1d1d8879fdcf0924ddb1c1839e94))
+* retry ChunkedEncodingError by default ([#802](https://www.github.com/googleapis/python-bigquery/issues/802)) ([419d36d](https://www.github.com/googleapis/python-bigquery/commit/419d36d6b1887041e5795dbc8fc808890e91ab11))
+
+
+### Documentation
+
+* correct docs for `LoadJobConfig.destination_table_description` ([#810](https://www.github.com/googleapis/python-bigquery/issues/810)) ([da87fd9](https://www.github.com/googleapis/python-bigquery/commit/da87fd921cc8067b187d7985c978aac8eb58d107))
+
+## [2.22.1](https://www.github.com/googleapis/python-bigquery/compare/v2.22.0...v2.22.1) (2021-07-22)
+
+
+### Bug Fixes
+
+* issue a warning if buggy pyarrow is detected ([#787](https://www.github.com/googleapis/python-bigquery/issues/787)) ([e403721](https://www.github.com/googleapis/python-bigquery/commit/e403721af1373eb1f1a1c7be5b2182e3819ed1f9))
+* use a larger chunk size when loading data ([#799](https://www.github.com/googleapis/python-bigquery/issues/799)) ([b804373](https://www.github.com/googleapis/python-bigquery/commit/b804373277c1c1baa3370ebfb4783503b7ff360f))
+
+
+### Documentation
+
+* add Samples section to CONTRIBUTING.rst ([#785](https://www.github.com/googleapis/python-bigquery/issues/785)) ([e587029](https://www.github.com/googleapis/python-bigquery/commit/e58702967d572e83b4c774278818302594a511b7))
+* add sample to delete job metadata ([#798](https://www.github.com/googleapis/python-bigquery/issues/798)) ([be9b242](https://www.github.com/googleapis/python-bigquery/commit/be9b242f2180f5b795dfb3a168a97af1682999fd))
+
+## [2.22.0](https://www.github.com/googleapis/python-bigquery/compare/v2.21.0...v2.22.0) (2021-07-19)
+
+
+### Features
+
+* add `LoadJobConfig.projection_fields` to select DATASTORE_BACKUP fields ([#736](https://www.github.com/googleapis/python-bigquery/issues/736)) ([c45a738](https://www.github.com/googleapis/python-bigquery/commit/c45a7380871af3dfbd3c45524cb606c60e1a01d1))
+* add standard sql table type, update scalar type enums ([#777](https://www.github.com/googleapis/python-bigquery/issues/777)) ([b8b5433](https://www.github.com/googleapis/python-bigquery/commit/b8b5433898ec881f8da1303614780a660d94733a))
+* add support for more detailed DML stats ([#758](https://www.github.com/googleapis/python-bigquery/issues/758)) ([36fe86f](https://www.github.com/googleapis/python-bigquery/commit/36fe86f41c1a8f46167284f752a6d6bbf886a04b))
+* add support for user defined Table View Functions ([#724](https://www.github.com/googleapis/python-bigquery/issues/724)) ([8c7b839](https://www.github.com/googleapis/python-bigquery/commit/8c7b839a6ac1491c1c3b6b0e8755f4b70ed72ee3))
+
+
+### Bug Fixes
+
+* avoid possible job already exists error ([#751](https://www.github.com/googleapis/python-bigquery/issues/751)) ([45b9308](https://www.github.com/googleapis/python-bigquery/commit/45b93089f5398740413104285cc8acfd5ebc9c08))
+
+
+### Dependencies
+
+* allow 2.x versions of `google-api-core`, `google-cloud-core`, `google-resumable-media` ([#770](https://www.github.com/googleapis/python-bigquery/issues/770)) ([87a09fa](https://www.github.com/googleapis/python-bigquery/commit/87a09fa3f2a9ab35728a1ac925f9d5f2e6616c65))
+
+
+### Documentation
+
+* add loading data from Firestore backup sample ([#737](https://www.github.com/googleapis/python-bigquery/issues/737)) ([22fd848](https://www.github.com/googleapis/python-bigquery/commit/22fd848cae4af1148040e1faa31dd15a4d674687))
+
+## [2.21.0](https://www.github.com/googleapis/python-bigquery/compare/v2.20.0...v2.21.0) (2021-07-12)
+
+
+### Features
+
+* Add max_results parameter to some of the `QueryJob` methods. ([#698](https://www.github.com/googleapis/python-bigquery/issues/698)) ([2a9618f](https://www.github.com/googleapis/python-bigquery/commit/2a9618f4daaa4a014161e1a2f7376844eec9e8da))
+* Add support for decimal target types. ([#735](https://www.github.com/googleapis/python-bigquery/issues/735)) ([7d2d3e9](https://www.github.com/googleapis/python-bigquery/commit/7d2d3e906a9eb161911a198fb925ad79de5df934))
+* Add support for table snapshots. ([#740](https://www.github.com/googleapis/python-bigquery/issues/740)) ([ba86b2a](https://www.github.com/googleapis/python-bigquery/commit/ba86b2a6300ae5a9f3c803beeb42bda4c522e34c))
+* Enable unsetting policy tags on schema fields. ([#703](https://www.github.com/googleapis/python-bigquery/issues/703)) ([18bb443](https://www.github.com/googleapis/python-bigquery/commit/18bb443c7acd0a75dcb57d9aebe38b2d734ff8c7))
+* Make it easier to disable best-effort deduplication with streaming inserts. ([#734](https://www.github.com/googleapis/python-bigquery/issues/734)) ([1246da8](https://www.github.com/googleapis/python-bigquery/commit/1246da86b78b03ca1aa2c45ec71649e294cfb2f1))
+* Support passing struct data to the DB API. ([#718](https://www.github.com/googleapis/python-bigquery/issues/718)) ([38b3ef9](https://www.github.com/googleapis/python-bigquery/commit/38b3ef96c3dedc139b84f0ff06885141ae7ce78c))
+
+
+### Bug Fixes
+
+* Inserting non-finite floats with `insert_rows()`. ([#728](https://www.github.com/googleapis/python-bigquery/issues/728)) ([d047419](https://www.github.com/googleapis/python-bigquery/commit/d047419879e807e123296da2eee89a5253050166))
+* Use `pandas` function to check for `NaN`. ([#750](https://www.github.com/googleapis/python-bigquery/issues/750)) ([67bc5fb](https://www.github.com/googleapis/python-bigquery/commit/67bc5fbd306be7cdffd216f3791d4024acfa95b3))
+
+
+### Documentation
+
+* Add docs for all enums in module. ([#745](https://www.github.com/googleapis/python-bigquery/issues/745)) ([145944f](https://www.github.com/googleapis/python-bigquery/commit/145944f24fedc4d739687399a8309f9d51d43dfd))
+* Omit mention of Python 2.7 in `CONTRIBUTING.rst`. ([#706](https://www.github.com/googleapis/python-bigquery/issues/706)) ([27d6839](https://www.github.com/googleapis/python-bigquery/commit/27d6839ee8a40909e4199cfa0da8b6b64705b2e9))
+
+## [2.20.0](https://www.github.com/googleapis/python-bigquery/compare/v2.19.0...v2.20.0) (2021-06-07)
+
+
+### Features
+
+* support script options in query job config ([#690](https://www.github.com/googleapis/python-bigquery/issues/690)) ([1259e16](https://www.github.com/googleapis/python-bigquery/commit/1259e16394784315368e8be959c1ac097782b62e))
+
+## [2.19.0](https://www.github.com/googleapis/python-bigquery/compare/v2.18.0...v2.19.0) (2021-06-06)
+
+
+### Features
+
+* list_tables, list_projects, list_datasets, list_models, list_routines, and list_jobs now accept a page_size parameter to control page size ([#686](https://www.github.com/googleapis/python-bigquery/issues/686)) ([1f1c4b7](https://www.github.com/googleapis/python-bigquery/commit/1f1c4b7ba4390fc4c5c8186bc22b83b45304ca06))
+
+## [2.18.0](https://www.github.com/googleapis/python-bigquery/compare/v2.17.0...v2.18.0) (2021-06-02)
+
+
+### Features
+
+* add support for Parquet options ([#679](https://www.github.com/googleapis/python-bigquery/issues/679)) ([d792ce0](https://www.github.com/googleapis/python-bigquery/commit/d792ce09388a6ee3706777915dd2818d4c854f79))
+
+## [2.17.0](https://www.github.com/googleapis/python-bigquery/compare/v2.16.1...v2.17.0) (2021-05-21)
+
+
+### Features
+
+* detect obsolete BQ Storage extra at runtime ([#666](https://www.github.com/googleapis/python-bigquery/issues/666)) ([bd7dbda](https://www.github.com/googleapis/python-bigquery/commit/bd7dbdae5c972b16bafc53c67911eeaa3255a880))
+* Support parameterized NUMERIC, BIGNUMERIC, STRING, and BYTES types ([#673](https://www.github.com/googleapis/python-bigquery/issues/673)) ([45421e7](https://www.github.com/googleapis/python-bigquery/commit/45421e73bfcddb244822e6a5cd43be6bd1ca2256))
+
+
+### Bug Fixes
+
+* **tests:** invalid path to strptime() ([#672](https://www.github.com/googleapis/python-bigquery/issues/672)) ([591cdd8](https://www.github.com/googleapis/python-bigquery/commit/591cdd851bb1321b048a05a378a0ef48d3ade462))
+
+## [2.16.1](https://www.github.com/googleapis/python-bigquery/compare/v2.16.0...v2.16.1) (2021-05-12)
+
+
+### Bug Fixes
+
+* executemany rowcount only reflected the last execution ([#660](https://www.github.com/googleapis/python-bigquery/issues/660)) ([aeadc8c](https://www.github.com/googleapis/python-bigquery/commit/aeadc8c2d614bb9f0883ec901fca48930f3aaf19))
+
+## [2.16.0](https://www.github.com/googleapis/python-bigquery/compare/v2.15.0...v2.16.0) (2021-05-05)
+
+
+### Features
+
+* add with_name() to ScalarQueryParameterType ([#644](https://www.github.com/googleapis/python-bigquery/issues/644)) ([6cc6876](https://www.github.com/googleapis/python-bigquery/commit/6cc6876eb0e5bf49fdc047256a945dcf1b289576))
+
+
+### Dependencies
+
+* expand supported pyarrow versions to v4 ([#643](https://www.github.com/googleapis/python-bigquery/issues/643)) ([9e1d386](https://www.github.com/googleapis/python-bigquery/commit/9e1d3869c2024fe7a8af57ff59838d904ca5db03))
+
+## [2.15.0](https://www.github.com/googleapis/python-bigquery/compare/v2.14.0...v2.15.0) (2021-04-29)
+
+
+### Features
+
+* Extended DB API parameter syntax to optionally provide parameter types ([#626](https://www.github.com/googleapis/python-bigquery/issues/626)) ([8bcf397](https://www.github.com/googleapis/python-bigquery/commit/8bcf397fbe2527e06317741875a059b109cfcd9c))
+
+
+### Bug Fixes
+
+* add DECIMAL and BIGDECIMAL as aliases for NUMERIC and BIGNUMERIC ([#638](https://www.github.com/googleapis/python-bigquery/issues/638)) ([aa59023](https://www.github.com/googleapis/python-bigquery/commit/aa59023317b1c63720fb717b3544f755652da58d))
+* The DB API Binary function accepts bytes data ([#630](https://www.github.com/googleapis/python-bigquery/issues/630)) ([4396e70](https://www.github.com/googleapis/python-bigquery/commit/4396e70771af6889d3242c37c5ff2e80241023a2))
+
+## [2.14.0](https://www.github.com/googleapis/python-bigquery/compare/v2.13.1...v2.14.0) (2021-04-26)
+
+
+### Features
+
+* accept DatasetListItem where DatasetReference is accepted ([#597](https://www.github.com/googleapis/python-bigquery/issues/597)) ([c8b5581](https://www.github.com/googleapis/python-bigquery/commit/c8b5581ea3c94005d69755c4a3b5a0d8900f3fe2))
+* accept job object as argument to `get_job` and `cancel_job` ([#617](https://www.github.com/googleapis/python-bigquery/issues/617)) ([f75dcdf](https://www.github.com/googleapis/python-bigquery/commit/f75dcdf3943b87daba60011c9a3b42e34ff81910))
+* add `Client.delete_job_metadata` method to remove job metadata ([#610](https://www.github.com/googleapis/python-bigquery/issues/610)) ([0abb566](https://www.github.com/googleapis/python-bigquery/commit/0abb56669c097c59fbffce007c702e7a55f2d9c1))
+* add `max_queue_size` argument to `RowIterator.to_dataframe_iterable` ([#575](https://www.github.com/googleapis/python-bigquery/issues/575)) ([f95f415](https://www.github.com/googleapis/python-bigquery/commit/f95f415d3441b3928f6cc705cb8a75603d790fd6))
+* add type hints for public methods ([#613](https://www.github.com/googleapis/python-bigquery/issues/613)) ([f8d4aaa](https://www.github.com/googleapis/python-bigquery/commit/f8d4aaa335a0eef915e73596fc9b43b11d11be9f))
+* DB API cursors are now iterable ([#618](https://www.github.com/googleapis/python-bigquery/issues/618)) ([e0b373d](https://www.github.com/googleapis/python-bigquery/commit/e0b373d0e721a70656ed8faceb7f5c70f642d144))
+* retry google.auth TransportError by default ([#624](https://www.github.com/googleapis/python-bigquery/issues/624)) ([34ecc3f](https://www.github.com/googleapis/python-bigquery/commit/34ecc3f1ca0ff073330c0c605673d89b43af7ed9))
+* use pyarrow stream compression, if available ([#593](https://www.github.com/googleapis/python-bigquery/issues/593)) ([dde9dc5](https://www.github.com/googleapis/python-bigquery/commit/dde9dc5114c2311fb76fafc5b222fff561e8abf1))
+
+
+### Bug Fixes
+
+* consistent percents handling in DB API query ([#619](https://www.github.com/googleapis/python-bigquery/issues/619)) ([6502a60](https://www.github.com/googleapis/python-bigquery/commit/6502a602337ae562652a20b20270949f2c9d5073))
+* missing license headers in new test files ([#604](https://www.github.com/googleapis/python-bigquery/issues/604)) ([df48cc5](https://www.github.com/googleapis/python-bigquery/commit/df48cc5a0be99ad39d5835652d1b7422209afc5d))
+* unsetting clustering fields on Table is now possible ([#622](https://www.github.com/googleapis/python-bigquery/issues/622)) ([33a871f](https://www.github.com/googleapis/python-bigquery/commit/33a871f06329f9bf5a6a92fab9ead65bf2bee75d))
+
+
+### Documentation
+
+* add sample to run DML query ([#591](https://www.github.com/googleapis/python-bigquery/issues/591)) ([ff2ec3a](https://www.github.com/googleapis/python-bigquery/commit/ff2ec3abe418a443cd07751c08e654f94e8b3155))
+* update the description of the return value of `_QueryResults.rows()` ([#594](https://www.github.com/googleapis/python-bigquery/issues/594)) ([8f4c0b8](https://www.github.com/googleapis/python-bigquery/commit/8f4c0b84dac3840532d7865247b8ad94b625b897))
+
+## [2.13.1](https://www.github.com/googleapis/python-bigquery/compare/v2.13.0...v2.13.1) (2021-03-23)
+
+
+### Bug Fixes
+
+* add ConnectionError to default retry ([#571](https://www.github.com/googleapis/python-bigquery/issues/571)) ([a3edb8b](https://www.github.com/googleapis/python-bigquery/commit/a3edb8b921e029e2c03d33302d408ad5d4e9d4ad))
+
+## [2.13.0](https://www.github.com/googleapis/python-bigquery/compare/v2.12.0...v2.13.0) (2021-03-22)
+
+
+### Features
+
+* add `ExternalConfig.connection_id` property to connect to external sources ([#560](https://www.github.com/googleapis/python-bigquery/issues/560)) ([d93986e](https://www.github.com/googleapis/python-bigquery/commit/d93986e0259952257f2571f60719b52099c29c0c))
+
+
+### Bug Fixes
+
+* avoid overly strict dependency on pyarrow 3.x ([#564](https://www.github.com/googleapis/python-bigquery/issues/564)) ([97ee6ec](https://www.github.com/googleapis/python-bigquery/commit/97ee6ec6cd4bc9f833cd506dc6d244d103654cfd))
+* avoid policy tags 403 error in `load_table_from_dataframe` ([#557](https://www.github.com/googleapis/python-bigquery/issues/557)) ([84e646e](https://www.github.com/googleapis/python-bigquery/commit/84e646e6b7087a1626e56ad51eeb130f4ddfa2fb))
+
+## [2.12.0](https://www.github.com/googleapis/python-bigquery/compare/v2.11.0...v2.12.0) (2021-03-16)
+
+
+### Features
+
+* make QueryJob.done() method more performant ([#544](https://www.github.com/googleapis/python-bigquery/issues/544)) ([a3ab9ef](https://www.github.com/googleapis/python-bigquery/commit/a3ab9efdd0758829845cfcb6ca0ac1f03ab44f64))
+
+
+### Bug Fixes
+
+* remove DB-API dependency on pyarrow with decimal query parameters ([#551](https://www.github.com/googleapis/python-bigquery/issues/551)) ([1b946ba](https://www.github.com/googleapis/python-bigquery/commit/1b946ba23ee7df86114c6acb338ec34e6c92af6d))
+
+## [2.11.0](https://www.github.com/googleapis/python-bigquery/compare/v2.10.0...v2.11.0) (2021-03-09)
+
+
+### Features
+
+* add context manager support to client ([#540](https://www.github.com/googleapis/python-bigquery/issues/540)) ([d5c7e11](https://www.github.com/googleapis/python-bigquery/commit/d5c7e11a1dc2a149d74294bfadbae62d70573e69))
+
+## [2.10.0](https://www.github.com/googleapis/python-bigquery/compare/v2.9.0...v2.10.0) (2021-02-25)
+
+
+### Features
+
+* add BIGNUMERIC support ([#527](https://www.github.com/googleapis/python-bigquery/issues/527)) ([cc3394f](https://www.github.com/googleapis/python-bigquery/commit/cc3394f80934419eb00c2029bb81c92a696e7d88))
+
+
+### Bug Fixes
+
+* error using empty array of structs parameter ([#474](https://www.github.com/googleapis/python-bigquery/issues/474)) ([c1d15f4](https://www.github.com/googleapis/python-bigquery/commit/c1d15f4e5da4b7e10c00afffd59a5c7f3ded027a))
+* QueryJob.exception() *returns* the errors, not raises them ([#467](https://www.github.com/googleapis/python-bigquery/issues/467)) ([d763279](https://www.github.com/googleapis/python-bigquery/commit/d7632799769248b09a8558ba18f5025ebdd9675a))
+
+
+### Documentation
+
+* **bigquery:** Add alternative approach to setting credentials ([#517](https://www.github.com/googleapis/python-bigquery/issues/517)) ([60fbf28](https://www.github.com/googleapis/python-bigquery/commit/60fbf287b0d34d5db2e61cce7a5b42735ed43d0e))
+* explain retry behavior for DONE jobs ([#532](https://www.github.com/googleapis/python-bigquery/issues/532)) ([696c443](https://www.github.com/googleapis/python-bigquery/commit/696c443f0a6740be0767e12b706a7771bc1460c3))
+
+## [2.9.0](https://www.github.com/googleapis/python-bigquery/compare/v2.8.0...v2.9.0) (2021-02-18)
+
+
+### Features
+
+* add determinism level for javascript UDFs ([#522](https://www.github.com/googleapis/python-bigquery/issues/522)) ([edd3328](https://www.github.com/googleapis/python-bigquery/commit/edd3328fffa3040b2cd3a3c668c90a0e43e4c94c))
+* expose reservation usage stats on jobs ([#524](https://www.github.com/googleapis/python-bigquery/issues/524)) ([4ffb4e0](https://www.github.com/googleapis/python-bigquery/commit/4ffb4e067abdaa54dad6eff49a7fbdb0fa358637))
+
+
+### Documentation
+
+* clarify `%%bigquery`` magics and fix broken link ([#508](https://www.github.com/googleapis/python-bigquery/issues/508)) ([eedf93b](https://www.github.com/googleapis/python-bigquery/commit/eedf93b6636c5ff1bd810c6038cfeaea8ccb64d8))
+* update python contributing guide ([#514](https://www.github.com/googleapis/python-bigquery/issues/514)) ([01e851d](https://www.github.com/googleapis/python-bigquery/commit/01e851d00fc17a780375580776753d78f6d74174))
+
+## [2.8.0](https://www.github.com/googleapis/python-bigquery/compare/v2.7.0...v2.8.0) (2021-02-08)
+
+
+### Features
+
+* Add mTLS support to client. ([#492](https://www.github.com/googleapis/python-bigquery/issues/492)) ([1823cad](https://www.github.com/googleapis/python-bigquery/commit/1823cadee3acf95c516d0479400e4175349ea199))
+
+
+### Bug Fixes
+
+* Don't try to close closed cursors. ([#498](https://www.github.com/googleapis/python-bigquery/issues/498)) ([bf44e7b](https://www.github.com/googleapis/python-bigquery/commit/bf44e7b67d2de41c13053a4550484b9ea049db3e))
+
+## [2.7.0](https://www.github.com/googleapis/python-bigquery/compare/v2.6.2...v2.7.0) (2021-01-27)
+
+
+### Bug Fixes
+
+* invalid conversion of timezone-aware datetime values to JSON ([#480](https://www.github.com/googleapis/python-bigquery/issues/480)) ([61b4385](https://www.github.com/googleapis/python-bigquery/commit/61b438523d305ce66a68fde7cb49e9abbf0a8d1d))
+* reading the labels attribute on Job instances ([#471](https://www.github.com/googleapis/python-bigquery/issues/471)) ([80944f0](https://www.github.com/googleapis/python-bigquery/commit/80944f080bcc4fda870a6daf1d884de616d39ae7))
+* use explicitly given project over the client's default project for load jobs ([#482](https://www.github.com/googleapis/python-bigquery/issues/482)) ([530e1e8](https://www.github.com/googleapis/python-bigquery/commit/530e1e8d8fe8939e914a78ff1b220907c1b87af7))
+
+
+### Dependencies
+
+* declare support for Python 3.9 ([#488](https://www.github.com/googleapis/python-bigquery/issues/488)) ([55daa7d](https://www.github.com/googleapis/python-bigquery/commit/55daa7da9857a8a2fb14a80a4efa3f466386a85f))
+
+## [2.6.2](https://www.github.com/googleapis/python-bigquery/compare/v2.6.1...v2.6.2) (2021-01-11)
+
+
+### Bug Fixes
+
+* add minimum timeout to getQueryResults API requests ([#444](https://www.github.com/googleapis/python-bigquery/issues/444)) ([015a73e](https://www.github.com/googleapis/python-bigquery/commit/015a73e1839e3427408ef6e0f879717d9ddbdb61))
+* use debug logging level for OpenTelemetry message ([#442](https://www.github.com/googleapis/python-bigquery/issues/442)) ([7ea6b7c](https://www.github.com/googleapis/python-bigquery/commit/7ea6b7c2469d2415192cfdacc379e38e49d24775))
+
+
+### Documentation
+
+* add GEOGRAPHY data type code samples ([#428](https://www.github.com/googleapis/python-bigquery/issues/428)) ([dbc68b3](https://www.github.com/googleapis/python-bigquery/commit/dbc68b3d1f325f80d24a2da5f028b0f653fb0317))
+* fix Shapely import in GEOGRAPHY sample ([#431](https://www.github.com/googleapis/python-bigquery/issues/431)) ([96a1c5b](https://www.github.com/googleapis/python-bigquery/commit/96a1c5b3c72855ba6ae8c88dfd0cdb02d2faf909))
+* move and refresh view samples ([#420](https://www.github.com/googleapis/python-bigquery/issues/420)) ([079b6a1](https://www.github.com/googleapis/python-bigquery/commit/079b6a162f6929bf801366d92f8daeb3318426c4))
+
+## [2.6.1](https://www.github.com/googleapis/python-bigquery/compare/v2.6.0...v2.6.1) (2020-12-09)
+
+
+### Bug Fixes
+
+* handle null values in array query parameters ([#426](https://www.github.com/googleapis/python-bigquery/issues/426)) ([78fde4a](https://www.github.com/googleapis/python-bigquery/commit/78fde4a92e61a89d0b490b93acc90fff9635d1bf))
+
+
+### Documentation
+
+* add examples of `fields` argument to update methods ([#418](https://www.github.com/googleapis/python-bigquery/issues/418)) ([8c7e02b](https://www.github.com/googleapis/python-bigquery/commit/8c7e02b0de2c92ee965414e7c430eb57d1877326))
+
+## [2.6.0](https://www.github.com/googleapis/python-bigquery/compare/v2.5.0...v2.6.0) (2020-12-07)
+
+
+### Features
+
+* add support for materialized views ([#408](https://www.github.com/googleapis/python-bigquery/issues/408)) ([57ffc66](https://www.github.com/googleapis/python-bigquery/commit/57ffc665319331e0a00583d5d652fd14a510cf2a)), closes [#407](https://www.github.com/googleapis/python-bigquery/issues/407)
+* convert `BIGNUMERIC` values to decimal objects ([#414](https://www.github.com/googleapis/python-bigquery/issues/414)) ([d472d2d](https://www.github.com/googleapis/python-bigquery/commit/d472d2d2b33e40b954652d31476dea8c90e6a2dc)), closes [#367](https://www.github.com/googleapis/python-bigquery/issues/367)
+* support CSV format in `load_table_from_dataframe` pandas connector ([#399](https://www.github.com/googleapis/python-bigquery/issues/399)) ([0046742](https://www.github.com/googleapis/python-bigquery/commit/0046742abdd2b5eab3c3e935316f91e7eef44d44))
+
+
+### Bug Fixes
+
+* preserve timestamp microsecond precision with rows from REST API ([#402](https://www.github.com/googleapis/python-bigquery/issues/402)) ([04510a7](https://www.github.com/googleapis/python-bigquery/commit/04510a7dc7570466550bbdf500d7020bef2af44d))
+
+
+### Documentation
+
+* update intersphinx links ([#404](https://www.github.com/googleapis/python-bigquery/issues/404)) ([a9d8ae8](https://www.github.com/googleapis/python-bigquery/commit/a9d8ae8a920dec655b77dca9d9128e569f1d07a7))
+
+## [2.5.0](https://www.github.com/googleapis/python-bigquery/compare/v2.4.0...v2.5.0) (2020-12-02)
+
+
+### Features
+
+* add `TableReference.__str__` to get table ID in standard SQL ([#405](https://www.github.com/googleapis/python-bigquery/issues/405)) ([53dff2a](https://www.github.com/googleapis/python-bigquery/commit/53dff2ad3889af04369a22437e6ab9b92c5755b6)), closes [#354](https://www.github.com/googleapis/python-bigquery/issues/354)
+* add progress bar for magics ([#396](https://www.github.com/googleapis/python-bigquery/issues/396)) ([04d0273](https://www.github.com/googleapis/python-bigquery/commit/04d027317a99e3f353e0b7a18076da9b6ba4d8d3))
+* add support for unrecognized model types ([#401](https://www.github.com/googleapis/python-bigquery/issues/401)) ([168f035](https://www.github.com/googleapis/python-bigquery/commit/168f0354c4815bd1aeadbd4e388dcc9b32f97d6b))
+
+
+### Bug Fixes
+
+* avoid floating point for timestamp in `insert_rows` ([#393](https://www.github.com/googleapis/python-bigquery/issues/393)) ([a1949ae](https://www.github.com/googleapis/python-bigquery/commit/a1949ae20ec4f9c771b0cffbcd70792dd6a30dbf))
+
+
+### Performance Improvements
+
+* don't fetch rows when waiting for query to finish ([#400](https://www.github.com/googleapis/python-bigquery/issues/400)) ([730df17](https://www.github.com/googleapis/python-bigquery/commit/730df17ae1ab0b0bb2454f3c134c8f62665bc51b)), closes [#374](https://www.github.com/googleapis/python-bigquery/issues/374) [#394](https://www.github.com/googleapis/python-bigquery/issues/394)
+
+
+### Documentation
+
+* **samples:** add more clustering code snippets ([#330](https://www.github.com/googleapis/python-bigquery/issues/330)) ([809e4a2](https://www.github.com/googleapis/python-bigquery/commit/809e4a27b94ba30c10e0c9a7e89576a9de9fda2b)), closes [#329](https://www.github.com/googleapis/python-bigquery/issues/329)
+
+
+### Dependencies
+
+* update required version of opentelementry for opentelemetry-exporter-google-cloud ([#398](https://www.github.com/googleapis/python-bigquery/issues/398)) ([673a9cb](https://www.github.com/googleapis/python-bigquery/commit/673a9cb51c577c1dd016e76f3634b1e9e21482c5))
+
+## [2.4.0](https://www.github.com/googleapis/python-bigquery/compare/v2.3.1...v2.4.0) (2020-11-16)
+
+
+### Features
+
+* add progress bar to `QueryJob.to_dataframe` and `to_arrow` ([#352](https://www.github.com/googleapis/python-bigquery/issues/352)) ([dc78edd](https://www.github.com/googleapis/python-bigquery/commit/dc78eddde7a6a312c8fed7bace7d64036837ab1a))
+* allow routine references ([#378](https://www.github.com/googleapis/python-bigquery/issues/378)) ([f9480dc](https://www.github.com/googleapis/python-bigquery/commit/f9480dc2a1bc58367083176bd74725aa8b903301))
+
+
+### Bug Fixes
+
+* **dbapi:** allow rows to be fetched from scripts ([#387](https://www.github.com/googleapis/python-bigquery/issues/387)) ([b899ad1](https://www.github.com/googleapis/python-bigquery/commit/b899ad12e17cb87c58d3ae46b4388d917c5743f2)), closes [#377](https://www.github.com/googleapis/python-bigquery/issues/377)
+
+
+### Performance Improvements
+
+* avoid extra API calls from `to_dataframe` if all rows are cached ([#384](https://www.github.com/googleapis/python-bigquery/issues/384)) ([c52b317](https://www.github.com/googleapis/python-bigquery/commit/c52b31789998fc0dfde07c3296650c85104d719d))
+* cache first page of `jobs.getQueryResults` rows ([#374](https://www.github.com/googleapis/python-bigquery/issues/374)) ([86f6a51](https://www.github.com/googleapis/python-bigquery/commit/86f6a516d1c7c5dc204ab085ea2578793e6561ff))
+* use `getQueryResults` from DB-API ([#375](https://www.github.com/googleapis/python-bigquery/issues/375)) ([30de15f](https://www.github.com/googleapis/python-bigquery/commit/30de15f7255de5ea221df4e8db7991d279e0ea28))
+
+
+### Dependencies
+
+* expand pyarrow dependencies to include version 2 ([#368](https://www.github.com/googleapis/python-bigquery/issues/368)) ([cd9febd](https://www.github.com/googleapis/python-bigquery/commit/cd9febd20c34983781386c3bf603e5fca7135695))
+
+## 2.3.1
+
+11-05-2020 09:27 PST
+
+### Internal / Testing Changes
+
+- update `google.cloud.bigquery.__version__`
+
+## [2.3.0](https://www.github.com/googleapis/python-bigquery/compare/v2.2.0...v2.3.0) (2020-11-04)
+
+
+### Features
+
+* add `reload` argument to `*Job.done()` functions ([#341](https://www.github.com/googleapis/python-bigquery/issues/341)) ([e51fd45](https://www.github.com/googleapis/python-bigquery/commit/e51fd45fdb0481ac5d59cc0edbfa0750928b2596))
+* pass retry from Job.result() to Job.done() ([#41](https://www.github.com/googleapis/python-bigquery/issues/41)) ([284e17a](https://www.github.com/googleapis/python-bigquery/commit/284e17a17adf6844a17db2c6fed54a649b1f997e))
+
+
+### Bug Fixes
+
+* add missing spaces in opentelemetry log message ([#360](https://www.github.com/googleapis/python-bigquery/issues/360)) ([4f326b1](https://www.github.com/googleapis/python-bigquery/commit/4f326b1ca4411cfbf5ded86955a963d3e05a409f))
+* **dbapi:** avoid running % format with no query parameters ([#348](https://www.github.com/googleapis/python-bigquery/issues/348)) ([5dd1a5e](https://www.github.com/googleapis/python-bigquery/commit/5dd1a5e77f13b8e576e917069e247c5390a81900))
+* create_job method accepts dictionary arguments ([#300](https://www.github.com/googleapis/python-bigquery/issues/300)) ([155bacc](https://www.github.com/googleapis/python-bigquery/commit/155bacc156f181384ca6dba699ab83d0398176d1))
+
+
+### Performance Improvements
+
+* use `jobs.getQueryResults` to download result sets ([#363](https://www.github.com/googleapis/python-bigquery/issues/363)) ([0c3476d](https://www.github.com/googleapis/python-bigquery/commit/0c3476d56380d70115f6fd765bf5c5261967052f))
+
+
+### Documentation
+
+* add documents for QueryPlanEntry and QueryPlanEntryStep ([#344](https://www.github.com/googleapis/python-bigquery/issues/344)) ([dca2e4c](https://www.github.com/googleapis/python-bigquery/commit/dca2e4ca7c2ae183ac4bb60f653d425a43a86bea))
+
+## [2.2.0](https://www.github.com/googleapis/python-bigquery/compare/v2.1.0...v2.2.0) (2020-10-19)
+
+
+### Features
+
+* add method api_repr for table list item ([#299](https://www.github.com/googleapis/python-bigquery/issues/299)) ([07c70f0](https://www.github.com/googleapis/python-bigquery/commit/07c70f0292f9212f0c968cd5c9206e8b0409c0da))
+* add support for listing arima, automl, boosted tree, DNN, and matrix factorization models ([#328](https://www.github.com/googleapis/python-bigquery/issues/328)) ([502a092](https://www.github.com/googleapis/python-bigquery/commit/502a0926018abf058cb84bd18043c25eba15a2cc))
+* add timeout paramter to load_table_from_file and it dependent methods ([#327](https://www.github.com/googleapis/python-bigquery/issues/327)) ([b0dd892](https://www.github.com/googleapis/python-bigquery/commit/b0dd892176e31ac25fddd15554b5bfa054299d4d))
+* add to_api_repr method to Model ([#326](https://www.github.com/googleapis/python-bigquery/issues/326)) ([fb401bd](https://www.github.com/googleapis/python-bigquery/commit/fb401bd94477323bba68cf252dd88166495daf54))
+* allow client options to be set in magics context ([#322](https://www.github.com/googleapis/python-bigquery/issues/322)) ([5178b55](https://www.github.com/googleapis/python-bigquery/commit/5178b55682f5e264bfc082cde26acb1fdc953a18))
+
+
+### Bug Fixes
+
+* make TimePartitioning repr evaluable ([#110](https://www.github.com/googleapis/python-bigquery/issues/110)) ([20f473b](https://www.github.com/googleapis/python-bigquery/commit/20f473bfff5ae98377f5d9cdf18bfe5554d86ff4)), closes [#109](https://www.github.com/googleapis/python-bigquery/issues/109)
+* use version.py instead of pkg_resources.get_distribution ([#307](https://www.github.com/googleapis/python-bigquery/issues/307)) ([b8f502b](https://www.github.com/googleapis/python-bigquery/commit/b8f502b14f21d1815697e4d57cf1225dfb4a7c5e))
+
+
+### Performance Improvements
+
+* add size parameter for load table from dataframe and json methods ([#280](https://www.github.com/googleapis/python-bigquery/issues/280)) ([3be78b7](https://www.github.com/googleapis/python-bigquery/commit/3be78b737add7111e24e912cd02fc6df75a07de6))
+
+
+### Documentation
+
+* update clustering field docstrings ([#286](https://www.github.com/googleapis/python-bigquery/issues/286)) ([5ea1ece](https://www.github.com/googleapis/python-bigquery/commit/5ea1ece2d911cdd1f3d9549ee01559ce8ed8269a)), closes [#285](https://www.github.com/googleapis/python-bigquery/issues/285)
+* update snippets samples to support version 2.0 ([#309](https://www.github.com/googleapis/python-bigquery/issues/309)) ([61634be](https://www.github.com/googleapis/python-bigquery/commit/61634be9bf9e3df7589fc1bfdbda87288859bb13))
+
+
+### Dependencies
+
+* add protobuf dependency ([#306](https://www.github.com/googleapis/python-bigquery/issues/306)) ([cebb5e0](https://www.github.com/googleapis/python-bigquery/commit/cebb5e0e911e8c9059bc8c9e7fce4440e518bff3)), closes [#305](https://www.github.com/googleapis/python-bigquery/issues/305)
+* require pyarrow for pandas support ([#314](https://www.github.com/googleapis/python-bigquery/issues/314)) ([801e4c0](https://www.github.com/googleapis/python-bigquery/commit/801e4c0574b7e421aa3a28cafec6fd6bcce940dd)), closes [#265](https://www.github.com/googleapis/python-bigquery/issues/265)
+
+## [2.1.0](https://www.github.com/googleapis/python-bigquery/compare/v2.0.0...v2.1.0) (2020-10-08)
+
+
+### Features
+
+* add constants for MONTH and YEAR time partitioning types ([#283](https://www.github.com/googleapis/python-bigquery/issues/283)) ([9090e1c](https://www.github.com/googleapis/python-bigquery/commit/9090e1ccd8825a97835325b4829f6e7ecfd9ea88))
+
+
+### Bug Fixes
+
+* remove unnecessary dependency on libcst ([#308](https://www.github.com/googleapis/python-bigquery/issues/308)) ([c055930](https://www.github.com/googleapis/python-bigquery/commit/c05593094c1405f752b2c51b15202a6dbb5cb83f))
+
+
+### Performance Improvements
+
+* remove redundant array deepcopy ([#26](https://www.github.com/googleapis/python-bigquery/issues/26)) ([b54f867](https://www.github.com/googleapis/python-bigquery/commit/b54f86769c982ce5c8fcbf3889f82450428bb40c))
+
+
+### Documentation
+
+* **samples:** add create_table_clustered code snippet ([#291](https://www.github.com/googleapis/python-bigquery/issues/291)) ([d1eb8b3](https://www.github.com/googleapis/python-bigquery/commit/d1eb8b3dcc789916c5d3ba8464f62b1f8bef35ff))
+
+## 2.0.0
+
+09-30-2020 14:51 PDT
+
+
+### Implementation Changes
+
+- Transition the library to microgenerator. ([#278](https://github.com/googleapis/python-bigquery/pull/278))
+ This is a **breaking change** that **drops support for Python 2.7 and 3.5** and brings a few other changes.
+ See [migration guide](https://googleapis.dev/python/bigquery/latest/UPGRADING.html) for more info.
+
+
+
+### Internal / Testing Changes
+
+- Update protoc-generated comments (via synth). ([#270](https://github.com/googleapis/python-bigquery/pull/270))
+- Add CI secrets manager (via synth). ([#271](https://github.com/googleapis/python-bigquery/pull/271))
+
+## [1.28.0](https://www.github.com/googleapis/python-bigquery/compare/v1.27.2...v1.28.0) (2020-09-22)
+
+
+### Features
+
+* add custom cell magic parser to handle complex `--params` values ([#213](https://www.github.com/googleapis/python-bigquery/issues/213)) ([dcfbac2](https://www.github.com/googleapis/python-bigquery/commit/dcfbac267fbf66d189b0cc7e76f4712122a74b7b))
+* add instrumentation to list methods ([#239](https://www.github.com/googleapis/python-bigquery/issues/239)) ([fa9f9ca](https://www.github.com/googleapis/python-bigquery/commit/fa9f9ca491c3f9954287102c567ec483aa6151d4))
+* add opentelemetry tracing ([#215](https://www.github.com/googleapis/python-bigquery/issues/215)) ([a04996c](https://www.github.com/googleapis/python-bigquery/commit/a04996c537e9d8847411fcbb1b05da5f175b339e))
+* expose require_partition_filter for hive_partition ([#257](https://www.github.com/googleapis/python-bigquery/issues/257)) ([aa1613c](https://www.github.com/googleapis/python-bigquery/commit/aa1613c1bf48c7efb999cb8b8c422c80baf1950b))
+
+
+### Bug Fixes
+
+* fix dependency issue in fastavro ([#241](https://www.github.com/googleapis/python-bigquery/issues/241)) ([2874abf](https://www.github.com/googleapis/python-bigquery/commit/2874abf4827f1ea529519d4b138511d31f732a50))
+* update minimum dependency versions ([#263](https://www.github.com/googleapis/python-bigquery/issues/263)) ([1be66ce](https://www.github.com/googleapis/python-bigquery/commit/1be66ce94a32b1f924bdda05d068c2977631af9e))
+* validate job_config.source_format in load_table_from_dataframe ([#262](https://www.github.com/googleapis/python-bigquery/issues/262)) ([6160fee](https://www.github.com/googleapis/python-bigquery/commit/6160fee4b1a79b0ea9031cc18caf6322fe4c4084))
+
+
+### Documentation
+
+* recommend insert_rows_json to avoid call to tables.get ([#258](https://www.github.com/googleapis/python-bigquery/issues/258)) ([ae647eb](https://www.github.com/googleapis/python-bigquery/commit/ae647ebd68deff6e30ca2cffb5b7422c6de4940b))
+
+## [1.27.2](https://www.github.com/googleapis/python-bigquery/compare/v1.27.1...v1.27.2) (2020-08-18)
+
+
+### Bug Fixes
+
+* rationalize platform constraints for 'pyarrow' extra ([#235](https://www.github.com/googleapis/python-bigquery/issues/235)) ([c9a0567](https://www.github.com/googleapis/python-bigquery/commit/c9a0567f59491b769a9e2fd535430423e39d4fa8))
+
+## [1.27.1](https://www.github.com/googleapis/python-bigquery/compare/v1.27.0...v1.27.1) (2020-08-18)
+
+
+### Bug Fixes
+
+* tweak pyarrow extra to soothe PyPI ([#230](https://www.github.com/googleapis/python-bigquery/issues/230)) ([c15efbd](https://www.github.com/googleapis/python-bigquery/commit/c15efbd1ee4488898fc862768eef701443f492f6))
+
+## [1.27.0](https://www.github.com/googleapis/python-bigquery/compare/v1.26.1...v1.27.0) (2020-08-15)
+
+
+### Features
+
+* add support and tests for struct fields ([#146](https://www.github.com/googleapis/python-bigquery/issues/146)) ([fee2ba8](https://www.github.com/googleapis/python-bigquery/commit/fee2ba80e338d093ee61565359268da91a5c9913))
+* add support for getting and setting table IAM policy ([#144](https://www.github.com/googleapis/python-bigquery/issues/144)) ([f59fc9a](https://www.github.com/googleapis/python-bigquery/commit/f59fc9a482d9f9ae63e2b2bfc80b9a3481d09bde))
+* **bigquery:** add client_options to base class ([#216](https://www.github.com/googleapis/python-bigquery/issues/216)) ([478597a](https://www.github.com/googleapis/python-bigquery/commit/478597a38167fa57b60ae7f65b581f3fe75ddc7c))
+
+
+### Bug Fixes
+
+* converting to dataframe with out of bounds timestamps ([#209](https://www.github.com/googleapis/python-bigquery/issues/209)) ([8209203](https://www.github.com/googleapis/python-bigquery/commit/8209203e967f0624ad306166c0af6f6f1027c550)), closes [#168](https://www.github.com/googleapis/python-bigquery/issues/168)
+* raise error if inserting rows with unknown fields ([#163](https://www.github.com/googleapis/python-bigquery/issues/163)) ([8fe7254](https://www.github.com/googleapis/python-bigquery/commit/8fe725429541eed34ddc01cffc8b1ee846c14162))
+
+## [1.26.1](https://www.github.com/googleapis/python-bigquery/compare/v1.26.0...v1.26.1) (2020-07-25)
+
+### Documentation
+
+* Migrated code samples from
+ https://github.com/GoogleCloudPlatform/python-docs-samples
+
+### Bug Fixes
+
+* RowIterator.to_arrow() error when BQ Storage client cannot be created ([#181](https://www.github.com/googleapis/python-bigquery/issues/181)) ([7afa3d7](https://www.github.com/googleapis/python-bigquery/commit/7afa3d70f8564dcdacda2b9acbbd7207b50b186e))
+
+### Dependencies
+
+* Updated version constraints on grmp dependency in anticipation of 1.0.0 release
+ ([#189](https://github.com/googleapis/python-bigquery/pull/189))
+
+## [1.26.0](https://www.github.com/googleapis/python-bigquery/compare/v1.25.0...v1.26.0) (2020-07-20)
+
+
+### Features
+
+* use BigQuery Storage client by default (if dependencies available) ([#55](https://www.github.com/googleapis/python-bigquery/issues/55)) ([e75ff82](https://www.github.com/googleapis/python-bigquery/commit/e75ff8297c65981545b097f75a17cf9e78ac6772)), closes [#91](https://www.github.com/googleapis/python-bigquery/issues/91)
+* **bigquery:** add __eq__ method for class PartitionRange and RangePartitioning ([#162](https://www.github.com/googleapis/python-bigquery/issues/162)) ([0d2a88d](https://www.github.com/googleapis/python-bigquery/commit/0d2a88d8072154cfc9152afd6d26a60ddcdfbc73))
+* **bigquery:** expose date_as_object parameter to users ([#150](https://www.github.com/googleapis/python-bigquery/issues/150)) ([a2d5ce9](https://www.github.com/googleapis/python-bigquery/commit/a2d5ce9e97992318d7dc85c51c053cab74e25a11))
+* **bigquery:** expose date_as_object parameter to users ([#150](https://www.github.com/googleapis/python-bigquery/issues/150)) ([cbd831e](https://www.github.com/googleapis/python-bigquery/commit/cbd831e08024a67148723afd49e1db085e0a862c))
+
+
+### Bug Fixes
+
+* dry run queries with DB API cursor ([#128](https://www.github.com/googleapis/python-bigquery/issues/128)) ([bc33a67](https://www.github.com/googleapis/python-bigquery/commit/bc33a678a765f0232615aa2038b8cc67c88468a0))
+* omit `NaN` values when uploading from `insert_rows_from_dataframe` ([#170](https://www.github.com/googleapis/python-bigquery/issues/170)) ([f9f2f45](https://www.github.com/googleapis/python-bigquery/commit/f9f2f45bc009c03cd257441bd4b6beb1754e2177))
+
+
+### Documentation
+
+* **bigquery:** add client thread-safety documentation ([#132](https://www.github.com/googleapis/python-bigquery/issues/132)) ([fce76b3](https://www.github.com/googleapis/python-bigquery/commit/fce76b3776472b1da798df862a3405e659e35bab))
+* **bigquery:** add docstring for conflict exception ([#171](https://www.github.com/googleapis/python-bigquery/issues/171)) ([9c3409b](https://www.github.com/googleapis/python-bigquery/commit/9c3409bb06218bf499620544f8e92802df0cce47))
+* **bigquery:** consistent use of optional keyword ([#153](https://www.github.com/googleapis/python-bigquery/issues/153)) ([79d8c61](https://www.github.com/googleapis/python-bigquery/commit/79d8c61064cca18b596a24b6f738c7611721dd5c))
+* **bigquery:** fix the broken docs ([#139](https://www.github.com/googleapis/python-bigquery/issues/139)) ([3235255](https://www.github.com/googleapis/python-bigquery/commit/3235255cc5f483949f34d2e8ef13b372e8713782))
+
+## [1.25.0](https://www.github.com/googleapis/python-bigquery/compare/v1.24.0...v1.25.0) (2020-06-06)
+
+
+### Features
+
+* add BigQuery storage client support to DB API ([#36](https://www.github.com/googleapis/python-bigquery/issues/36)) ([ba9b2f8](https://www.github.com/googleapis/python-bigquery/commit/ba9b2f87e36320d80f6f6460b77e6daddb0fa214))
+* **bigquery:** add create job method ([#32](https://www.github.com/googleapis/python-bigquery/issues/32)) ([2abdef8](https://www.github.com/googleapis/python-bigquery/commit/2abdef82bed31601d1ca1aa92a10fea1e09f5297))
+* **bigquery:** add support of model for extract job ([#71](https://www.github.com/googleapis/python-bigquery/issues/71)) ([4a7a514](https://www.github.com/googleapis/python-bigquery/commit/4a7a514659a9f6f9bbd8af46bab3f8782d6b4b98))
+* add HOUR support for time partitioning interval ([#91](https://www.github.com/googleapis/python-bigquery/issues/91)) ([0dd90b9](https://www.github.com/googleapis/python-bigquery/commit/0dd90b90e3714c1d18f8a404917a9454870e338a))
+* add support for policy tags ([#77](https://www.github.com/googleapis/python-bigquery/issues/77)) ([38a5c01](https://www.github.com/googleapis/python-bigquery/commit/38a5c01ca830daf165592357c45f2fb4016aad23))
+* make AccessEntry objects hashable ([#93](https://www.github.com/googleapis/python-bigquery/issues/93)) ([23a173b](https://www.github.com/googleapis/python-bigquery/commit/23a173bc5a25c0c8200adc5af62eb05624c9099e))
+* **bigquery:** expose start index parameter for query result ([#121](https://www.github.com/googleapis/python-bigquery/issues/121)) ([be86de3](https://www.github.com/googleapis/python-bigquery/commit/be86de330a3c3801653a0ccef90e3d9bdb3eee7a))
+* **bigquery:** unit and system test for dataframe with int column with Nan values ([#39](https://www.github.com/googleapis/python-bigquery/issues/39)) ([5fd840e](https://www.github.com/googleapis/python-bigquery/commit/5fd840e9d4c592c4f736f2fd4792c9670ba6795e))
+
+
+### Bug Fixes
+
+* allow partial streaming_buffer statistics ([#37](https://www.github.com/googleapis/python-bigquery/issues/37)) ([645f0fd](https://www.github.com/googleapis/python-bigquery/commit/645f0fdb35ee0e81ee70f7459e796a42a1f03210))
+* distinguish server timeouts from transport timeouts ([#43](https://www.github.com/googleapis/python-bigquery/issues/43)) ([a17be5f](https://www.github.com/googleapis/python-bigquery/commit/a17be5f01043f32d9fbfb2ddf456031ea9205c8f))
+* improve cell magic error message on missing query ([#58](https://www.github.com/googleapis/python-bigquery/issues/58)) ([6182cf4](https://www.github.com/googleapis/python-bigquery/commit/6182cf48aef8f463bb96891cfc44a96768121dbc))
+* **bigquery:** fix repr of model reference ([#66](https://www.github.com/googleapis/python-bigquery/issues/66)) ([26c6204](https://www.github.com/googleapis/python-bigquery/commit/26c62046f4ec8880cf6561cc90a8b821dcc84ec5))
+* **bigquery:** fix start index with page size for list rows ([#27](https://www.github.com/googleapis/python-bigquery/issues/27)) ([400673b](https://www.github.com/googleapis/python-bigquery/commit/400673b5d0f2a6a3d828fdaad9d222ca967ffeff))
+
+## 1.24.0
+
+02-03-2020 01:38 PST
+
+### Implementation Changes
+
+- Fix inserting missing repeated fields. ([#10196](https://github.com/googleapis/google-cloud-python/pull/10196))
+- Deprecate `client.dataset()` in favor of `DatasetReference`. ([#7753](https://github.com/googleapis/google-cloud-python/pull/7753))
+- Use faster `to_arrow` + `to_pandas` in `to_dataframe()` when `pyarrow` is available. ([#10027](https://github.com/googleapis/google-cloud-python/pull/10027))
+- Write pandas `datetime[ns]` columns to BigQuery TIMESTAMP columns. ([#10028](https://github.com/googleapis/google-cloud-python/pull/10028))
+
+### New Features
+
+- Check `rows` argument type in `insert_rows()`. ([#10174](https://github.com/googleapis/google-cloud-python/pull/10174))
+- Check `json_rows` arg type in `insert_rows_json()`. ([#10162](https://github.com/googleapis/google-cloud-python/pull/10162))
+- Make `RowIterator.to_dataframe_iterable()` method public. ([#10017](https://github.com/googleapis/google-cloud-python/pull/10017))
+- Add retry parameter to public methods where missing. ([#10026](https://github.com/googleapis/google-cloud-python/pull/10026))
+- Add timeout parameter to Client and Job public methods. ([#10002](https://github.com/googleapis/google-cloud-python/pull/10002))
+- Add timeout parameter to `QueryJob.done()` method. ([#9875](https://github.com/googleapis/google-cloud-python/pull/9875))
+- Add `create_bqstorage_client` parameter to `to_dataframe()` and `to_arrow()` methods. ([#9573](https://github.com/googleapis/google-cloud-python/pull/9573))
+
+### Dependencies
+
+- Fix minimum versions of `google-cloud-core` and `google-resumable-media` dependencies. ([#10016](https://github.com/googleapis/google-cloud-python/pull/10016))
+
+### Documentation
+
+- Fix a comment typo in `job.py`. ([#10209](https://github.com/googleapis/google-cloud-python/pull/10209))
+- Update code samples of load table file and load table URI. ([#10175](https://github.com/googleapis/google-cloud-python/pull/10175))
+- Uncomment `Client` constructor and imports in samples. ([#10058](https://github.com/googleapis/google-cloud-python/pull/10058))
+- Remove unused query code sample. ([#10024](https://github.com/googleapis/google-cloud-python/pull/10024))
+- Update code samples to use strings for table and dataset IDs. ([#9974](https://github.com/googleapis/google-cloud-python/pull/9974))
+
+### Internal / Testing Changes
+
+- Bump copyright year to 2020, tweak docstring formatting (via synth). [#10225](https://github.com/googleapis/google-cloud-python/pull/10225)
+- Add tests for concatenating categorical columns. ([#10180](https://github.com/googleapis/google-cloud-python/pull/10180))
+- Adjust test assertions to the new default timeout. ([#10222](https://github.com/googleapis/google-cloud-python/pull/10222))
+- Use Python 3.6 for the nox blacken session (via synth). ([#10012](https://github.com/googleapis/google-cloud-python/pull/10012))
+
+## 1.23.1
+
+12-16-2019 09:39 PST
+
+
+### Implementation Changes
+
+- Add `iamMember` entity type to allowed access classes. ([#9973](https://github.com/googleapis/google-cloud-python/pull/9973))
+- Fix typo in import error message (pandas -> pyarrow). ([#9955](https://github.com/googleapis/google-cloud-python/pull/9955))
+
+### Dependencies
+
+- Add `six` as an explicit dependency. ([#9979](https://github.com/googleapis/google-cloud-python/pull/9979))
+
+### Documentation
+
+- Add sample to read from query destination table. ([#9964](https://github.com/googleapis/google-cloud-python/pull/9964))
+
+## 1.23.0
+
+12-11-2019 13:31 PST
+
+### New Features
+
+- Add `close()` method to client for releasing open sockets. ([#9894](https://github.com/googleapis/google-cloud-python/pull/9894))
+- Add support of `use_avro_logical_types` for extract jobs. ([#9642](https://github.com/googleapis/google-cloud-python/pull/9642))
+- Add support for hive partitioning options configuration. ([#9626](https://github.com/googleapis/google-cloud-python/pull/9626))
+- Add description for routine entities. ([#9785](https://github.com/googleapis/google-cloud-python/pull/9785))
+
+### Documentation
+
+- Update code samples to use strings for table and dataset IDs. ([#9495](https://github.com/googleapis/google-cloud-python/pull/9495))
+
+### Internal / Testing Changes
+
+- Run unit tests with Python 3.8. ([#9880](https://github.com/googleapis/google-cloud-python/pull/9880))
+- Import `Mapping` from `collections.abc` not from `collections`. ([#9826](https://github.com/googleapis/google-cloud-python/pull/9826))
+
+## 1.22.0
+
+11-13-2019 12:23 PST
+
+
+### Implementation Changes
+- Preserve job config passed to Client methods. ([#9735](https://github.com/googleapis/google-cloud-python/pull/9735))
+- Use pyarrow fallback for improved schema detection. ([#9321](https://github.com/googleapis/google-cloud-python/pull/9321))
+- Add TypeError if wrong `job_config type` is passed to client job methods. ([#9506](https://github.com/googleapis/google-cloud-python/pull/9506))
+- Fix arrow deprecation warning. ([#9504](https://github.com/googleapis/google-cloud-python/pull/9504))
+
+### New Features
+- Add `--destination_table` parameter to IPython magic. ([#9599](https://github.com/googleapis/google-cloud-python/pull/9599))
+- Allow passing schema as a sequence of dicts. ([#9550](https://github.com/googleapis/google-cloud-python/pull/9550))
+- Implement defaultEncryptionConfiguration on datasets. ([#9489](https://github.com/googleapis/google-cloud-python/pull/9489))
+- Add range partitioning to tables, load jobs, and query jobs. ([#9477](https://github.com/googleapis/google-cloud-python/pull/9477))
+
+### Dependencies
+- Pin `google-resumable-media` to includ 0.5.x. ([#9572](https://github.com/googleapis/google-cloud-python/pull/9572))
+
+### Documentation
+- Fix link anchors in external config docstrings. ([#9627](https://github.com/googleapis/google-cloud-python/pull/9627))
+- Add python 2 sunset banner to documentation. ([#9036](https://github.com/googleapis/google-cloud-python/pull/9036))
+- Add table create sample using integer range partitioning. ([#9478](https://github.com/googleapis/google-cloud-python/pull/9478))
+- Document how to achieve higher write limit and add tests. ([#9574](https://github.com/googleapis/google-cloud-python/pull/9574))
+- Add code sample for scripting. ([#9537](https://github.com/googleapis/google-cloud-python/pull/9537))
+- Rewrite docs in Google style, part 2. ([#9481](https://github.com/googleapis/google-cloud-python/pull/9481))
+- Use multi-regional key path for CMEK in snippets. ([#9523](https://github.com/googleapis/google-cloud-python/pull/9523))
+
+### Internal / Testing Changes
+- Fix undelete table system test to use milliseconds in snapshot decorator. ([#9649](https://github.com/googleapis/google-cloud-python/pull/9649))
+- Format code with latest version of black. ([#9556](https://github.com/googleapis/google-cloud-python/pull/9556))
+- Remove duplicate test dependencies. ([#9503](https://github.com/googleapis/google-cloud-python/pull/9503))
+
+## 1.21.0
+
+10-16-2019 10:33 PDT
+
+
+### New Features
+
+- add ability to pass in a table ID instead of a query to the `%%bigquery` magic ([#9170](https://github.com/googleapis/google-cloud-python/pull/9170))
+- add support for custom `QueryJobConfig` in `BigQuery.cursor.execute` method ([#9278](https://github.com/googleapis/google-cloud-python/pull/9278))
+- store `QueryJob` to destination var on error in `%%bigquery` magic ([#9245](https://github.com/googleapis/google-cloud-python/pull/9245))
+- add script statistics to job resource ([#9428](https://github.com/googleapis/google-cloud-python/pull/9428))
+- add support for sheets ranges ([#9416](https://github.com/googleapis/google-cloud-python/pull/9416))
+- add support for listing jobs by parent job ([#9225](https://github.com/googleapis/google-cloud-python/pull/9225))
+- expose customer managed encryption key for ML models ([#9302](https://github.com/googleapis/google-cloud-python/pull/9302))
+- add `Dataset.default_partition_expiration_ms` and `Table.require_partition_filter` properties ([#9464](https://github.com/googleapis/google-cloud-python/pull/9464))
+
+### Dependencies
+
+- restrict version range of `google-resumable-media` ([#9243](https://github.com/googleapis/google-cloud-python/pull/9243))
+
+### Documentation
+
+- document how to load data as JSON string ([#9231](https://github.com/googleapis/google-cloud-python/pull/9231))
+- standardize comments and formatting in existing code samples ([#9212](https://github.com/googleapis/google-cloud-python/pull/9212))
+- rewrite docstrings in Google style ([#9326](https://github.com/googleapis/google-cloud-python/pull/9326))
+- fix incorrect links to REST API in reference docs ([#9436](https://github.com/googleapis/google-cloud-python/pull/9436))
+
+### Internal / Testing Changes
+
+- add code samples to lint check ([#9277](https://github.com/googleapis/google-cloud-python/pull/9277))
+- update code samples to use strings for table and dataset IDs ([#9136](https://github.com/googleapis/google-cloud-python/pull/9136))
+- simplify scripting system test to reduce flakiness ([#9458](https://github.com/googleapis/google-cloud-python/pull/9458))
+
+## 1.20.0
+
+09-13-2019 11:22 PDT
+
+
+### Implementation Changes
+- Change default endpoint to bigquery.googleapis.com ([#9213](https://github.com/googleapis/google-cloud-python/pull/9213))
+- Change the default value of Cursor instances' `arraysize` attribute to None ([#9199](https://github.com/googleapis/google-cloud-python/pull/9199))
+- Deprecate automatic schema conversion. ([#9176](https://github.com/googleapis/google-cloud-python/pull/9176))
+- Fix `list_rows()` max results with BQ storage client ([#9178](https://github.com/googleapis/google-cloud-python/pull/9178))
+
+### New Features
+- Add `Model.encryption_config`. (via synth) ([#9214](https://github.com/googleapis/google-cloud-python/pull/9214))
+- Add `Client.insert_rows_from_dataframe()` method ([#9162](https://github.com/googleapis/google-cloud-python/pull/9162))
+- Add support for array parameters to `Cursor.execute()`. ([#9189](https://github.com/googleapis/google-cloud-python/pull/9189))
+- Add support for project IDs with org prefix to `Table.from_string()` factory. ([#9161](https://github.com/googleapis/google-cloud-python/pull/9161))
+- Add `--max_results` option to Jupyter magics ([#9169](https://github.com/googleapis/google-cloud-python/pull/9169))
+- Autofetch table schema on load if not provided. ([#9108](https://github.com/googleapis/google-cloud-python/pull/9108))
+- Add `max_results` parameter to `QueryJob.result()`. ([#9167](https://github.com/googleapis/google-cloud-python/pull/9167))
+
+### Documentation
+- Fix doc link. ([#9200](https://github.com/googleapis/google-cloud-python/pull/9200))
+
+### Internal / Testing Changes
+- Revert "Disable failing snippets test ([#9156](https://github.com/googleapis/google-cloud-python/pull/9156))." ([#9220](https://github.com/googleapis/google-cloud-python/pull/9220))
+
+## 1.19.0
+
+09-03-2019 14:33 PDT
+
+### Implementation Changes
+
+- Raise when unexpected fields are present in the `LoadJobConfig.schema` when calling `load_table_from_dataframe`. ([#9096](https://github.com/googleapis/google-cloud-python/pull/9096))
+- Determine the schema in `load_table_from_dataframe` based on dtypes. ([#9049](https://github.com/googleapis/google-cloud-python/pull/9049))
+- Raise helpful error when loading table from dataframe with `STRUCT` columns. ([#9053](https://github.com/googleapis/google-cloud-python/pull/9053))
+- Fix schema recognition of struct field types. ([#9001](https://github.com/googleapis/google-cloud-python/pull/9001))
+- Fix deserializing `None` in `QueryJob` for queries with parameters. ([#9029](https://github.com/googleapis/google-cloud-python/pull/9029))
+
+### New Features
+
+- Include indexes in table written by `load_table_from_dataframe`, only if
+ fields corresponding to indexes are present in `LoadJobConfig.schema`.
+ ([#9084](https://github.com/googleapis/google-cloud-python/pull/9084))
+- Add `client_options` to constructor. ([#8999](https://github.com/googleapis/google-cloud-python/pull/8999))
+- Add `--dry_run` option to `%%bigquery` magic. ([#9067](https://github.com/googleapis/google-cloud-python/pull/9067))
+- Add `load_table_from_json()` method to create a table from a list of dictionaries. ([#9076](https://github.com/googleapis/google-cloud-python/pull/9076))
+- Allow subset of schema to be passed into `load_table_from_dataframe`. ([#9064](https://github.com/googleapis/google-cloud-python/pull/9064))
+- Add support for unsetting `LoadJobConfig.schema`. ([#9077](https://github.com/googleapis/google-cloud-python/pull/9077))
+- Add support to `Dataset` for project IDs containing an org prefix. ([#8877](https://github.com/googleapis/google-cloud-python/pull/8877))
+- Add enum with SQL type names allowed to be used in `SchemaField`. ([#9040](https://github.com/googleapis/google-cloud-python/pull/9040))
+
+### Documentation
+
+- Fix the reference URL for `Client.create_dataset()`. ([#9149](https://github.com/googleapis/google-cloud-python/pull/9149))
+- Update code samples to use strings for table names instead of `client.dataset()`. ([#9032](https://github.com/googleapis/google-cloud-python/pull/9032))
+- Remove compatability badges from READMEs. ([#9035](https://github.com/googleapis/google-cloud-python/pull/9035))
+- Fix Pandas DataFrame load example under Python 2.7. ([#9022](https://github.com/googleapis/google-cloud-python/pull/9022))
+
+### Internal / Testing Changes
+
+- Disable failing snippets test for copying CMEK-protected tables. ([#9156](https://github.com/googleapis/google-cloud-python/pull/9156))
+- Fix BigQuery client unit test assertions ([#9112](https://github.com/googleapis/google-cloud-python/pull/9112))
+- Replace avro with arrow schemas in `test_table.py` ([#9056](https://github.com/googleapis/google-cloud-python/pull/9056))
+
+## 1.18.0
+
+08-08-2019 12:28 PDT
+
+### New Features
+
+- Add `bqstorage_client` param to `QueryJob.to_arrow()` ([#8693](https://github.com/googleapis/google-cloud-python/pull/8693))
+- Include SQL query and job ID in exception messages. ([#8748](https://github.com/googleapis/google-cloud-python/pull/8748))
+- Allow using TableListItem to construct a Table object. ([#8738](https://github.com/googleapis/google-cloud-python/pull/8738))
+- Add StandardSqlDataTypes enum to BigQuery ([#8782](https://github.com/googleapis/google-cloud-python/pull/8782))
+- Add `to_standard_sql()` method to SchemaField ([#8880](https://github.com/googleapis/google-cloud-python/pull/8880))
+- Add debug logging statements to track when BQ Storage API is used. ([#8838](https://github.com/googleapis/google-cloud-python/pull/8838))
+- Hide error traceback in BigQuery cell magic ([#8808](https://github.com/googleapis/google-cloud-python/pull/8808))
+- Allow choice of compression when loading from dataframe ([#8938](https://github.com/googleapis/google-cloud-python/pull/8938))
+- Additional clustering metrics for BQML K-means models (via synth). ([#8945](https://github.com/googleapis/google-cloud-python/pull/8945))
+
+### Documentation
+
+- Add compatibility check badges to READMEs. ([#8288](https://github.com/googleapis/google-cloud-python/pull/8288))
+- Link to googleapis.dev documentation in READMEs. ([#8705](https://github.com/googleapis/google-cloud-python/pull/8705))
+- Remove redundant service account key code sample. ([#8891](https://github.com/googleapis/google-cloud-python/pull/8891))
+
+### Internal / Testing Changes
+
+- Fix several pytest "skip if" markers ([#8694](https://github.com/googleapis/google-cloud-python/pull/8694))
+- Update tests to support conversion of NaN as NULL in pyarrow `0.14.*`. ([#8785](https://github.com/googleapis/google-cloud-python/pull/8785))
+- Mock external calls in one of BigQuery unit tests ([#8727](https://github.com/googleapis/google-cloud-python/pull/8727))
+- Set IPython user agent when running queries with IPython cell magic ([#8713](https://github.com/googleapis/google-cloud-python/pull/8713))
+- Use configurable bucket name for GCS samples data in systems tests. ([#8783](https://github.com/googleapis/google-cloud-python/pull/8783))
+- Move `maybe_fail_import()` to top level test utils ([#8840](https://github.com/googleapis/google-cloud-python/pull/8840))
+- Set BQ Storage client user-agent when in Jupyter cell ([#8734](https://github.com/googleapis/google-cloud-python/pull/8734))
+
+## 1.17.0
+
+07-12-2019 07:56 PDT
+
+### New Features
+
+- Support faster Arrow data format in `to_dataframe` when using BigQuery Storage API. ([#8551](https://github.com/googleapis/google-cloud-python/pull/8551))
+- Add `to_arrow` to get a `pyarrow.Table` from query results. ([#8609](https://github.com/googleapis/google-cloud-python/pull/8609))
+
+### Dependencies
+
+- Exclude bad 0.14.0 `pyarrow` release. ([#8551](https://github.com/googleapis/google-cloud-python/pull/8551))
+
+## 1.16.0
+
+07-01-2019 10:22 PDT
+
+### New Features
+
+- Add Routines API. ([#8491](https://github.com/googleapis/google-cloud-python/pull/8491))
+- Add more stats to Models API, such as `optimization_strategy` (via synth). ([#8344](https://github.com/googleapis/google-cloud-python/pull/8344))
+
+### Documentation
+
+- Add docs job to publish to googleapis.dev. ([#8464](https://github.com/googleapis/google-cloud-python/pull/8464))
+- Add sample demonstrating how to create a job. ([#8422](https://github.com/googleapis/google-cloud-python/pull/8422))
+- Use autodetected location in code samples. ([#8340](https://github.com/googleapis/google-cloud-python/pull/8340), [#8341](https://github.com/googleapis/google-cloud-python/pull/8341))
+
+### Internal / Testing Changes
+
+- Refactor `to_dataframe` to deterministicly update progress bar. ([#8303](https://github.com/googleapis/google-cloud-python/pull/8303))
+
+## 1.15.0
+
+06-14-2019 10:10 PDT
+
+### Implementation Changes
+
+- Fix bug where `load_table_from_dataframe` could not append to REQUIRED fields. ([#8230](https://github.com/googleapis/google-cloud-python/pull/8230))
+
+### New Features
+
+- Add `page_size` parameter to `QueryJob.result`. ([#8206](https://github.com/googleapis/google-cloud-python/pull/8206))
+
+## 1.14.0
+
+06-04-2019 11:11 PDT
+
+
+### New Features
+- Add `maximum_bytes_billed` argument and `context.default_query_job_config` property to magics. ([#8179](https://github.com/googleapis/google-cloud-python/pull/8179))
+
+### Dependencies
+- Don't pin `google-api-core` in libs using `google-cloud-core`. ([#8213](https://github.com/googleapis/google-cloud-python/pull/8213))
+
+## 1.13.0
+
+05-31-2019 10:22 PDT
+
+### New Features
+
+- Use `job_config.schema` for data type conversion if specified in `load_table_from_dataframe`. ([#8105](https://github.com/googleapis/google-cloud-python/pull/8105))
+
+### Internal / Testing Changes
+
+- Adds private `_connection` object to magics context. ([#8192](https://github.com/googleapis/google-cloud-python/pull/8192))
+- Fix coverage in 'types.py' (via synth). ([#8146](https://github.com/googleapis/google-cloud-python/pull/8146))
+
+## 1.12.1
+
+05-21-2019 11:16 PDT
+
+### Implementation Changes
+
+- Don't raise error when encountering unknown fields in Models API. ([#8083](https://github.com/googleapis/google-cloud-python/pull/8083))
+
+### Documentation
+
+- Use alabaster theme everwhere. ([#8021](https://github.com/googleapis/google-cloud-python/pull/8021))
+
+### Internal / Testing Changes
+
+- Add empty lines (via synth). ([#8049](https://github.com/googleapis/google-cloud-python/pull/8049))
+
+## 1.12.0
+
+05-16-2019 11:25 PDT
+
+### Implementation Changes
+- Remove duplicates from index on pandas DataFrames returned by `to_dataframe()`. ([#7953](https://github.com/googleapis/google-cloud-python/pull/7953))
+- Prevent error when time partitioning is populated with empty dict ([#7904](https://github.com/googleapis/google-cloud-python/pull/7904))
+- Preserve order in `to_dataframe` with BQ Storage from queries containing `ORDER BY` ([#7793](https://github.com/googleapis/google-cloud-python/pull/7793))
+- Respect `progress_bar_type` in `to_dataframe` when used with BQ Storage API ([#7697](https://github.com/googleapis/google-cloud-python/pull/7697))
+- Refactor QueryJob.query to read from resource dictionary ([#7763](https://github.com/googleapis/google-cloud-python/pull/7763))
+- Close the `to_dataframe` progress bar when finished. ([#7757](https://github.com/googleapis/google-cloud-python/pull/7757))
+- Ensure that `KeyboardInterrupt` during `to_dataframe`no longer hangs. ([#7698](https://github.com/googleapis/google-cloud-python/pull/7698))
+- Raise ValueError when BQ Storage is required but missing ([#7726](https://github.com/googleapis/google-cloud-python/pull/7726))
+- Make `total_rows` available on RowIterator before iteration ([#7622](https://github.com/googleapis/google-cloud-python/pull/7622))
+- Avoid masking auth errors in `to_dataframe` with BQ Storage API ([#7674](https://github.com/googleapis/google-cloud-python/pull/7674))
+
+### New Features
+- Add support for passing `client_info`. ([#7849](https://github.com/googleapis/google-cloud-python/pull/7849) and ([#7806](https://github.com/googleapis/google-cloud-python/pull/7806))
+- Phase 1 for storing schemas for later use. ([#7761](https://github.com/googleapis/google-cloud-python/pull/7761))
+- Add `destination` and related properties to LoadJob. ([#7710](https://github.com/googleapis/google-cloud-python/pull/7710))
+- Add `clustering_fields` property to TableListItem ([#7692](https://github.com/googleapis/google-cloud-python/pull/7692))
+- Add `created` and `expires` properties to TableListItem ([#7684](https://github.com/googleapis/google-cloud-python/pull/7684))
+
+### Dependencies
+- Pin `google-cloud-core >= 1.0.0, < 2.0dev`. ([#7993](https://github.com/googleapis/google-cloud-python/pull/7993))
+- Add `[all]` extras to install all extra dependencies ([#7610](https://github.com/googleapis/google-cloud-python/pull/7610))
+
+### Documentation
+- Move table and dataset snippets to samples/ directory ([#7683](https://github.com/googleapis/google-cloud-python/pull/7683))
+
+### Internal / Testing Changes
+- Blacken unit tests. ([#7960](https://github.com/googleapis/google-cloud-python/pull/7960))
+- Cleanup client tests with method to create minimal table resource ([#7802](https://github.com/googleapis/google-cloud-python/pull/7802))
+
+## 1.11.2
+
+04-05-2019 08:16 PDT
+
+### Dependencies
+
+- Add dependency on protobuf. ([#7668](https://github.com/googleapis/google-cloud-python/pull/7668))
+
+## 1.11.1
+
+04-04-2019 09:19 PDT
+
+### Internal / Testing Changes
+
+- Increment version number in `setup.py`.
+
+## 1.11.0
+
+04-03-2019 19:33 PDT
+
+### Implementation Changes
+
+- Remove classifier for Python 3.4 for end-of-life. ([#7535](https://github.com/googleapis/google-cloud-python/pull/7535))
+
+### New Features
+
+- Enable fastparquet support by using temporary file in `load_table_from_dataframe` ([#7545](https://github.com/googleapis/google-cloud-python/pull/7545))
+- Allow string for copy sources, query destination, and default dataset ([#7560](https://github.com/googleapis/google-cloud-python/pull/7560))
+- Add `progress_bar_type` argument to `to_dataframe` to use `tqdm` to display a progress bar ([#7552](https://github.com/googleapis/google-cloud-python/pull/7552))
+- Call `get_table` in `list_rows` if the schema is not available ([#7621](https://github.com/googleapis/google-cloud-python/pull/7621))
+- Fallback to BQ API when there are problems reading from BQ Storage. ([#7633](https://github.com/googleapis/google-cloud-python/pull/7633))
+- Add methods for Models API ([#7562](https://github.com/googleapis/google-cloud-python/pull/7562))
+- Add option to use BigQuery Storage API from IPython magics ([#7640](https://github.com/googleapis/google-cloud-python/pull/7640))
+
+### Documentation
+
+- Remove typo in `Table.from_api_repr` docstring. ([#7509](https://github.com/googleapis/google-cloud-python/pull/7509))
+- Add docs session to nox configuration for BigQuery ([#7541](https://github.com/googleapis/google-cloud-python/pull/7541))
+
+### Internal / Testing Changes
+
+- Refactor `table()` methods into shared implementation. ([#7516](https://github.com/googleapis/google-cloud-python/pull/7516))
+- Blacken noxfile and setup file in nox session ([#7619](https://github.com/googleapis/google-cloud-python/pull/7619))
+- Actually use the `progress_bar_type` argument in `QueryJob.to_dataframe()`. ([#7616](https://github.com/googleapis/google-cloud-python/pull/7616))
+
+## 1.10.0
+
+03-06-2019 15:20 PST
+
+### Implementation Changes
+
+- Harden 'ArrayQueryParameter.from_api_repr' against missing 'parameterValue'. ([#7311](https://github.com/googleapis/google-cloud-python/pull/7311))
+- Allow nested records w/ null values. ([#7297](https://github.com/googleapis/google-cloud-python/pull/7297))
+
+### New Features
+
+- Add `exists_ok` and `not_found_ok` options to ignore errors when creating/deleting datasets/tables. ([#7491](https://github.com/googleapis/google-cloud-python/pull/7491))
+- Accept a string in Table and Dataset constructors. ([#7483](https://github.com/googleapis/google-cloud-python/pull/7483))
+
+### Documentation
+
+- Update docstring of RowIterator's to_dataframe ([#7306](https://github.com/googleapis/google-cloud-python/pull/7306))
+- Updated client library documentation URLs. ([#7307](https://github.com/googleapis/google-cloud-python/pull/7307))
+
+### Internal / Testing Changes
+
+- Fix lint. ([#7383](https://github.com/googleapis/google-cloud-python/pull/7383))
+
+## 1.9.0
+
+02-04-2019 13:28 PST
+
+### New Features
+
+- Add arguments to select `dtypes` and use BQ Storage API to `QueryJob.to_dataframe()`. ([#7241](https://github.com/googleapis/google-cloud-python/pull/7241))
+
+### Documentation
+
+- Add sample for fetching `total_rows` from query results. ([#7217](https://github.com/googleapis/google-cloud-python/pull/7217))
+
+## 1.8.1
+
+12-17-2018 17:53 PST
+
+
+### Documentation
+- Document Python 2 deprecation ([#6910](https://github.com/googleapis/google-cloud-python/pull/6910))
+- Normalize docs for 'page_size' / 'max_results' / 'page_token' ([#6842](https://github.com/googleapis/google-cloud-python/pull/6842))
+
+## 1.8.0
+
+12-10-2018 12:39 PST
+
+
+### Implementation Changes
+- Add option to use BQ Storage API with `to_dataframe` ([#6854](https://github.com/googleapis/google-cloud-python/pull/6854))
+- Fix exception type in comment ([#6847](https://github.com/googleapis/google-cloud-python/pull/6847))
+- Add `to_bqstorage` to convert from Table[Reference] google-cloud-bigquery-storage reference ([#6840](https://github.com/googleapis/google-cloud-python/pull/6840))
+- Import `iam.policy` from `google.api_core`. ([#6741](https://github.com/googleapis/google-cloud-python/pull/6741))
+- Add avro logical type control for load jobs. ([#6827](https://github.com/googleapis/google-cloud-python/pull/6827))
+- Allow setting partition expiration to 'None'. ([#6823](https://github.com/googleapis/google-cloud-python/pull/6823))
+- Add `retry` argument to `_AsyncJob.result`. ([#6302](https://github.com/googleapis/google-cloud-python/pull/6302))
+
+### Dependencies
+- Update dependency to google-cloud-core ([#6835](https://github.com/googleapis/google-cloud-python/pull/6835))
+
+### Documentation
+- Add avro load samples ([#6832](https://github.com/googleapis/google-cloud-python/pull/6832))
+
+### Internal / Testing Changes
+- Blacken libraries ([#6794](https://github.com/googleapis/google-cloud-python/pull/6794))
+- Fix copy/paste typos in noxfile comments ([#6831](https://github.com/googleapis/google-cloud-python/pull/6831))
+
+## 1.7.0
+
+11-05-2018 16:41 PST
+
+### Implementation Changes
+
+- Add destination table properties to `LoadJobConfig`. ([#6202](https://github.com/googleapis/google-cloud-python/pull/6202))
+- Allow strings or references in `create_dataset` and `create_table` ([#6199](https://github.com/googleapis/google-cloud-python/pull/6199))
+- Fix swallowed error message ([#6168](https://github.com/googleapis/google-cloud-python/pull/6168))
+
+### New Features
+
+- Add `--params option` to `%%bigquery` magic ([#6277](https://github.com/googleapis/google-cloud-python/pull/6277))
+- Expose `to_api_repr` method for jobs. ([#6176](https://github.com/googleapis/google-cloud-python/pull/6176))
+- Allow string in addition to DatasetReference / TableReference in Client methods. ([#6164](https://github.com/googleapis/google-cloud-python/pull/6164))
+- Add keyword arguments to job config constructors for setting properties ([#6397](https://github.com/googleapis/google-cloud-python/pull/6397))
+
+### Documentation
+
+- Update README service links in quickstart guides. ([#6322](https://github.com/googleapis/google-cloud-python/pull/6322))
+- Move usage guides to their own docs. ([#6238](https://github.com/googleapis/google-cloud-python/pull/6238))
+- Normalize use of support level badges ([#6159](https://github.com/googleapis/google-cloud-python/pull/6159))
+
+### Internal / Testing Changes
+
+- Deprecation cleanups ([#6304](https://github.com/googleapis/google-cloud-python/pull/6304))
+- Use `_get_sub_prop` helper so missing load stats don't raise. ([#6269](https://github.com/googleapis/google-cloud-python/pull/6269))
+- Use new Nox ([#6175](https://github.com/googleapis/google-cloud-python/pull/6175))
+- Harden snippets against transient GCS errors. ([#6184](https://github.com/googleapis/google-cloud-python/pull/6184))
+
+## 1.6.0
+
+### New Features
+- Add support for `GEOGRAPHY` type ([#6147](https://github.com/googleapis/google-cloud-python/pull/6147))
+- Add default QueryJobConfig to Client ([#6088](https://github.com/googleapis/google-cloud-python/pull/6088))
+
+### Documentation
+- Remove unused "append" samples ([#6100](https://github.com/googleapis/google-cloud-python/pull/6100))
+
+### Internal / Testing Changes
+- Address dataset leaks, conflicts in systests ([#6099](https://github.com/googleapis/google-cloud-python/pull/6099))
+- Harden bucket teardown against `429 Too Many Requests`. ([#6101](https://github.com/googleapis/google-cloud-python/pull/6101))
+
+## 1.5.1
+
+### Implementation Changes
+
+- Retry '502 Bad Gateway' errors by default. (#5930)
+- Avoid pulling entire result set into memory when constructing dataframe. (#5870)
+- Add support for retrying unstructured 429 / 500 / 502 responses. (#6011)
+- Populate the jobReference from the API response. (#6044)
+
+### Documentation
+
+- Prepare documentation for repo split (#5955)
+- Fix leakage of bigquery/spanner sections into sidebar menu. (#5986)
+
+### Internal / Testing Changes
+
+- Test pandas support under Python 3.7. (#5857)
+- Nox: use inplace installs (#5865)
+- Update system test to use test data in bigquery-public-data. (#5965)
+
+## 1.5.0
+
+### Implementation Changes
+
+- Make 'Table.location' read-only. (#5687)
+
+### New Features
+
+- Add 'clustering_fields' properties. (#5630)
+- Add support for job labels (#5654)
+- Add 'QueryJob.estimated_bytes_processed' property (#5655)
+- Add support/tests for loading tables from 'gzip.GzipFile'. (#5711)
+- Add 'ExternalSourceFormat' enum. (#5674)
+- Add default location to client (#5678)
+
+### Documentation
+
+- Fix typo in CopyJob sources docstring (#5690)
+
+### Internal / Testing Changes
+
+- Add/refactor snippets for managing BigQuery jobs (#5631)
+- Reenable systests for 'dataset.update'/'table.update'. (#5732)
+
+## 1.4.0
+
+### Implementation Changes
+
+- Add 'internalError' to retryable error reasons. (#5599)
+- Don't raise exception if viewing CREATE VIEW DDL results (#5602)
+
+### New Features
+
+- Add Orc source format support and samples (#5500)
+- Move 'DEFAULT_RETRY' (w/ its predicate) to a new public 'retry' module. (#5552)
+- Allow listing rows on an empty table. (#5584)
+
+### Documentation
+
+- Add load_table_from_dataframe() to usage docs and changelog and dedents snippets in usage page (#5501)
+- Add samples for query external data sources (GCS & Sheets) (#5491)
+- Add BigQuery authorized view samples (#5515)
+- Update docs to show pyarrow as the only dependency of load_table_from_dataframe() (#5582)
+
+### Internal / Testing Changes
+
+- Add missing explict coverage for '_helpers' (#5550)
+- Skip update_table and update_dataset tests until etag issue is resolved. (#5590)
+
+## 1.3.0
+
+### New Features
+
+- NUMERIC type support (#5331)
+- Add timeline and top-level slot-millis to query statistics. (#5312)
+- Add additional statistics to query plan stages. (#5307)
+- Add `client.load_table_from_dataframe()` (#5387)
+
+### Documentation
+
+- Use autosummary to split up API reference docs (#5340)
+- Fix typo in Client docstrings (#5342)
+
+### Internal / Testing Changes
+
+- Prune systests identified as reduntant to snippets. (#5365)
+- Modify system tests to use prerelease versions of grpcio (#5304)
+- Improve system test performance (#5319)
+
+## 1.2.0
+
+### Implementation Changes
+- Switch `list_partitions` helper to a direct metatable read (#5273)
+- Fix typo in `Encoding.ISO_8859_1` enum value (#5211)
+
+### New Features
+- Add UnknownJob type for redacted jobs. (#5281)
+- Add project parameter to `list_datasets` and `list_jobs` (#5217)
+- Add from_string factory methods to Dataset and Table (#5255)
+- Add column based time partitioning (#5267)
+
+### Documentation
+- Standardize docstrings for constants (#5289)
+- Fix docstring / impl of `ExtractJob.destination_uri_file_counts`. (#5245)
+
+### Internal / Testing Changes
+- Add testing support for Python 3.7; remove testing support for Python 3.4. (#5295)
+
+## 1.1.0
+
+### New Features
+- Add `client.get_service_account_email` (#5203)
+
+### Documentation
+- Update samples and standardize region tags (#5195)
+
+### Internal / Testing Changes
+- Fix trove classifier to be Production/Stable
+- Don't suppress 'dots' output on test (#5202)
+
+## 1.0.0
+
+### Implementation Changes
+- Remove deprecated Client methods (#5182)
+
+## 0.32.0
+
+### :warning: Interface changes
+
+- Use `job.configuration` resource for XXXJobConfig classes (#5036)
+
+### Interface additions
+
+- Add `page_size` parameter for `list_rows` and use in DB-API for `arraysize` (#4931)
+- Add IPython magics for running queries (#4983)
+
+### Documentation
+
+- Add job string constant parameters in init and snippets documentation (#4987)
+
+### Internal / Testing changes
+
+- Specify IPython version 5.5 when running Python 2.7 tests (#5145)
+- Move all Dataset property conversion logic into properties (#5130)
+- Remove unnecessary _Table class from test_job.py (#5126)
+- Use explicit bytes to initialize 'BytesIO'. (#5116)
+- Make SchemaField be able to include description via from_api_repr method (#5114)
+- Remove _ApiResourceProperty class (#5107)
+- Add dev version for 0.32.0 release (#5105)
+- StringIO to BytesIO (#5101)
+- Shorten snippets test name (#5091)
+- Don't use `selected_fields` for listing query result rows (#5072)
+- Add location property to job classes. (#5071)
+- Use autospec for Connection in tests. (#5066)
+- Add Parquet SourceFormat and samples (#5057)
+- Remove test_load_table_from_uri_w_autodetect_schema_then_get_job because of duplicate test in snippets (#5004)
+- Fix encoding variable and strings UTF-8 and ISO-8859-1 difference documentation (#4990)
+
+## 0.31.0
+
+### Interface additions
+
+- Add support for `EncryptionConfiguration` (#4845)
+
+### Implementation changes
+
+- Allow listing/getting jobs even when there is an "invalid" job. (#4786)
+
+### Dependencies
+
+- The minimum version for `google-api-core` has been updated to version 1.0.0. This may cause some incompatibility with older google-cloud libraries, you will need to update those libraries if you have a dependency conflict. (#4944, #4946)
+
+### Documentation
+
+- Update format in `Table.full_table_id` and `TableListItem.full_table_id` docstrings. (#4906)
+
+### Testing and internal changes
+
+- Install local dependencies when running lint (#4936)
+- Re-enable lint for tests, remove usage of pylint (#4921)
+- Normalize all setup.py files (#4909)
+- Remove unnecessary debug print from tests (#4907)
+- Use constant strings for job properties in tests (#4833)
+
+## 0.30.0
+
+This is the release candidate for v1.0.0.
+
+### Interface changes / additions
+
+- Add `delete_contents` to `delete_dataset`. (#4724)
+
+### Bugfixes
+
+- Add handling of missing properties in `SchemaField.from_api_repr()`. (#4754)
+- Fix missing return value in `LoadJobConfig.from_api_repr`. (#4727)
+
+### Documentation
+
+- Minor documentation and typo fixes. (#4782, #4718, #4784, #4835, #4836)
+
+## 0.29.0
+
+### Interface changes / additions
+
+- Add `to_dataframe()` method to row iterators. When Pandas is installed this
+ method returns a `DataFrame` containing the query's or table's rows.
+ ([#4354](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4354))
+- Iterate over a `QueryJob` to wait for and get the query results.
+ ([#4350](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4350))
+- Add `Table.reference` and `Dataset.reference` properties to get the
+ `TableReference` or `DatasetReference` corresponding to that `Table` or
+ `Dataset`, respectively.
+ ([#4405](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4405))
+- Add `Row.keys()`, `Row.items()`, and `Row.get()`. This makes `Row` act
+ more like a built-in dictionary.
+ ([#4393](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4393),
+ [#4413](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4413))
+
+### Interface changes / breaking changes
+
+- Add `Client.insert_rows()` and `Client.insert_rows_json()`, deprecate
+ `Client.create_rows()` and `Client.create_rows_json()`.
+ ([#4657](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4657))
+- Add `Client.list_tables`, deprecate `Client.list_dataset_tables`.
+ ([#4653](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4653))
+- `Client.list_tables` returns an iterators of `TableListItem`. The API
+ only returns a subset of properties of a table when listing.
+ ([#4427](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4427))
+- Remove `QueryJob.query_results()`. Use `QueryJob.result()` instead.
+ ([#4652](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4652))
+- Remove `Client.query_rows()`. Use `Client.query()` instead.
+ ([#4429](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4429))
+- `Client.list_datasets` returns an iterator of `DatasetListItem`. The API
+ only returns a subset of properties of a dataset when listing.
+ ([#4439](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4439))
+
+## 0.28.0
+
+**0.28.0 significantly changes the interface for this package.** For examples
+of the differences between 0.28.0 and previous versions, see
+[Migrating to the BigQuery Python client library 0.28][2].
+These changes can be summarized as follows:
+
+- Query and view operations default to the standard SQL dialect. (#4192)
+- Client functions related to
+ [jobs](https://cloud.google.com/bigquery/docs/jobs-overview), like running
+ queries, immediately start the job.
+- Functions to create, get, update, delete datasets and tables moved to the
+ client class.
+
+[2]: https://cloud.google.com/bigquery/docs/python-client-migration
+
+### Fixes
+
+- Populate timeout parameter correctly for queries (#4209)
+- Automatically retry idempotent RPCs (#4148, #4178)
+- Parse timestamps in query parameters using canonical format (#3945)
+- Parse array parameters that contain a struct type. (#4040)
+- Support Sub Second Datetimes in row data (#3901, #3915, #3926), h/t @page1
+
+### Interface changes / additions
+
+- Support external table configuration (#4182) in query jobs (#4191) and
+ tables (#4193).
+- New `Row` class allows for access by integer index like a tuple, string
+ index like a dictionary, or attribute access like an object. (#4149)
+- Add option for job ID generation with user-supplied prefix (#4198)
+- Add support for update of dataset access entries (#4197)
+- Add support for atomic read-modify-write of a dataset using etag (#4052)
+- Add support for labels to `Dataset` (#4026)
+- Add support for labels to `Table` (#4207)
+- Add `Table.streaming_buffer` property (#4161)
+- Add `TableReference` class (#3942)
+- Add `DatasetReference` class (#3938, #3942, #3993)
+- Add `ExtractJob.destination_uri_file_counts` property. (#3803)
+- Add `client.create_rows_json()` to bypass conversions on streaming writes.
+ (#4189)
+- Add `client.get_job()` to get arbitrary jobs. (#3804, #4213)
+- Add filter to `client.list_datasets()` (#4205)
+- Add `QueryJob.undeclared_query_parameters` property. (#3802)
+- Add `QueryJob.referenced_tables` property. (#3801)
+- Add new scalar statistics properties to `QueryJob` (#3800)
+- Add `QueryJob.query_plan` property. (#3799)
+
+### Interface changes / breaking changes
+
+- Remove `client.run_async_query()`, use `client.query()` instead. (#4130)
+- Remove `client.run_sync_query()`, use `client.query_rows()` instead. (#4065, #4248)
+- Make `QueryResults` read-only. (#4094, #4144)
+- Make `get_query_results` private. Return rows for `QueryJob.result()` (#3883)
+- Move `*QueryParameter` and `UDFResource` classes to `query` module (also
+ exposed in `bigquery` module). (#4156)
+
+#### Changes to tables
+
+- Remove `client` from `Table` class (#4159)
+- Remove `table.exists()` (#4145)
+- Move `table.list_parations` to `client.list_partitions` (#4146)
+- Move `table.upload_from_file` to `client.load_table_from_file` (#4136)
+- Move `table.update()` and `table.patch()` to `client.update_table()` (#4076)
+- Move `table.insert_data()` to `client.create_rows()`. Automatically
+ generates row IDs if not supplied. (#4151, #4173)
+- Move `table.fetch_data()` to `client.list_rows()` (#4119, #4143)
+- Move `table.delete()` to `client.delete_table()` (#4066)
+- Move `table.create()` to `client.create_table()` (#4038, #4043)
+- Move `table.reload()` to `client.get_table()` (#4004)
+- Rename `Table.name` attribute to `Table.table_id` (#3959)
+- `Table` constructor takes a `TableReference` as parameter (#3997)
+
+#### Changes to datasets
+
+- Remove `client` from `Dataset` class (#4018)
+- Remove `dataset.exists()` (#3996)
+- Move `dataset.list_tables()` to `client.list_dataset_tables()` (#4013)
+- Move `dataset.delete()` to `client.delete_dataset()` (#4012)
+- Move `dataset.patch()` and `dataset.update()` to `client.update_dataset()` (#4003)
+- Move `dataset.create()` to `client.create_dataset()` (#3982)
+- Move `dataset.reload()` to `client.get_dataset()` (#3973)
+- Rename `Dataset.name` attribute to `Dataset.dataset_id` (#3955)
+- `client.dataset()` returns a `DatasetReference` instead of `Dataset`. (#3944)
+- Rename class: `dataset.AccessGrant -> dataset.AccessEntry`. (#3798)
+- `dataset.table()` returns a `TableReference` instead of a `Table` (#4014)
+- `Dataset` constructor takes a DatasetReference (#4036)
+
+#### Changes to jobs
+
+- Make `job.begin()` method private. (#4242)
+- Add `LoadJobConfig` class and modify `LoadJob` (#4103, #4137)
+- Add `CopyJobConfig` class and modify `CopyJob` (#4051, #4059)
+- Type of Job's and Query's `default_dataset` changed from `Dataset` to
+ `DatasetReference` (#4037)
+- Rename `client.load_table_from_storage()` to `client.load_table_from_uri()`
+ (#4235)
+- Rename `client.extract_table_to_storage` to `client.extract_table()`.
+ Method starts the extract job immediately. (#3991, #4177)
+- Rename `XJob.name` to `XJob.job_id`. (#3962)
+- Rename job classes. `LoadTableFromStorageJob -> LoadJob` and
+ `ExtractTableToStorageJob -> jobs.ExtractJob` (#3797)
+
+### Dependencies
+
+- Updating to `google-cloud-core ~= 0.28`, in particular, the
+ `google-api-core` package has been moved out of `google-cloud-core`. (#4221)
+
+PyPI: https://pypi.org/project/google-cloud-bigquery/0.28.0/
+
+
+## 0.27.0
+
+- Remove client-side enum validation. (#3735)
+- Add `Table.row_from_mapping` helper. (#3425)
+- Move `google.cloud.future` to `google.api.core` (#3764)
+- Fix `__eq__` and `__ne__`. (#3765)
+- Move `google.cloud.iterator` to `google.api.core.page_iterator` (#3770)
+- `nullMarker` support for BigQuery Load Jobs (#3777), h/t @leondealmeida
+- Allow `job_id` to be explicitly specified in DB-API. (#3779)
+- Add support for a custom null marker. (#3776)
+- Add `SchemaField` serialization and deserialization. (#3786)
+- Add `get_query_results` method to the client. (#3838)
+- Poll for query completion via `getQueryResults` method. (#3844)
+- Allow fetching more than the first page when `max_results` is set. (#3845)
+
+PyPI: https://pypi.org/project/google-cloud-bigquery/0.27.0/
+
+## 0.26.0
+
+### Notable implementation changes
+
+- Using the `requests` transport attached to a Client for for resumable media
+ (i.e. downloads and uploads) (#3705) (this relates to the `httplib2` to
+ `requests` switch)
+
+### Interface changes / additions
+
+- Adding `autodetect` property on `LoadTableFromStorageJob` to enable schema
+ autodetection. (#3648)
+- Implementing the Python Futures interface for Jobs. Call `job.result()` to
+ wait for jobs to complete instead of polling manually on the job status.
+ (#3626)
+- Adding `is_nullable` property on `SchemaField`. Can be used to check if a
+ column is nullable. (#3620)
+- `job_name` argument added to `Table.upload_from_file` for setting the job
+ ID. (#3605)
+- Adding `google.cloud.bigquery.dbapi` package, which implements PEP-249
+ DB-API specification. (#2921)
+- Adding `Table.view_use_legacy_sql` property. Can be used to create views
+ with legacy or standard SQL. (#3514)
+
+### Interface changes / breaking changes
+
+- Removing `results()` method from the `QueryJob` class. Use
+ `query_results()` instead. (#3661)
+- `SchemaField` is now immutable. It is also hashable so that it can be used
+ in sets. (#3601)
+
+### Dependencies
+
+- Updating to `google-cloud-core ~= 0.26`, in particular, the underlying HTTP
+ transport switched from `httplib2` to `requests` (#3654, #3674)
+- Adding dependency on `google-resumable-media` for loading BigQuery tables
+ from local files. (#3555)
+
+### Packaging
+
+- Fix inclusion of `tests` (vs. `unit_tests`) in `MANIFEST.in` (#3552)
+- Updating `author_email` in `setup.py` to `googleapis-publisher@google.com`.
+ (#3598)
+
+PyPI: https://pypi.org/project/google-cloud-bigquery/0.26.0/
diff --git a/testbed/googleapis__python-bigquery/CODE_OF_CONDUCT.md b/testbed/googleapis__python-bigquery/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000000000000000000000000000000000..039f436812047176f9dd787fc07cb5f8af8dba63
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/CODE_OF_CONDUCT.md
@@ -0,0 +1,95 @@
+
+# Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, education, socio-economic status, nationality, personal appearance,
+race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when the Project
+Steward has a reasonable belief that an individual's behavior may have a
+negative impact on the project or its community.
+
+## Conflict Resolution
+
+We do not believe that all conflict is bad; healthy debate and disagreement
+often yield positive results. However, it is never okay to be disrespectful or
+to engage in behavior that violates the project’s code of conduct.
+
+If you see someone violating the code of conduct, you are encouraged to address
+the behavior directly with those involved. Many issues can be resolved quickly
+and easily, and this gives people more control over the outcome of their
+dispute. If you are unable to resolve the matter for any reason, or if the
+behavior is threatening or harassing, report it. We are dedicated to providing
+an environment where participants feel welcome and safe.
+
+
+Reports should be directed to *googleapis-stewards@google.com*, the
+Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to
+receive and address reported violations of the code of conduct. They will then
+work with a committee consisting of representatives from the Open Source
+Programs Office and the Google Open Source Strategy team. If for any reason you
+are uncomfortable reaching out to the Project Steward, please email
+opensource@google.com.
+
+We will investigate every complaint, but you may not receive a direct response.
+We will use our discretion in determining when and how to follow up on reported
+incidents, which may range from not taking action to permanent expulsion from
+the project and project-sponsored spaces. We will notify the accused of the
+report and provide them an opportunity to discuss it before any action is taken.
+The identity of the reporter will be omitted from the details of the report
+supplied to the accused. In potentially harmful situations, such as ongoing
+harassment or threats to anyone's safety, we may take action without notice.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/CONTRIBUTING.rst b/testbed/googleapis__python-bigquery/CONTRIBUTING.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7be61e6b61a5c6cf1ded922a2f387a81f795ab14
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/CONTRIBUTING.rst
@@ -0,0 +1,283 @@
+.. Generated by synthtool. DO NOT EDIT!
+############
+Contributing
+############
+
+#. **Please sign one of the contributor license agreements below.**
+#. Fork the repo, develop and test your code changes, add docs.
+#. Make sure that your commit messages clearly describe the changes.
+#. Send a pull request. (Please Read: `Faster Pull Request Reviews`_)
+
+.. _Faster Pull Request Reviews: https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
+
+.. contents:: Here are some guidelines for hacking on the Google Cloud Client libraries.
+
+***************
+Adding Features
+***************
+
+In order to add a feature:
+
+- The feature must be documented in both the API and narrative
+ documentation.
+
+- The feature must work fully on the following CPython versions:
+ 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows.
+
+- The feature must not add unnecessary dependencies (where
+ "unnecessary" is of course subjective, but new dependencies should
+ be discussed).
+
+****************************
+Using a Development Checkout
+****************************
+
+You'll have to create a development environment using a Git checkout:
+
+- While logged into your GitHub account, navigate to the
+ ``python-bigquery`` `repo`_ on GitHub.
+
+- Fork and clone the ``python-bigquery`` repository to your GitHub account by
+ clicking the "Fork" button.
+
+- Clone your fork of ``python-bigquery`` from your GitHub account to your local
+ computer, substituting your account username and specifying the destination
+ as ``hack-on-python-bigquery``. E.g.::
+
+ $ cd ${HOME}
+ $ git clone git@github.com:USERNAME/python-bigquery.git hack-on-python-bigquery
+ $ cd hack-on-python-bigquery
+ # Configure remotes such that you can pull changes from the googleapis/python-bigquery
+ # repository into your local repository.
+ $ git remote add upstream git@github.com:googleapis/python-bigquery.git
+ # fetch and merge changes from upstream into main
+ $ git fetch upstream
+ $ git merge upstream/main
+
+Now your local repo is set up such that you will push changes to your GitHub
+repo, from which you can submit a pull request.
+
+To work on the codebase and run the tests, we recommend using ``nox``,
+but you can also use a ``virtualenv`` of your own creation.
+
+.. _repo: https://github.com/googleapis/python-bigquery
+
+Using ``nox``
+=============
+
+We use `nox `__ to instrument our tests.
+
+- To test your changes, run unit tests with ``nox``::
+ $ nox -s unit
+
+- To run a single unit test::
+
+ $ nox -s unit-3.12 -- -k
+
+
+ .. note::
+
+ The unit tests and system tests are described in the
+ ``noxfile.py`` files in each directory.
+
+.. nox: https://pypi.org/project/nox/
+
+*****************************************
+I'm getting weird errors... Can you help?
+*****************************************
+
+If the error mentions ``Python.h`` not being found,
+install ``python-dev`` and try again.
+On Debian/Ubuntu::
+
+ $ sudo apt-get install python-dev
+
+************
+Coding Style
+************
+- We use the automatic code formatter ``black``. You can run it using
+ the nox session ``blacken``. This will eliminate many lint errors. Run via::
+
+ $ nox -s blacken
+
+- PEP8 compliance is required, with exceptions defined in the linter configuration.
+ If you have ``nox`` installed, you can test that you have not introduced
+ any non-compliant code via::
+
+ $ nox -s lint
+
+- In order to make ``nox -s lint`` run faster, you can set some environment
+ variables::
+
+ export GOOGLE_CLOUD_TESTING_REMOTE="upstream"
+ export GOOGLE_CLOUD_TESTING_BRANCH="main"
+
+ By doing this, you are specifying the location of the most up-to-date
+ version of ``python-bigquery``. The
+ remote name ``upstream`` should point to the official ``googleapis``
+ checkout and the branch should be the default branch on that remote (``main``).
+
+- This repository contains configuration for the
+ `pre-commit `__ tool, which automates checking
+ our linters during a commit. If you have it installed on your ``$PATH``,
+ you can enable enforcing those checks via:
+
+.. code-block:: bash
+
+ $ pre-commit install
+ pre-commit installed at .git/hooks/pre-commit
+
+Exceptions to PEP8:
+
+- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for
+ "Function-Under-Test"), which is PEP8-incompliant, but more readable.
+ Some also use a local variable, ``MUT`` (short for "Module-Under-Test").
+
+********************
+Running System Tests
+********************
+
+- To run system tests, you can execute::
+
+ # Run all system tests
+ $ nox -s system
+
+ # Run a single system test
+ $ nox -s system-3.8 -- -k
+
+
+ .. note::
+
+ System tests are only configured to run under Python 3.8.
+ For expediency, we do not run them in older versions of Python 3.
+
+ This alone will not run the tests. You'll need to change some local
+ auth settings and change some configuration in your project to
+ run all the tests.
+
+- System tests will be run against an actual project. You should use local credentials from gcloud when possible. See `Best practices for application authentication `__. Some tests require a service account. For those tests see `Authenticating as a service account `__.
+
+*************
+Test Coverage
+*************
+
+- The codebase *must* have 100% test statement coverage after each commit.
+ You can test coverage via ``nox -s cover``.
+
+******************************************************
+Documentation Coverage and Building HTML Documentation
+******************************************************
+
+If you fix a bug, and the bug requires an API or behavior modification, all
+documentation in this package which references that API or behavior must be
+changed to reflect the bug fix, ideally in the same commit that fixes the bug
+or adds the feature.
+
+Build the docs via:
+
+ $ nox -s docs
+
+*************************
+Samples and code snippets
+*************************
+
+Code samples and snippets live in the `samples/` catalogue. Feel free to
+provide more examples, but make sure to write tests for those examples.
+Each folder containing example code requires its own `noxfile.py` script
+which automates testing. If you decide to create a new folder, you can
+base it on the `samples/snippets` folder (providing `noxfile.py` and
+the requirements files).
+
+The tests will run against a real Google Cloud Project, so you should
+configure them just like the System Tests.
+
+- To run sample tests, you can execute::
+
+ # Run all tests in a folder
+ $ cd samples/snippets
+ $ nox -s py-3.8
+
+ # Run a single sample test
+ $ cd samples/snippets
+ $ nox -s py-3.8 -- -k
+
+********************************************
+Note About ``README`` as it pertains to PyPI
+********************************************
+
+The `description on PyPI`_ for the project comes directly from the
+``README``. Due to the reStructuredText (``rst``) parser used by
+PyPI, relative links which will work on GitHub (e.g. ``CONTRIBUTING.rst``
+instead of
+``https://github.com/googleapis/python-bigquery/blob/main/CONTRIBUTING.rst``)
+may cause problems creating links or rendering the description.
+
+.. _description on PyPI: https://pypi.org/project/google-cloud-bigquery
+
+
+*************************
+Supported Python Versions
+*************************
+
+We support:
+
+- `Python 3.7`_
+- `Python 3.8`_
+- `Python 3.9`_
+- `Python 3.10`_
+- `Python 3.11`_
+- `Python 3.12`_
+
+.. _Python 3.7: https://docs.python.org/3.7/
+.. _Python 3.8: https://docs.python.org/3.8/
+.. _Python 3.9: https://docs.python.org/3.9/
+.. _Python 3.10: https://docs.python.org/3.10/
+.. _Python 3.11: https://docs.python.org/3.11/
+.. _Python 3.12: https://docs.python.org/3.12/
+
+
+Supported versions can be found in our ``noxfile.py`` `config`_.
+
+.. _config: https://github.com/googleapis/python-bigquery/blob/main/noxfile.py
+
+
+We also explicitly decided to support Python 3 beginning with version 3.7.
+Reasons for this include:
+
+- Encouraging use of newest versions of Python 3
+- Taking the lead of `prominent`_ open-source `projects`_
+- `Unicode literal support`_ which allows for a cleaner codebase that
+ works in both Python 2 and Python 3
+
+.. _prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django
+.. _projects: http://flask.pocoo.org/docs/0.10/python3/
+.. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/
+
+**********
+Versioning
+**********
+
+This library follows `Semantic Versioning`_.
+
+.. _Semantic Versioning: http://semver.org/
+
+Some packages are currently in major version zero (``0.y.z``), which means that
+anything may change at any time and the public API should not be considered
+stable.
+
+******************************
+Contributor License Agreements
+******************************
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+ intellectual property**, then you'll need to sign an
+ `individual CLA `__.
+- **If you work for a company that wants to allow you to contribute your work**,
+ then you'll need to sign a
+ `corporate CLA `__.
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
diff --git a/testbed/googleapis__python-bigquery/LICENSE b/testbed/googleapis__python-bigquery/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/testbed/googleapis__python-bigquery/MANIFEST.in b/testbed/googleapis__python-bigquery/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..d6814cd6003731b322dc78fc182706a6799c44b5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/MANIFEST.in
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+include README.rst LICENSE
+recursive-include google *.json *.proto py.typed
+recursive-include tests *
+global-exclude *.py[co]
+global-exclude __pycache__
+
+# Exclude scripts for samples readmegen
+prune scripts/readme-gen
diff --git a/testbed/googleapis__python-bigquery/README.rst b/testbed/googleapis__python-bigquery/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f81adc4b90d9bad36c7145ce0f1cf3205de29e55
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/README.rst
@@ -0,0 +1,141 @@
+Python Client for Google BigQuery
+=================================
+
+|GA| |pypi| |versions|
+
+Querying massive datasets can be time consuming and expensive without the
+right hardware and infrastructure. Google `BigQuery`_ solves this problem by
+enabling super-fast, SQL queries against append-mostly tables, using the
+processing power of Google's infrastructure.
+
+- `Client Library Documentation`_
+- `Product Documentation`_
+
+.. |GA| image:: https://img.shields.io/badge/support-GA-gold.svg
+ :target: https://github.com/googleapis/google-cloud-python/blob/main/README.rst#general-availability
+.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigquery.svg
+ :target: https://pypi.org/project/google-cloud-bigquery/
+.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigquery.svg
+ :target: https://pypi.org/project/google-cloud-bigquery/
+.. _BigQuery: https://cloud.google.com/bigquery/what-is-bigquery
+.. _Client Library Documentation: https://googleapis.dev/python/bigquery/latest
+.. _Product Documentation: https://cloud.google.com/bigquery/docs/reference/v2/
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. `Enable the Google Cloud BigQuery API.`_
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Enable the Google Cloud BigQuery API.: https://cloud.google.com/bigquery
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.7
+
+Unsupported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Python == 2.7, Python == 3.5, Python == 3.6.
+
+The last version of this library compatible with Python 2.7 and 3.5 is
+`google-cloud-bigquery==1.28.0`.
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+ pip install virtualenv
+ virtualenv
+ source /bin/activate
+ /bin/pip install google-cloud-bigquery
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+ pip install virtualenv
+ virtualenv
+ \Scripts\activate
+ \Scripts\pip.exe install google-cloud-bigquery
+
+Example Usage
+-------------
+
+Perform a query
+~~~~~~~~~~~~~~~
+
+.. code:: python
+
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # Perform a query.
+ QUERY = (
+ 'SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` '
+ 'WHERE state = "TX" '
+ 'LIMIT 100')
+ query_job = client.query(QUERY) # API request
+ rows = query_job.result() # Waits for query to finish
+
+ for row in rows:
+ print(row.name)
+
+Instrumenting With OpenTelemetry
+--------------------------------
+
+This application uses `OpenTelemetry`_ to output tracing data from
+API calls to BigQuery. To enable OpenTelemetry tracing in
+the BigQuery client the following PyPI packages need to be installed:
+
+.. _OpenTelemetry: https://opentelemetry.io
+
+.. code-block:: console
+
+ pip install google-cloud-bigquery[opentelemetry] opentelemetry-exporter-gcp-trace
+
+After installation, OpenTelemetry can be used in the BigQuery
+client and in BigQuery jobs. First, however, an exporter must be
+specified for where the trace data will be outputted to. An
+example of this can be found here:
+
+.. code-block:: python
+
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+ from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+ tracer_provider = TracerProvider()
+ tracer_provider = BatchSpanProcessor(CloudTraceSpanExporter())
+ trace.set_tracer_provider(TracerProvider())
+
+In this example all tracing data will be published to the Google
+`Cloud Trace`_ console. For more information on OpenTelemetry, please consult the `OpenTelemetry documentation`_.
+
+.. _OpenTelemetry documentation: https://opentelemetry-python.readthedocs.io
+.. _Cloud Trace: https://cloud.google.com/trace
diff --git a/testbed/googleapis__python-bigquery/SECURITY.md b/testbed/googleapis__python-bigquery/SECURITY.md
new file mode 100644
index 0000000000000000000000000000000000000000..8b58ae9c01ae3b07eeba325544a99071e0713f31
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
+
+The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
+
+We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
diff --git a/testbed/googleapis__python-bigquery/UPGRADING.md b/testbed/googleapis__python-bigquery/UPGRADING.md
new file mode 100644
index 0000000000000000000000000000000000000000..95f87f7ee9d81347ecd0126a78284d9b2643777e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/UPGRADING.md
@@ -0,0 +1,243 @@
+
+
+# 3.0.0 Migration Guide
+
+## New Required Dependencies
+
+Some of the previously optional dependencies are now *required* in `3.x` versions of the
+library, namely
+[google-cloud-bigquery-storage](https://pypi.org/project/google-cloud-bigquery-storage/)
+(minimum version `2.0.0`) and [pyarrow](https://pypi.org/project/pyarrow/) (minimum
+version `3.0.0`).
+
+The behavior of some of the package "extras" has thus also changed:
+ * The `pandas` extra now requires the [db-types](https://pypi.org/project/db-dtypes/)
+ package.
+ * The `bqstorage` extra has been preserved for comaptibility reasons, but it is now a
+ no-op and should be omitted when installing the BigQuery client library.
+
+ **Before:**
+ ```
+ $ pip install google-cloud-bigquery[bqstorage]
+ ```
+
+ **After:**
+ ```
+ $ pip install google-cloud-bigquery
+ ```
+
+ * The `bignumeric_type` extra has been removed, as `BIGNUMERIC` type is now
+ automatically supported. That extra should thus not be used.
+
+ **Before:**
+ ```
+ $ pip install google-cloud-bigquery[bignumeric_type]
+ ```
+
+ **After:**
+ ```
+ $ pip install google-cloud-bigquery
+ ```
+
+
+## Type Annotations
+
+The library is now type-annotated and declares itself as such. If you use a static
+type checker such as `mypy`, you might start getting errors in places where
+`google-cloud-bigquery` package is used.
+
+It is recommended to update your code and/or type annotations to fix these errors, but
+if this is not feasible in the short term, you can temporarily ignore type annotations
+in `google-cloud-bigquery`, for example by using a special `# type: ignore` comment:
+
+```py
+from google.cloud import bigquery # type: ignore
+```
+
+But again, this is only recommended as a possible short-term workaround if immediately
+fixing the type check errors in your project is not feasible.
+
+## Re-organized Types
+
+The auto-generated parts of the library has been removed, and proto-based types formerly
+found in `google.cloud.bigquery_v2` have been replaced by the new implementation (but
+see the [section](#legacy-types) below).
+
+For example, the standard SQL data types should new be imported from a new location:
+
+**Before:**
+```py
+from google.cloud.bigquery_v2 import StandardSqlDataType
+from google.cloud.bigquery_v2.types import StandardSqlField
+from google.cloud.bigquery_v2.types.standard_sql import StandardSqlStructType
+```
+
+**After:**
+```py
+from google.cloud.bigquery import StandardSqlDataType
+from google.cloud.bigquery.standard_sql import StandardSqlField
+from google.cloud.bigquery.standard_sql import StandardSqlStructType
+```
+
+The `TypeKind` enum defining all possible SQL types for schema fields has been renamed
+and is not nested anymore under `StandardSqlDataType`:
+
+
+**Before:**
+```py
+from google.cloud.bigquery_v2 import StandardSqlDataType
+
+if field_type == StandardSqlDataType.TypeKind.STRING:
+ ...
+```
+
+**After:**
+```py
+
+from google.cloud.bigquery import StandardSqlTypeNames
+
+if field_type == StandardSqlTypeNames.STRING:
+ ...
+```
+
+
+## Issuing queries with `Client.create_job` preserves destination table
+
+The `Client.create_job` method no longer removes the destination table from a
+query job's configuration. Destination table for the query can thus be
+explicitly defined by the user.
+
+
+## Changes to data types when reading a pandas DataFrame
+
+The default dtypes returned by the `to_dataframe` method have changed.
+
+* Now, the BigQuery `BOOLEAN` data type maps to the pandas `boolean` dtype.
+ Previously, this mapped to the pandas `bool` dtype when the column did not
+ contain `NULL` values and the pandas `object` dtype when `NULL` values are
+ present.
+* Now, the BigQuery `INT64` data type maps to the pandas `Int64` dtype.
+ Previously, this mapped to the pandas `int64` dtype when the column did not
+ contain `NULL` values and the pandas `float64` dtype when `NULL` values are
+ present.
+* Now, the BigQuery `DATE` data type maps to the pandas `dbdate` dtype, which
+ is provided by the
+ [db-dtypes](https://googleapis.dev/python/db-dtypes/latest/index.html)
+ package. If any date value is outside of the range of
+ [pandas.Timestamp.min](https://pandas.pydata.org/docs/reference/api/pandas.Timestamp.min.html)
+ (1677-09-22) and
+ [pandas.Timestamp.max](https://pandas.pydata.org/docs/reference/api/pandas.Timestamp.max.html)
+ (2262-04-11), the data type maps to the pandas `object` dtype. The
+ `date_as_object` parameter has been removed.
+* Now, the BigQuery `TIME` data type maps to the pandas `dbtime` dtype, which
+ is provided by the
+ [db-dtypes](https://googleapis.dev/python/db-dtypes/latest/index.html)
+ package.
+
+
+## Changes to data types loading a pandas DataFrame
+
+In the absence of schema information, pandas columns with naive
+`datetime64[ns]` values, i.e. without timezone information, are recognized and
+loaded using the `DATETIME` type. On the other hand, for columns with
+timezone-aware `datetime64[ns, UTC]` values, the `TIMESTAMP` type is continued
+to be used.
+
+## Changes to `Model`, `Client.get_model`, `Client.update_model`, and `Client.list_models`
+
+The types of several `Model` properties have been changed.
+
+- `Model.feature_columns` now returns a sequence of `google.cloud.bigquery.standard_sql.StandardSqlField`.
+- `Model.label_columns` now returns a sequence of `google.cloud.bigquery.standard_sql.StandardSqlField`.
+- `Model.model_type` now returns a string.
+- `Model.training_runs` now returns a sequence of dictionaries, as recieved from the [BigQuery REST API](https://cloud.google.com/bigquery/docs/reference/rest/v2/models#Model.FIELDS.training_runs).
+
+
+## Legacy Protocol Buffers Types
+
+For compatibility reasons, the legacy proto-based types still exists as static code
+and can be imported:
+
+```py
+from google.cloud.bigquery_v2 import Model # a sublcass of proto.Message
+```
+
+Mind, however, that importing them will issue a warning, because aside from
+being importable, these types **are not maintained anymore**. They may differ
+both from the types in `google.cloud.bigquery`, and from the types supported on
+the backend.
+
+### Maintaining compatibility with `google-cloud-bigquery` version 2.0
+
+If you maintain a library or system that needs to support both
+`google-cloud-bigquery` version 2.x and 3.x, it is recommended that you detect
+when version 2.x is in use and convert properties that use the legacy protocol
+buffer types, such as `Model.training_runs`, into the types used in 3.x.
+
+Call the [`to_dict`
+method](https://proto-plus-python.readthedocs.io/en/latest/reference/message.html#proto.message.Message.to_dict)
+on the protocol buffers objects to get a JSON-compatible dictionary.
+
+```py
+from google.cloud.bigquery_v2 import Model
+
+training_run: Model.TrainingRun = ...
+training_run_dict = training_run.to_dict()
+```
+
+# 2.0.0 Migration Guide
+
+The 2.0 release of the `google-cloud-bigquery` client drops support for Python
+versions below 3.6. The client surface itself has not changed, but the 1.x series
+will not be receiving any more feature updates or bug fixes. You are thus
+encouraged to upgrade to the 2.x series.
+
+If you experience issues or have questions, please file an
+[issue](https://github.com/googleapis/python-bigquery/issues).
+
+
+## Supported Python Versions
+
+> **WARNING**: Breaking change
+
+The 2.0.0 release requires Python 3.6+.
+
+
+## Supported BigQuery Storage Clients
+
+The 2.0.0 release requires BigQuery Storage `>= 2.0.0`, which dropped support
+for `v1beta1` and `v1beta2` versions of the BigQuery Storage API. If you want to
+use a BigQuery Storage client, it must be the one supporting the `v1` API version.
+
+
+## Changed GAPIC Enums Path
+
+> **WARNING**: Breaking change
+
+Generated GAPIC enum types have been moved under `types`. Import paths need to be
+adjusted.
+
+**Before:**
+```py
+from google.cloud.bigquery_v2.gapic import enums
+
+distance_type = enums.Model.DistanceType.COSINE
+```
+
+**After:**
+```py
+from google.cloud.bigquery_v2 import types
+
+distance_type = types.Model.DistanceType.COSINE
+```
diff --git a/testbed/googleapis__python-bigquery/benchmark/README.md b/testbed/googleapis__python-bigquery/benchmark/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..33065807ec346f3e3dcd42a21ad68da99949c1bd
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/benchmark/README.md
@@ -0,0 +1,128 @@
+# BigQuery Benchmark
+This directory contains benchmark scripts for BigQuery client. It is created primarily for project
+maintainers to measure library performance.
+
+## Usage
+`python benchmark.py`
+
+
+### Flags
+Run `python benchmark.py -h` for detailed information on available flags.
+
+`--reruns` can be used to override the default number of times a query is rerun. Must be a positive
+integer. Default value is 3.
+
+`--projectid` can be used to run benchmarks in a different project. If unset, the GOOGLE_CLOUD_PROJECT
+ environment variable is used.
+
+`--queryfile` can be used to override the default file which contains queries to be instrumented.
+
+`--table` can be used to specify a table to which benchmarking results should be streamed. The format
+for this string is in BigQuery standard SQL notation without escapes, e.g. `projectid.datasetid.tableid`
+
+`--create_table` can be used to have the benchmarking tool create the destination table prior to streaming.
+
+`--tag` allows arbitrary key:value pairs to be set. This flag can be specified multiple times.
+
+When `--create_table` flag is set, must also specify the name of the new table using `--table`.
+
+### Example invocations
+
+Setting all the flags
+```
+python benchmark.py \
+ --reruns 5 \
+ --projectid test_project_id \
+ --table logging_project_id.querybenchmarks.measurements \
+ --create_table \
+ --tag source:myhostname \
+ --tag somekeywithnovalue \
+ --tag experiment:special_environment_thing
+```
+
+Or, a more realistic invocation using shell substitions:
+```
+python benchmark.py \
+ --reruns 5 \
+ --table $BENCHMARK_TABLE \
+ --tag origin:$(hostname) \
+ --tag branch:$(git branch --show-current) \
+ --tag latestcommit:$(git log --pretty=format:'%H' -n 1)
+```
+
+## Stream Results To A BigQuery Table
+
+When streaming benchmarking results to a BigQuery table, the table schema is as follows:
+```
+[
+ {
+ "name": "groupname",
+ "type": "STRING"
+ },
+ {
+ "name": "name",
+ "type": "STRING"
+ },
+ {
+ "name": "tags",
+ "type": "RECORD",
+ "mode": "REPEATED",
+ "fields": [
+ {
+ "name": "key",
+ "type": "STRING"
+ },
+ {
+ "name": "value",
+ "type": "STRING"
+ }
+ ]
+ },
+ {
+ "name": "SQL",
+ "type": "STRING"
+ },
+ {
+ "name": "runs",
+ "type": "RECORD",
+ "mode": "REPEATED",
+ "fields": [
+ {
+ "name": "errorstring",
+ "type": "STRING"
+ },
+ {
+ "name": "start_time",
+ "type": "TIMESTAMP"
+ },
+ {
+ "name": "query_end_time",
+ "type": "TIMESTAMP"
+ },
+ {
+ "name": "first_row_returned_time",
+ "type": "TIMESTAMP"
+ },
+ {
+ "name": "all_rows_returned_time",
+ "type": "TIMESTAMP"
+ },
+ {
+ "name": "total_rows",
+ "type": "INTEGER"
+ }
+ ]
+ },
+ {
+ "name": "event_time",
+ "type": "TIMESTAMP"
+ }
+]
+```
+
+The table schema is the same as the [benchmark in go](https://github.com/googleapis/google-cloud-go/tree/main/bigquery/benchmarks),
+so results from both languages can be streamed to the same table.
+
+## BigQuery Benchmarks In Other Languages
+* Go: https://github.com/googleapis/google-cloud-go/tree/main/bigquery/benchmarks
+* JAVA: https://github.com/googleapis/java-bigquery/tree/main/benchmark
diff --git a/testbed/googleapis__python-bigquery/benchmark/benchmark.py b/testbed/googleapis__python-bigquery/benchmark/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7dc78678e1a16f1fce4a12b4d75be1b403fb17a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/benchmark/benchmark.py
@@ -0,0 +1,323 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Scripts for benchmarking BigQuery queries performance."""
+
+import argparse
+from datetime import datetime
+import json
+import os
+
+from google.api_core import exceptions
+
+from google.cloud import bigquery
+
+_run_schema = [
+ bigquery.SchemaField("groupname", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("name", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField(
+ "tags",
+ "RECORD",
+ mode="REPEATED",
+ fields=[
+ bigquery.SchemaField("key", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("value", "STRING", mode="NULLABLE"),
+ ],
+ ),
+ bigquery.SchemaField("SQL", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField(
+ "runs",
+ "RECORD",
+ mode="REPEATED",
+ fields=[
+ bigquery.SchemaField("errorstring", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("start_time", "TIMESTAMP", mode="NULLABLE"),
+ bigquery.SchemaField("query_end_time", "TIMESTAMP", mode="NULLABLE"),
+ bigquery.SchemaField(
+ "first_row_returned_time", "TIMESTAMP", mode="NULLABLE"
+ ),
+ bigquery.SchemaField(
+ "all_rows_returned_time", "TIMESTAMP", mode="NULLABLE"
+ ),
+ bigquery.SchemaField("total_rows", "INTEGER", mode="NULLABLE"),
+ ],
+ ),
+ bigquery.SchemaField("event_time", "TIMESTAMP", mode="NULLABLE"),
+]
+
+
+def _check_pos_int(value):
+ """Verifies the value is a positive integer."""
+ ivalue = int(value)
+ if ivalue <= 0:
+ raise argparse.ArgumentTypeError(
+ f"Argument rerun should be positive int. Actual value: {value}"
+ )
+ return ivalue
+
+
+def _parse_tag(tag):
+ """Parses input tag into key value pair as a dict."""
+ tagstring = str(tag)
+ key, value = tagstring.split(":")
+ if not key or not value:
+ raise argparse.ArgumentTypeError(
+ "key and value in tag need to be non-empty. Actual value: "
+ + f"key={key}, value={value}"
+ )
+ return {"key": key, "value": value}
+
+
+def _parse_args() -> dict:
+ """Parses input flags."""
+ parser = argparse.ArgumentParser(description="Benchmark for BigQuery.")
+
+ parser.add_argument(
+ "--reruns",
+ action="store",
+ type=_check_pos_int,
+ default=3,
+ metavar="",
+ help="how many times each query is run. Must be a positive integer."
+ + "Default 3 times",
+ )
+
+ parser.add_argument(
+ "--projectid",
+ action="store",
+ type=str,
+ metavar="",
+ help="run benchmarks in a different project. If unset, the "
+ + "GOOGLE_CLOUD_PROJECT environment variable is used",
+ )
+
+ parser.add_argument(
+ "--queryfile",
+ action="store",
+ type=str,
+ metavar="",
+ default="queries.json",
+ help="override the default file which contains queries to be instrumented",
+ )
+
+ parser.add_argument(
+ "--table",
+ action="store",
+ type=str,
+ metavar="",
+ help="specify a table to which benchmarking results should be "
+ + "streamed. The format for this string is in BigQuery standard SQL "
+ + "notation without escapes, e.g. projectid.datasetid.tableid",
+ )
+
+ parser.add_argument(
+ "--create_table",
+ action="store_true",
+ help="let the benchmarking tool create the destination table prior to"
+ + " streaming; if set, also need to set --table to specify table name",
+ )
+
+ parser.add_argument(
+ "--tag",
+ action="append",
+ type=_parse_tag,
+ metavar="",
+ help="set arbitrary key:value pairs, can be set multiple times",
+ )
+
+ args = parser.parse_args()
+ args_dict = vars(args)
+
+ # Verifies that project id is set.
+ if not args_dict.get("projectid"):
+ if projectid_env := os.environ["GOOGLE_CLOUD_PROJECT"]:
+ args_dict["projectid"] = projectid_env
+ else:
+ raise ValueError(
+ "Must provide --projectid or set "
+ "GOOGLE_CLOUD_PROJECT environment variable"
+ )
+
+ # Verifies that table name is specified when `create_table == True`.
+ if args_dict.get("create_table") and not args_dict.get("table"):
+ raise ValueError(
+ "When --create_table is present, must specify table name with --table"
+ )
+
+ return args_dict
+
+
+def _prepare_table(client, create_table: bool, table_name: str) -> str:
+ """Ensures a table exists, and optionally creates it if directed."""
+
+ # Verifies that table destination is of valid format.
+ parts = table_name.split(".")
+ if len(parts) != 3:
+ raise ValueError(f"Expected table in p.d.t format, got: {table_name}")
+
+ table = bigquery.Table(table_name, schema=_run_schema)
+
+ # Create table if create_table == True.
+ if create_table:
+ table = client.create_table(table)
+ print(f"Created table {table.project}.{table.dataset_id}." f"{table.table_id}")
+
+ # Verifies that table exists.
+ client.get_table(table_name)
+ return table_name
+
+
+def _run_query(client, query: str, rerun: int) -> list:
+ """Runs individual query for `rerun` times, and returns run results."""
+ runs = []
+
+ for _ in range(rerun):
+ print(".", end="", flush=True)
+ run = {}
+ num_rows = 0
+ num_cols = 0
+ start_time = datetime.now()
+ first_row_time = datetime.min
+ end_time = datetime.min
+
+ job = client.query(query)
+ query_end_time = datetime.now()
+
+ try:
+ rows = job.result()
+ for row in rows:
+ if num_rows == 0:
+ num_cols = len(row)
+ first_row_time = datetime.now()
+ elif num_cols != len(row):
+ raise RuntimeError(f"found {len(row)} columns, expected {num_cols}")
+ num_rows += 1
+ end_time = datetime.now()
+ except exceptions.BadRequest as exc:
+ run["errorstring"] = repr(exc)
+
+ run["start_time"] = start_time.isoformat()
+ run["query_end_time"] = query_end_time.isoformat()
+ run["first_row_returned_time"] = first_row_time.isoformat()
+ run["all_rows_returned_time"] = end_time.isoformat()
+ run["total_rows"] = num_rows
+ runs.append(run)
+
+ print("")
+ return runs
+
+
+def _get_delta(time_str_1: str, time_str_2: str) -> str:
+ """Calculates delta of two ISO format time string, and return as a string."""
+ time_1 = datetime.fromisoformat(time_str_1)
+ time_2 = datetime.fromisoformat(time_str_2)
+ delta = time_1 - time_2
+ return str(delta)
+
+
+def _is_datetime_min(time_str: str) -> bool:
+ return datetime.fromisoformat(time_str) == datetime.min
+
+
+def _summary(run: dict) -> str:
+ """Converts run dict to run summary string."""
+ no_val = "NODATA"
+ output = ["QUERYTIME "]
+
+ if not _is_datetime_min(run.get("query_end_time")):
+ output.append(f"{_get_delta(run.get('query_end_time'), run.get('start_time'))}")
+ else:
+ output.append(no_val)
+ output.append(" FIRSTROW ")
+
+ if not _is_datetime_min(run.get("first_row_returned_time")):
+ output.append(
+ f"{_get_delta(run.get('first_row_returned_time'), run.get('start_time'))}"
+ )
+ else:
+ output.append(no_val)
+ output += " ALLROWS "
+
+ if not _is_datetime_min(run.get("all_rows_returned_time")):
+ output.append(
+ f"{_get_delta(run.get('all_rows_returned_time'), run.get('start_time'))}"
+ )
+ else:
+ output.append(no_val)
+
+ if run.get("total_rows"):
+ output.append(f" ROWS {run.get('total_rows')}")
+ if run.get("errorstring"):
+ output.append(f" ERRORED {run.get('errorstring')}")
+
+ return "".join(output)
+
+
+def _print_results(profiles: list):
+ for i, prof in enumerate(profiles):
+ print(f"{i+1}: ({prof['groupname']}:{prof['name']})")
+ print(f"SQL: {prof['SQL']}")
+ print("MEASUREMENTS")
+ for j, run in enumerate(prof["runs"]):
+ print(f"\t\t({j}) {_summary(run)}")
+
+
+def _run_benchmarks(args: dict) -> list:
+ client = bigquery.Client()
+
+ # If we're going to stream results, let's make sure we can do that
+ # before running all the tests.
+ table_id = ""
+ if args.get("create_table") or args.get("table"):
+ table_id = _prepare_table(client, args.get("create_table"), args.get("table"))
+
+ queries_file = args.get("queryfile")
+ with open(queries_file, "r") as f:
+ groups = json.loads(f.read())
+
+ measure_start = datetime.now()
+ profiles = []
+ for group_name, group in groups.items():
+ for name, query in group.items():
+ print(f"Measuring {group_name} : {name}", end="", flush=True)
+ event_time = datetime.now()
+ runs = _run_query(client, query, args.get("reruns"))
+
+ profile = {}
+ profile["groupname"] = group_name
+ profile["name"] = name
+ profile["tags"] = args.get("tag") or []
+ profile["SQL"] = query
+ profile["runs"] = runs
+ profile["event_time"] = event_time.isoformat()
+ profiles.append(profile)
+
+ measure_end = datetime.now()
+ print(f"Measurement time: {str(measure_end-measure_start)}")
+
+ # Stream benchmarking results to table, if required.
+ if table_id:
+ print(f"Streaming test results to table {table_id}...")
+ errors = client.insert_rows_json(table_id, profiles)
+ if errors:
+ raise RuntimeError(f"Cannot upload queries profiles: {errors}")
+ print("Streaming complete.")
+
+ return profiles
+
+
+if __name__ == "__main__":
+ args = _parse_args()
+ profiles = _run_benchmarks(args)
+ _print_results(profiles)
diff --git a/testbed/googleapis__python-bigquery/benchmark/queries.json b/testbed/googleapis__python-bigquery/benchmark/queries.json
new file mode 100644
index 0000000000000000000000000000000000000000..464395619a63d30a8e6f8cddf50ab8b7349b31fb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/benchmark/queries.json
@@ -0,0 +1,16 @@
+{
+ "simple-cacheable": {
+ "nycyellow-limit1k":"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000",
+ "nycyellow-limit10k":"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000",
+ "nycyellow-limit100k":"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000",
+ "wikisamples-ordered-limit1k":"SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000"
+ },
+ "simple-nondeterministic": {
+ "current-timestamp":"SELECT CURRENT_TIMESTAMP() as ts",
+ "session-user": "SELECT SESSION_USER() as ts",
+ "literals": "SELECT 1 as i, 3.14 as pi"
+ },
+ "simple-invalid": {
+ "invalid-query": "invalid sql here"
+ }
+}
diff --git a/testbed/googleapis__python-bigquery/docs/.gitignore b/testbed/googleapis__python-bigquery/docs/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..3fe20bec0f3aaccf5e2a0ac9e6649036723ca675
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/.gitignore
@@ -0,0 +1 @@
+generated/
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/docs/README.rst b/testbed/googleapis__python-bigquery/docs/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f81adc4b90d9bad36c7145ce0f1cf3205de29e55
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/README.rst
@@ -0,0 +1,141 @@
+Python Client for Google BigQuery
+=================================
+
+|GA| |pypi| |versions|
+
+Querying massive datasets can be time consuming and expensive without the
+right hardware and infrastructure. Google `BigQuery`_ solves this problem by
+enabling super-fast, SQL queries against append-mostly tables, using the
+processing power of Google's infrastructure.
+
+- `Client Library Documentation`_
+- `Product Documentation`_
+
+.. |GA| image:: https://img.shields.io/badge/support-GA-gold.svg
+ :target: https://github.com/googleapis/google-cloud-python/blob/main/README.rst#general-availability
+.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigquery.svg
+ :target: https://pypi.org/project/google-cloud-bigquery/
+.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigquery.svg
+ :target: https://pypi.org/project/google-cloud-bigquery/
+.. _BigQuery: https://cloud.google.com/bigquery/what-is-bigquery
+.. _Client Library Documentation: https://googleapis.dev/python/bigquery/latest
+.. _Product Documentation: https://cloud.google.com/bigquery/docs/reference/v2/
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. `Enable the Google Cloud BigQuery API.`_
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Enable the Google Cloud BigQuery API.: https://cloud.google.com/bigquery
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.7
+
+Unsupported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Python == 2.7, Python == 3.5, Python == 3.6.
+
+The last version of this library compatible with Python 2.7 and 3.5 is
+`google-cloud-bigquery==1.28.0`.
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+ pip install virtualenv
+ virtualenv
+ source /bin/activate
+ /bin/pip install google-cloud-bigquery
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+ pip install virtualenv
+ virtualenv
+ \Scripts\activate
+ \Scripts\pip.exe install google-cloud-bigquery
+
+Example Usage
+-------------
+
+Perform a query
+~~~~~~~~~~~~~~~
+
+.. code:: python
+
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # Perform a query.
+ QUERY = (
+ 'SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` '
+ 'WHERE state = "TX" '
+ 'LIMIT 100')
+ query_job = client.query(QUERY) # API request
+ rows = query_job.result() # Waits for query to finish
+
+ for row in rows:
+ print(row.name)
+
+Instrumenting With OpenTelemetry
+--------------------------------
+
+This application uses `OpenTelemetry`_ to output tracing data from
+API calls to BigQuery. To enable OpenTelemetry tracing in
+the BigQuery client the following PyPI packages need to be installed:
+
+.. _OpenTelemetry: https://opentelemetry.io
+
+.. code-block:: console
+
+ pip install google-cloud-bigquery[opentelemetry] opentelemetry-exporter-gcp-trace
+
+After installation, OpenTelemetry can be used in the BigQuery
+client and in BigQuery jobs. First, however, an exporter must be
+specified for where the trace data will be outputted to. An
+example of this can be found here:
+
+.. code-block:: python
+
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+ from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+ tracer_provider = TracerProvider()
+ tracer_provider = BatchSpanProcessor(CloudTraceSpanExporter())
+ trace.set_tracer_provider(TracerProvider())
+
+In this example all tracing data will be published to the Google
+`Cloud Trace`_ console. For more information on OpenTelemetry, please consult the `OpenTelemetry documentation`_.
+
+.. _OpenTelemetry documentation: https://opentelemetry-python.readthedocs.io
+.. _Cloud Trace: https://cloud.google.com/trace
diff --git a/testbed/googleapis__python-bigquery/docs/UPGRADING.md b/testbed/googleapis__python-bigquery/docs/UPGRADING.md
new file mode 100644
index 0000000000000000000000000000000000000000..95f87f7ee9d81347ecd0126a78284d9b2643777e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/UPGRADING.md
@@ -0,0 +1,243 @@
+
+
+# 3.0.0 Migration Guide
+
+## New Required Dependencies
+
+Some of the previously optional dependencies are now *required* in `3.x` versions of the
+library, namely
+[google-cloud-bigquery-storage](https://pypi.org/project/google-cloud-bigquery-storage/)
+(minimum version `2.0.0`) and [pyarrow](https://pypi.org/project/pyarrow/) (minimum
+version `3.0.0`).
+
+The behavior of some of the package "extras" has thus also changed:
+ * The `pandas` extra now requires the [db-types](https://pypi.org/project/db-dtypes/)
+ package.
+ * The `bqstorage` extra has been preserved for comaptibility reasons, but it is now a
+ no-op and should be omitted when installing the BigQuery client library.
+
+ **Before:**
+ ```
+ $ pip install google-cloud-bigquery[bqstorage]
+ ```
+
+ **After:**
+ ```
+ $ pip install google-cloud-bigquery
+ ```
+
+ * The `bignumeric_type` extra has been removed, as `BIGNUMERIC` type is now
+ automatically supported. That extra should thus not be used.
+
+ **Before:**
+ ```
+ $ pip install google-cloud-bigquery[bignumeric_type]
+ ```
+
+ **After:**
+ ```
+ $ pip install google-cloud-bigquery
+ ```
+
+
+## Type Annotations
+
+The library is now type-annotated and declares itself as such. If you use a static
+type checker such as `mypy`, you might start getting errors in places where
+`google-cloud-bigquery` package is used.
+
+It is recommended to update your code and/or type annotations to fix these errors, but
+if this is not feasible in the short term, you can temporarily ignore type annotations
+in `google-cloud-bigquery`, for example by using a special `# type: ignore` comment:
+
+```py
+from google.cloud import bigquery # type: ignore
+```
+
+But again, this is only recommended as a possible short-term workaround if immediately
+fixing the type check errors in your project is not feasible.
+
+## Re-organized Types
+
+The auto-generated parts of the library has been removed, and proto-based types formerly
+found in `google.cloud.bigquery_v2` have been replaced by the new implementation (but
+see the [section](#legacy-types) below).
+
+For example, the standard SQL data types should new be imported from a new location:
+
+**Before:**
+```py
+from google.cloud.bigquery_v2 import StandardSqlDataType
+from google.cloud.bigquery_v2.types import StandardSqlField
+from google.cloud.bigquery_v2.types.standard_sql import StandardSqlStructType
+```
+
+**After:**
+```py
+from google.cloud.bigquery import StandardSqlDataType
+from google.cloud.bigquery.standard_sql import StandardSqlField
+from google.cloud.bigquery.standard_sql import StandardSqlStructType
+```
+
+The `TypeKind` enum defining all possible SQL types for schema fields has been renamed
+and is not nested anymore under `StandardSqlDataType`:
+
+
+**Before:**
+```py
+from google.cloud.bigquery_v2 import StandardSqlDataType
+
+if field_type == StandardSqlDataType.TypeKind.STRING:
+ ...
+```
+
+**After:**
+```py
+
+from google.cloud.bigquery import StandardSqlTypeNames
+
+if field_type == StandardSqlTypeNames.STRING:
+ ...
+```
+
+
+## Issuing queries with `Client.create_job` preserves destination table
+
+The `Client.create_job` method no longer removes the destination table from a
+query job's configuration. Destination table for the query can thus be
+explicitly defined by the user.
+
+
+## Changes to data types when reading a pandas DataFrame
+
+The default dtypes returned by the `to_dataframe` method have changed.
+
+* Now, the BigQuery `BOOLEAN` data type maps to the pandas `boolean` dtype.
+ Previously, this mapped to the pandas `bool` dtype when the column did not
+ contain `NULL` values and the pandas `object` dtype when `NULL` values are
+ present.
+* Now, the BigQuery `INT64` data type maps to the pandas `Int64` dtype.
+ Previously, this mapped to the pandas `int64` dtype when the column did not
+ contain `NULL` values and the pandas `float64` dtype when `NULL` values are
+ present.
+* Now, the BigQuery `DATE` data type maps to the pandas `dbdate` dtype, which
+ is provided by the
+ [db-dtypes](https://googleapis.dev/python/db-dtypes/latest/index.html)
+ package. If any date value is outside of the range of
+ [pandas.Timestamp.min](https://pandas.pydata.org/docs/reference/api/pandas.Timestamp.min.html)
+ (1677-09-22) and
+ [pandas.Timestamp.max](https://pandas.pydata.org/docs/reference/api/pandas.Timestamp.max.html)
+ (2262-04-11), the data type maps to the pandas `object` dtype. The
+ `date_as_object` parameter has been removed.
+* Now, the BigQuery `TIME` data type maps to the pandas `dbtime` dtype, which
+ is provided by the
+ [db-dtypes](https://googleapis.dev/python/db-dtypes/latest/index.html)
+ package.
+
+
+## Changes to data types loading a pandas DataFrame
+
+In the absence of schema information, pandas columns with naive
+`datetime64[ns]` values, i.e. without timezone information, are recognized and
+loaded using the `DATETIME` type. On the other hand, for columns with
+timezone-aware `datetime64[ns, UTC]` values, the `TIMESTAMP` type is continued
+to be used.
+
+## Changes to `Model`, `Client.get_model`, `Client.update_model`, and `Client.list_models`
+
+The types of several `Model` properties have been changed.
+
+- `Model.feature_columns` now returns a sequence of `google.cloud.bigquery.standard_sql.StandardSqlField`.
+- `Model.label_columns` now returns a sequence of `google.cloud.bigquery.standard_sql.StandardSqlField`.
+- `Model.model_type` now returns a string.
+- `Model.training_runs` now returns a sequence of dictionaries, as recieved from the [BigQuery REST API](https://cloud.google.com/bigquery/docs/reference/rest/v2/models#Model.FIELDS.training_runs).
+
+
+## Legacy Protocol Buffers Types
+
+For compatibility reasons, the legacy proto-based types still exists as static code
+and can be imported:
+
+```py
+from google.cloud.bigquery_v2 import Model # a sublcass of proto.Message
+```
+
+Mind, however, that importing them will issue a warning, because aside from
+being importable, these types **are not maintained anymore**. They may differ
+both from the types in `google.cloud.bigquery`, and from the types supported on
+the backend.
+
+### Maintaining compatibility with `google-cloud-bigquery` version 2.0
+
+If you maintain a library or system that needs to support both
+`google-cloud-bigquery` version 2.x and 3.x, it is recommended that you detect
+when version 2.x is in use and convert properties that use the legacy protocol
+buffer types, such as `Model.training_runs`, into the types used in 3.x.
+
+Call the [`to_dict`
+method](https://proto-plus-python.readthedocs.io/en/latest/reference/message.html#proto.message.Message.to_dict)
+on the protocol buffers objects to get a JSON-compatible dictionary.
+
+```py
+from google.cloud.bigquery_v2 import Model
+
+training_run: Model.TrainingRun = ...
+training_run_dict = training_run.to_dict()
+```
+
+# 2.0.0 Migration Guide
+
+The 2.0 release of the `google-cloud-bigquery` client drops support for Python
+versions below 3.6. The client surface itself has not changed, but the 1.x series
+will not be receiving any more feature updates or bug fixes. You are thus
+encouraged to upgrade to the 2.x series.
+
+If you experience issues or have questions, please file an
+[issue](https://github.com/googleapis/python-bigquery/issues).
+
+
+## Supported Python Versions
+
+> **WARNING**: Breaking change
+
+The 2.0.0 release requires Python 3.6+.
+
+
+## Supported BigQuery Storage Clients
+
+The 2.0.0 release requires BigQuery Storage `>= 2.0.0`, which dropped support
+for `v1beta1` and `v1beta2` versions of the BigQuery Storage API. If you want to
+use a BigQuery Storage client, it must be the one supporting the `v1` API version.
+
+
+## Changed GAPIC Enums Path
+
+> **WARNING**: Breaking change
+
+Generated GAPIC enum types have been moved under `types`. Import paths need to be
+adjusted.
+
+**Before:**
+```py
+from google.cloud.bigquery_v2.gapic import enums
+
+distance_type = enums.Model.DistanceType.COSINE
+```
+
+**After:**
+```py
+from google.cloud.bigquery_v2 import types
+
+distance_type = types.Model.DistanceType.COSINE
+```
diff --git a/testbed/googleapis__python-bigquery/docs/_static/custom.css b/testbed/googleapis__python-bigquery/docs/_static/custom.css
new file mode 100644
index 0000000000000000000000000000000000000000..b0a295464b237685b830c50aeeea5a7501e06e9b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/_static/custom.css
@@ -0,0 +1,20 @@
+div#python2-eol {
+ border-color: red;
+ border-width: medium;
+}
+
+/* Ensure minimum width for 'Parameters' / 'Returns' column */
+dl.field-list > dt {
+ min-width: 100px
+}
+
+/* Insert space between methods for readability */
+dl.method {
+ padding-top: 10px;
+ padding-bottom: 10px
+}
+
+/* Insert empty space between classes */
+dl.class {
+ padding-bottom: 50px
+}
diff --git a/testbed/googleapis__python-bigquery/docs/_templates/layout.html b/testbed/googleapis__python-bigquery/docs/_templates/layout.html
new file mode 100644
index 0000000000000000000000000000000000000000..6316a537f72bab9b5140b2a8fd2f77f06e49569b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/_templates/layout.html
@@ -0,0 +1,50 @@
+
+{% extends "!layout.html" %}
+{%- block content %}
+{%- if theme_fixed_sidebar|lower == 'true' %}
+
+ {{ sidebar() }}
+ {%- block document %}
+
+ {%- if render_sidebar %}
+
+ {%- endif %}
+
+ {%- block relbar_top %}
+ {%- if theme_show_relbar_top|tobool %}
+
+
+ {{- rellink_markup () }}
+
+ {%- endif %}
+ {% endblock %}
+
+
+
+ As of January 1, 2020 this library no longer supports Python 2 on the latest released version.
+ Library versions released prior to that date will continue to be available. For more information please
+ visit
Python 2 support on Google Cloud .
+
+ {% block body %} {% endblock %}
+
+
+ {%- block relbar_bottom %}
+ {%- if theme_show_relbar_bottom|tobool %}
+
+
+ {{- rellink_markup () }}
+
+ {%- endif %}
+ {% endblock %}
+
+ {%- if render_sidebar %}
+
+ {%- endif %}
+
+ {%- endblock %}
+
+
+{%- else %}
+{{ super() }}
+{%- endif %}
+{%- endblock %}
diff --git a/testbed/googleapis__python-bigquery/docs/bigquery/legacy_proto_types.rst b/testbed/googleapis__python-bigquery/docs/bigquery/legacy_proto_types.rst
new file mode 100644
index 0000000000000000000000000000000000000000..36e9984b9e2b567ef6d132d2eed56f3413969f22
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/bigquery/legacy_proto_types.rst
@@ -0,0 +1,14 @@
+Legacy proto-based Types for Google Cloud Bigquery v2 API
+=========================================================
+
+.. warning::
+ These types are provided for backward compatibility only, and are not maintained
+ anymore. They might also differ from the types supported on the backend. It is
+ therefore strongly advised to migrate to the types found in :doc:`standard_sql`.
+
+ Also see the :doc:`3.0.0 Migration Guide<../UPGRADING>` for more information.
+
+.. automodule:: google.cloud.bigquery_v2.types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testbed/googleapis__python-bigquery/docs/bigquery/standard_sql.rst b/testbed/googleapis__python-bigquery/docs/bigquery/standard_sql.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bd52bb78fcb0327ecba73fd7442f5c5f87b52e39
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/bigquery/standard_sql.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Bigquery v2 API
+======================================
+
+.. automodule:: google.cloud.bigquery.standard_sql
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testbed/googleapis__python-bigquery/docs/changelog.md b/testbed/googleapis__python-bigquery/docs/changelog.md
new file mode 100644
index 0000000000000000000000000000000000000000..5de99a6ca2b7f286c12c93d8cf326f93a859af42
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/changelog.md
@@ -0,0 +1,2440 @@
+# Changelog
+
+[PyPI History][1]
+
+[1]: https://pypi.org/project/google-cloud-bigquery/#history
+
+
+## [3.26.0](https://github.com/googleapis/python-bigquery/compare/v3.25.0...v3.26.0) (2024-09-25)
+
+
+### Features
+
+* Include LegacyPandasError in init imports ([#2014](https://github.com/googleapis/python-bigquery/issues/2014)) ([3ab5e95](https://github.com/googleapis/python-bigquery/commit/3ab5e95984ad521027a4e1efd9f16767403e668d))
+* Use `bigquery-magics` package for the `%%bigquery` magic ([#1965](https://github.com/googleapis/python-bigquery/issues/1965)) ([60128a5](https://github.com/googleapis/python-bigquery/commit/60128a522375823422f238312521a2ce356d9177))
+
+
+### Bug Fixes
+
+* Add docfx to the presubmit configuration and delete docs-presubmit ([#1995](https://github.com/googleapis/python-bigquery/issues/1995)) ([bd83cfd](https://github.com/googleapis/python-bigquery/commit/bd83cfd2eb25cec58d59af8048f5188d748b083d))
+* Add warning when encountering unknown field types ([#1989](https://github.com/googleapis/python-bigquery/issues/1989)) ([8f5a41d](https://github.com/googleapis/python-bigquery/commit/8f5a41d283a965ca161019588d3a3b2947b04b5b))
+* Allow protobuf 5.x; require protobuf >=3.20.2; proto-plus >=1.22.3 ([#1976](https://github.com/googleapis/python-bigquery/issues/1976)) ([57bf873](https://github.com/googleapis/python-bigquery/commit/57bf873474382cc2cb34243b704bc928fa1b64c6))
+* Do not set job timeout extra property if None ([#1987](https://github.com/googleapis/python-bigquery/issues/1987)) ([edcb79c](https://github.com/googleapis/python-bigquery/commit/edcb79ca69dba30d8102abebb9d53bc76e4882ee))
+* Set pyarrow field nullable to False for a BigQuery field in REPEATED mode ([#1999](https://github.com/googleapis/python-bigquery/issues/1999)) ([5352870](https://github.com/googleapis/python-bigquery/commit/5352870283ca7d4652aefc73f12645bcf6e1363c))
+
+
+### Dependencies
+
+* Bump min version of google-api-core and google-cloud-core to 2.x ([#1972](https://github.com/googleapis/python-bigquery/issues/1972)) ([a958732](https://github.com/googleapis/python-bigquery/commit/a958732aed7d9bd51ffde3dc0e6cae9ad7455b54))
+
+
+### Documentation
+
+* Add short mode query sample & test ([#1978](https://github.com/googleapis/python-bigquery/issues/1978)) ([ba61a8a](https://github.com/googleapis/python-bigquery/commit/ba61a8ab0da541ba1940211875d7ea2e9e17dfa8))
+* Improve QueryJobConfig.destination docstring ([#2016](https://github.com/googleapis/python-bigquery/issues/2016)) ([1b4cca0](https://github.com/googleapis/python-bigquery/commit/1b4cca0a3cc788a4570705572d5f04172f6b4b24))
+
+## [3.25.0](https://github.com/googleapis/python-bigquery/compare/v3.24.0...v3.25.0) (2024-06-17)
+
+
+### Features
+
+* Add prefer_bqstorage_client option for Connection ([#1945](https://github.com/googleapis/python-bigquery/issues/1945)) ([bfdeb3f](https://github.com/googleapis/python-bigquery/commit/bfdeb3fdbc1d5b26fcd3d1433abfb0be49d12018))
+* Support load job option ColumnNameCharacterMap ([#1952](https://github.com/googleapis/python-bigquery/issues/1952)) ([7e522ee](https://github.com/googleapis/python-bigquery/commit/7e522eea776cd9a74f8078c4236f63d5ff11f20e))
+
+
+### Bug Fixes
+
+* Do not overwrite page_size with max_results when start_index is set ([#1956](https://github.com/googleapis/python-bigquery/issues/1956)) ([7d0fcee](https://github.com/googleapis/python-bigquery/commit/7d0fceefdf28278c1f2cdaab571de9b235320998))
+
+## [3.24.0](https://github.com/googleapis/python-bigquery/compare/v3.23.1...v3.24.0) (2024-06-04)
+
+
+### Features
+
+* Add default timeout for Client.get_job() ([#1935](https://github.com/googleapis/python-bigquery/issues/1935)) ([9fbad76](https://github.com/googleapis/python-bigquery/commit/9fbad767cc228e02040436742d0cb6743d370b90))
+* Add support for map target type in Parquet options ([#1919](https://github.com/googleapis/python-bigquery/issues/1919)) ([c3f7b23](https://github.com/googleapis/python-bigquery/commit/c3f7b237383d4705ed6e720544728c4db61f6c83))
+
+
+### Bug Fixes
+
+* Create query job in job.result() if doesn't exist ([#1944](https://github.com/googleapis/python-bigquery/issues/1944)) ([8f5b4b7](https://github.com/googleapis/python-bigquery/commit/8f5b4b70423c277ffd559d2034bc0b2b5fb93169))
+* Retry `is_job_done` on `ConnectionError` ([#1930](https://github.com/googleapis/python-bigquery/issues/1930)) ([4f72723](https://github.com/googleapis/python-bigquery/commit/4f72723f539d35977bc52c5950f6e00889b5c7be))
+
+
+### Performance Improvements
+
+* If `page_size` or `max_results` is set on `QueryJob.result()`, use to download first page of results ([#1942](https://github.com/googleapis/python-bigquery/issues/1942)) ([3e7a48d](https://github.com/googleapis/python-bigquery/commit/3e7a48d36e3c7bf6abe1b5550097178f6ca6e174))
+
+## [3.23.1](https://github.com/googleapis/python-bigquery/compare/v3.23.0...v3.23.1) (2024-05-21)
+
+
+### Performance Improvements
+
+* Decrease the threshold in which we use the BQ Storage Read API ([#1925](https://github.com/googleapis/python-bigquery/issues/1925)) ([eaa1a52](https://github.com/googleapis/python-bigquery/commit/eaa1a52b360646909c14ca7194b8c6b17fefdd79))
+
+## [3.23.0](https://github.com/googleapis/python-bigquery/compare/v3.22.0...v3.23.0) (2024-05-16)
+
+
+### Features
+
+* Adds timer decorator to facilitate debugging ([#1917](https://github.com/googleapis/python-bigquery/issues/1917)) ([ea750e0](https://github.com/googleapis/python-bigquery/commit/ea750e0248473b6207b8517aa7ea1cf4e19bccf2))
+* Support insertAll for range ([#1909](https://github.com/googleapis/python-bigquery/issues/1909)) ([74e75e8](https://github.com/googleapis/python-bigquery/commit/74e75e89ce3a5ac18112b2c1c33248445ff072e4))
+
+
+### Bug Fixes
+
+* Add pyarrow version check for range support ([#1914](https://github.com/googleapis/python-bigquery/issues/1914)) ([a86d7b9](https://github.com/googleapis/python-bigquery/commit/a86d7b96813f67fea28b46c5252416222edca9a6))
+* Edit presubmit for to simplify configuration ([#1915](https://github.com/googleapis/python-bigquery/issues/1915)) ([b739596](https://github.com/googleapis/python-bigquery/commit/b739596f37b8c00b375cc811c316b618097d761a))
+
+## [3.22.0](https://github.com/googleapis/python-bigquery/compare/v3.21.0...v3.22.0) (2024-04-19)
+
+
+### Features
+
+* Support RANGE in queries Part 2: Arrow ([#1868](https://github.com/googleapis/python-bigquery/issues/1868)) ([5251b5d](https://github.com/googleapis/python-bigquery/commit/5251b5dbb254732ea730bab664ad319bd5be47e7))
+
+## [3.21.0](https://github.com/googleapis/python-bigquery/compare/v3.20.1...v3.21.0) (2024-04-18)
+
+
+### Features
+
+* Add compression option ZSTD. ([#1890](https://github.com/googleapis/python-bigquery/issues/1890)) ([5ed9cce](https://github.com/googleapis/python-bigquery/commit/5ed9ccee204b7cf8e96cb0e050f6830c05f3b4fd))
+* Adds billing to opentel ([#1889](https://github.com/googleapis/python-bigquery/issues/1889)) ([38697fb](https://github.com/googleapis/python-bigquery/commit/38697fb942516fc2f6f5e21e19a11811fbaeb1f4))
+* Support RANGE in queries Part 1: JSON ([#1884](https://github.com/googleapis/python-bigquery/issues/1884)) ([3634405](https://github.com/googleapis/python-bigquery/commit/3634405fa1b40ae5f69b06d7c7f8de4e3d246d92))
+
+
+### Bug Fixes
+
+* Add types to DatasetReference constructor ([#1601](https://github.com/googleapis/python-bigquery/issues/1601)) ([bf8861c](https://github.com/googleapis/python-bigquery/commit/bf8861c3473a1af978db7a06463ddc0bad86f326))
+* Creates linting-typing.cfg in presubmit ([#1881](https://github.com/googleapis/python-bigquery/issues/1881)) ([c852c15](https://github.com/googleapis/python-bigquery/commit/c852c153c55025ba1187d61e313ead2308616c55))
+* Remove duplicate key time_partitioning from Table._PROPERTY_TO_A… ([#1898](https://github.com/googleapis/python-bigquery/issues/1898)) ([82ae908](https://github.com/googleapis/python-bigquery/commit/82ae908fbf3b2361343fff1859d3533383dc50ec))
+* Retry query jobs that fail even with ambiguous `jobs.getQueryResults` REST errors ([#1903](https://github.com/googleapis/python-bigquery/issues/1903), [#1900](https://github.com/googleapis/python-bigquery/issues/1900)) ([1367b58](https://github.com/googleapis/python-bigquery/commit/1367b584b68d917ec325ce4383a0e9a36205b894))
+
+
+### Performance Improvements
+
+* Avoid unnecessary API call in `QueryJob.result()` when job is already finished ([#1900](https://github.com/googleapis/python-bigquery/issues/1900)) ([1367b58](https://github.com/googleapis/python-bigquery/commit/1367b584b68d917ec325ce4383a0e9a36205b894))
+
+## [3.20.1](https://github.com/googleapis/python-bigquery/compare/v3.20.0...v3.20.1) (2024-04-01)
+
+
+### Bug Fixes
+
+* Make `pyarrow` an optional dependency post-3.20.0 yanked release ([#1879](https://github.com/googleapis/python-bigquery/issues/1879)) ([21714e1](https://github.com/googleapis/python-bigquery/commit/21714e18bad8d8d89ed5642dbdb61d14e97d5f33))
+
+## [3.20.0](https://github.com/googleapis/python-bigquery/compare/v3.19.0...v3.20.0) (2024-03-27)
+
+
+### Features
+
+* Add `fields` parameter to `set_iam_policy` for consistency with update methods ([#1872](https://github.com/googleapis/python-bigquery/issues/1872)) ([08b1e6f](https://github.com/googleapis/python-bigquery/commit/08b1e6f9c41121907c345daedbae40ece18e8b6a))
+
+
+### Bug Fixes
+
+* Correct type checking ([#1848](https://github.com/googleapis/python-bigquery/issues/1848)) ([2660dbd](https://github.com/googleapis/python-bigquery/commit/2660dbd4821a89a1e20e3e1541504a409f1979aa))
+* Update error logging when converting to pyarrow column fails ([#1836](https://github.com/googleapis/python-bigquery/issues/1836)) ([0ac6e9b](https://github.com/googleapis/python-bigquery/commit/0ac6e9bf186945832f5dcdf5a4d95667b4da223e))
+* Updates a number of optional dependencies ([#1864](https://github.com/googleapis/python-bigquery/issues/1864)) ([c2496a1](https://github.com/googleapis/python-bigquery/commit/c2496a1014a7d99e805b3d0a66e4517165bd7e01))
+* Use an allowlist instead of denylist to determine when `query_and_wait` uses `jobs.query` API ([#1869](https://github.com/googleapis/python-bigquery/issues/1869)) ([e265db6](https://github.com/googleapis/python-bigquery/commit/e265db6a6a37d13056dcaac240c2cf3975dfd644))
+
+## [3.19.0](https://github.com/googleapis/python-bigquery/compare/v3.18.0...v3.19.0) (2024-03-11)
+
+
+### Features
+
+* Support RANGE query parameters ([#1827](https://github.com/googleapis/python-bigquery/issues/1827)) ([b359a9a](https://github.com/googleapis/python-bigquery/commit/b359a9a55936a759a36aa69c5e5b014685e1fca6))
+* Support range sql ([#1807](https://github.com/googleapis/python-bigquery/issues/1807)) ([86a45c9](https://github.com/googleapis/python-bigquery/commit/86a45c989836b34dca456bac014352e55d6f86c0))
+
+
+### Bug Fixes
+
+* Add google-auth as a direct dependency ([713ce2c](https://github.com/googleapis/python-bigquery/commit/713ce2c2f6ce9931f67cbbcd63ad436ad336ad26))
+* Augment universe_domain handling ([#1837](https://github.com/googleapis/python-bigquery/issues/1837)) ([53c2cbf](https://github.com/googleapis/python-bigquery/commit/53c2cbf98d2961f553747514de273bcd5c117f0e))
+* **deps:** Require google-api-core>=1.34.1, >=2.11.0 ([713ce2c](https://github.com/googleapis/python-bigquery/commit/713ce2c2f6ce9931f67cbbcd63ad436ad336ad26))
+* Supplementary fix to env-based universe resolution ([#1844](https://github.com/googleapis/python-bigquery/issues/1844)) ([b818992](https://github.com/googleapis/python-bigquery/commit/b8189929b6008f7780214822062f8ed05d8d2a01))
+* Supplementary fix to env-based universe resolution ([#1847](https://github.com/googleapis/python-bigquery/issues/1847)) ([6dff50f](https://github.com/googleapis/python-bigquery/commit/6dff50f4fbc5aeb644383a4050dd5ffc05015ffe))
+
+## [3.18.0](https://github.com/googleapis/python-bigquery/compare/v3.17.2...v3.18.0) (2024-02-29)
+
+
+### Features
+
+* Support nullable boolean and Int64 dtypes in `insert_rows_from_dataframe` ([#1816](https://github.com/googleapis/python-bigquery/issues/1816)) ([ab0cf4c](https://github.com/googleapis/python-bigquery/commit/ab0cf4cc03292f62b56a8813cfb7681daa87f872))
+* Support slot_ms in QueryPlanEntry ([#1831](https://github.com/googleapis/python-bigquery/issues/1831)) ([d62cabb](https://github.com/googleapis/python-bigquery/commit/d62cabbf115637ecbaf8cc378f39329a5ae74c26))
+
+
+### Bug Fixes
+
+* Keyword rendering and docstring improvements ([#1829](https://github.com/googleapis/python-bigquery/issues/1829)) ([4dfb920](https://github.com/googleapis/python-bigquery/commit/4dfb920b106784e98f343b3e3fc8e8ff70c50560))
+
+
+### Documentation
+
+* **samples:** Updates to urllib3 constraint for Python 3.7 ([#1834](https://github.com/googleapis/python-bigquery/issues/1834)) ([b099c32](https://github.com/googleapis/python-bigquery/commit/b099c32a83946a347560f6a71d08c3f263e56cb6))
+* Update `client_query_w_named_params.py` to use `query_and_wait` API ([#1782](https://github.com/googleapis/python-bigquery/issues/1782)) ([89dfcb6](https://github.com/googleapis/python-bigquery/commit/89dfcb6469d22e78003a70371a0938a6856e033c))
+
+## [3.17.2](https://github.com/googleapis/python-bigquery/compare/v3.17.1...v3.17.2) (2024-01-30)
+
+
+### Bug Fixes
+
+* Change load_table_from_json autodetect logic ([#1804](https://github.com/googleapis/python-bigquery/issues/1804)) ([6249032](https://github.com/googleapis/python-bigquery/commit/62490325f64e5d66303d9218992e28ac5f21cb3f))
+
+
+### Documentation
+
+* Update to use API ([#1781](https://github.com/googleapis/python-bigquery/issues/1781)) ([81563b0](https://github.com/googleapis/python-bigquery/commit/81563b06298fe3a64be6a89b583c3d64758ca12a))
+* Update `client_query_destination_table.py` sample to use `query_and_wait` ([#1783](https://github.com/googleapis/python-bigquery/issues/1783)) ([68ebbe1](https://github.com/googleapis/python-bigquery/commit/68ebbe12d455ce8e9b1784fb11787c2fb842ef22))
+* Update query_external_sheets_permanent_table.py to use query_and_wait API ([#1778](https://github.com/googleapis/python-bigquery/issues/1778)) ([a7be88a](https://github.com/googleapis/python-bigquery/commit/a7be88adf8a480ee61aa79789cb53df1b79bb091))
+* Update sample for query_to_arrow to use query_and_wait API ([#1776](https://github.com/googleapis/python-bigquery/issues/1776)) ([dbf10de](https://github.com/googleapis/python-bigquery/commit/dbf10dee51a7635e9b98658f205ded2de087a06f))
+* Update the query destination table legacy file to use query_and_wait API ([#1775](https://github.com/googleapis/python-bigquery/issues/1775)) ([ef89f9e](https://github.com/googleapis/python-bigquery/commit/ef89f9e58c22b3af5a7757b69daa030116012350))
+* Update to use `query_and_wait` in `client_query_w_positional_params.py` ([#1786](https://github.com/googleapis/python-bigquery/issues/1786)) ([410f71e](https://github.com/googleapis/python-bigquery/commit/410f71e6b6e755928e363ed89c1044e14b0db9cc))
+* Update to use `query_and_wait` in `samples/client_query_w_timestamp_params.py` ([#1785](https://github.com/googleapis/python-bigquery/issues/1785)) ([ba36948](https://github.com/googleapis/python-bigquery/commit/ba3694852c13c8a29fe0f9d923353e82acfd4278))
+* Update to_geodataframe to use query_and_wait functionality ([#1800](https://github.com/googleapis/python-bigquery/issues/1800)) ([1298594](https://github.com/googleapis/python-bigquery/commit/12985942942b8f205ecd261fcdf620df9a640460))
+
+## [3.17.1](https://github.com/googleapis/python-bigquery/compare/v3.17.0...v3.17.1) (2024-01-24)
+
+
+### Bug Fixes
+
+* Add pyarrow.large_strign to the _ARROW_SCALAR_IDS_TO_BQ map ([#1796](https://github.com/googleapis/python-bigquery/issues/1796)) ([b402a6d](https://github.com/googleapis/python-bigquery/commit/b402a6df92e656aee10dd2c11c48f6ed93c74fd7))
+* Retry 'job exceeded rate limits' for DDL queries ([#1794](https://github.com/googleapis/python-bigquery/issues/1794)) ([39f33b2](https://github.com/googleapis/python-bigquery/commit/39f33b210ecbe9c2fd390825d29393c2d80257f5))
+
+## [3.17.0](https://github.com/googleapis/python-bigquery/compare/v3.16.0...v3.17.0) (2024-01-24)
+
+
+### Features
+
+* Support universe resolution ([#1774](https://github.com/googleapis/python-bigquery/issues/1774)) ([0b5c1d5](https://github.com/googleapis/python-bigquery/commit/0b5c1d597cdec3a05a16fb935595f773c5840bd4))
+
+
+### Bug Fixes
+
+* `query_and_wait` now retains unknown query configuration `_properties` ([#1793](https://github.com/googleapis/python-bigquery/issues/1793)) ([4ba4342](https://github.com/googleapis/python-bigquery/commit/4ba434287a0a25f027e3b63a80f8881a9b16723e))
+* Raise `ValueError` in `query_and_wait` with wrong `job_config` type ([4ba4342](https://github.com/googleapis/python-bigquery/commit/4ba434287a0a25f027e3b63a80f8881a9b16723e))
+
+
+### Documentation
+
+* Remove unused query code sample ([#1769](https://github.com/googleapis/python-bigquery/issues/1769)) ([1f96439](https://github.com/googleapis/python-bigquery/commit/1f96439b3dbd27f11be5e2af84f290ec6094d0a4))
+* Update `snippets.py` to use `query_and_wait` ([#1773](https://github.com/googleapis/python-bigquery/issues/1773)) ([d90602d](https://github.com/googleapis/python-bigquery/commit/d90602de87e58b665cb974401a327a640805822f))
+* Update multiple samples to change query to query_and_wait ([#1784](https://github.com/googleapis/python-bigquery/issues/1784)) ([d1161dd](https://github.com/googleapis/python-bigquery/commit/d1161dddde41a7d35b30033ccbf6984a5de640bd))
+* Update the query with no cache sample to use query_and_wait API ([#1770](https://github.com/googleapis/python-bigquery/issues/1770)) ([955a4cd](https://github.com/googleapis/python-bigquery/commit/955a4cd99e21cbca1b2f9c1dc6aa3fd8070cd61f))
+* Updates `query` to `query and wait` in samples/desktopapp/user_credentials.py ([#1787](https://github.com/googleapis/python-bigquery/issues/1787)) ([89f1299](https://github.com/googleapis/python-bigquery/commit/89f1299b3164b51fb0f29bc600a34ded59c10682))
+
+## [3.16.0](https://github.com/googleapis/python-bigquery/compare/v3.15.0...v3.16.0) (2024-01-12)
+
+
+### Features
+
+* Add `table_constraints` field to Table model ([#1755](https://github.com/googleapis/python-bigquery/issues/1755)) ([a167f9a](https://github.com/googleapis/python-bigquery/commit/a167f9a95f0a8fbf0bdb4943d06f07c03768c132))
+* Support jsonExtension in LoadJobConfig ([#1751](https://github.com/googleapis/python-bigquery/issues/1751)) ([0fd7347](https://github.com/googleapis/python-bigquery/commit/0fd7347ddb4ae1993f02b3bc109f64297437b3e2))
+
+
+### Bug Fixes
+
+* Add detailed message in job error ([#1762](https://github.com/googleapis/python-bigquery/issues/1762)) ([08483fb](https://github.com/googleapis/python-bigquery/commit/08483fba675f3b87571787e1e4420134a8fc8177))
+
+## [3.15.0](https://github.com/googleapis/python-bigquery/compare/v3.14.1...v3.15.0) (2024-01-09)
+
+
+### Features
+
+* Support JSON type in `insert_rows` and as a scalar query parameter ([#1757](https://github.com/googleapis/python-bigquery/issues/1757)) ([02a7d12](https://github.com/googleapis/python-bigquery/commit/02a7d129776b7da7da844ffa9c5cdf21811cd3af))
+* Support RANGE in schema ([#1746](https://github.com/googleapis/python-bigquery/issues/1746)) ([8585747](https://github.com/googleapis/python-bigquery/commit/8585747058e6db49a8078ae44d8e10735cdc27f9))
+
+
+### Bug Fixes
+
+* Deserializing JSON subfields within structs fails ([#1742](https://github.com/googleapis/python-bigquery/issues/1742)) ([0d93073](https://github.com/googleapis/python-bigquery/commit/0d930739c78b557db6cd48b38fe16eba93719c40))
+* Due to upstream change in dataset, updates expected results ([#1761](https://github.com/googleapis/python-bigquery/issues/1761)) ([132c14b](https://github.com/googleapis/python-bigquery/commit/132c14bbddfb61ea8bc408bef5e958e21b5b819c))
+* Load_table_from_dataframe for higher scale decimal ([#1703](https://github.com/googleapis/python-bigquery/issues/1703)) ([b9c8be0](https://github.com/googleapis/python-bigquery/commit/b9c8be0982c76187444300c414e0dda8b0ad105b))
+* Updates types-protobuf version for mypy-samples nox session ([#1764](https://github.com/googleapis/python-bigquery/issues/1764)) ([c0de695](https://github.com/googleapis/python-bigquery/commit/c0de6958e5761ad6ff532dd933b0f4387e18f1b9))
+
+
+### Performance Improvements
+
+* DB-API uses more efficient `query_and_wait` when no job ID is provided ([#1747](https://github.com/googleapis/python-bigquery/issues/1747)) ([d225a94](https://github.com/googleapis/python-bigquery/commit/d225a94e718a85877c495fbd32eca607b8919ac6))
+
+## [3.14.1](https://github.com/googleapis/python-bigquery/compare/v3.14.0...v3.14.1) (2023-12-13)
+
+
+### Bug Fixes
+
+* Add missing handler for deserializing json value ([#1587](https://github.com/googleapis/python-bigquery/issues/1587)) ([09017a9](https://github.com/googleapis/python-bigquery/commit/09017a997010f78bb6e34238fab15247ed14ea7e))
+
+## [3.14.0](https://github.com/googleapis/python-bigquery/compare/v3.13.0...v3.14.0) (2023-12-08)
+
+
+### Features
+
+* Add `Client.query_and_wait` which directly returns a `RowIterator` of results ([#1722](https://github.com/googleapis/python-bigquery/issues/1722)) ([89a647e](https://github.com/googleapis/python-bigquery/commit/89a647e19fe5d7302c0a39bba77a155635c5c29d))
+* Add `job_id`, `location`, `project`, and `query_id` properties on `RowIterator` ([#1733](https://github.com/googleapis/python-bigquery/issues/1733)) ([494f275](https://github.com/googleapis/python-bigquery/commit/494f275ab2493dc7904f685c4d12e60bef51ab21))
+* Add `job_timeout_ms` to job configuration classes ([#1675](https://github.com/googleapis/python-bigquery/issues/1675)) ([84d64cd](https://github.com/googleapis/python-bigquery/commit/84d64cdd157afef4a7bf7807e557d59452133434))
+* Add support dataset.max_time_travel_hours ([#1683](https://github.com/googleapis/python-bigquery/issues/1683)) ([f22eff2](https://github.com/googleapis/python-bigquery/commit/f22eff25f116f1c4973ac2b8b03bc8a4ae1f3f42))
+* Add support for Dataset.isCaseInsensitive ([#1671](https://github.com/googleapis/python-bigquery/issues/1671)) ([386fa86](https://github.com/googleapis/python-bigquery/commit/386fa86c89b8cff69fc02213254a1c53c02fee42))
+* Add support for Python 3.12 ([#1736](https://github.com/googleapis/python-bigquery/issues/1736)) ([3c0976a](https://github.com/googleapis/python-bigquery/commit/3c0976aecb0f917477feef4e9ed865997c2bb106))
+* Removed pkg_resources from all test files and moved importlib into pandas extra ([#1726](https://github.com/googleapis/python-bigquery/issues/1726)) ([1f4ebb1](https://github.com/googleapis/python-bigquery/commit/1f4ebb1eca4f9380a31172fc8cb2fae125f8c5a2))
+* Support data_governance_type ([#1708](https://github.com/googleapis/python-bigquery/issues/1708)) ([eff365d](https://github.com/googleapis/python-bigquery/commit/eff365dc17755d0855338e2f273428ffe2056f67))
+
+
+### Bug Fixes
+
+* `load_table_from_dataframe` now assumes there may be local null values ([#1735](https://github.com/googleapis/python-bigquery/issues/1735)) ([f05dc69](https://github.com/googleapis/python-bigquery/commit/f05dc69a1f8c65ac32085bfcc6950c2c83f8a843))
+* Ensure query job retry has longer deadline than API request deadline ([#1734](https://github.com/googleapis/python-bigquery/issues/1734)) ([5573579](https://github.com/googleapis/python-bigquery/commit/55735791122f97b7f67cb962b489fd1f12210af5))
+* Keep `RowIterator.total_rows` populated after iteration ([#1748](https://github.com/googleapis/python-bigquery/issues/1748)) ([8482f47](https://github.com/googleapis/python-bigquery/commit/8482f4759ce3c4b00fa06a7f306a2ac4d4ee8eb7))
+* Move grpc, proto-plus and protobuf packages to extras ([#1721](https://github.com/googleapis/python-bigquery/issues/1721)) ([5ce4d13](https://github.com/googleapis/python-bigquery/commit/5ce4d136af97b91fbe1cc56bba1021e50a9c8476))
+
+
+### Performance Improvements
+
+* Use the first page a results when `query(api_method="QUERY")` ([#1723](https://github.com/googleapis/python-bigquery/issues/1723)) ([6290517](https://github.com/googleapis/python-bigquery/commit/6290517d6b153a31f20098f75aee580b7915aca9))
+
+## [3.13.0](https://github.com/googleapis/python-bigquery/compare/v3.12.0...v3.13.0) (2023-10-30)
+
+
+### Features
+
+* Add `Model.transform_columns` property ([#1661](https://github.com/googleapis/python-bigquery/issues/1661)) ([5ceed05](https://github.com/googleapis/python-bigquery/commit/5ceed056482f6d1f2fc45e7e6b84382de45c85ed))
+* Add support for dataset.default_rounding_mode ([#1688](https://github.com/googleapis/python-bigquery/issues/1688)) ([83bc768](https://github.com/googleapis/python-bigquery/commit/83bc768b90a852d258a4805603020a296e02d2f9))
+
+
+### Bug Fixes
+
+* AccessEntry API representation parsing ([#1682](https://github.com/googleapis/python-bigquery/issues/1682)) ([a40d7ae](https://github.com/googleapis/python-bigquery/commit/a40d7ae03149708fc34c962b43a6ac198780b6aa))
+
+
+### Documentation
+
+* Remove redundant `bigquery_update_table_expiration` code sample ([#1673](https://github.com/googleapis/python-bigquery/issues/1673)) ([2dded33](https://github.com/googleapis/python-bigquery/commit/2dded33626b3de6c4ab5e1229eb4c85786b2ff53))
+* Revised `create_partitioned_table` sample ([#1447](https://github.com/googleapis/python-bigquery/issues/1447)) ([40ba859](https://github.com/googleapis/python-bigquery/commit/40ba859059c3e463e17ea7781bc5a9aff8244c5d))
+* Revised relax column mode sample ([#1467](https://github.com/googleapis/python-bigquery/issues/1467)) ([b8c9276](https://github.com/googleapis/python-bigquery/commit/b8c9276be011d971b941b583fd3d4417d438067f))
+
+## [3.12.0](https://github.com/googleapis/python-bigquery/compare/v3.11.4...v3.12.0) (2023-10-02)
+
+
+### Features
+
+* Add `Dataset.storage_billing_model` setter, use `client.update_dataset(ds, fields=["storage_billing_model"])` to update ([#1643](https://github.com/googleapis/python-bigquery/issues/1643)) ([5deba50](https://github.com/googleapis/python-bigquery/commit/5deba50b8c2d91d08bd5f5fb68742268c494b4a9))
+* Search statistics ([#1616](https://github.com/googleapis/python-bigquery/issues/1616)) ([b930e46](https://github.com/googleapis/python-bigquery/commit/b930e4673b0d1cceb53f683e47578d87af9361f3))
+* Widen retry predicate to include ServiceUnavailable ([#1641](https://github.com/googleapis/python-bigquery/issues/1641)) ([3e021a4](https://github.com/googleapis/python-bigquery/commit/3e021a46d387a0e3cb69913a281062fc221bb926))
+
+
+### Bug Fixes
+
+* Allow `storage_billing_model` to be explicitly set to `None` to use project default value ([#1665](https://github.com/googleapis/python-bigquery/issues/1665)) ([514d3e1](https://github.com/googleapis/python-bigquery/commit/514d3e12e5131bd589dff08893fd89bf40338ba3))
+* Relax timeout expectations ([#1645](https://github.com/googleapis/python-bigquery/issues/1645)) ([1760e94](https://github.com/googleapis/python-bigquery/commit/1760e945d16163980027fecf21113cd77ddc35a1))
+* Use isinstance() per E721, unpin flake8 ([#1659](https://github.com/googleapis/python-bigquery/issues/1659)) ([54a7769](https://github.com/googleapis/python-bigquery/commit/54a77694afcd80be4ba469c6ebb7ca8be112b04e))
+
+
+### Documentation
+
+* Revise update_table_expiration sample ([#1457](https://github.com/googleapis/python-bigquery/issues/1457)) ([03194e0](https://github.com/googleapis/python-bigquery/commit/03194e0156ed9201cb36301967c5af117d7ef29c))
+
+## [3.11.4](https://github.com/googleapis/python-bigquery/compare/v3.11.3...v3.11.4) (2023-07-19)
+
+
+### Bug Fixes
+
+* Updates typing in function definitions ([#1613](https://github.com/googleapis/python-bigquery/issues/1613)) ([db755ce](https://github.com/googleapis/python-bigquery/commit/db755ce5d2ae21e458f33f02cf63d2e5fbc45cf5))
+
+## [3.11.3](https://github.com/googleapis/python-bigquery/compare/v3.11.2...v3.11.3) (2023-06-27)
+
+
+### Bug Fixes
+
+* Type annotations include Optional when None is accepted ([#1554](https://github.com/googleapis/python-bigquery/issues/1554)) ([6c1ab80](https://github.com/googleapis/python-bigquery/commit/6c1ab802b09124ba837d6d5358962e3fce2d4a2c))
+
+## [3.11.2](https://github.com/googleapis/python-bigquery/compare/v3.11.1...v3.11.2) (2023-06-21)
+
+
+### Bug Fixes
+
+* Updates tests based on revised hacker_news tables ([#1591](https://github.com/googleapis/python-bigquery/issues/1591)) ([d73cf49](https://github.com/googleapis/python-bigquery/commit/d73cf495b8dfa032a43dc1d58599d0691aaa0efb))
+
+## [3.11.1](https://github.com/googleapis/python-bigquery/compare/v3.11.0...v3.11.1) (2023-06-09)
+
+
+### Documentation
+
+* Add/reformat return types for cloud RAD docs ([#1582](https://github.com/googleapis/python-bigquery/issues/1582)) ([6efdce1](https://github.com/googleapis/python-bigquery/commit/6efdce13cc3b25d37d22a856f2308daed569e637))
+
+## [3.11.0](https://github.com/googleapis/python-bigquery/compare/v3.10.0...v3.11.0) (2023-06-01)
+
+
+### Features
+
+* Add remote function options to routines ([#1558](https://github.com/googleapis/python-bigquery/issues/1558)) ([84ad11d](https://github.com/googleapis/python-bigquery/commit/84ad11d00d99d279e4e6e0fa4ca60e59575b1dad))
+
+
+### Bug Fixes
+
+* Filter None values from OpenTelemetry attributes ([#1567](https://github.com/googleapis/python-bigquery/issues/1567)) ([9ea2e21](https://github.com/googleapis/python-bigquery/commit/9ea2e21c35783782993d1ad2d3b910bbe9981ce2))
+* Handle case when expirationMs is None ([#1553](https://github.com/googleapis/python-bigquery/issues/1553)) ([fa6e13d](https://github.com/googleapis/python-bigquery/commit/fa6e13d5006caadb36899b4e2a24ca82b7f11b17))
+* Raise most recent exception when not able to fetch query job after starting the job ([#1362](https://github.com/googleapis/python-bigquery/issues/1362)) ([09cc1df](https://github.com/googleapis/python-bigquery/commit/09cc1df6babaf90ea0b0a6fd926f8013822a31ed))
+
+## [3.10.0](https://github.com/googleapis/python-bigquery/compare/v3.9.0...v3.10.0) (2023-04-18)
+
+
+### Features
+
+* Add date, datetime, time, timestamp dtype to to_dataframe ([#1547](https://github.com/googleapis/python-bigquery/issues/1547)) ([64e913d](https://github.com/googleapis/python-bigquery/commit/64e913d73832f6363466cbea5ace2337c86fa58b))
+
+## [3.9.0](https://github.com/googleapis/python-bigquery/compare/v3.8.0...v3.9.0) (2023-03-28)
+
+
+### Features
+
+* Expose query job on dbapi cursor ([#1520](https://github.com/googleapis/python-bigquery/issues/1520)) ([339eb0e](https://github.com/googleapis/python-bigquery/commit/339eb0e86040a7c30d140800f34810ffc6a7c76b))
+
+
+### Bug Fixes
+
+* Keyerror when the load_table_from_dataframe accesses a unmapped dtype dataframe index ([#1535](https://github.com/googleapis/python-bigquery/issues/1535)) ([a69348a](https://github.com/googleapis/python-bigquery/commit/a69348a558f48cfc61d03d3e8bb7f9aee48bea86))
+
+## [3.8.0](https://github.com/googleapis/python-bigquery/compare/v3.7.0...v3.8.0) (2023-03-24)
+
+
+### Features
+
+* Add bool, int, float, string dtype to to_dataframe ([#1529](https://github.com/googleapis/python-bigquery/issues/1529)) ([5e4465d](https://github.com/googleapis/python-bigquery/commit/5e4465d0975f54e8da885006686d9431ff9c5653))
+* Add default LoadJobConfig to Client ([#1526](https://github.com/googleapis/python-bigquery/issues/1526)) ([a2520ca](https://github.com/googleapis/python-bigquery/commit/a2520cabf7ec6bcb923c21e338188f1c10dc4d5d))
+* Expose configuration property on CopyJob, ExtractJob, LoadJob, QueryJob ([#1521](https://github.com/googleapis/python-bigquery/issues/1521)) ([8270a10](https://github.com/googleapis/python-bigquery/commit/8270a10df8f40750a7ac541a1781a71d7e79ce67))
+
+
+### Bug Fixes
+
+* Loosen ipywidgets restrictions further to address ipython compatibility issues ([#1531](https://github.com/googleapis/python-bigquery/issues/1531)) ([50e5026](https://github.com/googleapis/python-bigquery/commit/50e502674807b9771d7e26c0e784539bed8f9da6))
+
+## [3.7.0](https://github.com/googleapis/python-bigquery/compare/v3.6.0...v3.7.0) (2023-03-06)
+
+
+### Features
+
+* Add `connection_properties` and `create_session` to `LoadJobConfig` ([#1509](https://github.com/googleapis/python-bigquery/issues/1509)) ([cd0aaa1](https://github.com/googleapis/python-bigquery/commit/cd0aaa15960e9ca7a0aaf411c8e4990f95421816))
+* Add default_query_job_config property and property setter to BQ client ([#1511](https://github.com/googleapis/python-bigquery/issues/1511)) ([a23092c](https://github.com/googleapis/python-bigquery/commit/a23092cad834c6a016f455d46fefa13bb6cdbf0f))
+
+
+### Documentation
+
+* Remove < 3.11 reference from README ([#1502](https://github.com/googleapis/python-bigquery/issues/1502)) ([c7417f4](https://github.com/googleapis/python-bigquery/commit/c7417f43563e20a3e6f1a57f46925fb274b28b07))
+
+## [3.6.0](https://github.com/googleapis/python-bigquery/compare/v3.5.0...v3.6.0) (2023-02-22)
+
+
+### Features
+
+* Adding preserveAsciiControlCharacter to CSVOptions ([#1491](https://github.com/googleapis/python-bigquery/issues/1491)) ([f832e7a](https://github.com/googleapis/python-bigquery/commit/f832e7a0b79f3567a0773ff11630e2f48bed60db))
+
+
+### Bug Fixes
+
+* Annotate optional integer parameters with optional type ([#1487](https://github.com/googleapis/python-bigquery/issues/1487)) ([a190aaa](https://github.com/googleapis/python-bigquery/commit/a190aaa09ae73e8b6a83b7b213247f95fde57615))
+* Loosen ipywidget dependency ([#1504](https://github.com/googleapis/python-bigquery/issues/1504)) ([20d3276](https://github.com/googleapis/python-bigquery/commit/20d3276cc29e9467eef9476d5fd572099d9a3f6f))
+* Removes scope to avoid unnecessary duplication ([#1503](https://github.com/googleapis/python-bigquery/issues/1503)) ([665d7ba](https://github.com/googleapis/python-bigquery/commit/665d7ba74a1b45de1ef51cc75b6860125afc5fe6))
+
+
+### Dependencies
+
+* Update minimum google-cloud-core to 1.6.0 ([a190aaa](https://github.com/googleapis/python-bigquery/commit/a190aaa09ae73e8b6a83b7b213247f95fde57615))
+
+## [3.5.0](https://github.com/googleapis/python-bigquery/compare/v3.4.2...v3.5.0) (2023-01-31)
+
+
+### Features
+
+* Add __str__ method to DatasetReference ([#1477](https://github.com/googleapis/python-bigquery/issues/1477)) ([f32df1f](https://github.com/googleapis/python-bigquery/commit/f32df1fb74e4aea24cd8a4099040ad2f7436e54d))
+* Add preserveAsciiControlCharacter to LoadJobConfig ([#1484](https://github.com/googleapis/python-bigquery/issues/1484)) ([bd1da9a](https://github.com/googleapis/python-bigquery/commit/bd1da9aa0a40b02b7d5409a0b094d8380e255c91))
+
+
+### Documentation
+
+* Adds snippet for creating table with external data config ([#1420](https://github.com/googleapis/python-bigquery/issues/1420)) ([f0ace2a](https://github.com/googleapis/python-bigquery/commit/f0ace2ac2307ef359511a235f80f5ce9e46264c1))
+* Revise delete label table code sample, add TODO to clean up sni… ([#1466](https://github.com/googleapis/python-bigquery/issues/1466)) ([0dab7d2](https://github.com/googleapis/python-bigquery/commit/0dab7d25ace4b63d2984485e7b0c5bb38f20476f))
+* **samples:** Table variable fix ([#1287](https://github.com/googleapis/python-bigquery/issues/1287)) ([a71888a](https://github.com/googleapis/python-bigquery/commit/a71888a60d1e5e5815ab459fe24368ad5b0d032a))
+
+## [3.4.2](https://github.com/googleapis/python-bigquery/compare/v3.4.1...v3.4.2) (2023-01-13)
+
+
+### Bug Fixes
+
+* Add support for python 3.11 ([#1463](https://github.com/googleapis/python-bigquery/issues/1463)) ([730a1de](https://github.com/googleapis/python-bigquery/commit/730a1dec8be49df26a3d805ebd4ad185ba72170d))
+* Require grpcio >= 1.49.1 for python 3.11 ([72b25c5](https://github.com/googleapis/python-bigquery/commit/72b25c52bc4b9a92c4cb187b6230b280d4af905c))
+
+
+### Dependencies
+
+* Remove upper bound on packaging dependency ([#1440](https://github.com/googleapis/python-bigquery/issues/1440)) ([6088129](https://github.com/googleapis/python-bigquery/commit/60881296a35067e7aa025d92b2425572f10fd4ec))
+
+
+### Documentation
+
+* Create sample to write schema file from table ([#1439](https://github.com/googleapis/python-bigquery/issues/1439)) ([093cc68](https://github.com/googleapis/python-bigquery/commit/093cc6852ada29898c4a4d047fd216544ef15bba))
+* Created samples for load table and create table from schema file ([#1436](https://github.com/googleapis/python-bigquery/issues/1436)) ([8ad2e5b](https://github.com/googleapis/python-bigquery/commit/8ad2e5bc1c04bf16fffe4c8773e722b68117c916))
+* Revise create table cmek sample ([#1452](https://github.com/googleapis/python-bigquery/issues/1452)) ([57740e4](https://github.com/googleapis/python-bigquery/commit/57740e49af7418449aec73a6fdd307fcb588c655))
+* Revise get table labels code sample, add TODO to clean up snipp… ([#1464](https://github.com/googleapis/python-bigquery/issues/1464)) ([b5ccbfe](https://github.com/googleapis/python-bigquery/commit/b5ccbfe4eee91d7f481d9708084cd29d0c85e666))
+* Revise label table code samples ([#1451](https://github.com/googleapis/python-bigquery/issues/1451)) ([14ae1f2](https://github.com/googleapis/python-bigquery/commit/14ae1f20538ea00829a1325f91f5e8524234bd0c))
+* Revise sample for nested schema ([#1446](https://github.com/googleapis/python-bigquery/issues/1446)) ([a097631](https://github.com/googleapis/python-bigquery/commit/a0976318fc5ad1620a68250c3e059e2a51d4946d))
+
+## [3.4.1](https://github.com/googleapis/python-bigquery/compare/v3.4.0...v3.4.1) (2022-12-09)
+
+
+### Documentation
+
+* Add info about streaming quota limits to `insert_rows*` methods ([#1409](https://github.com/googleapis/python-bigquery/issues/1409)) ([0f08e9a](https://github.com/googleapis/python-bigquery/commit/0f08e9a8ff638e78006d71acd974de2dff89b5d9))
+
+
+### Dependencies
+
+* make pyarrow and BQ Storage optional dependencies ([e1aa921](https://github.com/googleapis/python-bigquery/commit/e1aa9218ad22f85c9a6cab8b61d013779376a582))
+
+## [3.4.0](https://github.com/googleapis/python-bigquery/compare/v3.3.6...v3.4.0) (2022-11-17)
+
+
+### Features
+
+* Add `reference_file_schema_uri` to LoadJobConfig, ExternalConfig ([#1399](https://github.com/googleapis/python-bigquery/issues/1399)) ([931285f](https://github.com/googleapis/python-bigquery/commit/931285ff85842ab07a0ef2ff9db808181ea3c5e4))
+* Add default value expression ([#1408](https://github.com/googleapis/python-bigquery/issues/1408)) ([207aa50](https://github.com/googleapis/python-bigquery/commit/207aa506ab634bdb13256fa5bd8745ec9de23290))
+* Add More Specific Type Annotations for Row Dictionaries ([#1295](https://github.com/googleapis/python-bigquery/issues/1295)) ([eb49873](https://github.com/googleapis/python-bigquery/commit/eb49873176dee478617eb50472d44703abca53b5))
+
+## [3.3.6](https://github.com/googleapis/python-bigquery/compare/v3.3.4...v3.3.6) (2022-11-02)
+
+
+### Features
+
+* Reconfigure tqdm progress bar in %%bigquery magic ([#1355](https://github.com/googleapis/python-bigquery/issues/1355)) ([506f781](https://github.com/googleapis/python-bigquery/commit/506f781c2dd775193336ab9432f32148250ed81d))
+
+
+### Bug Fixes
+
+* Corrects test for non-existent attribute ([#1395](https://github.com/googleapis/python-bigquery/issues/1395)) ([a80f436](https://github.com/googleapis/python-bigquery/commit/a80f436f2e75a8fb680316f17a22eecb31a7101d))
+* **deps:** Allow protobuf 3.19.5 ([#1379](https://github.com/googleapis/python-bigquery/issues/1379)) ([3e4a074](https://github.com/googleapis/python-bigquery/commit/3e4a074a981eb2920c5f9a711c253565d4844858))
+* **deps:** Allow pyarrow < 11 ([#1393](https://github.com/googleapis/python-bigquery/issues/1393)) ([c898546](https://github.com/googleapis/python-bigquery/commit/c898546d3292f9ec1ba6120cd3f9e2805aa087bb))
+* **deps:** Require requests>=2.21.0 ([#1388](https://github.com/googleapis/python-bigquery/issues/1388)) ([e398336](https://github.com/googleapis/python-bigquery/commit/e39833673582e4a7a34103cfc45603932c9c33b3))
+* Refactor to adapt to changes to shapely dependency ([#1376](https://github.com/googleapis/python-bigquery/issues/1376)) ([2afd278](https://github.com/googleapis/python-bigquery/commit/2afd278febe1eb247adc6278ab59903962a5bb6c))
+
+
+### Documentation
+
+* Fix typos ([#1372](https://github.com/googleapis/python-bigquery/issues/1372)) ([21cc525](https://github.com/googleapis/python-bigquery/commit/21cc525a86a06acfe73e5c5a74ec5f0b61e410f2))
+
+
+### Miscellaneous Chores
+
+* release 3.3.6 ([4fce1d9](https://github.com/googleapis/python-bigquery/commit/4fce1d93b1763703b115a0480a2b97021786aff7))
+
+## [3.3.4](https://github.com/googleapis/python-bigquery/compare/v3.3.3...v3.3.4) (2022-09-29)
+
+
+### Bug Fixes
+
+* **deps:** Require protobuf >= 3.20.2 ([#1369](https://github.com/googleapis/python-bigquery/issues/1369)) ([f13383a](https://github.com/googleapis/python-bigquery/commit/f13383a22d7b1a0a714dc1b1210ad970146bd094))
+
+## [3.3.3](https://github.com/googleapis/python-bigquery/compare/v3.3.2...v3.3.3) (2022-09-28)
+
+
+### Bug Fixes
+
+* Refactors code to account for a tdqm code deprecation ([#1357](https://github.com/googleapis/python-bigquery/issues/1357)) ([1369a9d](https://github.com/googleapis/python-bigquery/commit/1369a9d937b85d6a2a6bf9a672c71620648b1e3e))
+* Validate opentelemetry span job attributes have values ([#1327](https://github.com/googleapis/python-bigquery/issues/1327)) ([8287af1](https://github.com/googleapis/python-bigquery/commit/8287af1299169546f847126f03ae04e48890139e))
+
+
+### Documentation
+
+* **samples:** uses function (create_job) more appropriate to the described sample intent ([5aeedaa](https://github.com/googleapis/python-bigquery/commit/5aeedaa2f4e6a0200d50521dfd90f39f9a24d0cc))
+
+## [3.3.2](https://github.com/googleapis/python-bigquery/compare/v3.3.1...v3.3.2) (2022-08-16)
+
+
+### Bug Fixes
+
+* **deps:** require proto-plus >= 1.22.0 ([1de7a52](https://github.com/googleapis/python-bigquery/commit/1de7a52cb85d4876e4aa87346aff5725c8294c4e))
+* **deps:** require protobuf >=3.19, < 5.0.0 ([#1311](https://github.com/googleapis/python-bigquery/issues/1311)) ([1de7a52](https://github.com/googleapis/python-bigquery/commit/1de7a52cb85d4876e4aa87346aff5725c8294c4e))
+
+## [3.3.1](https://github.com/googleapis/python-bigquery/compare/v3.3.0...v3.3.1) (2022-08-09)
+
+
+### Bug Fixes
+
+* **deps:** allow pyarrow < 10 ([#1304](https://github.com/googleapis/python-bigquery/issues/1304)) ([13616a9](https://github.com/googleapis/python-bigquery/commit/13616a910ba2e9b7bc3595847229b56e70c99f84))
+
+## [3.3.0](https://github.com/googleapis/python-bigquery/compare/v3.2.0...v3.3.0) (2022-07-25)
+
+
+### Features
+
+* add destination_expiration_time property to copy job ([#1277](https://github.com/googleapis/python-bigquery/issues/1277)) ([728b07c](https://github.com/googleapis/python-bigquery/commit/728b07c9177532bbbbfd1890f23e98950aea3f02))
+
+
+### Bug Fixes
+
+* require python 3.7+ ([#1284](https://github.com/googleapis/python-bigquery/issues/1284)) ([52d9f14](https://github.com/googleapis/python-bigquery/commit/52d9f14fb1d183f64a62fee1fddc0bf576a0a3e9))
+
+
+### Documentation
+
+* **samples:** add table snapshot sample ([#1274](https://github.com/googleapis/python-bigquery/issues/1274)) ([e760d1b](https://github.com/googleapis/python-bigquery/commit/e760d1bcb76561b4247adde2fd06ae0b686befb9))
+* **samples:** explicitly add bq to samples reqs, upgrade grpc to fix bug on m1 ([#1290](https://github.com/googleapis/python-bigquery/issues/1290)) ([9b7e3e4](https://github.com/googleapis/python-bigquery/commit/9b7e3e424cbd08af8b08c91e6397a3f1b7811064))
+
+## [3.2.0](https://github.com/googleapis/python-bigquery/compare/v3.1.0...v3.2.0) (2022-06-06)
+
+
+### Features
+
+* add support for table clones ([#1235](https://github.com/googleapis/python-bigquery/issues/1235)) ([176fb2a](https://github.com/googleapis/python-bigquery/commit/176fb2afc9888c6b0cd74d590065b3002bdbf533))
+
+
+### Bug Fixes
+
+* **deps:** proto-plus >= 1.15.0, <2.0.0dev ([ba58d3a](https://github.com/googleapis/python-bigquery/commit/ba58d3af80ca796be09c813529d3aadb79e0413c))
+* **deps:** require packaging >= 14.3, <22.0.0dev ([ba58d3a](https://github.com/googleapis/python-bigquery/commit/ba58d3af80ca796be09c813529d3aadb79e0413c))
+* **deps:** require protobuf>= 3.12.0, <4.0.0dev ([#1263](https://github.com/googleapis/python-bigquery/issues/1263)) ([ba58d3a](https://github.com/googleapis/python-bigquery/commit/ba58d3af80ca796be09c813529d3aadb79e0413c))
+
+
+### Documentation
+
+* fix changelog header to consistent size ([#1268](https://github.com/googleapis/python-bigquery/issues/1268)) ([d03e2a2](https://github.com/googleapis/python-bigquery/commit/d03e2a29ecfa5d2ccd5599f5c0faac55286e52e7))
+
+## [3.1.0](https://github.com/googleapis/python-bigquery/compare/v3.0.1...v3.1.0) (2022-05-09)
+
+
+### Features
+
+* add str method to table ([#1199](https://github.com/googleapis/python-bigquery/issues/1199)) ([8da4fa9](https://github.com/googleapis/python-bigquery/commit/8da4fa9e77bcfd2b68818b5d65b38ccc59899a01))
+* refactor AccessEntry to use _properties pattern ([#1125](https://github.com/googleapis/python-bigquery/issues/1125)) ([acd5612](https://github.com/googleapis/python-bigquery/commit/acd5612d2fc469633936dbc463ce4d70951e7fdd))
+* support using BIGQUERY_EMULATOR_HOST environment variable ([#1222](https://github.com/googleapis/python-bigquery/issues/1222)) ([39294b4](https://github.com/googleapis/python-bigquery/commit/39294b4950896b084573bedb4c5adc2b8d371eac))
+
+
+### Bug Fixes
+
+* **deps:** allow pyarrow v8 ([#1245](https://github.com/googleapis/python-bigquery/issues/1245)) ([d258690](https://github.com/googleapis/python-bigquery/commit/d258690dbf01108e1426f0e28d792c418a88bce0))
+* export bigquery.HivePartitioningOptions ([#1217](https://github.com/googleapis/python-bigquery/issues/1217)) ([8eb757b](https://github.com/googleapis/python-bigquery/commit/8eb757bcded7a3ef3b2264f47ec080c0a8fca579))
+* Skip geography_as_object conversion for REPEATED fields ([#1220](https://github.com/googleapis/python-bigquery/issues/1220)) ([4d3d6ec](https://github.com/googleapis/python-bigquery/commit/4d3d6ec9e667a781f8cb4a3aee0376c6179d5ce1))
+
+
+### Documentation
+
+* updated variable typo in comment in code sample ([#1239](https://github.com/googleapis/python-bigquery/issues/1239)) ([e420112](https://github.com/googleapis/python-bigquery/commit/e4201128bdb7f49cb732e12609448bbdbc122736))
+
+## [3.0.1](https://github.com/googleapis/python-bigquery/compare/v3.0.0...v3.0.1) (2022-03-30)
+
+
+### Bug Fixes
+
+* **deps:** raise exception when pandas is installed but db-dtypes is not ([#1191](https://github.com/googleapis/python-bigquery/issues/1191)) ([4333910](https://github.com/googleapis/python-bigquery/commit/433391097bae57dd12a93db18fc2bab573d8f128))
+* **deps:** restore dependency on python-dateutil ([#1187](https://github.com/googleapis/python-bigquery/issues/1187)) ([212d7ec](https://github.com/googleapis/python-bigquery/commit/212d7ec1f0740d04c26fb3ceffc9a4dd9eed6756))
+
+## [3.0.0](https://github.com/googleapis/python-bigquery/compare/v2.34.3...v3.0.0) (2022-03-29)
+
+
+### ⚠ BREAKING CHANGES
+
+* BigQuery Storage and pyarrow are required dependencies (#776)
+* use nullable `Int64` and `boolean` dtypes in `to_dataframe` (#786)
+* destination tables are no-longer removed by `create_job` (#891)
+* In `to_dataframe`, use `dbdate` and `dbtime` dtypes from db-dtypes package for BigQuery DATE and TIME columns (#972)
+* automatically convert out-of-bounds dates in `to_dataframe`, remove `date_as_object` argument (#972)
+* mark the package as type-checked (#1058)
+* default to DATETIME type when loading timezone-naive datetimes from Pandas (#1061)
+* remove out-of-date BigQuery ML protocol buffers (#1178)
+
+### Features
+
+* add `api_method` parameter to `Client.query` to select `INSERT` or `QUERY` API ([#967](https://github.com/googleapis/python-bigquery/issues/967)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* default to DATETIME type when loading timezone-naive datetimes from Pandas ([#1061](https://github.com/googleapis/python-bigquery/issues/1061)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* destination tables are no-longer removed by `create_job` ([#891](https://github.com/googleapis/python-bigquery/issues/891)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* In `to_dataframe`, use `dbdate` and `dbtime` dtypes from db-dtypes package for BigQuery DATE and TIME columns ([#972](https://github.com/googleapis/python-bigquery/issues/972)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* mark the package as type-checked ([#1058](https://github.com/googleapis/python-bigquery/issues/1058)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* use `StandardSqlField` class for `Model.feature_columns` and `Model.label_columns` ([#1117](https://github.com/googleapis/python-bigquery/issues/1117)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+
+
+### Bug Fixes
+
+* automatically convert out-of-bounds dates in `to_dataframe`, remove `date_as_object` argument ([#972](https://github.com/googleapis/python-bigquery/issues/972)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* improve type annotations for mypy validation ([#1081](https://github.com/googleapis/python-bigquery/issues/1081)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* remove out-of-date BigQuery ML protocol buffers ([#1178](https://github.com/googleapis/python-bigquery/issues/1178)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+* use nullable `Int64` and `boolean` dtypes in `to_dataframe` ([#786](https://github.com/googleapis/python-bigquery/issues/786)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+
+
+### Documentation
+
+* Add migration guide from version 2.x to 3.x ([#1027](https://github.com/googleapis/python-bigquery/issues/1027)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+
+
+### Dependencies
+
+* BigQuery Storage and pyarrow are required dependencies ([#776](https://github.com/googleapis/python-bigquery/issues/776)) ([76d88fb](https://github.com/googleapis/python-bigquery/commit/76d88fbb1316317a61fa1a63c101bc6f42f23af8))
+
+## [2.34.3](https://github.com/googleapis/python-bigquery/compare/v2.34.2...v2.34.3) (2022-03-29)
+
+
+### Bug Fixes
+
+* update content-type header ([#1171](https://github.com/googleapis/python-bigquery/issues/1171)) ([921b440](https://github.com/googleapis/python-bigquery/commit/921b440fdd151e88ee5b3e0d9fb90177877dc11a))
+
+## [2.34.2](https://github.com/googleapis/python-bigquery/compare/v2.34.1...v2.34.2) (2022-03-05)
+
+
+### Bug Fixes
+
+* **deps:** require google-api-core>=1.31.5, >=2.3.2 ([#1157](https://github.com/googleapis/python-bigquery/issues/1157)) ([0c15790](https://github.com/googleapis/python-bigquery/commit/0c15790720ff573a501cfe760dd74ee166e1a353))
+* **deps:** require proto-plus>=1.15.0 ([0c15790](https://github.com/googleapis/python-bigquery/commit/0c15790720ff573a501cfe760dd74ee166e1a353))
+
+## [2.34.1](https://github.com/googleapis/python-bigquery/compare/v2.34.0...v2.34.1) (2022-03-02)
+
+
+### Dependencies
+
+* add "extra" for IPython, exclude bad IPython release ([#1151](https://github.com/googleapis/python-bigquery/issues/1151)) ([0fbe12d](https://github.com/googleapis/python-bigquery/commit/0fbe12d1ababa45aa774b026a93c0af9e8f343d9))
+* allow pyarrow 7.0 ([#1112](https://github.com/googleapis/python-bigquery/issues/1112)) ([57f8ea9](https://github.com/googleapis/python-bigquery/commit/57f8ea95e152dc238e7a4941f96c54be53f7eaf3))
+
+## [2.34.0](https://github.com/googleapis/python-bigquery/compare/v2.33.0...v2.34.0) (2022-02-18)
+
+
+### Features
+
+* support BI Engine statistics in query job ([#1144](https://github.com/googleapis/python-bigquery/issues/1144)) ([7482549](https://github.com/googleapis/python-bigquery/commit/7482549cb42ed5302634ab4fb7b4efcd97b35c68))
+
+## [2.33.0](https://github.com/googleapis/python-bigquery/compare/v2.32.0...v2.33.0) (2022-02-16)
+
+
+### Features
+
+* add `--no_query_cache` option to `%%bigquery` magics to disable query cache ([#1141](https://github.com/googleapis/python-bigquery/issues/1141)) ([7dd30af](https://github.com/googleapis/python-bigquery/commit/7dd30af41b8a595b96176c964ba14aa41645ef0d))
+
+
+### Bug Fixes
+
+* return 403 when VPC-SC violation happens ([#1131](https://github.com/googleapis/python-bigquery/issues/1131)) ([f5daa9b](https://github.com/googleapis/python-bigquery/commit/f5daa9b41377a58cb3220bb2ab7c72adc6462196))
+
+
+### Documentation
+
+* reference BigQuery REST API defaults in `LoadJobConfig` descrip… ([#1132](https://github.com/googleapis/python-bigquery/issues/1132)) ([18d9580](https://github.com/googleapis/python-bigquery/commit/18d958062721d6be81e7bd7a5bd66f277344a864))
+* show common job properties in `get_job` and `cancel_job` samples ([#1137](https://github.com/googleapis/python-bigquery/issues/1137)) ([8edc10d](https://github.com/googleapis/python-bigquery/commit/8edc10d019bd96defebc4f92a47774901e9b956f))
+
+## [2.32.0](https://github.com/googleapis/python-bigquery/compare/v2.31.0...v2.32.0) (2022-01-12)
+
+
+### Features
+
+* support authorized dataset entity ([#1075](https://github.com/googleapis/python-bigquery/issues/1075)) ([c098cd0](https://github.com/googleapis/python-bigquery/commit/c098cd01c755633bfaba7193dd5c044a489a5b61))
+
+
+### Bug Fixes
+
+* remove query text from exception message, use `exception.debug_message` instead ([#1105](https://github.com/googleapis/python-bigquery/issues/1105)) ([e23114c](https://github.com/googleapis/python-bigquery/commit/e23114ce362e09ac72f733a640e53a561cc9ce69))
+
+## [2.31.0](https://www.github.com/googleapis/python-bigquery/compare/v2.30.1...v2.31.0) (2021-11-24)
+
+
+### Features
+
+* allow cell magic body to be a $variable ([#1053](https://www.github.com/googleapis/python-bigquery/issues/1053)) ([3a681e0](https://www.github.com/googleapis/python-bigquery/commit/3a681e046819df18118aa0b2b5733416d004c9b3))
+* promote `RowIterator.to_arrow_iterable` to public method ([#1073](https://www.github.com/googleapis/python-bigquery/issues/1073)) ([21cd710](https://www.github.com/googleapis/python-bigquery/commit/21cd71022d60c32104f8f90ee2ca445fbb43f7f3))
+
+
+### Bug Fixes
+
+* apply timeout to all resumable upload requests ([#1070](https://www.github.com/googleapis/python-bigquery/issues/1070)) ([3314dfb](https://www.github.com/googleapis/python-bigquery/commit/3314dfbed62488503dc41b11e403a672fcf71048))
+
+
+### Dependencies
+
+* support OpenTelemetry >= 1.1.0 ([#1050](https://www.github.com/googleapis/python-bigquery/issues/1050)) ([4616cd5](https://www.github.com/googleapis/python-bigquery/commit/4616cd58d3c6da641fb881ce99a87dcdedc20ba2))
+
+## [2.30.1](https://www.github.com/googleapis/python-bigquery/compare/v2.30.0...v2.30.1) (2021-11-04)
+
+
+### Bug Fixes
+
+* error if eval()-ing repr(SchemaField) ([#1046](https://www.github.com/googleapis/python-bigquery/issues/1046)) ([13ac860](https://www.github.com/googleapis/python-bigquery/commit/13ac860de689ea13b35932c67042bc35e388cb30))
+
+
+### Documentation
+
+* show gcloud command to authorize against sheets ([#1045](https://www.github.com/googleapis/python-bigquery/issues/1045)) ([20c9024](https://www.github.com/googleapis/python-bigquery/commit/20c9024b5760f7ae41301f4da54568496922cbe2))
+* use stable URL for pandas intersphinx links ([#1048](https://www.github.com/googleapis/python-bigquery/issues/1048)) ([73312f8](https://www.github.com/googleapis/python-bigquery/commit/73312f8f0f22ff9175a4f5f7db9bb438a496c164))
+
+## [2.30.0](https://www.github.com/googleapis/python-bigquery/compare/v2.29.0...v2.30.0) (2021-11-03)
+
+
+### Features
+
+* accept TableListItem where TableReference is accepted ([#1016](https://www.github.com/googleapis/python-bigquery/issues/1016)) ([fe16adc](https://www.github.com/googleapis/python-bigquery/commit/fe16adc86a170d0992c32091b349b036f8b43884))
+* support Python 3.10 ([#1043](https://www.github.com/googleapis/python-bigquery/issues/1043)) ([5bbb832](https://www.github.com/googleapis/python-bigquery/commit/5bbb832a83ebb66db4b5ee740cdfc53f4df8430b))
+
+
+### Documentation
+
+* add code samples for Jupyter/IPython magics ([#1013](https://www.github.com/googleapis/python-bigquery/issues/1013)) ([61141ee](https://www.github.com/googleapis/python-bigquery/commit/61141ee0634024ad261d1595c95cd14a896fb87e))
+* **samples:** add create external table with hive partitioning ([#1033](https://www.github.com/googleapis/python-bigquery/issues/1033)) ([d64f5b6](https://www.github.com/googleapis/python-bigquery/commit/d64f5b682854a2293244426316890df4ab1e079e))
+
+## [2.29.0](https://www.github.com/googleapis/python-bigquery/compare/v2.28.1...v2.29.0) (2021-10-27)
+
+
+### Features
+
+* add `QueryJob.schema` property for dry run queries ([#1014](https://www.github.com/googleapis/python-bigquery/issues/1014)) ([2937fa1](https://www.github.com/googleapis/python-bigquery/commit/2937fa1386898766c561579fd39d42958182d260))
+* add session and connection properties to QueryJobConfig ([#1024](https://www.github.com/googleapis/python-bigquery/issues/1024)) ([e4c94f4](https://www.github.com/googleapis/python-bigquery/commit/e4c94f446c27eb474f30b033c1b62d11bd0acd98))
+* add support for INTERVAL data type to `list_rows` ([#840](https://www.github.com/googleapis/python-bigquery/issues/840)) ([e37380a](https://www.github.com/googleapis/python-bigquery/commit/e37380a959cbd5bb9cbbf6807f0a8ea147e0a713))
+* allow queryJob.result() to be called on a dryRun ([#1015](https://www.github.com/googleapis/python-bigquery/issues/1015)) ([685f06a](https://www.github.com/googleapis/python-bigquery/commit/685f06a5e7b5df17a53e9eb340ff04ecd1e51d1d))
+
+
+### Documentation
+
+* document ScriptStatistics and other missing resource classes ([#1023](https://www.github.com/googleapis/python-bigquery/issues/1023)) ([6679109](https://www.github.com/googleapis/python-bigquery/commit/66791093c61f262ea063d2a7950fc643915ee693))
+* fix formatting of generated client docstrings ([#1009](https://www.github.com/googleapis/python-bigquery/issues/1009)) ([f7b0ee4](https://www.github.com/googleapis/python-bigquery/commit/f7b0ee45a664295ccc9f209eeeac122af8de3c80))
+
+
+### Dependencies
+
+* allow pyarrow 6.x ([#1031](https://www.github.com/googleapis/python-bigquery/issues/1031)) ([1c2de74](https://www.github.com/googleapis/python-bigquery/commit/1c2de74a55046a343bcf9474f67100a82fb05401))
+
+## [2.28.1](https://www.github.com/googleapis/python-bigquery/compare/v2.28.0...v2.28.1) (2021-10-07)
+
+
+### Bug Fixes
+
+* support ARRAY data type when loading from DataFrame with Parquet ([#980](https://www.github.com/googleapis/python-bigquery/issues/980)) ([1e59083](https://www.github.com/googleapis/python-bigquery/commit/1e5908302d36e15442013af6f46b1c20af28255e))
+
+## [2.28.0](https://www.github.com/googleapis/python-bigquery/compare/v2.27.1...v2.28.0) (2021-09-30)
+
+
+### Features
+
+* add `AvroOptions` to configure AVRO external data ([#994](https://www.github.com/googleapis/python-bigquery/issues/994)) ([1a9431d](https://www.github.com/googleapis/python-bigquery/commit/1a9431d9e02eeb99e4712b61c623f9cca80134a6))
+
+
+### Documentation
+
+* link to stable pandas docs ([#990](https://www.github.com/googleapis/python-bigquery/issues/990)) ([ea50e80](https://www.github.com/googleapis/python-bigquery/commit/ea50e8031fc035b3772a338bc00982de263cefad))
+
+## [2.27.1](https://www.github.com/googleapis/python-bigquery/compare/v2.27.0...v2.27.1) (2021-09-27)
+
+
+### Bug Fixes
+
+* remove py.typed since package fails mypy check ([#988](https://www.github.com/googleapis/python-bigquery/issues/988)) ([39030f2](https://www.github.com/googleapis/python-bigquery/commit/39030f26ce081cfacd456b84694c68e3f04ed48d))
+
+## [2.27.0](https://www.github.com/googleapis/python-bigquery/compare/v2.26.0...v2.27.0) (2021-09-24)
+
+
+### Features
+
+* Add py.typed for PEP 561 compliance ([#976](https://www.github.com/googleapis/python-bigquery/issues/976)) ([96e6bee](https://www.github.com/googleapis/python-bigquery/commit/96e6beef3c63b663b7e5879b1458f4dd1a47a5b5))
+* include key metadata in Job representation ([#964](https://www.github.com/googleapis/python-bigquery/issues/964)) ([acca1cb](https://www.github.com/googleapis/python-bigquery/commit/acca1cb7baaa3b00508246c994ade40314d421c3))
+
+
+### Bug Fixes
+
+* Arrow extension-type metadata was not set when calling the REST API or when there are no rows ([#946](https://www.github.com/googleapis/python-bigquery/issues/946)) ([864383b](https://www.github.com/googleapis/python-bigquery/commit/864383bc01636b3774f7da194587b8b7edd0383d))
+* disambiguate missing policy tags from explicitly unset policy tags ([#983](https://www.github.com/googleapis/python-bigquery/issues/983)) ([f83c00a](https://www.github.com/googleapis/python-bigquery/commit/f83c00acead70fc0ce9959eefb133a672d816277))
+* remove default timeout ([#974](https://www.github.com/googleapis/python-bigquery/issues/974)) ([1cef0d4](https://www.github.com/googleapis/python-bigquery/commit/1cef0d4664bf448168b26487a71795144b7f4d6b))
+
+
+### Documentation
+
+* simplify destination table sample with f-strings ([#966](https://www.github.com/googleapis/python-bigquery/issues/966)) ([ab6e76f](https://www.github.com/googleapis/python-bigquery/commit/ab6e76f9489262fd9c1876a1c4f93d7e139aa999))
+
+## [2.26.0](https://www.github.com/googleapis/python-bigquery/compare/v2.25.2...v2.26.0) (2021-09-01)
+
+
+### Features
+
+* set the X-Server-Timeout header when timeout is set ([#927](https://www.github.com/googleapis/python-bigquery/issues/927)) ([ba02f24](https://www.github.com/googleapis/python-bigquery/commit/ba02f248ba9c449c34859579a4011f4bfd2f4a93))
+
+
+### Bug Fixes
+
+* guard imports against unsupported pyarrow versions ([#934](https://www.github.com/googleapis/python-bigquery/issues/934)) ([b289076](https://www.github.com/googleapis/python-bigquery/commit/b28907693bbe889becc1b9c8963f0a7e1ee6c35a))
+
+## [2.25.2](https://www.github.com/googleapis/python-bigquery/compare/v2.25.1...v2.25.2) (2021-08-31)
+
+
+### Bug Fixes
+
+* error inserting DataFrame with REPEATED field ([#925](https://www.github.com/googleapis/python-bigquery/issues/925)) ([656d2fa](https://www.github.com/googleapis/python-bigquery/commit/656d2fa6f870573a21235c83463752a2d084caba))
+* underscores weren't allowed in struct field names when passing parameters to the DB API ([#930](https://www.github.com/googleapis/python-bigquery/issues/930)) ([fcb0bc6](https://www.github.com/googleapis/python-bigquery/commit/fcb0bc68c972c2c98bb8542f54e9228308177ecb))
+
+
+### Documentation
+
+* update docstring for bigquery_create_routine sample ([#883](https://www.github.com/googleapis/python-bigquery/issues/883)) ([#917](https://www.github.com/googleapis/python-bigquery/issues/917)) ([e2d12b7](https://www.github.com/googleapis/python-bigquery/commit/e2d12b795ef2dc51b0ee36f1b3000edb1e64ce05))
+
+## [2.25.1](https://www.github.com/googleapis/python-bigquery/compare/v2.25.0...v2.25.1) (2021-08-25)
+
+
+### Bug Fixes
+
+* populate default `timeout` and retry after client-side timeout ([#896](https://www.github.com/googleapis/python-bigquery/issues/896)) ([b508809](https://www.github.com/googleapis/python-bigquery/commit/b508809c0f887575274309a463e763c56ddd017d))
+* use REST API in cell magic when requested ([#892](https://www.github.com/googleapis/python-bigquery/issues/892)) ([1cb3e55](https://www.github.com/googleapis/python-bigquery/commit/1cb3e55253e824e3a1da5201f6ec09065fb6b627))
+
+## [2.25.0](https://www.github.com/googleapis/python-bigquery/compare/v2.24.1...v2.25.0) (2021-08-24)
+
+
+### Features
+
+* Support using GeoPandas for GEOGRAPHY columns ([#848](https://www.github.com/googleapis/python-bigquery/issues/848)) ([16f65e6](https://www.github.com/googleapis/python-bigquery/commit/16f65e6ae15979217ceea6c6d398c9057a363a13))
+
+## [2.24.1](https://www.github.com/googleapis/python-bigquery/compare/v2.24.0...v2.24.1) (2021-08-13)
+
+
+### Bug Fixes
+
+* remove pytz dependency and require pyarrow>=3.0.0 ([#875](https://www.github.com/googleapis/python-bigquery/issues/875)) ([2cb3563](https://www.github.com/googleapis/python-bigquery/commit/2cb3563ee863edef7eaf5d04d739bcfe7bc6438e))
+
+## [2.24.0](https://www.github.com/googleapis/python-bigquery/compare/v2.23.3...v2.24.0) (2021-08-11)
+
+
+### Features
+
+* add support for transaction statistics ([#849](https://www.github.com/googleapis/python-bigquery/issues/849)) ([7f7b1a8](https://www.github.com/googleapis/python-bigquery/commit/7f7b1a808d50558772a0deb534ca654da65d629e))
+* make the same `Table*` instances equal to each other ([#867](https://www.github.com/googleapis/python-bigquery/issues/867)) ([c1a3d44](https://www.github.com/googleapis/python-bigquery/commit/c1a3d4435739a21d25aa154145e36d3a7c42eeb6))
+* retry failed query jobs in `result()` ([#837](https://www.github.com/googleapis/python-bigquery/issues/837)) ([519d99c](https://www.github.com/googleapis/python-bigquery/commit/519d99c20e7d1101f76981f3de036fdf3c7a4ecc))
+* support `ScalarQueryParameterType` for `type_` argument in `ScalarQueryParameter` constructor ([#850](https://www.github.com/googleapis/python-bigquery/issues/850)) ([93d15e2](https://www.github.com/googleapis/python-bigquery/commit/93d15e2e5405c2cc6d158c4e5737361344193dbc))
+
+
+### Bug Fixes
+
+* make unicode characters working well in load_table_from_json ([#865](https://www.github.com/googleapis/python-bigquery/issues/865)) ([ad9c802](https://www.github.com/googleapis/python-bigquery/commit/ad9c8026f0e667f13dd754279f9dc40d06f4fa78))
+
+## [2.23.3](https://www.github.com/googleapis/python-bigquery/compare/v2.23.2...v2.23.3) (2021-08-06)
+
+
+### Bug Fixes
+
+* increase default retry deadline to 10 minutes ([#859](https://www.github.com/googleapis/python-bigquery/issues/859)) ([30770fd](https://www.github.com/googleapis/python-bigquery/commit/30770fd0575fbd5aaa70c14196a4cc54627aecd2))
+
+## [2.23.2](https://www.github.com/googleapis/python-bigquery/compare/v2.23.1...v2.23.2) (2021-07-29)
+
+
+### Dependencies
+
+* expand pyarrow pins to support 5.x releases ([#833](https://www.github.com/googleapis/python-bigquery/issues/833)) ([80e3a61](https://www.github.com/googleapis/python-bigquery/commit/80e3a61c60419fb19b70b664c6415cd01ba82f5b))
+
+## [2.23.1](https://www.github.com/googleapis/python-bigquery/compare/v2.23.0...v2.23.1) (2021-07-28)
+
+
+### Bug Fixes
+
+* `insert_rows()` accepts float column values as strings again ([#824](https://www.github.com/googleapis/python-bigquery/issues/824)) ([d9378af](https://www.github.com/googleapis/python-bigquery/commit/d9378af13add879118a1d004529b811f72c325d6))
+
+## [2.23.0](https://www.github.com/googleapis/python-bigquery/compare/v2.22.1...v2.23.0) (2021-07-27)
+
+
+### Features
+
+* Update proto definitions for bigquery/v2 to support new proto fields for BQML. ([#817](https://www.github.com/googleapis/python-bigquery/issues/817)) ([fe7a902](https://www.github.com/googleapis/python-bigquery/commit/fe7a902e8b3e723ace335c9b499aea6d180a025b))
+
+
+### Bug Fixes
+
+* no longer raise a warning in `to_dataframe` if `max_results` set ([#815](https://www.github.com/googleapis/python-bigquery/issues/815)) ([3c1be14](https://www.github.com/googleapis/python-bigquery/commit/3c1be149e76b1d1d8879fdcf0924ddb1c1839e94))
+* retry ChunkedEncodingError by default ([#802](https://www.github.com/googleapis/python-bigquery/issues/802)) ([419d36d](https://www.github.com/googleapis/python-bigquery/commit/419d36d6b1887041e5795dbc8fc808890e91ab11))
+
+
+### Documentation
+
+* correct docs for `LoadJobConfig.destination_table_description` ([#810](https://www.github.com/googleapis/python-bigquery/issues/810)) ([da87fd9](https://www.github.com/googleapis/python-bigquery/commit/da87fd921cc8067b187d7985c978aac8eb58d107))
+
+## [2.22.1](https://www.github.com/googleapis/python-bigquery/compare/v2.22.0...v2.22.1) (2021-07-22)
+
+
+### Bug Fixes
+
+* issue a warning if buggy pyarrow is detected ([#787](https://www.github.com/googleapis/python-bigquery/issues/787)) ([e403721](https://www.github.com/googleapis/python-bigquery/commit/e403721af1373eb1f1a1c7be5b2182e3819ed1f9))
+* use a larger chunk size when loading data ([#799](https://www.github.com/googleapis/python-bigquery/issues/799)) ([b804373](https://www.github.com/googleapis/python-bigquery/commit/b804373277c1c1baa3370ebfb4783503b7ff360f))
+
+
+### Documentation
+
+* add Samples section to CONTRIBUTING.rst ([#785](https://www.github.com/googleapis/python-bigquery/issues/785)) ([e587029](https://www.github.com/googleapis/python-bigquery/commit/e58702967d572e83b4c774278818302594a511b7))
+* add sample to delete job metadata ([#798](https://www.github.com/googleapis/python-bigquery/issues/798)) ([be9b242](https://www.github.com/googleapis/python-bigquery/commit/be9b242f2180f5b795dfb3a168a97af1682999fd))
+
+## [2.22.0](https://www.github.com/googleapis/python-bigquery/compare/v2.21.0...v2.22.0) (2021-07-19)
+
+
+### Features
+
+* add `LoadJobConfig.projection_fields` to select DATASTORE_BACKUP fields ([#736](https://www.github.com/googleapis/python-bigquery/issues/736)) ([c45a738](https://www.github.com/googleapis/python-bigquery/commit/c45a7380871af3dfbd3c45524cb606c60e1a01d1))
+* add standard sql table type, update scalar type enums ([#777](https://www.github.com/googleapis/python-bigquery/issues/777)) ([b8b5433](https://www.github.com/googleapis/python-bigquery/commit/b8b5433898ec881f8da1303614780a660d94733a))
+* add support for more detailed DML stats ([#758](https://www.github.com/googleapis/python-bigquery/issues/758)) ([36fe86f](https://www.github.com/googleapis/python-bigquery/commit/36fe86f41c1a8f46167284f752a6d6bbf886a04b))
+* add support for user defined Table View Functions ([#724](https://www.github.com/googleapis/python-bigquery/issues/724)) ([8c7b839](https://www.github.com/googleapis/python-bigquery/commit/8c7b839a6ac1491c1c3b6b0e8755f4b70ed72ee3))
+
+
+### Bug Fixes
+
+* avoid possible job already exists error ([#751](https://www.github.com/googleapis/python-bigquery/issues/751)) ([45b9308](https://www.github.com/googleapis/python-bigquery/commit/45b93089f5398740413104285cc8acfd5ebc9c08))
+
+
+### Dependencies
+
+* allow 2.x versions of `google-api-core`, `google-cloud-core`, `google-resumable-media` ([#770](https://www.github.com/googleapis/python-bigquery/issues/770)) ([87a09fa](https://www.github.com/googleapis/python-bigquery/commit/87a09fa3f2a9ab35728a1ac925f9d5f2e6616c65))
+
+
+### Documentation
+
+* add loading data from Firestore backup sample ([#737](https://www.github.com/googleapis/python-bigquery/issues/737)) ([22fd848](https://www.github.com/googleapis/python-bigquery/commit/22fd848cae4af1148040e1faa31dd15a4d674687))
+
+## [2.21.0](https://www.github.com/googleapis/python-bigquery/compare/v2.20.0...v2.21.0) (2021-07-12)
+
+
+### Features
+
+* Add max_results parameter to some of the `QueryJob` methods. ([#698](https://www.github.com/googleapis/python-bigquery/issues/698)) ([2a9618f](https://www.github.com/googleapis/python-bigquery/commit/2a9618f4daaa4a014161e1a2f7376844eec9e8da))
+* Add support for decimal target types. ([#735](https://www.github.com/googleapis/python-bigquery/issues/735)) ([7d2d3e9](https://www.github.com/googleapis/python-bigquery/commit/7d2d3e906a9eb161911a198fb925ad79de5df934))
+* Add support for table snapshots. ([#740](https://www.github.com/googleapis/python-bigquery/issues/740)) ([ba86b2a](https://www.github.com/googleapis/python-bigquery/commit/ba86b2a6300ae5a9f3c803beeb42bda4c522e34c))
+* Enable unsetting policy tags on schema fields. ([#703](https://www.github.com/googleapis/python-bigquery/issues/703)) ([18bb443](https://www.github.com/googleapis/python-bigquery/commit/18bb443c7acd0a75dcb57d9aebe38b2d734ff8c7))
+* Make it easier to disable best-effort deduplication with streaming inserts. ([#734](https://www.github.com/googleapis/python-bigquery/issues/734)) ([1246da8](https://www.github.com/googleapis/python-bigquery/commit/1246da86b78b03ca1aa2c45ec71649e294cfb2f1))
+* Support passing struct data to the DB API. ([#718](https://www.github.com/googleapis/python-bigquery/issues/718)) ([38b3ef9](https://www.github.com/googleapis/python-bigquery/commit/38b3ef96c3dedc139b84f0ff06885141ae7ce78c))
+
+
+### Bug Fixes
+
+* Inserting non-finite floats with `insert_rows()`. ([#728](https://www.github.com/googleapis/python-bigquery/issues/728)) ([d047419](https://www.github.com/googleapis/python-bigquery/commit/d047419879e807e123296da2eee89a5253050166))
+* Use `pandas` function to check for `NaN`. ([#750](https://www.github.com/googleapis/python-bigquery/issues/750)) ([67bc5fb](https://www.github.com/googleapis/python-bigquery/commit/67bc5fbd306be7cdffd216f3791d4024acfa95b3))
+
+
+### Documentation
+
+* Add docs for all enums in module. ([#745](https://www.github.com/googleapis/python-bigquery/issues/745)) ([145944f](https://www.github.com/googleapis/python-bigquery/commit/145944f24fedc4d739687399a8309f9d51d43dfd))
+* Omit mention of Python 2.7 in `CONTRIBUTING.rst`. ([#706](https://www.github.com/googleapis/python-bigquery/issues/706)) ([27d6839](https://www.github.com/googleapis/python-bigquery/commit/27d6839ee8a40909e4199cfa0da8b6b64705b2e9))
+
+## [2.20.0](https://www.github.com/googleapis/python-bigquery/compare/v2.19.0...v2.20.0) (2021-06-07)
+
+
+### Features
+
+* support script options in query job config ([#690](https://www.github.com/googleapis/python-bigquery/issues/690)) ([1259e16](https://www.github.com/googleapis/python-bigquery/commit/1259e16394784315368e8be959c1ac097782b62e))
+
+## [2.19.0](https://www.github.com/googleapis/python-bigquery/compare/v2.18.0...v2.19.0) (2021-06-06)
+
+
+### Features
+
+* list_tables, list_projects, list_datasets, list_models, list_routines, and list_jobs now accept a page_size parameter to control page size ([#686](https://www.github.com/googleapis/python-bigquery/issues/686)) ([1f1c4b7](https://www.github.com/googleapis/python-bigquery/commit/1f1c4b7ba4390fc4c5c8186bc22b83b45304ca06))
+
+## [2.18.0](https://www.github.com/googleapis/python-bigquery/compare/v2.17.0...v2.18.0) (2021-06-02)
+
+
+### Features
+
+* add support for Parquet options ([#679](https://www.github.com/googleapis/python-bigquery/issues/679)) ([d792ce0](https://www.github.com/googleapis/python-bigquery/commit/d792ce09388a6ee3706777915dd2818d4c854f79))
+
+## [2.17.0](https://www.github.com/googleapis/python-bigquery/compare/v2.16.1...v2.17.0) (2021-05-21)
+
+
+### Features
+
+* detect obsolete BQ Storage extra at runtime ([#666](https://www.github.com/googleapis/python-bigquery/issues/666)) ([bd7dbda](https://www.github.com/googleapis/python-bigquery/commit/bd7dbdae5c972b16bafc53c67911eeaa3255a880))
+* Support parameterized NUMERIC, BIGNUMERIC, STRING, and BYTES types ([#673](https://www.github.com/googleapis/python-bigquery/issues/673)) ([45421e7](https://www.github.com/googleapis/python-bigquery/commit/45421e73bfcddb244822e6a5cd43be6bd1ca2256))
+
+
+### Bug Fixes
+
+* **tests:** invalid path to strptime() ([#672](https://www.github.com/googleapis/python-bigquery/issues/672)) ([591cdd8](https://www.github.com/googleapis/python-bigquery/commit/591cdd851bb1321b048a05a378a0ef48d3ade462))
+
+## [2.16.1](https://www.github.com/googleapis/python-bigquery/compare/v2.16.0...v2.16.1) (2021-05-12)
+
+
+### Bug Fixes
+
+* executemany rowcount only reflected the last execution ([#660](https://www.github.com/googleapis/python-bigquery/issues/660)) ([aeadc8c](https://www.github.com/googleapis/python-bigquery/commit/aeadc8c2d614bb9f0883ec901fca48930f3aaf19))
+
+## [2.16.0](https://www.github.com/googleapis/python-bigquery/compare/v2.15.0...v2.16.0) (2021-05-05)
+
+
+### Features
+
+* add with_name() to ScalarQueryParameterType ([#644](https://www.github.com/googleapis/python-bigquery/issues/644)) ([6cc6876](https://www.github.com/googleapis/python-bigquery/commit/6cc6876eb0e5bf49fdc047256a945dcf1b289576))
+
+
+### Dependencies
+
+* expand supported pyarrow versions to v4 ([#643](https://www.github.com/googleapis/python-bigquery/issues/643)) ([9e1d386](https://www.github.com/googleapis/python-bigquery/commit/9e1d3869c2024fe7a8af57ff59838d904ca5db03))
+
+## [2.15.0](https://www.github.com/googleapis/python-bigquery/compare/v2.14.0...v2.15.0) (2021-04-29)
+
+
+### Features
+
+* Extended DB API parameter syntax to optionally provide parameter types ([#626](https://www.github.com/googleapis/python-bigquery/issues/626)) ([8bcf397](https://www.github.com/googleapis/python-bigquery/commit/8bcf397fbe2527e06317741875a059b109cfcd9c))
+
+
+### Bug Fixes
+
+* add DECIMAL and BIGDECIMAL as aliases for NUMERIC and BIGNUMERIC ([#638](https://www.github.com/googleapis/python-bigquery/issues/638)) ([aa59023](https://www.github.com/googleapis/python-bigquery/commit/aa59023317b1c63720fb717b3544f755652da58d))
+* The DB API Binary function accepts bytes data ([#630](https://www.github.com/googleapis/python-bigquery/issues/630)) ([4396e70](https://www.github.com/googleapis/python-bigquery/commit/4396e70771af6889d3242c37c5ff2e80241023a2))
+
+## [2.14.0](https://www.github.com/googleapis/python-bigquery/compare/v2.13.1...v2.14.0) (2021-04-26)
+
+
+### Features
+
+* accept DatasetListItem where DatasetReference is accepted ([#597](https://www.github.com/googleapis/python-bigquery/issues/597)) ([c8b5581](https://www.github.com/googleapis/python-bigquery/commit/c8b5581ea3c94005d69755c4a3b5a0d8900f3fe2))
+* accept job object as argument to `get_job` and `cancel_job` ([#617](https://www.github.com/googleapis/python-bigquery/issues/617)) ([f75dcdf](https://www.github.com/googleapis/python-bigquery/commit/f75dcdf3943b87daba60011c9a3b42e34ff81910))
+* add `Client.delete_job_metadata` method to remove job metadata ([#610](https://www.github.com/googleapis/python-bigquery/issues/610)) ([0abb566](https://www.github.com/googleapis/python-bigquery/commit/0abb56669c097c59fbffce007c702e7a55f2d9c1))
+* add `max_queue_size` argument to `RowIterator.to_dataframe_iterable` ([#575](https://www.github.com/googleapis/python-bigquery/issues/575)) ([f95f415](https://www.github.com/googleapis/python-bigquery/commit/f95f415d3441b3928f6cc705cb8a75603d790fd6))
+* add type hints for public methods ([#613](https://www.github.com/googleapis/python-bigquery/issues/613)) ([f8d4aaa](https://www.github.com/googleapis/python-bigquery/commit/f8d4aaa335a0eef915e73596fc9b43b11d11be9f))
+* DB API cursors are now iterable ([#618](https://www.github.com/googleapis/python-bigquery/issues/618)) ([e0b373d](https://www.github.com/googleapis/python-bigquery/commit/e0b373d0e721a70656ed8faceb7f5c70f642d144))
+* retry google.auth TransportError by default ([#624](https://www.github.com/googleapis/python-bigquery/issues/624)) ([34ecc3f](https://www.github.com/googleapis/python-bigquery/commit/34ecc3f1ca0ff073330c0c605673d89b43af7ed9))
+* use pyarrow stream compression, if available ([#593](https://www.github.com/googleapis/python-bigquery/issues/593)) ([dde9dc5](https://www.github.com/googleapis/python-bigquery/commit/dde9dc5114c2311fb76fafc5b222fff561e8abf1))
+
+
+### Bug Fixes
+
+* consistent percents handling in DB API query ([#619](https://www.github.com/googleapis/python-bigquery/issues/619)) ([6502a60](https://www.github.com/googleapis/python-bigquery/commit/6502a602337ae562652a20b20270949f2c9d5073))
+* missing license headers in new test files ([#604](https://www.github.com/googleapis/python-bigquery/issues/604)) ([df48cc5](https://www.github.com/googleapis/python-bigquery/commit/df48cc5a0be99ad39d5835652d1b7422209afc5d))
+* unsetting clustering fields on Table is now possible ([#622](https://www.github.com/googleapis/python-bigquery/issues/622)) ([33a871f](https://www.github.com/googleapis/python-bigquery/commit/33a871f06329f9bf5a6a92fab9ead65bf2bee75d))
+
+
+### Documentation
+
+* add sample to run DML query ([#591](https://www.github.com/googleapis/python-bigquery/issues/591)) ([ff2ec3a](https://www.github.com/googleapis/python-bigquery/commit/ff2ec3abe418a443cd07751c08e654f94e8b3155))
+* update the description of the return value of `_QueryResults.rows()` ([#594](https://www.github.com/googleapis/python-bigquery/issues/594)) ([8f4c0b8](https://www.github.com/googleapis/python-bigquery/commit/8f4c0b84dac3840532d7865247b8ad94b625b897))
+
+## [2.13.1](https://www.github.com/googleapis/python-bigquery/compare/v2.13.0...v2.13.1) (2021-03-23)
+
+
+### Bug Fixes
+
+* add ConnectionError to default retry ([#571](https://www.github.com/googleapis/python-bigquery/issues/571)) ([a3edb8b](https://www.github.com/googleapis/python-bigquery/commit/a3edb8b921e029e2c03d33302d408ad5d4e9d4ad))
+
+## [2.13.0](https://www.github.com/googleapis/python-bigquery/compare/v2.12.0...v2.13.0) (2021-03-22)
+
+
+### Features
+
+* add `ExternalConfig.connection_id` property to connect to external sources ([#560](https://www.github.com/googleapis/python-bigquery/issues/560)) ([d93986e](https://www.github.com/googleapis/python-bigquery/commit/d93986e0259952257f2571f60719b52099c29c0c))
+
+
+### Bug Fixes
+
+* avoid overly strict dependency on pyarrow 3.x ([#564](https://www.github.com/googleapis/python-bigquery/issues/564)) ([97ee6ec](https://www.github.com/googleapis/python-bigquery/commit/97ee6ec6cd4bc9f833cd506dc6d244d103654cfd))
+* avoid policy tags 403 error in `load_table_from_dataframe` ([#557](https://www.github.com/googleapis/python-bigquery/issues/557)) ([84e646e](https://www.github.com/googleapis/python-bigquery/commit/84e646e6b7087a1626e56ad51eeb130f4ddfa2fb))
+
+## [2.12.0](https://www.github.com/googleapis/python-bigquery/compare/v2.11.0...v2.12.0) (2021-03-16)
+
+
+### Features
+
+* make QueryJob.done() method more performant ([#544](https://www.github.com/googleapis/python-bigquery/issues/544)) ([a3ab9ef](https://www.github.com/googleapis/python-bigquery/commit/a3ab9efdd0758829845cfcb6ca0ac1f03ab44f64))
+
+
+### Bug Fixes
+
+* remove DB-API dependency on pyarrow with decimal query parameters ([#551](https://www.github.com/googleapis/python-bigquery/issues/551)) ([1b946ba](https://www.github.com/googleapis/python-bigquery/commit/1b946ba23ee7df86114c6acb338ec34e6c92af6d))
+
+## [2.11.0](https://www.github.com/googleapis/python-bigquery/compare/v2.10.0...v2.11.0) (2021-03-09)
+
+
+### Features
+
+* add context manager support to client ([#540](https://www.github.com/googleapis/python-bigquery/issues/540)) ([d5c7e11](https://www.github.com/googleapis/python-bigquery/commit/d5c7e11a1dc2a149d74294bfadbae62d70573e69))
+
+## [2.10.0](https://www.github.com/googleapis/python-bigquery/compare/v2.9.0...v2.10.0) (2021-02-25)
+
+
+### Features
+
+* add BIGNUMERIC support ([#527](https://www.github.com/googleapis/python-bigquery/issues/527)) ([cc3394f](https://www.github.com/googleapis/python-bigquery/commit/cc3394f80934419eb00c2029bb81c92a696e7d88))
+
+
+### Bug Fixes
+
+* error using empty array of structs parameter ([#474](https://www.github.com/googleapis/python-bigquery/issues/474)) ([c1d15f4](https://www.github.com/googleapis/python-bigquery/commit/c1d15f4e5da4b7e10c00afffd59a5c7f3ded027a))
+* QueryJob.exception() *returns* the errors, not raises them ([#467](https://www.github.com/googleapis/python-bigquery/issues/467)) ([d763279](https://www.github.com/googleapis/python-bigquery/commit/d7632799769248b09a8558ba18f5025ebdd9675a))
+
+
+### Documentation
+
+* **bigquery:** Add alternative approach to setting credentials ([#517](https://www.github.com/googleapis/python-bigquery/issues/517)) ([60fbf28](https://www.github.com/googleapis/python-bigquery/commit/60fbf287b0d34d5db2e61cce7a5b42735ed43d0e))
+* explain retry behavior for DONE jobs ([#532](https://www.github.com/googleapis/python-bigquery/issues/532)) ([696c443](https://www.github.com/googleapis/python-bigquery/commit/696c443f0a6740be0767e12b706a7771bc1460c3))
+
+## [2.9.0](https://www.github.com/googleapis/python-bigquery/compare/v2.8.0...v2.9.0) (2021-02-18)
+
+
+### Features
+
+* add determinism level for javascript UDFs ([#522](https://www.github.com/googleapis/python-bigquery/issues/522)) ([edd3328](https://www.github.com/googleapis/python-bigquery/commit/edd3328fffa3040b2cd3a3c668c90a0e43e4c94c))
+* expose reservation usage stats on jobs ([#524](https://www.github.com/googleapis/python-bigquery/issues/524)) ([4ffb4e0](https://www.github.com/googleapis/python-bigquery/commit/4ffb4e067abdaa54dad6eff49a7fbdb0fa358637))
+
+
+### Documentation
+
+* clarify `%%bigquery`` magics and fix broken link ([#508](https://www.github.com/googleapis/python-bigquery/issues/508)) ([eedf93b](https://www.github.com/googleapis/python-bigquery/commit/eedf93b6636c5ff1bd810c6038cfeaea8ccb64d8))
+* update python contributing guide ([#514](https://www.github.com/googleapis/python-bigquery/issues/514)) ([01e851d](https://www.github.com/googleapis/python-bigquery/commit/01e851d00fc17a780375580776753d78f6d74174))
+
+## [2.8.0](https://www.github.com/googleapis/python-bigquery/compare/v2.7.0...v2.8.0) (2021-02-08)
+
+
+### Features
+
+* Add mTLS support to client. ([#492](https://www.github.com/googleapis/python-bigquery/issues/492)) ([1823cad](https://www.github.com/googleapis/python-bigquery/commit/1823cadee3acf95c516d0479400e4175349ea199))
+
+
+### Bug Fixes
+
+* Don't try to close closed cursors. ([#498](https://www.github.com/googleapis/python-bigquery/issues/498)) ([bf44e7b](https://www.github.com/googleapis/python-bigquery/commit/bf44e7b67d2de41c13053a4550484b9ea049db3e))
+
+## [2.7.0](https://www.github.com/googleapis/python-bigquery/compare/v2.6.2...v2.7.0) (2021-01-27)
+
+
+### Bug Fixes
+
+* invalid conversion of timezone-aware datetime values to JSON ([#480](https://www.github.com/googleapis/python-bigquery/issues/480)) ([61b4385](https://www.github.com/googleapis/python-bigquery/commit/61b438523d305ce66a68fde7cb49e9abbf0a8d1d))
+* reading the labels attribute on Job instances ([#471](https://www.github.com/googleapis/python-bigquery/issues/471)) ([80944f0](https://www.github.com/googleapis/python-bigquery/commit/80944f080bcc4fda870a6daf1d884de616d39ae7))
+* use explicitly given project over the client's default project for load jobs ([#482](https://www.github.com/googleapis/python-bigquery/issues/482)) ([530e1e8](https://www.github.com/googleapis/python-bigquery/commit/530e1e8d8fe8939e914a78ff1b220907c1b87af7))
+
+
+### Dependencies
+
+* declare support for Python 3.9 ([#488](https://www.github.com/googleapis/python-bigquery/issues/488)) ([55daa7d](https://www.github.com/googleapis/python-bigquery/commit/55daa7da9857a8a2fb14a80a4efa3f466386a85f))
+
+## [2.6.2](https://www.github.com/googleapis/python-bigquery/compare/v2.6.1...v2.6.2) (2021-01-11)
+
+
+### Bug Fixes
+
+* add minimum timeout to getQueryResults API requests ([#444](https://www.github.com/googleapis/python-bigquery/issues/444)) ([015a73e](https://www.github.com/googleapis/python-bigquery/commit/015a73e1839e3427408ef6e0f879717d9ddbdb61))
+* use debug logging level for OpenTelemetry message ([#442](https://www.github.com/googleapis/python-bigquery/issues/442)) ([7ea6b7c](https://www.github.com/googleapis/python-bigquery/commit/7ea6b7c2469d2415192cfdacc379e38e49d24775))
+
+
+### Documentation
+
+* add GEOGRAPHY data type code samples ([#428](https://www.github.com/googleapis/python-bigquery/issues/428)) ([dbc68b3](https://www.github.com/googleapis/python-bigquery/commit/dbc68b3d1f325f80d24a2da5f028b0f653fb0317))
+* fix Shapely import in GEOGRAPHY sample ([#431](https://www.github.com/googleapis/python-bigquery/issues/431)) ([96a1c5b](https://www.github.com/googleapis/python-bigquery/commit/96a1c5b3c72855ba6ae8c88dfd0cdb02d2faf909))
+* move and refresh view samples ([#420](https://www.github.com/googleapis/python-bigquery/issues/420)) ([079b6a1](https://www.github.com/googleapis/python-bigquery/commit/079b6a162f6929bf801366d92f8daeb3318426c4))
+
+## [2.6.1](https://www.github.com/googleapis/python-bigquery/compare/v2.6.0...v2.6.1) (2020-12-09)
+
+
+### Bug Fixes
+
+* handle null values in array query parameters ([#426](https://www.github.com/googleapis/python-bigquery/issues/426)) ([78fde4a](https://www.github.com/googleapis/python-bigquery/commit/78fde4a92e61a89d0b490b93acc90fff9635d1bf))
+
+
+### Documentation
+
+* add examples of `fields` argument to update methods ([#418](https://www.github.com/googleapis/python-bigquery/issues/418)) ([8c7e02b](https://www.github.com/googleapis/python-bigquery/commit/8c7e02b0de2c92ee965414e7c430eb57d1877326))
+
+## [2.6.0](https://www.github.com/googleapis/python-bigquery/compare/v2.5.0...v2.6.0) (2020-12-07)
+
+
+### Features
+
+* add support for materialized views ([#408](https://www.github.com/googleapis/python-bigquery/issues/408)) ([57ffc66](https://www.github.com/googleapis/python-bigquery/commit/57ffc665319331e0a00583d5d652fd14a510cf2a)), closes [#407](https://www.github.com/googleapis/python-bigquery/issues/407)
+* convert `BIGNUMERIC` values to decimal objects ([#414](https://www.github.com/googleapis/python-bigquery/issues/414)) ([d472d2d](https://www.github.com/googleapis/python-bigquery/commit/d472d2d2b33e40b954652d31476dea8c90e6a2dc)), closes [#367](https://www.github.com/googleapis/python-bigquery/issues/367)
+* support CSV format in `load_table_from_dataframe` pandas connector ([#399](https://www.github.com/googleapis/python-bigquery/issues/399)) ([0046742](https://www.github.com/googleapis/python-bigquery/commit/0046742abdd2b5eab3c3e935316f91e7eef44d44))
+
+
+### Bug Fixes
+
+* preserve timestamp microsecond precision with rows from REST API ([#402](https://www.github.com/googleapis/python-bigquery/issues/402)) ([04510a7](https://www.github.com/googleapis/python-bigquery/commit/04510a7dc7570466550bbdf500d7020bef2af44d))
+
+
+### Documentation
+
+* update intersphinx links ([#404](https://www.github.com/googleapis/python-bigquery/issues/404)) ([a9d8ae8](https://www.github.com/googleapis/python-bigquery/commit/a9d8ae8a920dec655b77dca9d9128e569f1d07a7))
+
+## [2.5.0](https://www.github.com/googleapis/python-bigquery/compare/v2.4.0...v2.5.0) (2020-12-02)
+
+
+### Features
+
+* add `TableReference.__str__` to get table ID in standard SQL ([#405](https://www.github.com/googleapis/python-bigquery/issues/405)) ([53dff2a](https://www.github.com/googleapis/python-bigquery/commit/53dff2ad3889af04369a22437e6ab9b92c5755b6)), closes [#354](https://www.github.com/googleapis/python-bigquery/issues/354)
+* add progress bar for magics ([#396](https://www.github.com/googleapis/python-bigquery/issues/396)) ([04d0273](https://www.github.com/googleapis/python-bigquery/commit/04d027317a99e3f353e0b7a18076da9b6ba4d8d3))
+* add support for unrecognized model types ([#401](https://www.github.com/googleapis/python-bigquery/issues/401)) ([168f035](https://www.github.com/googleapis/python-bigquery/commit/168f0354c4815bd1aeadbd4e388dcc9b32f97d6b))
+
+
+### Bug Fixes
+
+* avoid floating point for timestamp in `insert_rows` ([#393](https://www.github.com/googleapis/python-bigquery/issues/393)) ([a1949ae](https://www.github.com/googleapis/python-bigquery/commit/a1949ae20ec4f9c771b0cffbcd70792dd6a30dbf))
+
+
+### Performance Improvements
+
+* don't fetch rows when waiting for query to finish ([#400](https://www.github.com/googleapis/python-bigquery/issues/400)) ([730df17](https://www.github.com/googleapis/python-bigquery/commit/730df17ae1ab0b0bb2454f3c134c8f62665bc51b)), closes [#374](https://www.github.com/googleapis/python-bigquery/issues/374) [#394](https://www.github.com/googleapis/python-bigquery/issues/394)
+
+
+### Documentation
+
+* **samples:** add more clustering code snippets ([#330](https://www.github.com/googleapis/python-bigquery/issues/330)) ([809e4a2](https://www.github.com/googleapis/python-bigquery/commit/809e4a27b94ba30c10e0c9a7e89576a9de9fda2b)), closes [#329](https://www.github.com/googleapis/python-bigquery/issues/329)
+
+
+### Dependencies
+
+* update required version of opentelementry for opentelemetry-exporter-google-cloud ([#398](https://www.github.com/googleapis/python-bigquery/issues/398)) ([673a9cb](https://www.github.com/googleapis/python-bigquery/commit/673a9cb51c577c1dd016e76f3634b1e9e21482c5))
+
+## [2.4.0](https://www.github.com/googleapis/python-bigquery/compare/v2.3.1...v2.4.0) (2020-11-16)
+
+
+### Features
+
+* add progress bar to `QueryJob.to_dataframe` and `to_arrow` ([#352](https://www.github.com/googleapis/python-bigquery/issues/352)) ([dc78edd](https://www.github.com/googleapis/python-bigquery/commit/dc78eddde7a6a312c8fed7bace7d64036837ab1a))
+* allow routine references ([#378](https://www.github.com/googleapis/python-bigquery/issues/378)) ([f9480dc](https://www.github.com/googleapis/python-bigquery/commit/f9480dc2a1bc58367083176bd74725aa8b903301))
+
+
+### Bug Fixes
+
+* **dbapi:** allow rows to be fetched from scripts ([#387](https://www.github.com/googleapis/python-bigquery/issues/387)) ([b899ad1](https://www.github.com/googleapis/python-bigquery/commit/b899ad12e17cb87c58d3ae46b4388d917c5743f2)), closes [#377](https://www.github.com/googleapis/python-bigquery/issues/377)
+
+
+### Performance Improvements
+
+* avoid extra API calls from `to_dataframe` if all rows are cached ([#384](https://www.github.com/googleapis/python-bigquery/issues/384)) ([c52b317](https://www.github.com/googleapis/python-bigquery/commit/c52b31789998fc0dfde07c3296650c85104d719d))
+* cache first page of `jobs.getQueryResults` rows ([#374](https://www.github.com/googleapis/python-bigquery/issues/374)) ([86f6a51](https://www.github.com/googleapis/python-bigquery/commit/86f6a516d1c7c5dc204ab085ea2578793e6561ff))
+* use `getQueryResults` from DB-API ([#375](https://www.github.com/googleapis/python-bigquery/issues/375)) ([30de15f](https://www.github.com/googleapis/python-bigquery/commit/30de15f7255de5ea221df4e8db7991d279e0ea28))
+
+
+### Dependencies
+
+* expand pyarrow dependencies to include version 2 ([#368](https://www.github.com/googleapis/python-bigquery/issues/368)) ([cd9febd](https://www.github.com/googleapis/python-bigquery/commit/cd9febd20c34983781386c3bf603e5fca7135695))
+
+## 2.3.1
+
+11-05-2020 09:27 PST
+
+### Internal / Testing Changes
+
+- update `google.cloud.bigquery.__version__`
+
+## [2.3.0](https://www.github.com/googleapis/python-bigquery/compare/v2.2.0...v2.3.0) (2020-11-04)
+
+
+### Features
+
+* add `reload` argument to `*Job.done()` functions ([#341](https://www.github.com/googleapis/python-bigquery/issues/341)) ([e51fd45](https://www.github.com/googleapis/python-bigquery/commit/e51fd45fdb0481ac5d59cc0edbfa0750928b2596))
+* pass retry from Job.result() to Job.done() ([#41](https://www.github.com/googleapis/python-bigquery/issues/41)) ([284e17a](https://www.github.com/googleapis/python-bigquery/commit/284e17a17adf6844a17db2c6fed54a649b1f997e))
+
+
+### Bug Fixes
+
+* add missing spaces in opentelemetry log message ([#360](https://www.github.com/googleapis/python-bigquery/issues/360)) ([4f326b1](https://www.github.com/googleapis/python-bigquery/commit/4f326b1ca4411cfbf5ded86955a963d3e05a409f))
+* **dbapi:** avoid running % format with no query parameters ([#348](https://www.github.com/googleapis/python-bigquery/issues/348)) ([5dd1a5e](https://www.github.com/googleapis/python-bigquery/commit/5dd1a5e77f13b8e576e917069e247c5390a81900))
+* create_job method accepts dictionary arguments ([#300](https://www.github.com/googleapis/python-bigquery/issues/300)) ([155bacc](https://www.github.com/googleapis/python-bigquery/commit/155bacc156f181384ca6dba699ab83d0398176d1))
+
+
+### Performance Improvements
+
+* use `jobs.getQueryResults` to download result sets ([#363](https://www.github.com/googleapis/python-bigquery/issues/363)) ([0c3476d](https://www.github.com/googleapis/python-bigquery/commit/0c3476d56380d70115f6fd765bf5c5261967052f))
+
+
+### Documentation
+
+* add documents for QueryPlanEntry and QueryPlanEntryStep ([#344](https://www.github.com/googleapis/python-bigquery/issues/344)) ([dca2e4c](https://www.github.com/googleapis/python-bigquery/commit/dca2e4ca7c2ae183ac4bb60f653d425a43a86bea))
+
+## [2.2.0](https://www.github.com/googleapis/python-bigquery/compare/v2.1.0...v2.2.0) (2020-10-19)
+
+
+### Features
+
+* add method api_repr for table list item ([#299](https://www.github.com/googleapis/python-bigquery/issues/299)) ([07c70f0](https://www.github.com/googleapis/python-bigquery/commit/07c70f0292f9212f0c968cd5c9206e8b0409c0da))
+* add support for listing arima, automl, boosted tree, DNN, and matrix factorization models ([#328](https://www.github.com/googleapis/python-bigquery/issues/328)) ([502a092](https://www.github.com/googleapis/python-bigquery/commit/502a0926018abf058cb84bd18043c25eba15a2cc))
+* add timeout paramter to load_table_from_file and it dependent methods ([#327](https://www.github.com/googleapis/python-bigquery/issues/327)) ([b0dd892](https://www.github.com/googleapis/python-bigquery/commit/b0dd892176e31ac25fddd15554b5bfa054299d4d))
+* add to_api_repr method to Model ([#326](https://www.github.com/googleapis/python-bigquery/issues/326)) ([fb401bd](https://www.github.com/googleapis/python-bigquery/commit/fb401bd94477323bba68cf252dd88166495daf54))
+* allow client options to be set in magics context ([#322](https://www.github.com/googleapis/python-bigquery/issues/322)) ([5178b55](https://www.github.com/googleapis/python-bigquery/commit/5178b55682f5e264bfc082cde26acb1fdc953a18))
+
+
+### Bug Fixes
+
+* make TimePartitioning repr evaluable ([#110](https://www.github.com/googleapis/python-bigquery/issues/110)) ([20f473b](https://www.github.com/googleapis/python-bigquery/commit/20f473bfff5ae98377f5d9cdf18bfe5554d86ff4)), closes [#109](https://www.github.com/googleapis/python-bigquery/issues/109)
+* use version.py instead of pkg_resources.get_distribution ([#307](https://www.github.com/googleapis/python-bigquery/issues/307)) ([b8f502b](https://www.github.com/googleapis/python-bigquery/commit/b8f502b14f21d1815697e4d57cf1225dfb4a7c5e))
+
+
+### Performance Improvements
+
+* add size parameter for load table from dataframe and json methods ([#280](https://www.github.com/googleapis/python-bigquery/issues/280)) ([3be78b7](https://www.github.com/googleapis/python-bigquery/commit/3be78b737add7111e24e912cd02fc6df75a07de6))
+
+
+### Documentation
+
+* update clustering field docstrings ([#286](https://www.github.com/googleapis/python-bigquery/issues/286)) ([5ea1ece](https://www.github.com/googleapis/python-bigquery/commit/5ea1ece2d911cdd1f3d9549ee01559ce8ed8269a)), closes [#285](https://www.github.com/googleapis/python-bigquery/issues/285)
+* update snippets samples to support version 2.0 ([#309](https://www.github.com/googleapis/python-bigquery/issues/309)) ([61634be](https://www.github.com/googleapis/python-bigquery/commit/61634be9bf9e3df7589fc1bfdbda87288859bb13))
+
+
+### Dependencies
+
+* add protobuf dependency ([#306](https://www.github.com/googleapis/python-bigquery/issues/306)) ([cebb5e0](https://www.github.com/googleapis/python-bigquery/commit/cebb5e0e911e8c9059bc8c9e7fce4440e518bff3)), closes [#305](https://www.github.com/googleapis/python-bigquery/issues/305)
+* require pyarrow for pandas support ([#314](https://www.github.com/googleapis/python-bigquery/issues/314)) ([801e4c0](https://www.github.com/googleapis/python-bigquery/commit/801e4c0574b7e421aa3a28cafec6fd6bcce940dd)), closes [#265](https://www.github.com/googleapis/python-bigquery/issues/265)
+
+## [2.1.0](https://www.github.com/googleapis/python-bigquery/compare/v2.0.0...v2.1.0) (2020-10-08)
+
+
+### Features
+
+* add constants for MONTH and YEAR time partitioning types ([#283](https://www.github.com/googleapis/python-bigquery/issues/283)) ([9090e1c](https://www.github.com/googleapis/python-bigquery/commit/9090e1ccd8825a97835325b4829f6e7ecfd9ea88))
+
+
+### Bug Fixes
+
+* remove unnecessary dependency on libcst ([#308](https://www.github.com/googleapis/python-bigquery/issues/308)) ([c055930](https://www.github.com/googleapis/python-bigquery/commit/c05593094c1405f752b2c51b15202a6dbb5cb83f))
+
+
+### Performance Improvements
+
+* remove redundant array deepcopy ([#26](https://www.github.com/googleapis/python-bigquery/issues/26)) ([b54f867](https://www.github.com/googleapis/python-bigquery/commit/b54f86769c982ce5c8fcbf3889f82450428bb40c))
+
+
+### Documentation
+
+* **samples:** add create_table_clustered code snippet ([#291](https://www.github.com/googleapis/python-bigquery/issues/291)) ([d1eb8b3](https://www.github.com/googleapis/python-bigquery/commit/d1eb8b3dcc789916c5d3ba8464f62b1f8bef35ff))
+
+## 2.0.0
+
+09-30-2020 14:51 PDT
+
+
+### Implementation Changes
+
+- Transition the library to microgenerator. ([#278](https://github.com/googleapis/python-bigquery/pull/278))
+ This is a **breaking change** that **drops support for Python 2.7 and 3.5** and brings a few other changes.
+ See [migration guide](https://googleapis.dev/python/bigquery/latest/UPGRADING.html) for more info.
+
+
+
+### Internal / Testing Changes
+
+- Update protoc-generated comments (via synth). ([#270](https://github.com/googleapis/python-bigquery/pull/270))
+- Add CI secrets manager (via synth). ([#271](https://github.com/googleapis/python-bigquery/pull/271))
+
+## [1.28.0](https://www.github.com/googleapis/python-bigquery/compare/v1.27.2...v1.28.0) (2020-09-22)
+
+
+### Features
+
+* add custom cell magic parser to handle complex `--params` values ([#213](https://www.github.com/googleapis/python-bigquery/issues/213)) ([dcfbac2](https://www.github.com/googleapis/python-bigquery/commit/dcfbac267fbf66d189b0cc7e76f4712122a74b7b))
+* add instrumentation to list methods ([#239](https://www.github.com/googleapis/python-bigquery/issues/239)) ([fa9f9ca](https://www.github.com/googleapis/python-bigquery/commit/fa9f9ca491c3f9954287102c567ec483aa6151d4))
+* add opentelemetry tracing ([#215](https://www.github.com/googleapis/python-bigquery/issues/215)) ([a04996c](https://www.github.com/googleapis/python-bigquery/commit/a04996c537e9d8847411fcbb1b05da5f175b339e))
+* expose require_partition_filter for hive_partition ([#257](https://www.github.com/googleapis/python-bigquery/issues/257)) ([aa1613c](https://www.github.com/googleapis/python-bigquery/commit/aa1613c1bf48c7efb999cb8b8c422c80baf1950b))
+
+
+### Bug Fixes
+
+* fix dependency issue in fastavro ([#241](https://www.github.com/googleapis/python-bigquery/issues/241)) ([2874abf](https://www.github.com/googleapis/python-bigquery/commit/2874abf4827f1ea529519d4b138511d31f732a50))
+* update minimum dependency versions ([#263](https://www.github.com/googleapis/python-bigquery/issues/263)) ([1be66ce](https://www.github.com/googleapis/python-bigquery/commit/1be66ce94a32b1f924bdda05d068c2977631af9e))
+* validate job_config.source_format in load_table_from_dataframe ([#262](https://www.github.com/googleapis/python-bigquery/issues/262)) ([6160fee](https://www.github.com/googleapis/python-bigquery/commit/6160fee4b1a79b0ea9031cc18caf6322fe4c4084))
+
+
+### Documentation
+
+* recommend insert_rows_json to avoid call to tables.get ([#258](https://www.github.com/googleapis/python-bigquery/issues/258)) ([ae647eb](https://www.github.com/googleapis/python-bigquery/commit/ae647ebd68deff6e30ca2cffb5b7422c6de4940b))
+
+## [1.27.2](https://www.github.com/googleapis/python-bigquery/compare/v1.27.1...v1.27.2) (2020-08-18)
+
+
+### Bug Fixes
+
+* rationalize platform constraints for 'pyarrow' extra ([#235](https://www.github.com/googleapis/python-bigquery/issues/235)) ([c9a0567](https://www.github.com/googleapis/python-bigquery/commit/c9a0567f59491b769a9e2fd535430423e39d4fa8))
+
+## [1.27.1](https://www.github.com/googleapis/python-bigquery/compare/v1.27.0...v1.27.1) (2020-08-18)
+
+
+### Bug Fixes
+
+* tweak pyarrow extra to soothe PyPI ([#230](https://www.github.com/googleapis/python-bigquery/issues/230)) ([c15efbd](https://www.github.com/googleapis/python-bigquery/commit/c15efbd1ee4488898fc862768eef701443f492f6))
+
+## [1.27.0](https://www.github.com/googleapis/python-bigquery/compare/v1.26.1...v1.27.0) (2020-08-15)
+
+
+### Features
+
+* add support and tests for struct fields ([#146](https://www.github.com/googleapis/python-bigquery/issues/146)) ([fee2ba8](https://www.github.com/googleapis/python-bigquery/commit/fee2ba80e338d093ee61565359268da91a5c9913))
+* add support for getting and setting table IAM policy ([#144](https://www.github.com/googleapis/python-bigquery/issues/144)) ([f59fc9a](https://www.github.com/googleapis/python-bigquery/commit/f59fc9a482d9f9ae63e2b2bfc80b9a3481d09bde))
+* **bigquery:** add client_options to base class ([#216](https://www.github.com/googleapis/python-bigquery/issues/216)) ([478597a](https://www.github.com/googleapis/python-bigquery/commit/478597a38167fa57b60ae7f65b581f3fe75ddc7c))
+
+
+### Bug Fixes
+
+* converting to dataframe with out of bounds timestamps ([#209](https://www.github.com/googleapis/python-bigquery/issues/209)) ([8209203](https://www.github.com/googleapis/python-bigquery/commit/8209203e967f0624ad306166c0af6f6f1027c550)), closes [#168](https://www.github.com/googleapis/python-bigquery/issues/168)
+* raise error if inserting rows with unknown fields ([#163](https://www.github.com/googleapis/python-bigquery/issues/163)) ([8fe7254](https://www.github.com/googleapis/python-bigquery/commit/8fe725429541eed34ddc01cffc8b1ee846c14162))
+
+## [1.26.1](https://www.github.com/googleapis/python-bigquery/compare/v1.26.0...v1.26.1) (2020-07-25)
+
+### Documentation
+
+* Migrated code samples from
+ https://github.com/GoogleCloudPlatform/python-docs-samples
+
+### Bug Fixes
+
+* RowIterator.to_arrow() error when BQ Storage client cannot be created ([#181](https://www.github.com/googleapis/python-bigquery/issues/181)) ([7afa3d7](https://www.github.com/googleapis/python-bigquery/commit/7afa3d70f8564dcdacda2b9acbbd7207b50b186e))
+
+### Dependencies
+
+* Updated version constraints on grmp dependency in anticipation of 1.0.0 release
+ ([#189](https://github.com/googleapis/python-bigquery/pull/189))
+
+## [1.26.0](https://www.github.com/googleapis/python-bigquery/compare/v1.25.0...v1.26.0) (2020-07-20)
+
+
+### Features
+
+* use BigQuery Storage client by default (if dependencies available) ([#55](https://www.github.com/googleapis/python-bigquery/issues/55)) ([e75ff82](https://www.github.com/googleapis/python-bigquery/commit/e75ff8297c65981545b097f75a17cf9e78ac6772)), closes [#91](https://www.github.com/googleapis/python-bigquery/issues/91)
+* **bigquery:** add __eq__ method for class PartitionRange and RangePartitioning ([#162](https://www.github.com/googleapis/python-bigquery/issues/162)) ([0d2a88d](https://www.github.com/googleapis/python-bigquery/commit/0d2a88d8072154cfc9152afd6d26a60ddcdfbc73))
+* **bigquery:** expose date_as_object parameter to users ([#150](https://www.github.com/googleapis/python-bigquery/issues/150)) ([a2d5ce9](https://www.github.com/googleapis/python-bigquery/commit/a2d5ce9e97992318d7dc85c51c053cab74e25a11))
+* **bigquery:** expose date_as_object parameter to users ([#150](https://www.github.com/googleapis/python-bigquery/issues/150)) ([cbd831e](https://www.github.com/googleapis/python-bigquery/commit/cbd831e08024a67148723afd49e1db085e0a862c))
+
+
+### Bug Fixes
+
+* dry run queries with DB API cursor ([#128](https://www.github.com/googleapis/python-bigquery/issues/128)) ([bc33a67](https://www.github.com/googleapis/python-bigquery/commit/bc33a678a765f0232615aa2038b8cc67c88468a0))
+* omit `NaN` values when uploading from `insert_rows_from_dataframe` ([#170](https://www.github.com/googleapis/python-bigquery/issues/170)) ([f9f2f45](https://www.github.com/googleapis/python-bigquery/commit/f9f2f45bc009c03cd257441bd4b6beb1754e2177))
+
+
+### Documentation
+
+* **bigquery:** add client thread-safety documentation ([#132](https://www.github.com/googleapis/python-bigquery/issues/132)) ([fce76b3](https://www.github.com/googleapis/python-bigquery/commit/fce76b3776472b1da798df862a3405e659e35bab))
+* **bigquery:** add docstring for conflict exception ([#171](https://www.github.com/googleapis/python-bigquery/issues/171)) ([9c3409b](https://www.github.com/googleapis/python-bigquery/commit/9c3409bb06218bf499620544f8e92802df0cce47))
+* **bigquery:** consistent use of optional keyword ([#153](https://www.github.com/googleapis/python-bigquery/issues/153)) ([79d8c61](https://www.github.com/googleapis/python-bigquery/commit/79d8c61064cca18b596a24b6f738c7611721dd5c))
+* **bigquery:** fix the broken docs ([#139](https://www.github.com/googleapis/python-bigquery/issues/139)) ([3235255](https://www.github.com/googleapis/python-bigquery/commit/3235255cc5f483949f34d2e8ef13b372e8713782))
+
+## [1.25.0](https://www.github.com/googleapis/python-bigquery/compare/v1.24.0...v1.25.0) (2020-06-06)
+
+
+### Features
+
+* add BigQuery storage client support to DB API ([#36](https://www.github.com/googleapis/python-bigquery/issues/36)) ([ba9b2f8](https://www.github.com/googleapis/python-bigquery/commit/ba9b2f87e36320d80f6f6460b77e6daddb0fa214))
+* **bigquery:** add create job method ([#32](https://www.github.com/googleapis/python-bigquery/issues/32)) ([2abdef8](https://www.github.com/googleapis/python-bigquery/commit/2abdef82bed31601d1ca1aa92a10fea1e09f5297))
+* **bigquery:** add support of model for extract job ([#71](https://www.github.com/googleapis/python-bigquery/issues/71)) ([4a7a514](https://www.github.com/googleapis/python-bigquery/commit/4a7a514659a9f6f9bbd8af46bab3f8782d6b4b98))
+* add HOUR support for time partitioning interval ([#91](https://www.github.com/googleapis/python-bigquery/issues/91)) ([0dd90b9](https://www.github.com/googleapis/python-bigquery/commit/0dd90b90e3714c1d18f8a404917a9454870e338a))
+* add support for policy tags ([#77](https://www.github.com/googleapis/python-bigquery/issues/77)) ([38a5c01](https://www.github.com/googleapis/python-bigquery/commit/38a5c01ca830daf165592357c45f2fb4016aad23))
+* make AccessEntry objects hashable ([#93](https://www.github.com/googleapis/python-bigquery/issues/93)) ([23a173b](https://www.github.com/googleapis/python-bigquery/commit/23a173bc5a25c0c8200adc5af62eb05624c9099e))
+* **bigquery:** expose start index parameter for query result ([#121](https://www.github.com/googleapis/python-bigquery/issues/121)) ([be86de3](https://www.github.com/googleapis/python-bigquery/commit/be86de330a3c3801653a0ccef90e3d9bdb3eee7a))
+* **bigquery:** unit and system test for dataframe with int column with Nan values ([#39](https://www.github.com/googleapis/python-bigquery/issues/39)) ([5fd840e](https://www.github.com/googleapis/python-bigquery/commit/5fd840e9d4c592c4f736f2fd4792c9670ba6795e))
+
+
+### Bug Fixes
+
+* allow partial streaming_buffer statistics ([#37](https://www.github.com/googleapis/python-bigquery/issues/37)) ([645f0fd](https://www.github.com/googleapis/python-bigquery/commit/645f0fdb35ee0e81ee70f7459e796a42a1f03210))
+* distinguish server timeouts from transport timeouts ([#43](https://www.github.com/googleapis/python-bigquery/issues/43)) ([a17be5f](https://www.github.com/googleapis/python-bigquery/commit/a17be5f01043f32d9fbfb2ddf456031ea9205c8f))
+* improve cell magic error message on missing query ([#58](https://www.github.com/googleapis/python-bigquery/issues/58)) ([6182cf4](https://www.github.com/googleapis/python-bigquery/commit/6182cf48aef8f463bb96891cfc44a96768121dbc))
+* **bigquery:** fix repr of model reference ([#66](https://www.github.com/googleapis/python-bigquery/issues/66)) ([26c6204](https://www.github.com/googleapis/python-bigquery/commit/26c62046f4ec8880cf6561cc90a8b821dcc84ec5))
+* **bigquery:** fix start index with page size for list rows ([#27](https://www.github.com/googleapis/python-bigquery/issues/27)) ([400673b](https://www.github.com/googleapis/python-bigquery/commit/400673b5d0f2a6a3d828fdaad9d222ca967ffeff))
+
+## 1.24.0
+
+02-03-2020 01:38 PST
+
+### Implementation Changes
+
+- Fix inserting missing repeated fields. ([#10196](https://github.com/googleapis/google-cloud-python/pull/10196))
+- Deprecate `client.dataset()` in favor of `DatasetReference`. ([#7753](https://github.com/googleapis/google-cloud-python/pull/7753))
+- Use faster `to_arrow` + `to_pandas` in `to_dataframe()` when `pyarrow` is available. ([#10027](https://github.com/googleapis/google-cloud-python/pull/10027))
+- Write pandas `datetime[ns]` columns to BigQuery TIMESTAMP columns. ([#10028](https://github.com/googleapis/google-cloud-python/pull/10028))
+
+### New Features
+
+- Check `rows` argument type in `insert_rows()`. ([#10174](https://github.com/googleapis/google-cloud-python/pull/10174))
+- Check `json_rows` arg type in `insert_rows_json()`. ([#10162](https://github.com/googleapis/google-cloud-python/pull/10162))
+- Make `RowIterator.to_dataframe_iterable()` method public. ([#10017](https://github.com/googleapis/google-cloud-python/pull/10017))
+- Add retry parameter to public methods where missing. ([#10026](https://github.com/googleapis/google-cloud-python/pull/10026))
+- Add timeout parameter to Client and Job public methods. ([#10002](https://github.com/googleapis/google-cloud-python/pull/10002))
+- Add timeout parameter to `QueryJob.done()` method. ([#9875](https://github.com/googleapis/google-cloud-python/pull/9875))
+- Add `create_bqstorage_client` parameter to `to_dataframe()` and `to_arrow()` methods. ([#9573](https://github.com/googleapis/google-cloud-python/pull/9573))
+
+### Dependencies
+
+- Fix minimum versions of `google-cloud-core` and `google-resumable-media` dependencies. ([#10016](https://github.com/googleapis/google-cloud-python/pull/10016))
+
+### Documentation
+
+- Fix a comment typo in `job.py`. ([#10209](https://github.com/googleapis/google-cloud-python/pull/10209))
+- Update code samples of load table file and load table URI. ([#10175](https://github.com/googleapis/google-cloud-python/pull/10175))
+- Uncomment `Client` constructor and imports in samples. ([#10058](https://github.com/googleapis/google-cloud-python/pull/10058))
+- Remove unused query code sample. ([#10024](https://github.com/googleapis/google-cloud-python/pull/10024))
+- Update code samples to use strings for table and dataset IDs. ([#9974](https://github.com/googleapis/google-cloud-python/pull/9974))
+
+### Internal / Testing Changes
+
+- Bump copyright year to 2020, tweak docstring formatting (via synth). [#10225](https://github.com/googleapis/google-cloud-python/pull/10225)
+- Add tests for concatenating categorical columns. ([#10180](https://github.com/googleapis/google-cloud-python/pull/10180))
+- Adjust test assertions to the new default timeout. ([#10222](https://github.com/googleapis/google-cloud-python/pull/10222))
+- Use Python 3.6 for the nox blacken session (via synth). ([#10012](https://github.com/googleapis/google-cloud-python/pull/10012))
+
+## 1.23.1
+
+12-16-2019 09:39 PST
+
+
+### Implementation Changes
+
+- Add `iamMember` entity type to allowed access classes. ([#9973](https://github.com/googleapis/google-cloud-python/pull/9973))
+- Fix typo in import error message (pandas -> pyarrow). ([#9955](https://github.com/googleapis/google-cloud-python/pull/9955))
+
+### Dependencies
+
+- Add `six` as an explicit dependency. ([#9979](https://github.com/googleapis/google-cloud-python/pull/9979))
+
+### Documentation
+
+- Add sample to read from query destination table. ([#9964](https://github.com/googleapis/google-cloud-python/pull/9964))
+
+## 1.23.0
+
+12-11-2019 13:31 PST
+
+### New Features
+
+- Add `close()` method to client for releasing open sockets. ([#9894](https://github.com/googleapis/google-cloud-python/pull/9894))
+- Add support of `use_avro_logical_types` for extract jobs. ([#9642](https://github.com/googleapis/google-cloud-python/pull/9642))
+- Add support for hive partitioning options configuration. ([#9626](https://github.com/googleapis/google-cloud-python/pull/9626))
+- Add description for routine entities. ([#9785](https://github.com/googleapis/google-cloud-python/pull/9785))
+
+### Documentation
+
+- Update code samples to use strings for table and dataset IDs. ([#9495](https://github.com/googleapis/google-cloud-python/pull/9495))
+
+### Internal / Testing Changes
+
+- Run unit tests with Python 3.8. ([#9880](https://github.com/googleapis/google-cloud-python/pull/9880))
+- Import `Mapping` from `collections.abc` not from `collections`. ([#9826](https://github.com/googleapis/google-cloud-python/pull/9826))
+
+## 1.22.0
+
+11-13-2019 12:23 PST
+
+
+### Implementation Changes
+- Preserve job config passed to Client methods. ([#9735](https://github.com/googleapis/google-cloud-python/pull/9735))
+- Use pyarrow fallback for improved schema detection. ([#9321](https://github.com/googleapis/google-cloud-python/pull/9321))
+- Add TypeError if wrong `job_config type` is passed to client job methods. ([#9506](https://github.com/googleapis/google-cloud-python/pull/9506))
+- Fix arrow deprecation warning. ([#9504](https://github.com/googleapis/google-cloud-python/pull/9504))
+
+### New Features
+- Add `--destination_table` parameter to IPython magic. ([#9599](https://github.com/googleapis/google-cloud-python/pull/9599))
+- Allow passing schema as a sequence of dicts. ([#9550](https://github.com/googleapis/google-cloud-python/pull/9550))
+- Implement defaultEncryptionConfiguration on datasets. ([#9489](https://github.com/googleapis/google-cloud-python/pull/9489))
+- Add range partitioning to tables, load jobs, and query jobs. ([#9477](https://github.com/googleapis/google-cloud-python/pull/9477))
+
+### Dependencies
+- Pin `google-resumable-media` to includ 0.5.x. ([#9572](https://github.com/googleapis/google-cloud-python/pull/9572))
+
+### Documentation
+- Fix link anchors in external config docstrings. ([#9627](https://github.com/googleapis/google-cloud-python/pull/9627))
+- Add python 2 sunset banner to documentation. ([#9036](https://github.com/googleapis/google-cloud-python/pull/9036))
+- Add table create sample using integer range partitioning. ([#9478](https://github.com/googleapis/google-cloud-python/pull/9478))
+- Document how to achieve higher write limit and add tests. ([#9574](https://github.com/googleapis/google-cloud-python/pull/9574))
+- Add code sample for scripting. ([#9537](https://github.com/googleapis/google-cloud-python/pull/9537))
+- Rewrite docs in Google style, part 2. ([#9481](https://github.com/googleapis/google-cloud-python/pull/9481))
+- Use multi-regional key path for CMEK in snippets. ([#9523](https://github.com/googleapis/google-cloud-python/pull/9523))
+
+### Internal / Testing Changes
+- Fix undelete table system test to use milliseconds in snapshot decorator. ([#9649](https://github.com/googleapis/google-cloud-python/pull/9649))
+- Format code with latest version of black. ([#9556](https://github.com/googleapis/google-cloud-python/pull/9556))
+- Remove duplicate test dependencies. ([#9503](https://github.com/googleapis/google-cloud-python/pull/9503))
+
+## 1.21.0
+
+10-16-2019 10:33 PDT
+
+
+### New Features
+
+- add ability to pass in a table ID instead of a query to the `%%bigquery` magic ([#9170](https://github.com/googleapis/google-cloud-python/pull/9170))
+- add support for custom `QueryJobConfig` in `BigQuery.cursor.execute` method ([#9278](https://github.com/googleapis/google-cloud-python/pull/9278))
+- store `QueryJob` to destination var on error in `%%bigquery` magic ([#9245](https://github.com/googleapis/google-cloud-python/pull/9245))
+- add script statistics to job resource ([#9428](https://github.com/googleapis/google-cloud-python/pull/9428))
+- add support for sheets ranges ([#9416](https://github.com/googleapis/google-cloud-python/pull/9416))
+- add support for listing jobs by parent job ([#9225](https://github.com/googleapis/google-cloud-python/pull/9225))
+- expose customer managed encryption key for ML models ([#9302](https://github.com/googleapis/google-cloud-python/pull/9302))
+- add `Dataset.default_partition_expiration_ms` and `Table.require_partition_filter` properties ([#9464](https://github.com/googleapis/google-cloud-python/pull/9464))
+
+### Dependencies
+
+- restrict version range of `google-resumable-media` ([#9243](https://github.com/googleapis/google-cloud-python/pull/9243))
+
+### Documentation
+
+- document how to load data as JSON string ([#9231](https://github.com/googleapis/google-cloud-python/pull/9231))
+- standardize comments and formatting in existing code samples ([#9212](https://github.com/googleapis/google-cloud-python/pull/9212))
+- rewrite docstrings in Google style ([#9326](https://github.com/googleapis/google-cloud-python/pull/9326))
+- fix incorrect links to REST API in reference docs ([#9436](https://github.com/googleapis/google-cloud-python/pull/9436))
+
+### Internal / Testing Changes
+
+- add code samples to lint check ([#9277](https://github.com/googleapis/google-cloud-python/pull/9277))
+- update code samples to use strings for table and dataset IDs ([#9136](https://github.com/googleapis/google-cloud-python/pull/9136))
+- simplify scripting system test to reduce flakiness ([#9458](https://github.com/googleapis/google-cloud-python/pull/9458))
+
+## 1.20.0
+
+09-13-2019 11:22 PDT
+
+
+### Implementation Changes
+- Change default endpoint to bigquery.googleapis.com ([#9213](https://github.com/googleapis/google-cloud-python/pull/9213))
+- Change the default value of Cursor instances' `arraysize` attribute to None ([#9199](https://github.com/googleapis/google-cloud-python/pull/9199))
+- Deprecate automatic schema conversion. ([#9176](https://github.com/googleapis/google-cloud-python/pull/9176))
+- Fix `list_rows()` max results with BQ storage client ([#9178](https://github.com/googleapis/google-cloud-python/pull/9178))
+
+### New Features
+- Add `Model.encryption_config`. (via synth) ([#9214](https://github.com/googleapis/google-cloud-python/pull/9214))
+- Add `Client.insert_rows_from_dataframe()` method ([#9162](https://github.com/googleapis/google-cloud-python/pull/9162))
+- Add support for array parameters to `Cursor.execute()`. ([#9189](https://github.com/googleapis/google-cloud-python/pull/9189))
+- Add support for project IDs with org prefix to `Table.from_string()` factory. ([#9161](https://github.com/googleapis/google-cloud-python/pull/9161))
+- Add `--max_results` option to Jupyter magics ([#9169](https://github.com/googleapis/google-cloud-python/pull/9169))
+- Autofetch table schema on load if not provided. ([#9108](https://github.com/googleapis/google-cloud-python/pull/9108))
+- Add `max_results` parameter to `QueryJob.result()`. ([#9167](https://github.com/googleapis/google-cloud-python/pull/9167))
+
+### Documentation
+- Fix doc link. ([#9200](https://github.com/googleapis/google-cloud-python/pull/9200))
+
+### Internal / Testing Changes
+- Revert "Disable failing snippets test ([#9156](https://github.com/googleapis/google-cloud-python/pull/9156))." ([#9220](https://github.com/googleapis/google-cloud-python/pull/9220))
+
+## 1.19.0
+
+09-03-2019 14:33 PDT
+
+### Implementation Changes
+
+- Raise when unexpected fields are present in the `LoadJobConfig.schema` when calling `load_table_from_dataframe`. ([#9096](https://github.com/googleapis/google-cloud-python/pull/9096))
+- Determine the schema in `load_table_from_dataframe` based on dtypes. ([#9049](https://github.com/googleapis/google-cloud-python/pull/9049))
+- Raise helpful error when loading table from dataframe with `STRUCT` columns. ([#9053](https://github.com/googleapis/google-cloud-python/pull/9053))
+- Fix schema recognition of struct field types. ([#9001](https://github.com/googleapis/google-cloud-python/pull/9001))
+- Fix deserializing `None` in `QueryJob` for queries with parameters. ([#9029](https://github.com/googleapis/google-cloud-python/pull/9029))
+
+### New Features
+
+- Include indexes in table written by `load_table_from_dataframe`, only if
+ fields corresponding to indexes are present in `LoadJobConfig.schema`.
+ ([#9084](https://github.com/googleapis/google-cloud-python/pull/9084))
+- Add `client_options` to constructor. ([#8999](https://github.com/googleapis/google-cloud-python/pull/8999))
+- Add `--dry_run` option to `%%bigquery` magic. ([#9067](https://github.com/googleapis/google-cloud-python/pull/9067))
+- Add `load_table_from_json()` method to create a table from a list of dictionaries. ([#9076](https://github.com/googleapis/google-cloud-python/pull/9076))
+- Allow subset of schema to be passed into `load_table_from_dataframe`. ([#9064](https://github.com/googleapis/google-cloud-python/pull/9064))
+- Add support for unsetting `LoadJobConfig.schema`. ([#9077](https://github.com/googleapis/google-cloud-python/pull/9077))
+- Add support to `Dataset` for project IDs containing an org prefix. ([#8877](https://github.com/googleapis/google-cloud-python/pull/8877))
+- Add enum with SQL type names allowed to be used in `SchemaField`. ([#9040](https://github.com/googleapis/google-cloud-python/pull/9040))
+
+### Documentation
+
+- Fix the reference URL for `Client.create_dataset()`. ([#9149](https://github.com/googleapis/google-cloud-python/pull/9149))
+- Update code samples to use strings for table names instead of `client.dataset()`. ([#9032](https://github.com/googleapis/google-cloud-python/pull/9032))
+- Remove compatability badges from READMEs. ([#9035](https://github.com/googleapis/google-cloud-python/pull/9035))
+- Fix Pandas DataFrame load example under Python 2.7. ([#9022](https://github.com/googleapis/google-cloud-python/pull/9022))
+
+### Internal / Testing Changes
+
+- Disable failing snippets test for copying CMEK-protected tables. ([#9156](https://github.com/googleapis/google-cloud-python/pull/9156))
+- Fix BigQuery client unit test assertions ([#9112](https://github.com/googleapis/google-cloud-python/pull/9112))
+- Replace avro with arrow schemas in `test_table.py` ([#9056](https://github.com/googleapis/google-cloud-python/pull/9056))
+
+## 1.18.0
+
+08-08-2019 12:28 PDT
+
+### New Features
+
+- Add `bqstorage_client` param to `QueryJob.to_arrow()` ([#8693](https://github.com/googleapis/google-cloud-python/pull/8693))
+- Include SQL query and job ID in exception messages. ([#8748](https://github.com/googleapis/google-cloud-python/pull/8748))
+- Allow using TableListItem to construct a Table object. ([#8738](https://github.com/googleapis/google-cloud-python/pull/8738))
+- Add StandardSqlDataTypes enum to BigQuery ([#8782](https://github.com/googleapis/google-cloud-python/pull/8782))
+- Add `to_standard_sql()` method to SchemaField ([#8880](https://github.com/googleapis/google-cloud-python/pull/8880))
+- Add debug logging statements to track when BQ Storage API is used. ([#8838](https://github.com/googleapis/google-cloud-python/pull/8838))
+- Hide error traceback in BigQuery cell magic ([#8808](https://github.com/googleapis/google-cloud-python/pull/8808))
+- Allow choice of compression when loading from dataframe ([#8938](https://github.com/googleapis/google-cloud-python/pull/8938))
+- Additional clustering metrics for BQML K-means models (via synth). ([#8945](https://github.com/googleapis/google-cloud-python/pull/8945))
+
+### Documentation
+
+- Add compatibility check badges to READMEs. ([#8288](https://github.com/googleapis/google-cloud-python/pull/8288))
+- Link to googleapis.dev documentation in READMEs. ([#8705](https://github.com/googleapis/google-cloud-python/pull/8705))
+- Remove redundant service account key code sample. ([#8891](https://github.com/googleapis/google-cloud-python/pull/8891))
+
+### Internal / Testing Changes
+
+- Fix several pytest "skip if" markers ([#8694](https://github.com/googleapis/google-cloud-python/pull/8694))
+- Update tests to support conversion of NaN as NULL in pyarrow `0.14.*`. ([#8785](https://github.com/googleapis/google-cloud-python/pull/8785))
+- Mock external calls in one of BigQuery unit tests ([#8727](https://github.com/googleapis/google-cloud-python/pull/8727))
+- Set IPython user agent when running queries with IPython cell magic ([#8713](https://github.com/googleapis/google-cloud-python/pull/8713))
+- Use configurable bucket name for GCS samples data in systems tests. ([#8783](https://github.com/googleapis/google-cloud-python/pull/8783))
+- Move `maybe_fail_import()` to top level test utils ([#8840](https://github.com/googleapis/google-cloud-python/pull/8840))
+- Set BQ Storage client user-agent when in Jupyter cell ([#8734](https://github.com/googleapis/google-cloud-python/pull/8734))
+
+## 1.17.0
+
+07-12-2019 07:56 PDT
+
+### New Features
+
+- Support faster Arrow data format in `to_dataframe` when using BigQuery Storage API. ([#8551](https://github.com/googleapis/google-cloud-python/pull/8551))
+- Add `to_arrow` to get a `pyarrow.Table` from query results. ([#8609](https://github.com/googleapis/google-cloud-python/pull/8609))
+
+### Dependencies
+
+- Exclude bad 0.14.0 `pyarrow` release. ([#8551](https://github.com/googleapis/google-cloud-python/pull/8551))
+
+## 1.16.0
+
+07-01-2019 10:22 PDT
+
+### New Features
+
+- Add Routines API. ([#8491](https://github.com/googleapis/google-cloud-python/pull/8491))
+- Add more stats to Models API, such as `optimization_strategy` (via synth). ([#8344](https://github.com/googleapis/google-cloud-python/pull/8344))
+
+### Documentation
+
+- Add docs job to publish to googleapis.dev. ([#8464](https://github.com/googleapis/google-cloud-python/pull/8464))
+- Add sample demonstrating how to create a job. ([#8422](https://github.com/googleapis/google-cloud-python/pull/8422))
+- Use autodetected location in code samples. ([#8340](https://github.com/googleapis/google-cloud-python/pull/8340), [#8341](https://github.com/googleapis/google-cloud-python/pull/8341))
+
+### Internal / Testing Changes
+
+- Refactor `to_dataframe` to deterministicly update progress bar. ([#8303](https://github.com/googleapis/google-cloud-python/pull/8303))
+
+## 1.15.0
+
+06-14-2019 10:10 PDT
+
+### Implementation Changes
+
+- Fix bug where `load_table_from_dataframe` could not append to REQUIRED fields. ([#8230](https://github.com/googleapis/google-cloud-python/pull/8230))
+
+### New Features
+
+- Add `page_size` parameter to `QueryJob.result`. ([#8206](https://github.com/googleapis/google-cloud-python/pull/8206))
+
+## 1.14.0
+
+06-04-2019 11:11 PDT
+
+
+### New Features
+- Add `maximum_bytes_billed` argument and `context.default_query_job_config` property to magics. ([#8179](https://github.com/googleapis/google-cloud-python/pull/8179))
+
+### Dependencies
+- Don't pin `google-api-core` in libs using `google-cloud-core`. ([#8213](https://github.com/googleapis/google-cloud-python/pull/8213))
+
+## 1.13.0
+
+05-31-2019 10:22 PDT
+
+### New Features
+
+- Use `job_config.schema` for data type conversion if specified in `load_table_from_dataframe`. ([#8105](https://github.com/googleapis/google-cloud-python/pull/8105))
+
+### Internal / Testing Changes
+
+- Adds private `_connection` object to magics context. ([#8192](https://github.com/googleapis/google-cloud-python/pull/8192))
+- Fix coverage in 'types.py' (via synth). ([#8146](https://github.com/googleapis/google-cloud-python/pull/8146))
+
+## 1.12.1
+
+05-21-2019 11:16 PDT
+
+### Implementation Changes
+
+- Don't raise error when encountering unknown fields in Models API. ([#8083](https://github.com/googleapis/google-cloud-python/pull/8083))
+
+### Documentation
+
+- Use alabaster theme everwhere. ([#8021](https://github.com/googleapis/google-cloud-python/pull/8021))
+
+### Internal / Testing Changes
+
+- Add empty lines (via synth). ([#8049](https://github.com/googleapis/google-cloud-python/pull/8049))
+
+## 1.12.0
+
+05-16-2019 11:25 PDT
+
+### Implementation Changes
+- Remove duplicates from index on pandas DataFrames returned by `to_dataframe()`. ([#7953](https://github.com/googleapis/google-cloud-python/pull/7953))
+- Prevent error when time partitioning is populated with empty dict ([#7904](https://github.com/googleapis/google-cloud-python/pull/7904))
+- Preserve order in `to_dataframe` with BQ Storage from queries containing `ORDER BY` ([#7793](https://github.com/googleapis/google-cloud-python/pull/7793))
+- Respect `progress_bar_type` in `to_dataframe` when used with BQ Storage API ([#7697](https://github.com/googleapis/google-cloud-python/pull/7697))
+- Refactor QueryJob.query to read from resource dictionary ([#7763](https://github.com/googleapis/google-cloud-python/pull/7763))
+- Close the `to_dataframe` progress bar when finished. ([#7757](https://github.com/googleapis/google-cloud-python/pull/7757))
+- Ensure that `KeyboardInterrupt` during `to_dataframe`no longer hangs. ([#7698](https://github.com/googleapis/google-cloud-python/pull/7698))
+- Raise ValueError when BQ Storage is required but missing ([#7726](https://github.com/googleapis/google-cloud-python/pull/7726))
+- Make `total_rows` available on RowIterator before iteration ([#7622](https://github.com/googleapis/google-cloud-python/pull/7622))
+- Avoid masking auth errors in `to_dataframe` with BQ Storage API ([#7674](https://github.com/googleapis/google-cloud-python/pull/7674))
+
+### New Features
+- Add support for passing `client_info`. ([#7849](https://github.com/googleapis/google-cloud-python/pull/7849) and ([#7806](https://github.com/googleapis/google-cloud-python/pull/7806))
+- Phase 1 for storing schemas for later use. ([#7761](https://github.com/googleapis/google-cloud-python/pull/7761))
+- Add `destination` and related properties to LoadJob. ([#7710](https://github.com/googleapis/google-cloud-python/pull/7710))
+- Add `clustering_fields` property to TableListItem ([#7692](https://github.com/googleapis/google-cloud-python/pull/7692))
+- Add `created` and `expires` properties to TableListItem ([#7684](https://github.com/googleapis/google-cloud-python/pull/7684))
+
+### Dependencies
+- Pin `google-cloud-core >= 1.0.0, < 2.0dev`. ([#7993](https://github.com/googleapis/google-cloud-python/pull/7993))
+- Add `[all]` extras to install all extra dependencies ([#7610](https://github.com/googleapis/google-cloud-python/pull/7610))
+
+### Documentation
+- Move table and dataset snippets to samples/ directory ([#7683](https://github.com/googleapis/google-cloud-python/pull/7683))
+
+### Internal / Testing Changes
+- Blacken unit tests. ([#7960](https://github.com/googleapis/google-cloud-python/pull/7960))
+- Cleanup client tests with method to create minimal table resource ([#7802](https://github.com/googleapis/google-cloud-python/pull/7802))
+
+## 1.11.2
+
+04-05-2019 08:16 PDT
+
+### Dependencies
+
+- Add dependency on protobuf. ([#7668](https://github.com/googleapis/google-cloud-python/pull/7668))
+
+## 1.11.1
+
+04-04-2019 09:19 PDT
+
+### Internal / Testing Changes
+
+- Increment version number in `setup.py`.
+
+## 1.11.0
+
+04-03-2019 19:33 PDT
+
+### Implementation Changes
+
+- Remove classifier for Python 3.4 for end-of-life. ([#7535](https://github.com/googleapis/google-cloud-python/pull/7535))
+
+### New Features
+
+- Enable fastparquet support by using temporary file in `load_table_from_dataframe` ([#7545](https://github.com/googleapis/google-cloud-python/pull/7545))
+- Allow string for copy sources, query destination, and default dataset ([#7560](https://github.com/googleapis/google-cloud-python/pull/7560))
+- Add `progress_bar_type` argument to `to_dataframe` to use `tqdm` to display a progress bar ([#7552](https://github.com/googleapis/google-cloud-python/pull/7552))
+- Call `get_table` in `list_rows` if the schema is not available ([#7621](https://github.com/googleapis/google-cloud-python/pull/7621))
+- Fallback to BQ API when there are problems reading from BQ Storage. ([#7633](https://github.com/googleapis/google-cloud-python/pull/7633))
+- Add methods for Models API ([#7562](https://github.com/googleapis/google-cloud-python/pull/7562))
+- Add option to use BigQuery Storage API from IPython magics ([#7640](https://github.com/googleapis/google-cloud-python/pull/7640))
+
+### Documentation
+
+- Remove typo in `Table.from_api_repr` docstring. ([#7509](https://github.com/googleapis/google-cloud-python/pull/7509))
+- Add docs session to nox configuration for BigQuery ([#7541](https://github.com/googleapis/google-cloud-python/pull/7541))
+
+### Internal / Testing Changes
+
+- Refactor `table()` methods into shared implementation. ([#7516](https://github.com/googleapis/google-cloud-python/pull/7516))
+- Blacken noxfile and setup file in nox session ([#7619](https://github.com/googleapis/google-cloud-python/pull/7619))
+- Actually use the `progress_bar_type` argument in `QueryJob.to_dataframe()`. ([#7616](https://github.com/googleapis/google-cloud-python/pull/7616))
+
+## 1.10.0
+
+03-06-2019 15:20 PST
+
+### Implementation Changes
+
+- Harden 'ArrayQueryParameter.from_api_repr' against missing 'parameterValue'. ([#7311](https://github.com/googleapis/google-cloud-python/pull/7311))
+- Allow nested records w/ null values. ([#7297](https://github.com/googleapis/google-cloud-python/pull/7297))
+
+### New Features
+
+- Add `exists_ok` and `not_found_ok` options to ignore errors when creating/deleting datasets/tables. ([#7491](https://github.com/googleapis/google-cloud-python/pull/7491))
+- Accept a string in Table and Dataset constructors. ([#7483](https://github.com/googleapis/google-cloud-python/pull/7483))
+
+### Documentation
+
+- Update docstring of RowIterator's to_dataframe ([#7306](https://github.com/googleapis/google-cloud-python/pull/7306))
+- Updated client library documentation URLs. ([#7307](https://github.com/googleapis/google-cloud-python/pull/7307))
+
+### Internal / Testing Changes
+
+- Fix lint. ([#7383](https://github.com/googleapis/google-cloud-python/pull/7383))
+
+## 1.9.0
+
+02-04-2019 13:28 PST
+
+### New Features
+
+- Add arguments to select `dtypes` and use BQ Storage API to `QueryJob.to_dataframe()`. ([#7241](https://github.com/googleapis/google-cloud-python/pull/7241))
+
+### Documentation
+
+- Add sample for fetching `total_rows` from query results. ([#7217](https://github.com/googleapis/google-cloud-python/pull/7217))
+
+## 1.8.1
+
+12-17-2018 17:53 PST
+
+
+### Documentation
+- Document Python 2 deprecation ([#6910](https://github.com/googleapis/google-cloud-python/pull/6910))
+- Normalize docs for 'page_size' / 'max_results' / 'page_token' ([#6842](https://github.com/googleapis/google-cloud-python/pull/6842))
+
+## 1.8.0
+
+12-10-2018 12:39 PST
+
+
+### Implementation Changes
+- Add option to use BQ Storage API with `to_dataframe` ([#6854](https://github.com/googleapis/google-cloud-python/pull/6854))
+- Fix exception type in comment ([#6847](https://github.com/googleapis/google-cloud-python/pull/6847))
+- Add `to_bqstorage` to convert from Table[Reference] google-cloud-bigquery-storage reference ([#6840](https://github.com/googleapis/google-cloud-python/pull/6840))
+- Import `iam.policy` from `google.api_core`. ([#6741](https://github.com/googleapis/google-cloud-python/pull/6741))
+- Add avro logical type control for load jobs. ([#6827](https://github.com/googleapis/google-cloud-python/pull/6827))
+- Allow setting partition expiration to 'None'. ([#6823](https://github.com/googleapis/google-cloud-python/pull/6823))
+- Add `retry` argument to `_AsyncJob.result`. ([#6302](https://github.com/googleapis/google-cloud-python/pull/6302))
+
+### Dependencies
+- Update dependency to google-cloud-core ([#6835](https://github.com/googleapis/google-cloud-python/pull/6835))
+
+### Documentation
+- Add avro load samples ([#6832](https://github.com/googleapis/google-cloud-python/pull/6832))
+
+### Internal / Testing Changes
+- Blacken libraries ([#6794](https://github.com/googleapis/google-cloud-python/pull/6794))
+- Fix copy/paste typos in noxfile comments ([#6831](https://github.com/googleapis/google-cloud-python/pull/6831))
+
+## 1.7.0
+
+11-05-2018 16:41 PST
+
+### Implementation Changes
+
+- Add destination table properties to `LoadJobConfig`. ([#6202](https://github.com/googleapis/google-cloud-python/pull/6202))
+- Allow strings or references in `create_dataset` and `create_table` ([#6199](https://github.com/googleapis/google-cloud-python/pull/6199))
+- Fix swallowed error message ([#6168](https://github.com/googleapis/google-cloud-python/pull/6168))
+
+### New Features
+
+- Add `--params option` to `%%bigquery` magic ([#6277](https://github.com/googleapis/google-cloud-python/pull/6277))
+- Expose `to_api_repr` method for jobs. ([#6176](https://github.com/googleapis/google-cloud-python/pull/6176))
+- Allow string in addition to DatasetReference / TableReference in Client methods. ([#6164](https://github.com/googleapis/google-cloud-python/pull/6164))
+- Add keyword arguments to job config constructors for setting properties ([#6397](https://github.com/googleapis/google-cloud-python/pull/6397))
+
+### Documentation
+
+- Update README service links in quickstart guides. ([#6322](https://github.com/googleapis/google-cloud-python/pull/6322))
+- Move usage guides to their own docs. ([#6238](https://github.com/googleapis/google-cloud-python/pull/6238))
+- Normalize use of support level badges ([#6159](https://github.com/googleapis/google-cloud-python/pull/6159))
+
+### Internal / Testing Changes
+
+- Deprecation cleanups ([#6304](https://github.com/googleapis/google-cloud-python/pull/6304))
+- Use `_get_sub_prop` helper so missing load stats don't raise. ([#6269](https://github.com/googleapis/google-cloud-python/pull/6269))
+- Use new Nox ([#6175](https://github.com/googleapis/google-cloud-python/pull/6175))
+- Harden snippets against transient GCS errors. ([#6184](https://github.com/googleapis/google-cloud-python/pull/6184))
+
+## 1.6.0
+
+### New Features
+- Add support for `GEOGRAPHY` type ([#6147](https://github.com/googleapis/google-cloud-python/pull/6147))
+- Add default QueryJobConfig to Client ([#6088](https://github.com/googleapis/google-cloud-python/pull/6088))
+
+### Documentation
+- Remove unused "append" samples ([#6100](https://github.com/googleapis/google-cloud-python/pull/6100))
+
+### Internal / Testing Changes
+- Address dataset leaks, conflicts in systests ([#6099](https://github.com/googleapis/google-cloud-python/pull/6099))
+- Harden bucket teardown against `429 Too Many Requests`. ([#6101](https://github.com/googleapis/google-cloud-python/pull/6101))
+
+## 1.5.1
+
+### Implementation Changes
+
+- Retry '502 Bad Gateway' errors by default. (#5930)
+- Avoid pulling entire result set into memory when constructing dataframe. (#5870)
+- Add support for retrying unstructured 429 / 500 / 502 responses. (#6011)
+- Populate the jobReference from the API response. (#6044)
+
+### Documentation
+
+- Prepare documentation for repo split (#5955)
+- Fix leakage of bigquery/spanner sections into sidebar menu. (#5986)
+
+### Internal / Testing Changes
+
+- Test pandas support under Python 3.7. (#5857)
+- Nox: use inplace installs (#5865)
+- Update system test to use test data in bigquery-public-data. (#5965)
+
+## 1.5.0
+
+### Implementation Changes
+
+- Make 'Table.location' read-only. (#5687)
+
+### New Features
+
+- Add 'clustering_fields' properties. (#5630)
+- Add support for job labels (#5654)
+- Add 'QueryJob.estimated_bytes_processed' property (#5655)
+- Add support/tests for loading tables from 'gzip.GzipFile'. (#5711)
+- Add 'ExternalSourceFormat' enum. (#5674)
+- Add default location to client (#5678)
+
+### Documentation
+
+- Fix typo in CopyJob sources docstring (#5690)
+
+### Internal / Testing Changes
+
+- Add/refactor snippets for managing BigQuery jobs (#5631)
+- Reenable systests for 'dataset.update'/'table.update'. (#5732)
+
+## 1.4.0
+
+### Implementation Changes
+
+- Add 'internalError' to retryable error reasons. (#5599)
+- Don't raise exception if viewing CREATE VIEW DDL results (#5602)
+
+### New Features
+
+- Add Orc source format support and samples (#5500)
+- Move 'DEFAULT_RETRY' (w/ its predicate) to a new public 'retry' module. (#5552)
+- Allow listing rows on an empty table. (#5584)
+
+### Documentation
+
+- Add load_table_from_dataframe() to usage docs and changelog and dedents snippets in usage page (#5501)
+- Add samples for query external data sources (GCS & Sheets) (#5491)
+- Add BigQuery authorized view samples (#5515)
+- Update docs to show pyarrow as the only dependency of load_table_from_dataframe() (#5582)
+
+### Internal / Testing Changes
+
+- Add missing explict coverage for '_helpers' (#5550)
+- Skip update_table and update_dataset tests until etag issue is resolved. (#5590)
+
+## 1.3.0
+
+### New Features
+
+- NUMERIC type support (#5331)
+- Add timeline and top-level slot-millis to query statistics. (#5312)
+- Add additional statistics to query plan stages. (#5307)
+- Add `client.load_table_from_dataframe()` (#5387)
+
+### Documentation
+
+- Use autosummary to split up API reference docs (#5340)
+- Fix typo in Client docstrings (#5342)
+
+### Internal / Testing Changes
+
+- Prune systests identified as reduntant to snippets. (#5365)
+- Modify system tests to use prerelease versions of grpcio (#5304)
+- Improve system test performance (#5319)
+
+## 1.2.0
+
+### Implementation Changes
+- Switch `list_partitions` helper to a direct metatable read (#5273)
+- Fix typo in `Encoding.ISO_8859_1` enum value (#5211)
+
+### New Features
+- Add UnknownJob type for redacted jobs. (#5281)
+- Add project parameter to `list_datasets` and `list_jobs` (#5217)
+- Add from_string factory methods to Dataset and Table (#5255)
+- Add column based time partitioning (#5267)
+
+### Documentation
+- Standardize docstrings for constants (#5289)
+- Fix docstring / impl of `ExtractJob.destination_uri_file_counts`. (#5245)
+
+### Internal / Testing Changes
+- Add testing support for Python 3.7; remove testing support for Python 3.4. (#5295)
+
+## 1.1.0
+
+### New Features
+- Add `client.get_service_account_email` (#5203)
+
+### Documentation
+- Update samples and standardize region tags (#5195)
+
+### Internal / Testing Changes
+- Fix trove classifier to be Production/Stable
+- Don't suppress 'dots' output on test (#5202)
+
+## 1.0.0
+
+### Implementation Changes
+- Remove deprecated Client methods (#5182)
+
+## 0.32.0
+
+### :warning: Interface changes
+
+- Use `job.configuration` resource for XXXJobConfig classes (#5036)
+
+### Interface additions
+
+- Add `page_size` parameter for `list_rows` and use in DB-API for `arraysize` (#4931)
+- Add IPython magics for running queries (#4983)
+
+### Documentation
+
+- Add job string constant parameters in init and snippets documentation (#4987)
+
+### Internal / Testing changes
+
+- Specify IPython version 5.5 when running Python 2.7 tests (#5145)
+- Move all Dataset property conversion logic into properties (#5130)
+- Remove unnecessary _Table class from test_job.py (#5126)
+- Use explicit bytes to initialize 'BytesIO'. (#5116)
+- Make SchemaField be able to include description via from_api_repr method (#5114)
+- Remove _ApiResourceProperty class (#5107)
+- Add dev version for 0.32.0 release (#5105)
+- StringIO to BytesIO (#5101)
+- Shorten snippets test name (#5091)
+- Don't use `selected_fields` for listing query result rows (#5072)
+- Add location property to job classes. (#5071)
+- Use autospec for Connection in tests. (#5066)
+- Add Parquet SourceFormat and samples (#5057)
+- Remove test_load_table_from_uri_w_autodetect_schema_then_get_job because of duplicate test in snippets (#5004)
+- Fix encoding variable and strings UTF-8 and ISO-8859-1 difference documentation (#4990)
+
+## 0.31.0
+
+### Interface additions
+
+- Add support for `EncryptionConfiguration` (#4845)
+
+### Implementation changes
+
+- Allow listing/getting jobs even when there is an "invalid" job. (#4786)
+
+### Dependencies
+
+- The minimum version for `google-api-core` has been updated to version 1.0.0. This may cause some incompatibility with older google-cloud libraries, you will need to update those libraries if you have a dependency conflict. (#4944, #4946)
+
+### Documentation
+
+- Update format in `Table.full_table_id` and `TableListItem.full_table_id` docstrings. (#4906)
+
+### Testing and internal changes
+
+- Install local dependencies when running lint (#4936)
+- Re-enable lint for tests, remove usage of pylint (#4921)
+- Normalize all setup.py files (#4909)
+- Remove unnecessary debug print from tests (#4907)
+- Use constant strings for job properties in tests (#4833)
+
+## 0.30.0
+
+This is the release candidate for v1.0.0.
+
+### Interface changes / additions
+
+- Add `delete_contents` to `delete_dataset`. (#4724)
+
+### Bugfixes
+
+- Add handling of missing properties in `SchemaField.from_api_repr()`. (#4754)
+- Fix missing return value in `LoadJobConfig.from_api_repr`. (#4727)
+
+### Documentation
+
+- Minor documentation and typo fixes. (#4782, #4718, #4784, #4835, #4836)
+
+## 0.29.0
+
+### Interface changes / additions
+
+- Add `to_dataframe()` method to row iterators. When Pandas is installed this
+ method returns a `DataFrame` containing the query's or table's rows.
+ ([#4354](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4354))
+- Iterate over a `QueryJob` to wait for and get the query results.
+ ([#4350](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4350))
+- Add `Table.reference` and `Dataset.reference` properties to get the
+ `TableReference` or `DatasetReference` corresponding to that `Table` or
+ `Dataset`, respectively.
+ ([#4405](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4405))
+- Add `Row.keys()`, `Row.items()`, and `Row.get()`. This makes `Row` act
+ more like a built-in dictionary.
+ ([#4393](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4393),
+ [#4413](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4413))
+
+### Interface changes / breaking changes
+
+- Add `Client.insert_rows()` and `Client.insert_rows_json()`, deprecate
+ `Client.create_rows()` and `Client.create_rows_json()`.
+ ([#4657](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4657))
+- Add `Client.list_tables`, deprecate `Client.list_dataset_tables`.
+ ([#4653](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4653))
+- `Client.list_tables` returns an iterators of `TableListItem`. The API
+ only returns a subset of properties of a table when listing.
+ ([#4427](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4427))
+- Remove `QueryJob.query_results()`. Use `QueryJob.result()` instead.
+ ([#4652](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4652))
+- Remove `Client.query_rows()`. Use `Client.query()` instead.
+ ([#4429](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4429))
+- `Client.list_datasets` returns an iterator of `DatasetListItem`. The API
+ only returns a subset of properties of a dataset when listing.
+ ([#4439](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/4439))
+
+## 0.28.0
+
+**0.28.0 significantly changes the interface for this package.** For examples
+of the differences between 0.28.0 and previous versions, see
+[Migrating to the BigQuery Python client library 0.28][2].
+These changes can be summarized as follows:
+
+- Query and view operations default to the standard SQL dialect. (#4192)
+- Client functions related to
+ [jobs](https://cloud.google.com/bigquery/docs/jobs-overview), like running
+ queries, immediately start the job.
+- Functions to create, get, update, delete datasets and tables moved to the
+ client class.
+
+[2]: https://cloud.google.com/bigquery/docs/python-client-migration
+
+### Fixes
+
+- Populate timeout parameter correctly for queries (#4209)
+- Automatically retry idempotent RPCs (#4148, #4178)
+- Parse timestamps in query parameters using canonical format (#3945)
+- Parse array parameters that contain a struct type. (#4040)
+- Support Sub Second Datetimes in row data (#3901, #3915, #3926), h/t @page1
+
+### Interface changes / additions
+
+- Support external table configuration (#4182) in query jobs (#4191) and
+ tables (#4193).
+- New `Row` class allows for access by integer index like a tuple, string
+ index like a dictionary, or attribute access like an object. (#4149)
+- Add option for job ID generation with user-supplied prefix (#4198)
+- Add support for update of dataset access entries (#4197)
+- Add support for atomic read-modify-write of a dataset using etag (#4052)
+- Add support for labels to `Dataset` (#4026)
+- Add support for labels to `Table` (#4207)
+- Add `Table.streaming_buffer` property (#4161)
+- Add `TableReference` class (#3942)
+- Add `DatasetReference` class (#3938, #3942, #3993)
+- Add `ExtractJob.destination_uri_file_counts` property. (#3803)
+- Add `client.create_rows_json()` to bypass conversions on streaming writes.
+ (#4189)
+- Add `client.get_job()` to get arbitrary jobs. (#3804, #4213)
+- Add filter to `client.list_datasets()` (#4205)
+- Add `QueryJob.undeclared_query_parameters` property. (#3802)
+- Add `QueryJob.referenced_tables` property. (#3801)
+- Add new scalar statistics properties to `QueryJob` (#3800)
+- Add `QueryJob.query_plan` property. (#3799)
+
+### Interface changes / breaking changes
+
+- Remove `client.run_async_query()`, use `client.query()` instead. (#4130)
+- Remove `client.run_sync_query()`, use `client.query_rows()` instead. (#4065, #4248)
+- Make `QueryResults` read-only. (#4094, #4144)
+- Make `get_query_results` private. Return rows for `QueryJob.result()` (#3883)
+- Move `*QueryParameter` and `UDFResource` classes to `query` module (also
+ exposed in `bigquery` module). (#4156)
+
+#### Changes to tables
+
+- Remove `client` from `Table` class (#4159)
+- Remove `table.exists()` (#4145)
+- Move `table.list_parations` to `client.list_partitions` (#4146)
+- Move `table.upload_from_file` to `client.load_table_from_file` (#4136)
+- Move `table.update()` and `table.patch()` to `client.update_table()` (#4076)
+- Move `table.insert_data()` to `client.create_rows()`. Automatically
+ generates row IDs if not supplied. (#4151, #4173)
+- Move `table.fetch_data()` to `client.list_rows()` (#4119, #4143)
+- Move `table.delete()` to `client.delete_table()` (#4066)
+- Move `table.create()` to `client.create_table()` (#4038, #4043)
+- Move `table.reload()` to `client.get_table()` (#4004)
+- Rename `Table.name` attribute to `Table.table_id` (#3959)
+- `Table` constructor takes a `TableReference` as parameter (#3997)
+
+#### Changes to datasets
+
+- Remove `client` from `Dataset` class (#4018)
+- Remove `dataset.exists()` (#3996)
+- Move `dataset.list_tables()` to `client.list_dataset_tables()` (#4013)
+- Move `dataset.delete()` to `client.delete_dataset()` (#4012)
+- Move `dataset.patch()` and `dataset.update()` to `client.update_dataset()` (#4003)
+- Move `dataset.create()` to `client.create_dataset()` (#3982)
+- Move `dataset.reload()` to `client.get_dataset()` (#3973)
+- Rename `Dataset.name` attribute to `Dataset.dataset_id` (#3955)
+- `client.dataset()` returns a `DatasetReference` instead of `Dataset`. (#3944)
+- Rename class: `dataset.AccessGrant -> dataset.AccessEntry`. (#3798)
+- `dataset.table()` returns a `TableReference` instead of a `Table` (#4014)
+- `Dataset` constructor takes a DatasetReference (#4036)
+
+#### Changes to jobs
+
+- Make `job.begin()` method private. (#4242)
+- Add `LoadJobConfig` class and modify `LoadJob` (#4103, #4137)
+- Add `CopyJobConfig` class and modify `CopyJob` (#4051, #4059)
+- Type of Job's and Query's `default_dataset` changed from `Dataset` to
+ `DatasetReference` (#4037)
+- Rename `client.load_table_from_storage()` to `client.load_table_from_uri()`
+ (#4235)
+- Rename `client.extract_table_to_storage` to `client.extract_table()`.
+ Method starts the extract job immediately. (#3991, #4177)
+- Rename `XJob.name` to `XJob.job_id`. (#3962)
+- Rename job classes. `LoadTableFromStorageJob -> LoadJob` and
+ `ExtractTableToStorageJob -> jobs.ExtractJob` (#3797)
+
+### Dependencies
+
+- Updating to `google-cloud-core ~= 0.28`, in particular, the
+ `google-api-core` package has been moved out of `google-cloud-core`. (#4221)
+
+PyPI: https://pypi.org/project/google-cloud-bigquery/0.28.0/
+
+
+## 0.27.0
+
+- Remove client-side enum validation. (#3735)
+- Add `Table.row_from_mapping` helper. (#3425)
+- Move `google.cloud.future` to `google.api.core` (#3764)
+- Fix `__eq__` and `__ne__`. (#3765)
+- Move `google.cloud.iterator` to `google.api.core.page_iterator` (#3770)
+- `nullMarker` support for BigQuery Load Jobs (#3777), h/t @leondealmeida
+- Allow `job_id` to be explicitly specified in DB-API. (#3779)
+- Add support for a custom null marker. (#3776)
+- Add `SchemaField` serialization and deserialization. (#3786)
+- Add `get_query_results` method to the client. (#3838)
+- Poll for query completion via `getQueryResults` method. (#3844)
+- Allow fetching more than the first page when `max_results` is set. (#3845)
+
+PyPI: https://pypi.org/project/google-cloud-bigquery/0.27.0/
+
+## 0.26.0
+
+### Notable implementation changes
+
+- Using the `requests` transport attached to a Client for for resumable media
+ (i.e. downloads and uploads) (#3705) (this relates to the `httplib2` to
+ `requests` switch)
+
+### Interface changes / additions
+
+- Adding `autodetect` property on `LoadTableFromStorageJob` to enable schema
+ autodetection. (#3648)
+- Implementing the Python Futures interface for Jobs. Call `job.result()` to
+ wait for jobs to complete instead of polling manually on the job status.
+ (#3626)
+- Adding `is_nullable` property on `SchemaField`. Can be used to check if a
+ column is nullable. (#3620)
+- `job_name` argument added to `Table.upload_from_file` for setting the job
+ ID. (#3605)
+- Adding `google.cloud.bigquery.dbapi` package, which implements PEP-249
+ DB-API specification. (#2921)
+- Adding `Table.view_use_legacy_sql` property. Can be used to create views
+ with legacy or standard SQL. (#3514)
+
+### Interface changes / breaking changes
+
+- Removing `results()` method from the `QueryJob` class. Use
+ `query_results()` instead. (#3661)
+- `SchemaField` is now immutable. It is also hashable so that it can be used
+ in sets. (#3601)
+
+### Dependencies
+
+- Updating to `google-cloud-core ~= 0.26`, in particular, the underlying HTTP
+ transport switched from `httplib2` to `requests` (#3654, #3674)
+- Adding dependency on `google-resumable-media` for loading BigQuery tables
+ from local files. (#3555)
+
+### Packaging
+
+- Fix inclusion of `tests` (vs. `unit_tests`) in `MANIFEST.in` (#3552)
+- Updating `author_email` in `setup.py` to `googleapis-publisher@google.com`.
+ (#3598)
+
+PyPI: https://pypi.org/project/google-cloud-bigquery/0.26.0/
diff --git a/testbed/googleapis__python-bigquery/docs/conf.py b/testbed/googleapis__python-bigquery/docs/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..826298090d453ed876979ee5cda52fec09d7a6e5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/conf.py
@@ -0,0 +1,388 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# google-cloud-bigquery documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+# For plugins that can not read conf.py.
+# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
+sys.path.insert(0, os.path.abspath("."))
+
+__version__ = ""
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.5.5"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ "sphinx.ext.autodoc",
+ "sphinx.ext.autosummary",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.coverage",
+ "sphinx.ext.doctest",
+ "sphinx.ext.napoleon",
+ "sphinx.ext.todo",
+ "sphinx.ext.viewcode",
+ "recommonmark",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_options = {"members": True, "inherited-members": True}
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The root toctree document.
+root_doc = "index"
+
+# General information about the project.
+project = "google-cloud-bigquery"
+copyright = "2019, Google"
+author = "Google APIs"
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = [
+ "google/cloud/bigquery_v2/**", # Legacy proto-based types.
+ "_build",
+ "**/.nox/**/*",
+ "samples/AUTHORING_GUIDE.md",
+ "samples/CONTRIBUTING.md",
+ "samples/snippets/README.rst",
+]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "alabaster"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+ "description": "Google Cloud Client Libraries for google-cloud-bigquery",
+ "github_user": "googleapis",
+ "github_repo": "python-bigquery",
+ "github_banner": True,
+ "font_family": "'Roboto', Georgia, sans",
+ "head_font_family": "'Roboto', Georgia, serif",
+ "code_font_family": "'Roboto Mono', 'Consolas', monospace",
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "google-cloud-bigquery-doc"
+
+# -- Options for warnings ------------------------------------------------------
+
+
+suppress_warnings = [
+ # Temporarily suppress this to avoid "more than one target found for
+ # cross-reference" warning, which are intractable for us to avoid while in
+ # a mono-repo.
+ # See https://github.com/sphinx-doc/sphinx/blob
+ # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
+ "ref.python"
+]
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #'preamble': '',
+ # Latex figure (float) alignment
+ #'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (
+ root_doc,
+ "google-cloud-bigquery.tex",
+ "google-cloud-bigquery Documentation",
+ author,
+ "manual",
+ )
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (
+ root_doc,
+ "google-cloud-bigquery",
+ "google-cloud-bigquery Documentation",
+ [author],
+ 1,
+ )
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (
+ root_doc,
+ "google-cloud-bigquery",
+ "google-cloud-bigquery Documentation",
+ author,
+ "google-cloud-bigquery",
+ "google-cloud-bigquery Library",
+ "APIs",
+ )
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+ "python": ("https://python.readthedocs.org/en/latest/", None),
+ "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
+ "google.api_core": (
+ "https://googleapis.dev/python/google-api-core/latest/",
+ None,
+ ),
+ "grpc": ("https://grpc.github.io/grpc/python/", None),
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
+ "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
+ "dateutil": ("https://dateutil.readthedocs.io/en/latest/", None),
+ "geopandas": ("https://geopandas.org/", None),
+ "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
+}
+
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = False
+napoleon_use_admonition_for_notes = False
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
diff --git a/testbed/googleapis__python-bigquery/docs/dbapi.rst b/testbed/googleapis__python-bigquery/docs/dbapi.rst
new file mode 100644
index 0000000000000000000000000000000000000000..81f000bc7c85402ce748d41d6a880056d49e4fcb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/dbapi.rst
@@ -0,0 +1,50 @@
+DB-API Reference
+~~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigquery.dbapi
+ :members:
+ :show-inheritance:
+
+
+DB-API Query-Parameter Syntax
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The BigQuery DB-API uses the `qmark` `parameter style
+`_ for
+unnamed/positional parameters and the `pyformat` parameter style for
+named parameters.
+
+An example of a query using unnamed parameters::
+
+ insert into people (name, income) values (?, ?)
+
+and using named parameters::
+
+ insert into people (name, income) values (%(name)s, %(income)s)
+
+Providing explicit type information
+-----------------------------------
+
+BigQuery requires type information for parameters. The BigQuery
+DB-API can usually determine parameter types for parameters based on
+provided values. Sometimes, however, types can't be determined (for
+example when `None` is passed) or are determined incorrectly (for
+example when passing a floating-point value to a numeric column).
+
+The BigQuery DB-API provides an extended parameter syntax. For named
+parameters, a BigQuery type is provided after the name separated by a
+colon, as in::
+
+ insert into people (name, income) values (%(name:string)s, %(income:numeric)s)
+
+For unnamed parameters, use the named syntax with a type, but no
+name, as in::
+
+ insert into people (name, income) values (%(:string)s, %(:numeric)s)
+
+Providing type information is the *only* way to pass `struct` data::
+
+ cursor.execute(
+ "insert into points (point) values (%(:struct)s)",
+ [{"x": 10, "y": 20}],
+ )
diff --git a/testbed/googleapis__python-bigquery/docs/design/index.rst b/testbed/googleapis__python-bigquery/docs/design/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5750c7a9890e84ea9c84abaad1b29473500bcab8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/design/index.rst
@@ -0,0 +1,11 @@
+Client Library Design
+=====================
+
+Some features of this client library have complex requirements and/or
+implementation. These documents describe the design decisions that contributued
+to those features.
+
+.. toctree::
+ :maxdepth: 2
+
+ query-retries
diff --git a/testbed/googleapis__python-bigquery/docs/design/query-retries.md b/testbed/googleapis__python-bigquery/docs/design/query-retries.md
new file mode 100644
index 0000000000000000000000000000000000000000..08d75302bc659c5cee9bc4a0b676ea06b4935eea
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/design/query-retries.md
@@ -0,0 +1,115 @@
+# Design of query retries in the BigQuery client libraries for Python
+
+
+## Overview
+
+The BigQuery client libraries for Python must safely retry API requests related to initiating a query. By "safely", it is meant that the BigQuery backend never successfully executes the query twice. This avoids duplicated rows from INSERT DML queries, among other problems.
+
+To achieve this goal, the client library only retries an API request relating to queries if at least one of the following is true: (1) issuing this exact request is idempotent, meaning that it won't result in a duplicate query being issued, or (2) the query has already failed in such a way that it is safe to re-issue the query.
+
+
+## Background
+
+
+### API-level retries
+
+Retries for nearly all API requests were [added in 2017](https://github.com/googleapis/google-cloud-python/pull/4148) and are [configurable via a Retry object](https://googleapis.dev/python/google-api-core/latest/retry.html#google.api_core.retry.Retry) passed to the retry argument. Notably, this includes the "query" method on the Python client, corresponding to the [jobs.insert REST API method](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert). The Python client always populates the [jobReference.jobId](https://cloud.google.com/bigquery/docs/reference/rest/v2/JobReference#FIELDS.job_id) field of the request body. If the BigQuery REST API receives a jobs.insert request for a job with the same ID, the REST API fails because the job already exists.
+
+
+### jobs.insert and jobs.query API requests
+
+By default, the Python client starts a query using the [jobs.insert REST API
+method](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert).
+Support for the [jobs.query REST API
+method](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query)
+was [added via the `api_method`
+parameter](https://github.com/googleapis/python-bigquery/pull/967) and is
+included in version 3.0 of the Python client library.
+
+The jobs.query REST API method differs from jobs.insert in that it does not accept a job ID. Instead, the [requestId parameter](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#QueryRequest.FIELDS.request_id) provides a window of idempotency for duplicate requests.
+
+
+### Re-issuing a query
+
+The ability to re-issue a query automatically was a [long](https://github.com/googleapis/google-cloud-python/issues/5555) [requested](https://github.com/googleapis/python-bigquery/issues/14) [feature](https://github.com/googleapis/python-bigquery/issues/539). As work ramped up on the SQLAlchemy connector, it became clear that this feature was necessary to keep the test suite, which issues hundreds of queries, from being [too flakey](https://github.com/googleapis/python-bigquery-sqlalchemy/issues?q=is%3Aissue+is%3Aclosed+author%3Aapp%2Fflaky-bot+sort%3Acreated-asc).
+
+Retrying a query is not as simple as retrying a single API request. In many
+cases the client library does not "know" about a query job failure until it
+tries to fetch the query results. To solve this, the [client re-issues a
+query](https://github.com/googleapis/python-bigquery/pull/837) as it was
+originally issued only if the query job has failed for a retryable reason.
+
+
+### getQueryResults error behavior
+
+The client library uses [the jobs.getQueryResults REST API method](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults) to wait for a query to finish. This REST API has a unique behavior in that it translates query job failures into HTTP error status codes. To disambiguate these error responses from one that may have occurred further up the REST API stack (such as from the Google load balancer), the client library inspects the error response body.
+
+When the error corresponds to a query job failure, BigQuery populates the
+"errors" array field, with the first element in the list corresponding to the
+error which directly caused the job failure. There are many [error response
+messages](https://cloud.google.com/bigquery/docs/error-messages), but only some
+of them indicate that re-issuing the query job may help. For example, if the
+job fails due to invalid query syntax, re-issuing the query won't help. If a
+query job fails due to "backendError" or "rateLimitExceeded", we know that the
+job did not successfully execute for some other reason.
+
+
+## Detailed design
+
+As mentioned in the "Overview" section, the Python client only retries a query request if at least one of the following is true: (1) issuing this exact request is idempotent, meaning that it won't result in a duplicate query being issued, or (2) the query has already failed in such a way that it is safe to re-issue the query.
+
+A developer can configure when to retry an API request (corresponding to #1 "issuing this exact request is idempotent") via the query method's `retry` parameter. A developer can configure when to re-issue a query job after a job failure (corresponding to #2 "the query has already failed") via the query method's `job_retry` parameter.
+
+
+### Retrying API requests via the `retry` parameter
+
+The first set of retries are at the API layer. The client library sends an
+identical request if the request is idempotent.
+
+#### Retrying the jobs.insert API via the retry parameter
+
+When the `api_method` parameter is set to `"INSERT"`, which is the default
+value, the client library uses the jobs.insert REST API to start a query job.
+Before it issues this request, it sets a job ID. This job ID remains constant
+across API retries.
+
+If the job ID was randomly generated, and the jobs.insert request and all retries fail, the client library sends a request to the jobs.get API. This covers the case when a query request succeeded, but there was a transient issue that prevented the client from receiving a successful response. Note: `jobs.get` requires the location of the query. It will fail with 404 if the location is not specified and the job is not in the US multi-region.
+
+
+#### Retrying the jobs.query API via the retry parameter
+
+When the `api_method` parameter is set to `"QUERY"` (available in version 3 of
+the client library), the client library sends a request to the jobs.query REST
+API. The client library automatically populates the `requestId` parameter in
+the request body. The `requestId` remains constant across API retries, ensuring
+that requests are idempotent.
+
+As there is no job ID available, the client library cannot call jobs.get if the query happened to succeed, but all retries resulted in an error response. In this case, the client library throws an exception.
+
+
+#### Retrying the jobs.getQueryResults API via the retry parameter
+
+The jobs.getQueryResults REST API is read-only. Thus, it is always safe to
+retry. As noted in the "Background" section, HTTP error response codes can
+indicate that the job itself has failed, so this may retry more often than is
+strictly needed
+([Issue #1122](https://github.com/googleapis/python-bigquery/issues/1122)
+has been opened to investigate this).
+
+
+### Re-issuing queries via the `job_retry` parameter
+
+The first set of retries are at the "job" layer, called "re-issue" in this
+document. The client library sends an identical query request (except for the
+job or request identifier) if the query job has failed for a re-issuable reason.
+
+
+#### Deciding when it is safe to re-issue a query
+
+The conditions when it is safe to re-issue a query are different from the conditions when it is safe to retry an individual API request. As such, the `job_retry` parameter is provided to configure this behavior.
+
+The `job_retry` parameter is only used if (1) a query job fails and (2) a job ID is not provided by the developer. This is because it must generate a new job ID (or request ID, depending on the method used to create the query job) to avoid getting the same failed job.
+
+The `job_retry` parameter logic only happens after the client makes a request to the `jobs.getQueryRequest` REST API, which fails. The client examines the exception to determine if this failure was caused by a failed job and that the failure reason (e.g. "backendError" or "rateLimitExceeded") indicates that re-issuing the query may help.
+
+If it is determined that the query job can be re-issued safely, the original logic to issue the query is executed. If the jobs.insert REST API was originally used, a new job ID is generated. Otherwise, if the jobs.query REST API was originally used, a new request ID is generated. All other parts of the request body remain identical to the original request body for the failed query job, and the process repeats until `job_retry` is exhausted.
diff --git a/testbed/googleapis__python-bigquery/docs/enums.rst b/testbed/googleapis__python-bigquery/docs/enums.rst
new file mode 100644
index 0000000000000000000000000000000000000000..57608968a3fac13f2f259a5b4ca9b4e8aff26b6c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/enums.rst
@@ -0,0 +1,6 @@
+BigQuery Enums
+==============
+
+.. automodule:: google.cloud.bigquery.enums
+ :members:
+ :undoc-members:
diff --git a/testbed/googleapis__python-bigquery/docs/format_options.rst b/testbed/googleapis__python-bigquery/docs/format_options.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b3948209ec8c9b2b08ce71a5e62b34bd513acef7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/format_options.rst
@@ -0,0 +1,6 @@
+BigQuery Format Options
+=======================
+
+.. automodule:: google.cloud.bigquery.format_options
+ :members:
+ :undoc-members:
diff --git a/testbed/googleapis__python-bigquery/docs/generated/google.cloud.bigquery.magics.html b/testbed/googleapis__python-bigquery/docs/generated/google.cloud.bigquery.magics.html
new file mode 100644
index 0000000000000000000000000000000000000000..0d2a00fa14c9277dd2b85547e5dd74c322fe5912
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/generated/google.cloud.bigquery.magics.html
@@ -0,0 +1,8 @@
+
+
+
+
+
+
diff --git a/testbed/googleapis__python-bigquery/docs/index.rst b/testbed/googleapis__python-bigquery/docs/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6d6ed63f6cd4359461cfacc6ee65075522efda00
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/index.rst
@@ -0,0 +1,55 @@
+.. include:: README.rst
+
+.. note::
+
+ Because the BigQuery client uses the third-party :mod:`requests` library
+ by default and the BigQuery-Storage client uses :mod:`grpcio` library,
+ both are safe to share instances across threads. In multiprocessing
+ scenarios, the best practice is to create client instances *after*
+ :class:`multiprocessing.Pool` or :class:`multiprocessing.Process` invokes
+ :func:`os.fork`.
+
+More Examples
+~~~~~~~~~~~~~
+
+.. toctree::
+ :maxdepth: 2
+
+ usage/index
+ Official Google BigQuery How-to Guides
+
+API Reference
+-------------
+
+.. toctree::
+ :maxdepth: 2
+
+ reference
+ dbapi
+ design/index
+
+Migration Guide
+---------------
+
+See the guides below for instructions on migrating from older to newer *major* releases
+of this library (from ``1.x`` to ``2.x``, or from ``2.x`` to ``3.x``).
+
+.. toctree::
+ :maxdepth: 2
+
+ UPGRADING
+
+Changelog
+---------
+
+For a list of all ``google-cloud-bigquery`` releases:
+
+.. toctree::
+ :maxdepth: 2
+
+ changelog
+
+.. toctree::
+ :hidden:
+
+ summary_overview.md
diff --git a/testbed/googleapis__python-bigquery/docs/job_base.rst b/testbed/googleapis__python-bigquery/docs/job_base.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f5ef06b88e4799f58996d83e7678d53756ec3eca
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/job_base.rst
@@ -0,0 +1,5 @@
+Common Job Resource Classes
+===========================
+
+.. automodule:: google.cloud.bigquery.job.base
+ :members:
diff --git a/testbed/googleapis__python-bigquery/docs/magics.rst b/testbed/googleapis__python-bigquery/docs/magics.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aa14c6bfa4822ff7d15abc64b0c7604e6959356b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/magics.rst
@@ -0,0 +1,34 @@
+IPython Magics for BigQuery
+===========================
+
+To use these magics, you must first register them. Run the ``%load_ext`` magic
+in a Jupyter notebook cell.
+
+.. code::
+
+ %load_ext google.cloud.bigquery
+
+This makes the ``%%bigquery`` magic available.
+
+Code Samples
+------------
+
+Running a query:
+
+.. literalinclude:: ./samples/magics/query.py
+ :dedent: 4
+ :start-after: [START bigquery_jupyter_query]
+ :end-before: [END bigquery_jupyter_query]
+
+Running a parameterized query:
+
+.. literalinclude:: ./samples/magics/query_params_scalars.py
+ :dedent: 4
+ :start-after: [START bigquery_jupyter_query_params_scalars]
+ :end-before: [END bigquery_jupyter_query_params_scalars]
+
+API Reference
+-------------
+
+.. automodule:: google.cloud.bigquery.magics.magics
+ :members:
diff --git a/testbed/googleapis__python-bigquery/docs/query.rst b/testbed/googleapis__python-bigquery/docs/query.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d3cb8fe83537a265698947587aceaf54eca1e489
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/query.rst
@@ -0,0 +1,5 @@
+Query Resource Classes
+======================
+
+.. automodule:: google.cloud.bigquery.query
+ :members:
diff --git a/testbed/googleapis__python-bigquery/docs/reference.rst b/testbed/googleapis__python-bigquery/docs/reference.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6c00df0771c2dd802d3b1e0e30aa8382fe52762f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/reference.rst
@@ -0,0 +1,131 @@
+API Reference
+~~~~~~~~~~~~~
+
+The main concepts with this API are:
+
+- :class:`~google.cloud.bigquery.client.Client` manages connections to the
+ BigQuery API. Use the client methods to run jobs (such as a
+ :class:`~google.cloud.bigquery.job.QueryJob` via
+ :meth:`~google.cloud.bigquery.client.Client.query`) and manage resources.
+
+- :class:`~google.cloud.bigquery.dataset.Dataset` represents a
+ collection of tables.
+
+- :class:`~google.cloud.bigquery.table.Table` represents a single "relation".
+
+Client
+======
+
+.. automodule:: google.cloud.bigquery.client
+
+Job
+===
+
+.. automodule:: google.cloud.bigquery.job
+
+.. toctree::
+ :maxdepth: 2
+
+ job_base
+
+
+Dataset
+=======
+
+.. automodule:: google.cloud.bigquery.dataset
+
+
+Table
+=====
+
+.. automodule:: google.cloud.bigquery.table
+
+Model
+=====
+
+.. automodule:: google.cloud.bigquery.model
+
+Routine
+=======
+
+.. automodule:: google.cloud.bigquery.routine
+
+Schema
+======
+
+.. automodule:: google.cloud.bigquery.schema
+
+Query
+=====
+
+.. toctree::
+ :maxdepth: 2
+
+ query
+
+
+Retries
+=======
+
+.. automodule:: google.cloud.bigquery.retry
+
+
+External Configuration
+======================
+
+.. automodule:: google.cloud.bigquery.external_config
+
+.. toctree::
+ :maxdepth: 2
+
+ format_options
+
+
+Magics
+======
+
+.. toctree::
+ :maxdepth: 2
+
+ magics
+
+
+Enums
+=====
+
+.. toctree::
+ :maxdepth: 2
+
+ enums
+
+
+Encryption Configuration
+========================
+
+.. automodule:: google.cloud.bigquery.encryption_configuration
+
+
+Additional Types
+================
+
+Helper SQL type classes.
+
+.. toctree::
+ :maxdepth: 2
+
+ bigquery/standard_sql
+
+
+Legacy proto-based Types (deprecated)
+=====================================
+
+The legacy type classes based on protocol buffers.
+
+.. deprecated:: 3.0.0
+ These types are provided for backward compatibility only, and are not maintained
+ anymore.
+
+.. toctree::
+ :maxdepth: 2
+
+ bigquery/legacy_proto_types
diff --git a/testbed/googleapis__python-bigquery/docs/snippets.py b/testbed/googleapis__python-bigquery/docs/snippets.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4e78e36fcbe95e062c37277c86cf437920f115b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/snippets.py
@@ -0,0 +1,580 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Testable usage examples for Google BigQuery API wrapper
+Each example function takes a ``client`` argument (which must be an instance
+of :class:`google.cloud.bigquery.client.Client`) and uses it to perform a task
+with the API.
+To facilitate running the examples as system tests, each example is also passed
+a ``to_delete`` list; the function adds to the list any objects created which
+need to be deleted during teardown.
+"""
+
+import os
+import time
+
+import pytest
+
+try:
+ import pandas
+except (ImportError, AttributeError):
+ pandas = None
+
+try:
+ import pyarrow
+except (ImportError, AttributeError):
+ pyarrow = None
+
+from google.api_core.exceptions import InternalServerError
+from google.api_core.exceptions import ServiceUnavailable
+from google.api_core.exceptions import TooManyRequests
+from google.cloud import bigquery
+from google.cloud import storage
+from test_utils.retry import RetryErrors
+
+ORIGINAL_FRIENDLY_NAME = "Original friendly name"
+ORIGINAL_DESCRIPTION = "Original description"
+LOCALLY_CHANGED_FRIENDLY_NAME = "Locally-changed friendly name"
+LOCALLY_CHANGED_DESCRIPTION = "Locally-changed description"
+UPDATED_FRIENDLY_NAME = "Updated friendly name"
+UPDATED_DESCRIPTION = "Updated description"
+
+SCHEMA = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+]
+
+ROWS = [
+ ("Phred Phlyntstone", 32),
+ ("Bharney Rhubble", 33),
+ ("Wylma Phlyntstone", 29),
+ ("Bhettye Rhubble", 27),
+]
+
+QUERY = (
+ "SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` "
+ 'WHERE state = "TX"'
+)
+
+
+retry_429 = RetryErrors(TooManyRequests)
+retry_storage_errors = RetryErrors(
+ (TooManyRequests, InternalServerError, ServiceUnavailable)
+)
+
+
+@pytest.fixture(scope="module")
+def client():
+ return bigquery.Client()
+
+
+@pytest.fixture
+def to_delete(client):
+ doomed = []
+ yield doomed
+ for item in doomed:
+ if isinstance(item, (bigquery.Dataset, bigquery.DatasetReference)):
+ retry_429(client.delete_dataset)(item, delete_contents=True)
+ elif isinstance(item, storage.Bucket):
+ retry_storage_errors(item.delete)()
+ else:
+ retry_429(item.delete)()
+
+
+def _millis():
+ return int(time.time() * 1000)
+
+
+class _CloseOnDelete(object):
+ def __init__(self, wrapped):
+ self._wrapped = wrapped
+
+ def delete(self):
+ self._wrapped.close()
+
+
+def test_create_client_default_credentials():
+ """Create a BigQuery client with Application Default Credentials"""
+
+ # [START bigquery_client_default_credentials]
+ from google.cloud import bigquery
+
+ # If you don't specify credentials when constructing the client, the
+ # client library will look for credentials in the environment.
+ client = bigquery.Client()
+ # [END bigquery_client_default_credentials]
+
+ assert client is not None
+
+
+@pytest.mark.skip(
+ reason=(
+ "update_table() is flaky "
+ "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589"
+ )
+)
+def test_update_table_description(client, to_delete):
+ """Update a table's description."""
+ dataset_id = "update_table_description_dataset_{}".format(_millis())
+ table_id = "update_table_description_table_{}".format(_millis())
+ project = client.project
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ dataset = bigquery.Dataset(dataset_ref)
+ client.create_dataset(dataset)
+ to_delete.append(dataset)
+
+ table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
+ table.description = "Original description."
+ table = client.create_table(table)
+
+ # [START bigquery_update_table_description]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+ # project = client.project
+ # dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ # table_ref = dataset_ref.table('my_table')
+ # table = client.get_table(table_ref) # API request
+
+ assert table.description == "Original description."
+ table.description = "Updated description."
+
+ table = client.update_table(table, ["description"]) # API request
+
+ assert table.description == "Updated description."
+ # [END bigquery_update_table_description]
+
+
+@pytest.mark.skip(
+ reason=(
+ "update_table() is flaky "
+ "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589"
+ )
+)
+def test_update_table_cmek(client, to_delete):
+ """Patch a table's metadata."""
+ dataset_id = "update_table_cmek_{}".format(_millis())
+ table_id = "update_table_cmek_{}".format(_millis())
+ project = client.project
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ dataset = bigquery.Dataset(dataset_ref)
+ client.create_dataset(dataset)
+ to_delete.append(dataset)
+
+ table = bigquery.Table(dataset.table(table_id))
+ original_kms_key_name = "projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}".format(
+ "cloud-samples-tests", "us", "test", "test"
+ )
+ table.encryption_configuration = bigquery.EncryptionConfiguration(
+ kms_key_name=original_kms_key_name
+ )
+ table = client.create_table(table)
+
+ # [START bigquery_update_table_cmek]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+
+ assert table.encryption_configuration.kms_key_name == original_kms_key_name
+
+ # Set a new encryption key to use for the destination.
+ # TODO: Replace this key with a key you have created in KMS.
+ updated_kms_key_name = (
+ "projects/cloud-samples-tests/locations/us/keyRings/test/cryptoKeys/otherkey"
+ )
+ table.encryption_configuration = bigquery.EncryptionConfiguration(
+ kms_key_name=updated_kms_key_name
+ )
+
+ table = client.update_table(table, ["encryption_configuration"]) # API request
+
+ assert table.encryption_configuration.kms_key_name == updated_kms_key_name
+ assert original_kms_key_name != updated_kms_key_name
+ # [END bigquery_update_table_cmek]
+
+
+def test_load_table_add_column(client, to_delete):
+ dataset_id = "load_table_add_column_{}".format(_millis())
+ project = client.project
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ dataset = bigquery.Dataset(dataset_ref)
+ dataset.location = "US"
+ dataset = client.create_dataset(dataset)
+ to_delete.append(dataset)
+
+ snippets_dir = os.path.abspath(os.path.dirname(__file__))
+ filepath = os.path.join(snippets_dir, "..", "tests", "data", "people.csv")
+ table_ref = dataset_ref.table("my_table")
+ old_schema = [bigquery.SchemaField("full_name", "STRING", mode="REQUIRED")]
+ table = client.create_table(bigquery.Table(table_ref, schema=old_schema))
+
+ # [START bigquery_add_column_load_append]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+ # project = client.project
+ # dataset_ref = bigquery.DatasetReference(project, 'my_dataset')
+ # filepath = 'path/to/your_file.csv'
+
+ # Retrieves the destination table and checks the length of the schema
+ table_id = "my_table"
+ table_ref = dataset_ref.table(table_id)
+ table = client.get_table(table_ref)
+ print("Table {} contains {} columns.".format(table_id, len(table.schema)))
+
+ # Configures the load job to append the data to the destination table,
+ # allowing field addition
+ job_config = bigquery.LoadJobConfig()
+ job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
+ job_config.schema_update_options = [
+ bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION
+ ]
+ # In this example, the existing table contains only the 'full_name' column.
+ # 'REQUIRED' fields cannot be added to an existing schema, so the
+ # additional column must be 'NULLABLE'.
+ job_config.schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="NULLABLE"),
+ ]
+ job_config.source_format = bigquery.SourceFormat.CSV
+ job_config.skip_leading_rows = 1
+
+ with open(filepath, "rb") as source_file:
+ job = client.load_table_from_file(
+ source_file,
+ table_ref,
+ location="US", # Must match the destination dataset location.
+ job_config=job_config,
+ ) # API request
+
+ job.result() # Waits for table load to complete.
+ print(
+ "Loaded {} rows into {}:{}.".format(
+ job.output_rows, dataset_id, table_ref.table_id
+ )
+ )
+
+ # Checks the updated length of the schema
+ table = client.get_table(table)
+ print("Table {} now contains {} columns.".format(table_id, len(table.schema)))
+ # [END bigquery_add_column_load_append]
+ assert len(table.schema) == 2
+ assert table.num_rows > 0
+
+
+def test_load_table_relax_column(client, to_delete):
+ dataset_id = "load_table_relax_column_{}".format(_millis())
+ project = client.project
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ dataset = bigquery.Dataset(dataset_ref)
+ dataset.location = "US"
+ dataset = client.create_dataset(dataset)
+ to_delete.append(dataset)
+
+ snippets_dir = os.path.abspath(os.path.dirname(__file__))
+ filepath = os.path.join(snippets_dir, "..", "tests", "data", "people.csv")
+ table_ref = dataset_ref.table("my_table")
+ old_schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ bigquery.SchemaField("favorite_color", "STRING", mode="REQUIRED"),
+ ]
+ table = client.create_table(bigquery.Table(table_ref, schema=old_schema))
+
+ # [START bigquery_relax_column_load_append]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+ # project = client.project
+ # dataset_ref = bigquery.DatasetReference(project, 'my_dataset')
+ # filepath = 'path/to/your_file.csv'
+
+ # Retrieves the destination table and checks the number of required fields
+ table_id = "my_table"
+ table_ref = dataset_ref.table(table_id)
+ table = client.get_table(table_ref)
+ original_required_fields = sum(field.mode == "REQUIRED" for field in table.schema)
+ # In this example, the existing table has 3 required fields.
+ print("{} fields in the schema are required.".format(original_required_fields))
+
+ # Configures the load job to append the data to a destination table,
+ # allowing field relaxation
+ job_config = bigquery.LoadJobConfig()
+ job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
+ job_config.schema_update_options = [
+ bigquery.SchemaUpdateOption.ALLOW_FIELD_RELAXATION
+ ]
+ # In this example, the existing table contains three required fields
+ # ('full_name', 'age', and 'favorite_color'), while the data to load
+ # contains only the first two fields.
+ job_config.schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ job_config.source_format = bigquery.SourceFormat.CSV
+ job_config.skip_leading_rows = 1
+
+ with open(filepath, "rb") as source_file:
+ job = client.load_table_from_file(
+ source_file,
+ table_ref,
+ location="US", # Must match the destination dataset location.
+ job_config=job_config,
+ ) # API request
+
+ job.result() # Waits for table load to complete.
+ print(
+ "Loaded {} rows into {}:{}.".format(
+ job.output_rows, dataset_id, table_ref.table_id
+ )
+ )
+
+ # Checks the updated number of required fields
+ table = client.get_table(table)
+ current_required_fields = sum(field.mode == "REQUIRED" for field in table.schema)
+ print("{} fields in the schema are now required.".format(current_required_fields))
+ # [END bigquery_relax_column_load_append]
+ assert original_required_fields - current_required_fields == 1
+ assert len(table.schema) == 3
+ assert table.schema[2].mode == "NULLABLE"
+ assert table.num_rows > 0
+
+
+def test_extract_table(client, to_delete):
+ bucket_name = "extract_shakespeare_{}".format(_millis())
+ storage_client = storage.Client()
+ bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
+ to_delete.append(bucket)
+
+ # [START bigquery_extract_table]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+ # bucket_name = 'my-bucket'
+ project = "bigquery-public-data"
+ dataset_id = "samples"
+ table_id = "shakespeare"
+
+ destination_uri = "gs://{}/{}".format(bucket_name, "shakespeare.csv")
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ table_ref = dataset_ref.table(table_id)
+
+ extract_job = client.extract_table(
+ table_ref,
+ destination_uri,
+ # Location must match that of the source table.
+ location="US",
+ ) # API request
+ extract_job.result() # Waits for job to complete.
+
+ print(
+ "Exported {}:{}.{} to {}".format(project, dataset_id, table_id, destination_uri)
+ )
+ # [END bigquery_extract_table]
+
+ blob = retry_storage_errors(bucket.get_blob)("shakespeare.csv")
+ assert blob.exists
+ assert blob.size > 0
+ to_delete.insert(0, blob)
+
+
+def test_extract_table_json(client, to_delete):
+ bucket_name = "extract_shakespeare_json_{}".format(_millis())
+ storage_client = storage.Client()
+ bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
+ to_delete.append(bucket)
+ project = "bigquery-public-data"
+ dataset_id = "samples"
+
+ # [START bigquery_extract_table_json]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+ # bucket_name = 'my-bucket'
+
+ destination_uri = "gs://{}/{}".format(bucket_name, "shakespeare.json")
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ table_ref = dataset_ref.table("shakespeare")
+ job_config = bigquery.job.ExtractJobConfig()
+ job_config.destination_format = bigquery.DestinationFormat.NEWLINE_DELIMITED_JSON
+
+ extract_job = client.extract_table(
+ table_ref,
+ destination_uri,
+ job_config=job_config,
+ # Location must match that of the source table.
+ location="US",
+ ) # API request
+ extract_job.result() # Waits for job to complete.
+ # [END bigquery_extract_table_json]
+
+ blob = retry_storage_errors(bucket.get_blob)("shakespeare.json")
+ assert blob.exists
+ assert blob.size > 0
+ to_delete.insert(0, blob)
+
+
+def test_extract_table_compressed(client, to_delete):
+ bucket_name = "extract_shakespeare_compress_{}".format(_millis())
+ storage_client = storage.Client()
+ bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
+ to_delete.append(bucket)
+ project = "bigquery-public-data"
+ dataset_id = "samples"
+
+ # [START bigquery_extract_table_compressed]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+ # bucket_name = 'my-bucket'
+
+ destination_uri = "gs://{}/{}".format(bucket_name, "shakespeare.csv.gz")
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ table_ref = dataset_ref.table("shakespeare")
+ job_config = bigquery.job.ExtractJobConfig()
+ job_config.compression = bigquery.Compression.GZIP
+
+ extract_job = client.extract_table(
+ table_ref,
+ destination_uri,
+ # Location must match that of the source table.
+ location="US",
+ job_config=job_config,
+ ) # API request
+ extract_job.result() # Waits for job to complete.
+ # [END bigquery_extract_table_compressed]
+
+ blob = retry_storage_errors(bucket.get_blob)("shakespeare.csv.gz")
+ assert blob.exists
+ assert blob.size > 0
+ to_delete.insert(0, blob)
+
+
+def test_client_query_total_rows(client, capsys):
+ """Run a query and just check for how many rows."""
+ # [START bigquery_query_total_rows]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+
+ query = (
+ "SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` "
+ 'WHERE state = "TX" '
+ "LIMIT 100"
+ )
+ results = client.query_and_wait(
+ query,
+ # Location must match that of the dataset(s) referenced in the query.
+ location="US",
+ ) # API request - starts the query and waits for results.
+
+ print("Got {} rows.".format(results.total_rows))
+ # [END bigquery_query_total_rows]
+
+ out, _ = capsys.readouterr()
+ assert "Got 100 rows." in out
+
+
+def test_ddl_create_view(client, to_delete, capsys):
+ """Create a view via a DDL query."""
+ project = client.project
+ dataset_id = "ddl_view_{}".format(_millis())
+ table_id = "new_view"
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ dataset = bigquery.Dataset(dataset_ref)
+ client.create_dataset(dataset)
+ to_delete.append(dataset)
+
+ # [START bigquery_ddl_create_view]
+ # from google.cloud import bigquery
+ # project = 'my-project'
+ # dataset_id = 'my_dataset'
+ # table_id = 'new_view'
+ # client = bigquery.Client(project=project)
+
+ sql = """
+ CREATE VIEW `{}.{}.{}`
+ OPTIONS(
+ expiration_timestamp=TIMESTAMP_ADD(
+ CURRENT_TIMESTAMP(), INTERVAL 48 HOUR),
+ friendly_name="new_view",
+ description="a view that expires in 2 days",
+ labels=[("org_unit", "development")]
+ )
+ AS SELECT name, state, year, number
+ FROM `bigquery-public-data.usa_names.usa_1910_current`
+ WHERE state LIKE 'W%'
+ """.format(
+ project, dataset_id, table_id
+ )
+
+ job = client.query(sql) # API request.
+ job.result() # Waits for the query to finish.
+
+ print(
+ 'Created new view "{}.{}.{}".'.format(
+ job.destination.project,
+ job.destination.dataset_id,
+ job.destination.table_id,
+ )
+ )
+ # [END bigquery_ddl_create_view]
+
+ out, _ = capsys.readouterr()
+ assert 'Created new view "{}.{}.{}".'.format(project, dataset_id, table_id) in out
+
+ # Test that listing query result rows succeeds so that generic query
+ # processing tools work with DDL statements.
+ rows = list(job)
+ assert len(rows) == 0
+
+ if pandas is not None:
+ df = job.to_dataframe()
+ assert len(df) == 0
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_query_results_as_dataframe(client):
+ # [START bigquery_query_results_dataframe]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+
+ sql = """
+ SELECT name, SUM(number) as count
+ FROM `bigquery-public-data.usa_names.usa_1910_current`
+ GROUP BY name
+ ORDER BY count DESC
+ LIMIT 10
+ """
+
+ df = client.query_and_wait(sql).to_dataframe()
+ # [END bigquery_query_results_dataframe]
+ assert isinstance(df, pandas.DataFrame)
+ assert len(list(df)) == 2 # verify the number of columns
+ assert len(df) == 10 # verify the number of rows
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_list_rows_as_dataframe(client):
+ # [START bigquery_list_rows_dataframe]
+ # from google.cloud import bigquery
+ # client = bigquery.Client()
+ project = "bigquery-public-data"
+ dataset_id = "samples"
+
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ table_ref = dataset_ref.table("shakespeare")
+ table = client.get_table(table_ref)
+
+ df = client.list_rows(table).to_dataframe()
+ # [END bigquery_list_rows_dataframe]
+ assert isinstance(df, pandas.DataFrame)
+ assert len(list(df)) == len(table.schema) # verify the number of columns
+ assert len(df) == table.num_rows # verify the number of rows
+
+
+if __name__ == "__main__":
+ pytest.main()
diff --git a/testbed/googleapis__python-bigquery/docs/summary_overview.md b/testbed/googleapis__python-bigquery/docs/summary_overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..6dd228e13a92c3475f5b5938cbac0d08978815e1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/summary_overview.md
@@ -0,0 +1,22 @@
+[
+This is a templated file. Adding content to this file may result in it being
+reverted. Instead, if you want to place additional content, create an
+"overview_content.md" file in `docs/` directory. The Sphinx tool will
+pick up on the content and merge the content.
+]: #
+
+# Google Cloud BigQuery API
+
+Overview of the APIs available for Google Cloud BigQuery API.
+
+## All entries
+
+Classes, methods and properties & attributes for
+Google Cloud BigQuery API.
+
+[classes](https://cloud.google.com/python/docs/reference/bigquery/latest/summary_class.html)
+
+[methods](https://cloud.google.com/python/docs/reference/bigquery/latest/summary_method.html)
+
+[properties and
+attributes](https://cloud.google.com/python/docs/reference/bigquery/latest/summary_property.html)
diff --git a/testbed/googleapis__python-bigquery/docs/usage.html b/testbed/googleapis__python-bigquery/docs/usage.html
new file mode 100644
index 0000000000000000000000000000000000000000..78dc14b9ca03c576d0fd2af87065d8a10da62593
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/usage.html
@@ -0,0 +1,8 @@
+
+
+
+
+
+
diff --git a/testbed/googleapis__python-bigquery/docs/usage/client.rst b/testbed/googleapis__python-bigquery/docs/usage/client.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d631585ea2fe2126a2dea21c565c8d235a5712dd
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/usage/client.rst
@@ -0,0 +1,25 @@
+Creating a Client
+~~~~~~~~~~~~~~~~~
+
+A project is the top-level container in the ``BigQuery`` API: it is tied
+closely to billing, and can provide default access control across all its
+datasets. If no ``project`` is passed to the client container, the library
+attempts to infer a project using the environment (including explicit
+environment variables, GAE, and GCE).
+
+To override the project inferred from the environment, pass an explicit
+``project`` to the :class:`~google.cloud.bigquery.client.Client` constructor,
+or to either of the alternative ``classmethod`` factories:
+
+.. code-block:: python
+
+ from google.cloud import bigquery
+ client = bigquery.Client(project='PROJECT_ID')
+
+
+Project ACLs
+^^^^^^^^^^^^
+
+Each project has an access control list granting reader / writer / owner
+permission to one or more entities. This list cannot be queried or set
+via the API; it must be managed using the Google Developer Console.
diff --git a/testbed/googleapis__python-bigquery/docs/usage/datasets.rst b/testbed/googleapis__python-bigquery/docs/usage/datasets.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2daee77f36d28588ebaa2cd782c4373e513bbfec
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/usage/datasets.rst
@@ -0,0 +1,131 @@
+Managing Datasets
+~~~~~~~~~~~~~~~~~
+
+A dataset represents a collection of tables, and applies several default
+policies to tables as they are created:
+
+- An access control list (ACL). When created, a dataset has an ACL
+ which maps to the ACL inherited from its project.
+
+- A default table expiration period. If set, tables created within the
+ dataset will have the value as their expiration period.
+
+See BigQuery documentation for more information on
+`Datasets `_.
+
+Listing Datasets
+^^^^^^^^^^^^^^^^
+
+List datasets for a project with the
+:func:`~google.cloud.bigquery.client.Client.list_datasets` method:
+
+.. literalinclude:: ../samples/list_datasets.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_list_datasets]
+ :end-before: [END bigquery_list_datasets]
+
+List datasets by label for a project with the
+:func:`~google.cloud.bigquery.client.Client.list_datasets` method:
+
+.. literalinclude:: ../samples/list_datasets_by_label.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_list_datasets_by_label]
+ :end-before: [END bigquery_list_datasets_by_label]
+
+Getting a Dataset
+^^^^^^^^^^^^^^^^^
+
+Get a dataset resource (to pick up changes made by another client) with the
+:func:`~google.cloud.bigquery.client.Client.get_dataset` method:
+
+.. literalinclude:: ../samples/get_dataset.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_get_dataset]
+ :end-before: [END bigquery_get_dataset]
+
+Determine if a dataset exists with the
+:func:`~google.cloud.bigquery.client.Client.get_dataset` method:
+
+.. literalinclude:: ../samples/dataset_exists.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_dataset_exists]
+ :end-before: [END bigquery_dataset_exists]
+
+Creating a Dataset
+^^^^^^^^^^^^^^^^^^
+
+Create a new dataset with the
+:func:`~google.cloud.bigquery.client.Client.create_dataset` method:
+
+.. literalinclude:: ../samples/create_dataset.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_create_dataset]
+ :end-before: [END bigquery_create_dataset]
+
+Updating a Dataset
+^^^^^^^^^^^^^^^^^^
+
+Update a property in a dataset's metadata with the
+:func:`~google.cloud.bigquery.client.Client.update_dataset` method:
+
+.. literalinclude:: ../samples/update_dataset_description.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_update_dataset_description]
+ :end-before: [END bigquery_update_dataset_description]
+
+Modify user permissions on a dataset with the
+:func:`~google.cloud.bigquery.client.Client.update_dataset` method:
+
+.. literalinclude:: ../samples/update_dataset_access.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_update_dataset_access]
+ :end-before: [END bigquery_update_dataset_access]
+
+Manage Dataset labels
+^^^^^^^^^^^^^^^^^^^^^
+
+Add labels to a dataset with the
+:func:`~google.cloud.bigquery.client.Client.update_dataset` method:
+
+.. literalinclude:: ../samples/label_dataset.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_label_dataset]
+ :end-before: [END bigquery_label_dataset]
+
+Get dataset's labels with the
+:func:`~google.cloud.bigquery.client.Client.get_dataset` method:
+
+.. literalinclude:: ../samples/get_dataset_labels.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_get_dataset_labels]
+ :end-before: [END bigquery_get_dataset_labels]
+
+Delete dataset's labels with the
+:func:`~google.cloud.bigquery.client.Client.update_dataset` method:
+
+.. literalinclude:: ../samples/delete_dataset_labels.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_delete_label_dataset]
+ :end-before: [END bigquery_delete_label_dataset]
+
+Deleting a Dataset
+^^^^^^^^^^^^^^^^^^
+
+Delete a dataset with the
+:func:`~google.cloud.bigquery.client.Client.delete_dataset` method:
+
+.. literalinclude:: ../samples/delete_dataset.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_delete_dataset]
+ :end-before: [END bigquery_delete_dataset]
diff --git a/testbed/googleapis__python-bigquery/docs/usage/encryption.rst b/testbed/googleapis__python-bigquery/docs/usage/encryption.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3e6d5aacc5319632e890f1497e3f3c71f7bf5aaf
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/usage/encryption.rst
@@ -0,0 +1,52 @@
+Using Customer Managed Encryption Keys
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Table data is always encrypted at rest, but BigQuery also provides a way for
+you to control what keys it uses to encrypt they data. See `Protecting data
+with Cloud KMS keys
+`_
+in the BigQuery documentation for more details.
+
+Create a new table, using a customer-managed encryption key from
+Cloud KMS to encrypt it.
+
+.. literalinclude:: ../samples/snippets/create_table_cmek.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_create_table_cmek]
+ :end-before: [END bigquery_create_table_cmek]
+
+Change the key used to encrypt a table.
+
+.. literalinclude:: ../snippets.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_update_table_cmek]
+ :end-before: [END bigquery_update_table_cmek]
+
+Load a file from Cloud Storage, using a customer-managed encryption key from
+Cloud KMS for the destination table.
+
+.. literalinclude:: ../samples/load_table_uri_cmek.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_json_cmek]
+ :end-before: [END bigquery_load_table_gcs_json_cmek]
+
+Copy a table, using a customer-managed encryption key from Cloud KMS for the
+destination table.
+
+.. literalinclude:: ../samples/copy_table_cmek.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_copy_table_cmek]
+ :end-before: [END bigquery_copy_table_cmek]
+
+Write query results to a table, using a customer-managed encryption key from
+Cloud KMS for the destination table.
+
+.. literalinclude:: ../samples/client_query_destination_table_cmek.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_query_destination_table_cmek]
+ :end-before: [END bigquery_query_destination_table_cmek]
diff --git a/testbed/googleapis__python-bigquery/docs/usage/index.rst b/testbed/googleapis__python-bigquery/docs/usage/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1d3cc9f64fddafec2c6f20c6b5219f2a0f7505da
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/usage/index.rst
@@ -0,0 +1,35 @@
+Usage Guides
+~~~~~~~~~~~~
+
+BigQuery Basics
+^^^^^^^^^^^^^^^
+
+.. toctree::
+ :maxdepth: 1
+
+ client
+ queries
+
+Working with BigQuery Resources
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. toctree::
+ :maxdepth: 1
+
+ datasets
+ tables
+ encryption
+ jobs
+
+Integrations with Other Libraries
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. toctree::
+ :maxdepth: 1
+
+ pandas
+
+See also, the :mod:`google.cloud.bigquery.magics.magics` module for
+integrations with Jupyter.
+
+
diff --git a/testbed/googleapis__python-bigquery/docs/usage/jobs.rst b/testbed/googleapis__python-bigquery/docs/usage/jobs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c3dd71031bfc4c77560f0b2d51db54b57033b55b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/usage/jobs.rst
@@ -0,0 +1,21 @@
+Managing Jobs
+~~~~~~~~~~~~~
+
+Jobs describe actions performed on data in BigQuery tables:
+
+- Load data into a table
+- Run a query against data in one or more tables
+- Extract data from a table
+- Copy a table
+
+Listing jobs
+^^^^^^^^^^^^
+
+List jobs for a project with the
+:func:`~google.cloud.bigquery.client.Client.list_jobs` method:
+
+.. literalinclude:: ../samples/client_list_jobs.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_list_jobs]
+ :end-before: [END bigquery_list_jobs]
diff --git a/testbed/googleapis__python-bigquery/docs/usage/pandas.rst b/testbed/googleapis__python-bigquery/docs/usage/pandas.rst
new file mode 100644
index 0000000000000000000000000000000000000000..550a6779256ce5ab62591bdedcb40077db875e9d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/usage/pandas.rst
@@ -0,0 +1,109 @@
+Using BigQuery with Pandas
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Retrieve BigQuery data as a Pandas DataFrame
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+As of version 0.29.0, you can use the
+:func:`~google.cloud.bigquery.table.RowIterator.to_dataframe` function to
+retrieve query results or table rows as a :class:`pandas.DataFrame`.
+
+First, ensure that the :mod:`pandas` library is installed by running:
+
+.. code-block:: bash
+
+ pip install --upgrade pandas
+
+Alternatively, you can install the BigQuery Python client library with
+:mod:`pandas` by running:
+
+.. code-block:: bash
+
+ pip install --upgrade 'google-cloud-bigquery[pandas]'
+
+To retrieve query results as a :class:`pandas.DataFrame`:
+
+.. literalinclude:: ../snippets.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_query_results_dataframe]
+ :end-before: [END bigquery_query_results_dataframe]
+
+To retrieve table rows as a :class:`pandas.DataFrame`:
+
+.. literalinclude:: ../snippets.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_list_rows_dataframe]
+ :end-before: [END bigquery_list_rows_dataframe]
+
+The following data types are used when creating a pandas DataFrame.
+
+.. list-table:: Pandas Data Type Mapping
+ :header-rows: 1
+
+ * - BigQuery
+ - pandas
+ - Notes
+ * - BOOL
+ - boolean
+ -
+ * - DATETIME
+ - datetime64[ns], object
+ - The object dtype is used when there are values not representable in a
+ pandas nanosecond-precision timestamp.
+ * - DATE
+ - dbdate, object
+ - The object dtype is used when there are values not representable in a
+ pandas nanosecond-precision timestamp.
+
+ Requires the ``db-dtypes`` package. See the `db-dtypes usage guide
+ `_
+ * - FLOAT64
+ - float64
+ -
+ * - INT64
+ - Int64
+ -
+ * - TIME
+ - dbtime
+ - Requires the ``db-dtypes`` package. See the `db-dtypes usage guide
+ `_
+
+Retrieve BigQuery GEOGRAPHY data as a GeoPandas GeoDataFrame
+------------------------------------------------------------
+
+`GeoPandas `_ adds geospatial analytics
+capabilities to Pandas. To retrieve query results containing
+GEOGRAPHY data as a :class:`geopandas.GeoDataFrame`:
+
+.. literalinclude:: ../samples/geography/to_geodataframe.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_query_results_geodataframe]
+ :end-before: [END bigquery_query_results_geodataframe]
+
+
+Load a Pandas DataFrame to a BigQuery Table
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+As of version 1.3.0, you can use the
+:func:`~google.cloud.bigquery.client.Client.load_table_from_dataframe` function
+to load data from a :class:`pandas.DataFrame` to a
+:class:`~google.cloud.bigquery.table.Table`. To use this function, in addition
+to :mod:`pandas`, you will need to install the :mod:`pyarrow` library. You can
+install the BigQuery Python client library with :mod:`pandas` and
+:mod:`pyarrow` by running:
+
+.. code-block:: bash
+
+ pip install --upgrade google-cloud-bigquery[pandas,pyarrow]
+
+The following example demonstrates how to create a :class:`pandas.DataFrame`
+and load it into a new table:
+
+.. literalinclude:: ../samples/load_table_dataframe.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_dataframe]
+ :end-before: [END bigquery_load_table_dataframe]
diff --git a/testbed/googleapis__python-bigquery/docs/usage/queries.rst b/testbed/googleapis__python-bigquery/docs/usage/queries.rst
new file mode 100644
index 0000000000000000000000000000000000000000..56be8497e4c288f69ed19702946052827e1db558
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/usage/queries.rst
@@ -0,0 +1,63 @@
+Running Queries
+~~~~~~~~~~~~~~~
+
+Querying data
+^^^^^^^^^^^^^
+
+Run a query and wait for it to finish with the
+:func:`~google.cloud.bigquery.client.Client.query_and_wait` method:
+
+.. literalinclude:: ../samples/snippets/client_query.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_query]
+ :end-before: [END bigquery_query]
+
+
+Run a dry run query
+^^^^^^^^^^^^^^^^^^^
+
+.. literalinclude:: ../samples/client_query_dry_run.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_query_dry_run]
+ :end-before: [END bigquery_query_dry_run]
+
+
+Writing query results to a destination table
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+See BigQuery documentation for more information on
+`writing query results `_.
+
+.. literalinclude:: ../samples/client_query_destination_table.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_query_destination_table]
+ :end-before: [END bigquery_query_destination_table]
+
+
+Run a query using a named query parameter
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+See BigQuery documentation for more information on
+`parameterized queries `_.
+
+.. literalinclude:: ../samples/client_query_w_named_params.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_query_params_named]
+ :end-before: [END bigquery_query_params_named]
+
+Run a script
+^^^^^^^^^^^^
+
+See BigQuery documentation for more information on `scripting in BigQuery
+standard SQL
+`_.
+
+.. literalinclude:: ../samples/query_script.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_query_script]
+ :end-before: [END bigquery_query_script]
diff --git a/testbed/googleapis__python-bigquery/docs/usage/tables.rst b/testbed/googleapis__python-bigquery/docs/usage/tables.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a4f42b15cc85242b8af9d4247667447e06b4c8d4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/docs/usage/tables.rst
@@ -0,0 +1,316 @@
+Managing Tables
+~~~~~~~~~~~~~~~
+
+Tables exist within datasets. See BigQuery documentation for more information
+on `Tables `_.
+
+Listing Tables
+^^^^^^^^^^^^^^
+
+List the tables belonging to a dataset with the
+:func:`~google.cloud.bigquery.client.Client.list_tables` method:
+
+.. literalinclude:: ../samples/list_tables.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_list_tables]
+ :end-before: [END bigquery_list_tables]
+
+Getting a Table
+^^^^^^^^^^^^^^^
+
+Get a table resource with the
+:func:`~google.cloud.bigquery.client.Client.get_table` method:
+
+.. literalinclude:: ../samples/get_table.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_get_table]
+ :end-before: [END bigquery_get_table]
+
+Determine if a table exists with the
+:func:`~google.cloud.bigquery.client.Client.get_table` method:
+
+.. literalinclude:: ../samples/table_exists.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_table_exists]
+ :end-before: [END bigquery_table_exists]
+
+Browse data rows in a table with the
+:func:`~google.cloud.bigquery.client.Client.list_rows` method:
+
+.. literalinclude:: ../samples/browse_table_data.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_browse_table]
+ :end-before: [END bigquery_browse_table]
+
+Creating a Table
+^^^^^^^^^^^^^^^^
+
+Create an empty table with the
+:func:`~google.cloud.bigquery.client.Client.create_table` method:
+
+.. literalinclude:: ../samples/create_table.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_create_table]
+ :end-before: [END bigquery_create_table]
+
+Create a table using an external data source with the
+:func:`~google.cloud.bigquery.client.Client.create_table` method:
+
+.. literalinclude:: ../samples/snippets/create_table_external_data_configuration.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_create_table_external_data_configuration]
+ :end-before: [END bigquery_create_table_external_data_configuration]
+
+Create a clustered table with the
+:func:`~google.cloud.bigquery.client.Client.create_table` method:
+
+.. literalinclude:: ../samples/create_table_clustered.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_create_table_clustered]
+ :end-before: [END bigquery_create_table_clustered]
+
+Create an integer range partitioned table with the
+:func:`~google.cloud.bigquery.client.Client.create_table` method:
+
+.. literalinclude:: ../samples/create_table_range_partitioned.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_create_table_range_partitioned]
+ :end-before: [END bigquery_create_table_range_partitioned]
+
+Load table data from a file with the
+:func:`~google.cloud.bigquery.client.Client.load_table_from_file` method:
+
+.. literalinclude:: ../samples/load_table_file.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_from_file]
+ :end-before: [END bigquery_load_from_file]
+
+Creating a clustered table from a query result:
+
+.. literalinclude:: ../samples/client_query_destination_table_clustered.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_query_clustered_table]
+ :end-before: [END bigquery_query_clustered_table]
+
+Creating a clustered table when you load data with the
+:func:`~google.cloud.bigquery.client.Client.load_table_from_uri` method:
+
+.. literalinclude:: ../samples/load_table_clustered.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_clustered]
+ :end-before: [END bigquery_load_table_clustered]
+
+Load a CSV file from Cloud Storage with the
+:func:`~google.cloud.bigquery.client.Client.load_table_from_uri` method:
+
+.. literalinclude:: ../samples/load_table_uri_csv.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_csv]
+ :end-before: [END bigquery_load_table_gcs_csv]
+
+See also: `Loading CSV data from Cloud Storage
+`_.
+
+Load a JSON file from Cloud Storage:
+
+.. literalinclude:: ../samples/load_table_uri_json.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_json]
+ :end-before: [END bigquery_load_table_gcs_json]
+
+See also: `Loading JSON data from Cloud Storage
+`_.
+
+Load a Parquet file from Cloud Storage:
+
+.. literalinclude:: ../samples/load_table_uri_parquet.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_parquet]
+ :end-before: [END bigquery_load_table_gcs_parquet]
+
+See also: `Loading Parquet data from Cloud Storage
+`_.
+
+Load an Avro file from Cloud Storage:
+
+.. literalinclude:: ../samples/load_table_uri_avro.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_avro]
+ :end-before: [END bigquery_load_table_gcs_avro]
+
+See also: `Loading Avro data from Cloud Storage
+`_.
+
+Load an ORC file from Cloud Storage:
+
+.. literalinclude:: ../samples/load_table_uri_orc.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_orc]
+ :end-before: [END bigquery_load_table_gcs_orc]
+
+See also: `Loading ORC data from Cloud Storage
+`_.
+
+Load a CSV file from Cloud Storage and auto-detect schema:
+
+.. literalinclude:: ../samples/load_table_uri_autodetect_csv.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_csv_autodetect]
+ :end-before: [END bigquery_load_table_gcs_csv_autodetect]
+
+Load a JSON file from Cloud Storage and auto-detect schema:
+
+.. literalinclude:: ../samples/load_table_uri_autodetect_json.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_json_autodetect]
+ :end-before: [END bigquery_load_table_gcs_json_autodetect]
+
+Updating a Table
+^^^^^^^^^^^^^^^^
+
+Update a property in a table's metadata with the
+:func:`~google.cloud.bigquery.client.Client.update_table` method:
+
+.. literalinclude:: ../snippets.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_update_table_description]
+ :end-before: [END bigquery_update_table_description]
+
+Insert rows into a table's data with the
+:func:`~google.cloud.bigquery.client.Client.insert_rows` method:
+
+.. literalinclude:: ../samples/table_insert_rows.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_table_insert_rows]
+ :end-before: [END bigquery_table_insert_rows]
+
+Insert rows into a table's data with the
+:func:`~google.cloud.bigquery.client.Client.insert_rows` method, achieving
+higher write limit:
+
+.. literalinclude:: ../samples/table_insert_rows_explicit_none_insert_ids.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_table_insert_rows_explicit_none_insert_ids]
+ :end-before: [END bigquery_table_insert_rows_explicit_none_insert_ids]
+
+Mind that inserting data with ``None`` row insert IDs can come at the expense of
+more duplicate inserts. See also:
+`Streaming inserts `_.
+
+Add an empty column to the existing table with the
+:func:`~google.cloud.bigquery.update_table` method:
+
+.. literalinclude:: ../samples/add_empty_column.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_add_empty_column]
+ :end-before: [END bigquery_add_empty_column]
+
+Copying a Table
+^^^^^^^^^^^^^^^
+
+Copy a table with the
+:func:`~google.cloud.bigquery.client.Client.copy_table` method:
+
+.. literalinclude:: ../samples/copy_table.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_copy_table]
+ :end-before: [END bigquery_copy_table]
+
+Copy table data to Google Cloud Storage with the
+:func:`~google.cloud.bigquery.client.Client.extract_table` method:
+
+.. literalinclude:: ../snippets.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_extract_table]
+ :end-before: [END bigquery_extract_table]
+
+Deleting a Table
+^^^^^^^^^^^^^^^^
+
+Delete a table with the
+:func:`~google.cloud.bigquery.client.Client.delete_table` method:
+
+.. literalinclude:: ../samples/delete_table.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_delete_table]
+ :end-before: [END bigquery_delete_table]
+
+Restoring a Deleted Table
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Restore a deleted table from a snapshot by using the
+:func:`~google.cloud.bigquery.client.Client.copy_table` method:
+
+.. literalinclude:: ../samples/undelete_table.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_undelete_table]
+ :end-before: [END bigquery_undelete_table]
+
+Overwrite a Table
+^^^^^^^^^^^^^^^^^
+
+Replace the table data with an Avro file from Cloud Storage:
+
+.. literalinclude:: ../samples/load_table_uri_truncate_avro.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_avro_truncate]
+ :end-before: [END bigquery_load_table_gcs_avro_truncate]
+
+Replace the table data with a CSV file from Cloud Storage:
+
+.. literalinclude:: ../samples/load_table_uri_truncate_csv.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_csv_truncate]
+ :end-before: [END bigquery_load_table_gcs_csv_truncate]
+
+Replace the table data with a JSON file from Cloud Storage:
+
+.. literalinclude:: ../samples/load_table_uri_truncate_json.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_json_truncate]
+ :end-before: [END bigquery_load_table_gcs_json_truncate]
+
+Replace the table data with an ORC file from Cloud Storage:
+
+.. literalinclude:: ../samples/load_table_uri_truncate_orc.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_orc_truncate]
+ :end-before: [END bigquery_load_table_gcs_orc_truncate]
+
+Replace the table data with a Parquet file from Cloud Storage:
+
+.. literalinclude:: ../samples/load_table_uri_truncate_parquet.py
+ :language: python
+ :dedent: 4
+ :start-after: [START bigquery_load_table_gcs_parquet_truncate]
+ :end-before: [END bigquery_load_table_gcs_parquet_truncate]
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/__init__.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..caf75333aa1760eacccf62ab2a5c1bd6341011d0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/__init__.py
@@ -0,0 +1,249 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google BigQuery API wrapper.
+
+The main concepts with this API are:
+
+- :class:`~google.cloud.bigquery.client.Client` manages connections to the
+ BigQuery API. Use the client methods to run jobs (such as a
+ :class:`~google.cloud.bigquery.job.QueryJob` via
+ :meth:`~google.cloud.bigquery.client.Client.query`) and manage resources.
+
+- :class:`~google.cloud.bigquery.dataset.Dataset` represents a
+ collection of tables.
+
+- :class:`~google.cloud.bigquery.table.Table` represents a single "relation".
+"""
+
+import warnings
+
+from google.cloud.bigquery import version as bigquery_version
+
+__version__ = bigquery_version.__version__
+
+from google.cloud.bigquery.client import Client
+from google.cloud.bigquery.dataset import AccessEntry
+from google.cloud.bigquery.dataset import Dataset
+from google.cloud.bigquery.dataset import DatasetReference
+from google.cloud.bigquery import enums
+from google.cloud.bigquery.enums import AutoRowIDs
+from google.cloud.bigquery.enums import DecimalTargetType
+from google.cloud.bigquery.enums import KeyResultStatementKind
+from google.cloud.bigquery.enums import SqlTypeNames
+from google.cloud.bigquery.enums import StandardSqlTypeNames
+from google.cloud.bigquery.exceptions import LegacyBigQueryStorageError
+from google.cloud.bigquery.exceptions import LegacyPandasError
+from google.cloud.bigquery.exceptions import LegacyPyarrowError
+from google.cloud.bigquery.external_config import ExternalConfig
+from google.cloud.bigquery.external_config import BigtableOptions
+from google.cloud.bigquery.external_config import BigtableColumnFamily
+from google.cloud.bigquery.external_config import BigtableColumn
+from google.cloud.bigquery.external_config import CSVOptions
+from google.cloud.bigquery.external_config import GoogleSheetsOptions
+from google.cloud.bigquery.external_config import ExternalSourceFormat
+from google.cloud.bigquery.external_config import HivePartitioningOptions
+from google.cloud.bigquery.format_options import AvroOptions
+from google.cloud.bigquery.format_options import ParquetOptions
+from google.cloud.bigquery.job.base import SessionInfo
+from google.cloud.bigquery.job import Compression
+from google.cloud.bigquery.job import CopyJob
+from google.cloud.bigquery.job import CopyJobConfig
+from google.cloud.bigquery.job import CreateDisposition
+from google.cloud.bigquery.job import DestinationFormat
+from google.cloud.bigquery.job import DmlStats
+from google.cloud.bigquery.job import Encoding
+from google.cloud.bigquery.job import ExtractJob
+from google.cloud.bigquery.job import ExtractJobConfig
+from google.cloud.bigquery.job import LoadJob
+from google.cloud.bigquery.job import LoadJobConfig
+from google.cloud.bigquery.job import OperationType
+from google.cloud.bigquery.job import QueryJob
+from google.cloud.bigquery.job import QueryJobConfig
+from google.cloud.bigquery.job import QueryPriority
+from google.cloud.bigquery.job import SchemaUpdateOption
+from google.cloud.bigquery.job import ScriptOptions
+from google.cloud.bigquery.job import SourceFormat
+from google.cloud.bigquery.job import UnknownJob
+from google.cloud.bigquery.job import TransactionInfo
+from google.cloud.bigquery.job import WriteDisposition
+from google.cloud.bigquery.model import Model
+from google.cloud.bigquery.model import ModelReference
+from google.cloud.bigquery.query import ArrayQueryParameter
+from google.cloud.bigquery.query import ArrayQueryParameterType
+from google.cloud.bigquery.query import ConnectionProperty
+from google.cloud.bigquery.query import ScalarQueryParameter
+from google.cloud.bigquery.query import ScalarQueryParameterType
+from google.cloud.bigquery.query import RangeQueryParameter
+from google.cloud.bigquery.query import RangeQueryParameterType
+from google.cloud.bigquery.query import SqlParameterScalarTypes
+from google.cloud.bigquery.query import StructQueryParameter
+from google.cloud.bigquery.query import StructQueryParameterType
+from google.cloud.bigquery.query import UDFResource
+from google.cloud.bigquery.retry import DEFAULT_RETRY
+from google.cloud.bigquery.routine import DeterminismLevel
+from google.cloud.bigquery.routine import Routine
+from google.cloud.bigquery.routine import RoutineArgument
+from google.cloud.bigquery.routine import RoutineReference
+from google.cloud.bigquery.routine import RoutineType
+from google.cloud.bigquery.routine import RemoteFunctionOptions
+from google.cloud.bigquery.schema import PolicyTagList
+from google.cloud.bigquery.schema import SchemaField
+from google.cloud.bigquery.schema import FieldElementType
+from google.cloud.bigquery.standard_sql import StandardSqlDataType
+from google.cloud.bigquery.standard_sql import StandardSqlField
+from google.cloud.bigquery.standard_sql import StandardSqlStructType
+from google.cloud.bigquery.standard_sql import StandardSqlTableType
+from google.cloud.bigquery.table import PartitionRange
+from google.cloud.bigquery.table import RangePartitioning
+from google.cloud.bigquery.table import Row
+from google.cloud.bigquery.table import SnapshotDefinition
+from google.cloud.bigquery.table import CloneDefinition
+from google.cloud.bigquery.table import Table
+from google.cloud.bigquery.table import TableReference
+from google.cloud.bigquery.table import TimePartitioningType
+from google.cloud.bigquery.table import TimePartitioning
+from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
+from google.cloud.bigquery import _versions_helpers
+
+try:
+ import bigquery_magics # type: ignore
+except ImportError:
+ bigquery_magics = None
+
+sys_major, sys_minor, sys_micro = _versions_helpers.extract_runtime_version()
+
+if sys_major == 3 and sys_minor in (7, 8):
+ warnings.warn(
+ "The python-bigquery library will stop supporting Python 3.7 "
+ "and Python 3.8 in a future major release expected in Q4 2024. "
+ f"Your Python version is {sys_major}.{sys_minor}.{sys_micro}. We "
+ "recommend that you update soon to ensure ongoing support. For "
+ "more details, see: [Google Cloud Client Libraries Supported Python Versions policy](https://cloud.google.com/python/docs/supported-python-versions)",
+ PendingDeprecationWarning,
+ )
+
+__all__ = [
+ "__version__",
+ "Client",
+ # Queries
+ "ConnectionProperty",
+ "QueryJob",
+ "QueryJobConfig",
+ "ArrayQueryParameter",
+ "ScalarQueryParameter",
+ "StructQueryParameter",
+ "RangeQueryParameter",
+ "ArrayQueryParameterType",
+ "ScalarQueryParameterType",
+ "SqlParameterScalarTypes",
+ "StructQueryParameterType",
+ "RangeQueryParameterType",
+ # Datasets
+ "Dataset",
+ "DatasetReference",
+ "AccessEntry",
+ # Tables
+ "Table",
+ "TableReference",
+ "PartitionRange",
+ "RangePartitioning",
+ "Row",
+ "SnapshotDefinition",
+ "CloneDefinition",
+ "TimePartitioning",
+ "TimePartitioningType",
+ # Jobs
+ "CopyJob",
+ "CopyJobConfig",
+ "ExtractJob",
+ "ExtractJobConfig",
+ "LoadJob",
+ "LoadJobConfig",
+ "SessionInfo",
+ "UnknownJob",
+ # Models
+ "Model",
+ "ModelReference",
+ # Routines
+ "Routine",
+ "RoutineArgument",
+ "RoutineReference",
+ "RemoteFunctionOptions",
+ # Shared helpers
+ "SchemaField",
+ "FieldElementType",
+ "PolicyTagList",
+ "UDFResource",
+ "ExternalConfig",
+ "AvroOptions",
+ "BigtableOptions",
+ "BigtableColumnFamily",
+ "BigtableColumn",
+ "DmlStats",
+ "CSVOptions",
+ "GoogleSheetsOptions",
+ "HivePartitioningOptions",
+ "ParquetOptions",
+ "ScriptOptions",
+ "TransactionInfo",
+ "DEFAULT_RETRY",
+ # Standard SQL types
+ "StandardSqlDataType",
+ "StandardSqlField",
+ "StandardSqlStructType",
+ "StandardSqlTableType",
+ # Enum Constants
+ "enums",
+ "AutoRowIDs",
+ "Compression",
+ "CreateDisposition",
+ "DecimalTargetType",
+ "DestinationFormat",
+ "DeterminismLevel",
+ "ExternalSourceFormat",
+ "Encoding",
+ "KeyResultStatementKind",
+ "OperationType",
+ "QueryPriority",
+ "RoutineType",
+ "SchemaUpdateOption",
+ "SourceFormat",
+ "SqlTypeNames",
+ "StandardSqlTypeNames",
+ "WriteDisposition",
+ # EncryptionConfiguration
+ "EncryptionConfiguration",
+ # Custom exceptions
+ "LegacyBigQueryStorageError",
+ "LegacyPyarrowError",
+ "LegacyPandasError",
+]
+
+
+def load_ipython_extension(ipython):
+ """Called by IPython when this module is loaded as an IPython extension."""
+ warnings.warn(
+ "%load_ext google.cloud.bigquery is deprecated. Install bigquery-magics package and use `%load_ext bigquery_magics`, instead.",
+ category=FutureWarning,
+ )
+
+ if bigquery_magics is not None:
+ bigquery_magics.load_ipython_extension(ipython)
+ else:
+ from google.cloud.bigquery.magics.magics import _cell_magic
+
+ ipython.register_magic_function(
+ _cell_magic, magic_kind="cell", magic_name="bigquery"
+ )
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/_helpers.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..1eda807129698b366f1d7525b845fb5381774dc1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_helpers.py
@@ -0,0 +1,1006 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Shared helper functions for BigQuery API classes."""
+
+import base64
+import datetime
+import decimal
+import json
+import math
+import re
+import os
+import warnings
+from typing import Optional, Union
+
+from dateutil import relativedelta
+from google.cloud._helpers import UTC # type: ignore
+from google.cloud._helpers import _date_from_iso8601_date
+from google.cloud._helpers import _datetime_from_microseconds
+from google.cloud._helpers import _RFC3339_MICROS
+from google.cloud._helpers import _RFC3339_NO_FRACTION
+from google.cloud._helpers import _to_bytes
+from google.auth import credentials as ga_credentials # type: ignore
+from google.api_core import client_options as client_options_lib
+
+TimeoutType = Union[float, None]
+
+_RFC3339_MICROS_NO_ZULU = "%Y-%m-%dT%H:%M:%S.%f"
+_TIMEONLY_WO_MICROS = "%H:%M:%S"
+_TIMEONLY_W_MICROS = "%H:%M:%S.%f"
+_PROJECT_PREFIX_PATTERN = re.compile(
+ r"""
+ (?P\S+\:[^.]+)\.(?P[^.]+)(?:$|\.(?P[^.]+)$)
+""",
+ re.VERBOSE,
+)
+
+# BigQuery sends INTERVAL data in "canonical format"
+# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#interval_type
+_INTERVAL_PATTERN = re.compile(
+ r"(?P-?)(?P\d+)-(?P\d+) "
+ r"(?P-?\d+) "
+ r"(?P-?)(?P\d+):(?P\d+):(?P\d+)\.?(?P\d*)?$"
+)
+_RANGE_PATTERN = re.compile(r"\[.*, .*\)")
+
+BIGQUERY_EMULATOR_HOST = "BIGQUERY_EMULATOR_HOST"
+"""Environment variable defining host for emulator."""
+
+_DEFAULT_HOST = "https://bigquery.googleapis.com"
+"""Default host for JSON API."""
+
+_DEFAULT_HOST_TEMPLATE = "https://bigquery.{UNIVERSE_DOMAIN}"
+""" Templatized endpoint format. """
+
+_DEFAULT_UNIVERSE = "googleapis.com"
+"""Default universe for the JSON API."""
+
+_UNIVERSE_DOMAIN_ENV = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
+"""Environment variable for setting universe domain."""
+
+_SUPPORTED_RANGE_ELEMENTS = {"TIMESTAMP", "DATETIME", "DATE"}
+
+
+def _get_client_universe(
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]]
+) -> str:
+ """Retrieves the specified universe setting.
+
+ Args:
+ client_options: specified client options.
+ Returns:
+ str: resolved universe setting.
+
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ universe = _DEFAULT_UNIVERSE
+ options_universe = getattr(client_options, "universe_domain", None)
+ if (
+ options_universe
+ and isinstance(options_universe, str)
+ and len(options_universe) > 0
+ ):
+ universe = options_universe
+ else:
+ env_universe = os.getenv(_UNIVERSE_DOMAIN_ENV)
+ if isinstance(env_universe, str) and len(env_universe) > 0:
+ universe = env_universe
+ return universe
+
+
+def _validate_universe(client_universe: str, credentials: ga_credentials.Credentials):
+ """Validates that client provided universe and universe embedded in credentials match.
+
+ Args:
+ client_universe (str): The universe domain configured via the client options.
+ credentials (ga_credentials.Credentials): The credentials being used in the client.
+
+ Raises:
+ ValueError: when client_universe does not match the universe in credentials.
+ """
+ if hasattr(credentials, "universe_domain"):
+ cred_universe = getattr(credentials, "universe_domain")
+ if isinstance(cred_universe, str):
+ if client_universe != cred_universe:
+ raise ValueError(
+ "The configured universe domain "
+ f"({client_universe}) does not match the universe domain "
+ f"found in the credentials ({cred_universe}). "
+ "If you haven't configured the universe domain explicitly, "
+ f"`{_DEFAULT_UNIVERSE}` is the default."
+ )
+
+
+def _get_bigquery_host():
+ return os.environ.get(BIGQUERY_EMULATOR_HOST, _DEFAULT_HOST)
+
+
+def _not_null(value, field):
+ """Check whether 'value' should be coerced to 'field' type."""
+ return value is not None or (field is not None and field.mode != "NULLABLE")
+
+
+def _int_from_json(value, field):
+ """Coerce 'value' to an int, if set or not nullable."""
+ if _not_null(value, field):
+ return int(value)
+
+
+def _interval_from_json(
+ value: Optional[str], field
+) -> Optional[relativedelta.relativedelta]:
+ """Coerce 'value' to an interval, if set or not nullable."""
+ if not _not_null(value, field):
+ return None
+ if value is None:
+ raise TypeError(f"got {value} for REQUIRED field: {repr(field)}")
+
+ parsed = _INTERVAL_PATTERN.match(value)
+ if parsed is None:
+ raise ValueError(f"got interval: '{value}' with unexpected format")
+
+ calendar_sign = -1 if parsed.group("calendar_sign") == "-" else 1
+ years = calendar_sign * int(parsed.group("years"))
+ months = calendar_sign * int(parsed.group("months"))
+ days = int(parsed.group("days"))
+ time_sign = -1 if parsed.group("time_sign") == "-" else 1
+ hours = time_sign * int(parsed.group("hours"))
+ minutes = time_sign * int(parsed.group("minutes"))
+ seconds = time_sign * int(parsed.group("seconds"))
+ fraction = parsed.group("fraction")
+ microseconds = time_sign * int(fraction.ljust(6, "0")[:6]) if fraction else 0
+
+ return relativedelta.relativedelta(
+ years=years,
+ months=months,
+ days=days,
+ hours=hours,
+ minutes=minutes,
+ seconds=seconds,
+ microseconds=microseconds,
+ )
+
+
+def _float_from_json(value, field):
+ """Coerce 'value' to a float, if set or not nullable."""
+ if _not_null(value, field):
+ return float(value)
+
+
+def _decimal_from_json(value, field):
+ """Coerce 'value' to a Decimal, if set or not nullable."""
+ if _not_null(value, field):
+ return decimal.Decimal(value)
+
+
+def _bool_from_json(value, field):
+ """Coerce 'value' to a bool, if set or not nullable."""
+ if _not_null(value, field):
+ return value.lower() in ["t", "true", "1"]
+
+
+def _string_from_json(value, _):
+ """NOOP string -> string coercion"""
+ return value
+
+
+def _bytes_from_json(value, field):
+ """Base64-decode value"""
+ if _not_null(value, field):
+ return base64.standard_b64decode(_to_bytes(value))
+
+
+def _timestamp_from_json(value, field):
+ """Coerce 'value' to a datetime, if set or not nullable."""
+ if _not_null(value, field):
+ # value will be a integer in seconds, to microsecond precision, in UTC.
+ return _datetime_from_microseconds(int(value))
+
+
+def _timestamp_query_param_from_json(value, field):
+ """Coerce 'value' to a datetime, if set or not nullable.
+
+ Args:
+ value (str): The timestamp.
+
+ field (google.cloud.bigquery.schema.SchemaField):
+ The field corresponding to the value.
+
+ Returns:
+ Optional[datetime.datetime]:
+ The parsed datetime object from
+ ``value`` if the ``field`` is not null (otherwise it is
+ :data:`None`).
+ """
+ if _not_null(value, field):
+ # Canonical formats for timestamps in BigQuery are flexible. See:
+ # g.co/cloud/bigquery/docs/reference/standard-sql/data-types#timestamp-type
+ # The separator between the date and time can be 'T' or ' '.
+ value = value.replace(" ", "T", 1)
+ # The UTC timezone may be formatted as Z or +00:00.
+ value = value.replace("Z", "")
+ value = value.replace("+00:00", "")
+
+ if "." in value:
+ # YYYY-MM-DDTHH:MM:SS.ffffff
+ return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU).replace(
+ tzinfo=UTC
+ )
+ else:
+ # YYYY-MM-DDTHH:MM:SS
+ return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION).replace(
+ tzinfo=UTC
+ )
+ else:
+ return None
+
+
+def _datetime_from_json(value, field):
+ """Coerce 'value' to a datetime, if set or not nullable.
+
+ Args:
+ value (str): The timestamp.
+ field (google.cloud.bigquery.schema.SchemaField):
+ The field corresponding to the value.
+
+ Returns:
+ Optional[datetime.datetime]:
+ The parsed datetime object from
+ ``value`` if the ``field`` is not null (otherwise it is
+ :data:`None`).
+ """
+ if _not_null(value, field):
+ if "." in value:
+ # YYYY-MM-DDTHH:MM:SS.ffffff
+ return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU)
+ else:
+ # YYYY-MM-DDTHH:MM:SS
+ return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION)
+ else:
+ return None
+
+
+def _date_from_json(value, field):
+ """Coerce 'value' to a datetime date, if set or not nullable"""
+ if _not_null(value, field):
+ # value will be a string, in YYYY-MM-DD form.
+ return _date_from_iso8601_date(value)
+
+
+def _time_from_json(value, field):
+ """Coerce 'value' to a datetime date, if set or not nullable"""
+ if _not_null(value, field):
+ if len(value) == 8: # HH:MM:SS
+ fmt = _TIMEONLY_WO_MICROS
+ elif len(value) == 15: # HH:MM:SS.micros
+ fmt = _TIMEONLY_W_MICROS
+ else:
+ raise ValueError("Unknown time format: {}".format(value))
+ return datetime.datetime.strptime(value, fmt).time()
+
+
+def _record_from_json(value, field):
+ """Coerce 'value' to a mapping, if set or not nullable."""
+ if _not_null(value, field):
+ record = {}
+ record_iter = zip(field.fields, value["f"])
+ for subfield, cell in record_iter:
+ record[subfield.name] = _field_from_json(cell["v"], subfield)
+ return record
+
+
+def _json_from_json(value, field):
+ """Coerce 'value' to a Pythonic JSON representation."""
+ if _not_null(value, field):
+ return json.loads(value)
+ else:
+ return None
+
+
+def _range_element_from_json(value, field):
+ """Coerce 'value' to a range element value."""
+ if value == "UNBOUNDED":
+ return None
+ if field.element_type in _SUPPORTED_RANGE_ELEMENTS:
+ return _CELLDATA_FROM_JSON[field.element_type](value, field.element_type)
+ else:
+ raise ValueError(f"Unsupported range element type: {field.element_type}")
+
+
+def _range_from_json(value, field):
+ """Coerce 'value' to a range, if set or not nullable.
+
+ Args:
+ value (str): The literal representation of the range.
+ field (google.cloud.bigquery.schema.SchemaField):
+ The field corresponding to the value.
+
+ Returns:
+ Optional[dict]:
+ The parsed range object from ``value`` if the ``field`` is not
+ null (otherwise it is :data:`None`).
+ """
+ if _not_null(value, field):
+ if _RANGE_PATTERN.match(value):
+ start, end = value[1:-1].split(", ")
+ start = _range_element_from_json(start, field.range_element_type)
+ end = _range_element_from_json(end, field.range_element_type)
+ return {"start": start, "end": end}
+ else:
+ raise ValueError(f"Unknown format for range value: {value}")
+ else:
+ return None
+
+
+# Parse BigQuery API response JSON into a Python representation.
+_CELLDATA_FROM_JSON = {
+ "INTEGER": _int_from_json,
+ "INT64": _int_from_json,
+ "INTERVAL": _interval_from_json,
+ "FLOAT": _float_from_json,
+ "FLOAT64": _float_from_json,
+ "NUMERIC": _decimal_from_json,
+ "BIGNUMERIC": _decimal_from_json,
+ "BOOLEAN": _bool_from_json,
+ "BOOL": _bool_from_json,
+ "STRING": _string_from_json,
+ "GEOGRAPHY": _string_from_json,
+ "BYTES": _bytes_from_json,
+ "TIMESTAMP": _timestamp_from_json,
+ "DATETIME": _datetime_from_json,
+ "DATE": _date_from_json,
+ "TIME": _time_from_json,
+ "RECORD": _record_from_json,
+ "JSON": _json_from_json,
+ "RANGE": _range_from_json,
+}
+
+_QUERY_PARAMS_FROM_JSON = dict(_CELLDATA_FROM_JSON)
+_QUERY_PARAMS_FROM_JSON["TIMESTAMP"] = _timestamp_query_param_from_json
+
+
+def _field_to_index_mapping(schema):
+ """Create a mapping from schema field name to index of field."""
+ return {f.name: i for i, f in enumerate(schema)}
+
+
+def _field_from_json(resource, field):
+ def default_converter(value, field):
+ _warn_unknown_field_type(field)
+ return value
+
+ converter = _CELLDATA_FROM_JSON.get(field.field_type, default_converter)
+ if field.mode == "REPEATED":
+ return [converter(item["v"], field) for item in resource]
+ else:
+ return converter(resource, field)
+
+
+def _row_tuple_from_json(row, schema):
+ """Convert JSON row data to row with appropriate types.
+
+ Note: ``row['f']`` and ``schema`` are presumed to be of the same length.
+
+ Args:
+ row (Dict): A JSON response row to be converted.
+ schema (Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]): Specification of the field types in ``row``.
+
+ Returns:
+ Tuple: A tuple of data converted to native types.
+ """
+ from google.cloud.bigquery.schema import _to_schema_fields
+
+ schema = _to_schema_fields(schema)
+
+ row_data = []
+ for field, cell in zip(schema, row["f"]):
+ row_data.append(_field_from_json(cell["v"], field))
+ return tuple(row_data)
+
+
+def _rows_from_json(values, schema):
+ """Convert JSON row data to rows with appropriate types.
+
+ Args:
+ values (Sequence[Dict]): The list of responses (JSON rows) to convert.
+ schema (Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]):
+ The table's schema. If any item is a mapping, its content must be
+ compatible with
+ :meth:`~google.cloud.bigquery.schema.SchemaField.from_api_repr`.
+
+ Returns:
+ List[:class:`~google.cloud.bigquery.Row`]
+ """
+ from google.cloud.bigquery import Row
+ from google.cloud.bigquery.schema import _to_schema_fields
+
+ schema = _to_schema_fields(schema)
+ field_to_index = _field_to_index_mapping(schema)
+ return [Row(_row_tuple_from_json(r, schema), field_to_index) for r in values]
+
+
+def _int_to_json(value):
+ """Coerce 'value' to an JSON-compatible representation."""
+ if isinstance(value, int):
+ value = str(value)
+ return value
+
+
+def _float_to_json(value) -> Union[None, str, float]:
+ """Coerce 'value' to an JSON-compatible representation."""
+ if value is None:
+ return None
+
+ if isinstance(value, str):
+ value = float(value)
+
+ return str(value) if (math.isnan(value) or math.isinf(value)) else float(value)
+
+
+def _decimal_to_json(value):
+ """Coerce 'value' to a JSON-compatible representation."""
+ if isinstance(value, decimal.Decimal):
+ value = str(value)
+ return value
+
+
+def _bool_to_json(value):
+ """Coerce 'value' to an JSON-compatible representation."""
+ if isinstance(value, bool):
+ value = "true" if value else "false"
+ return value
+
+
+def _bytes_to_json(value):
+ """Coerce 'value' to an JSON-compatible representation."""
+ if isinstance(value, bytes):
+ value = base64.standard_b64encode(value).decode("ascii")
+ return value
+
+
+def _json_to_json(value):
+ """Coerce 'value' to a BigQuery REST API representation."""
+ if value is None:
+ return None
+ return json.dumps(value)
+
+
+def _string_to_json(value):
+ """NOOP string -> string coercion"""
+ return value
+
+
+def _timestamp_to_json_parameter(value):
+ """Coerce 'value' to an JSON-compatible representation.
+
+ This version returns the string representation used in query parameters.
+ """
+ if isinstance(value, datetime.datetime):
+ if value.tzinfo not in (None, UTC):
+ # Convert to UTC and remove the time zone info.
+ value = value.replace(tzinfo=None) - value.utcoffset()
+ value = "%s %s+00:00" % (value.date().isoformat(), value.time().isoformat())
+ return value
+
+
+def _timestamp_to_json_row(value):
+ """Coerce 'value' to an JSON-compatible representation."""
+ if isinstance(value, datetime.datetime):
+ # For naive datetime objects UTC timezone is assumed, thus we format
+ # those to string directly without conversion.
+ if value.tzinfo is not None:
+ value = value.astimezone(UTC)
+ value = value.strftime(_RFC3339_MICROS)
+ return value
+
+
+def _datetime_to_json(value):
+ """Coerce 'value' to an JSON-compatible representation."""
+ if isinstance(value, datetime.datetime):
+ # For naive datetime objects UTC timezone is assumed, thus we format
+ # those to string directly without conversion.
+ if value.tzinfo is not None:
+ value = value.astimezone(UTC)
+ value = value.strftime(_RFC3339_MICROS_NO_ZULU)
+ return value
+
+
+def _date_to_json(value):
+ """Coerce 'value' to an JSON-compatible representation."""
+ if isinstance(value, datetime.date):
+ value = value.isoformat()
+ return value
+
+
+def _time_to_json(value):
+ """Coerce 'value' to an JSON-compatible representation."""
+ if isinstance(value, datetime.time):
+ value = value.isoformat()
+ return value
+
+
+def _range_element_to_json(value, element_type=None):
+ """Coerce 'value' to an JSON-compatible representation."""
+ if value is None:
+ return None
+ elif isinstance(value, str):
+ if value.upper() in ("UNBOUNDED", "NULL"):
+ return None
+ else:
+ # We do not enforce range element value to be valid to reduce
+ # redundancy with backend.
+ return value
+ elif (
+ element_type and element_type.element_type.upper() in _SUPPORTED_RANGE_ELEMENTS
+ ):
+ converter = _SCALAR_VALUE_TO_JSON_ROW.get(element_type.element_type.upper())
+ return converter(value)
+ else:
+ raise ValueError(
+ f"Unsupported RANGE element type {element_type}, or "
+ "element type is empty. Must be DATE, DATETIME, or "
+ "TIMESTAMP"
+ )
+
+
+def _range_field_to_json(range_element_type, value):
+ """Coerce 'value' to an JSON-compatible representation."""
+ if isinstance(value, str):
+ # string literal
+ if _RANGE_PATTERN.match(value):
+ start, end = value[1:-1].split(", ")
+ else:
+ raise ValueError(f"RANGE literal {value} has incorrect format")
+ elif isinstance(value, dict):
+ # dictionary
+ start = value.get("start")
+ end = value.get("end")
+ else:
+ raise ValueError(
+ f"Unsupported type of RANGE value {value}, must be " "string or dict"
+ )
+
+ start = _range_element_to_json(start, range_element_type)
+ end = _range_element_to_json(end, range_element_type)
+ return {"start": start, "end": end}
+
+
+# Converters used for scalar values marshalled to the BigQuery API, such as in
+# query parameters or the tabledata.insert API.
+_SCALAR_VALUE_TO_JSON_ROW = {
+ "INTEGER": _int_to_json,
+ "INT64": _int_to_json,
+ "FLOAT": _float_to_json,
+ "FLOAT64": _float_to_json,
+ "NUMERIC": _decimal_to_json,
+ "BIGNUMERIC": _decimal_to_json,
+ "BOOLEAN": _bool_to_json,
+ "BOOL": _bool_to_json,
+ "BYTES": _bytes_to_json,
+ "TIMESTAMP": _timestamp_to_json_row,
+ "DATETIME": _datetime_to_json,
+ "DATE": _date_to_json,
+ "TIME": _time_to_json,
+ "JSON": _json_to_json,
+ "STRING": _string_to_json,
+ # Make sure DECIMAL and BIGDECIMAL are handled, even though
+ # requests for them should be converted to NUMERIC. Better safe
+ # than sorry.
+ "DECIMAL": _decimal_to_json,
+ "BIGDECIMAL": _decimal_to_json,
+}
+
+
+# Converters used for scalar values marshalled as query parameters.
+_SCALAR_VALUE_TO_JSON_PARAM = _SCALAR_VALUE_TO_JSON_ROW.copy()
+_SCALAR_VALUE_TO_JSON_PARAM["TIMESTAMP"] = _timestamp_to_json_parameter
+
+
+def _warn_unknown_field_type(field):
+ warnings.warn(
+ "Unknown type '{}' for field '{}'. Behavior reading and writing this type is not officially supported and may change in the future.".format(
+ field.field_type, field.name
+ ),
+ FutureWarning,
+ )
+
+
+def _scalar_field_to_json(field, row_value):
+ """Maps a field and value to a JSON-safe value.
+
+ Args:
+ field (google.cloud.bigquery.schema.SchemaField):
+ The SchemaField to use for type conversion and field name.
+ row_value (Any):
+ Value to be converted, based on the field's type.
+
+ Returns:
+ Any: A JSON-serializable object.
+ """
+
+ def default_converter(value):
+ _warn_unknown_field_type(field)
+ return value
+
+ converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type, default_converter)
+ return converter(row_value)
+
+
+def _repeated_field_to_json(field, row_value):
+ """Convert a repeated/array field to its JSON representation.
+
+ Args:
+ field (google.cloud.bigquery.schema.SchemaField):
+ The SchemaField to use for type conversion and field name. The
+ field mode must equal ``REPEATED``.
+ row_value (Sequence[Any]):
+ A sequence of values to convert to JSON-serializable values.
+
+ Returns:
+ List[Any]: A list of JSON-serializable objects.
+ """
+ values = []
+ for item in row_value:
+ values.append(_single_field_to_json(field, item))
+ return values
+
+
+def _record_field_to_json(fields, row_value):
+ """Convert a record/struct field to its JSON representation.
+
+ Args:
+ fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
+ The :class:`~google.cloud.bigquery.schema.SchemaField`s of the
+ record's subfields to use for type conversion and field names.
+ row_value (Union[Tuple[Any], Mapping[str, Any]):
+ A tuple or dictionary to convert to JSON-serializable values.
+
+ Returns:
+ Mapping[str, Any]: A JSON-serializable dictionary.
+ """
+ isdict = isinstance(row_value, dict)
+
+ # If row is passed as a tuple, make the length sanity check to avoid either
+ # uninformative index errors a few lines below or silently omitting some of
+ # the values from the result (we cannot know exactly which fields are missing
+ # or redundant, since we don't have their names).
+ if not isdict and len(row_value) != len(fields):
+ msg = "The number of row fields ({}) does not match schema length ({}).".format(
+ len(row_value), len(fields)
+ )
+ raise ValueError(msg)
+
+ record = {}
+
+ if isdict:
+ processed_fields = set()
+
+ for subindex, subfield in enumerate(fields):
+ subname = subfield.name
+ subvalue = row_value.get(subname) if isdict else row_value[subindex]
+
+ # None values are unconditionally omitted
+ if subvalue is not None:
+ record[subname] = _field_to_json(subfield, subvalue)
+
+ if isdict:
+ processed_fields.add(subname)
+
+ # Unknown fields should not be silently dropped, include them. Since there
+ # is no schema information available for them, include them as strings
+ # to make them JSON-serializable.
+ if isdict:
+ not_processed = set(row_value.keys()) - processed_fields
+
+ for field_name in not_processed:
+ value = row_value[field_name]
+ if value is not None:
+ record[field_name] = str(value)
+
+ return record
+
+
+def _single_field_to_json(field, row_value):
+ """Convert a single field into JSON-serializable values.
+
+ Ignores mode so that this can function for ARRAY / REPEATING fields
+ without requiring a deepcopy of the field. See:
+ https://github.com/googleapis/python-bigquery/issues/6
+
+ Args:
+ field (google.cloud.bigquery.schema.SchemaField):
+ The SchemaField to use for type conversion and field name.
+
+ row_value (Any):
+ Scalar or Struct to be inserted. The type
+ is inferred from the SchemaField's field_type.
+
+ Returns:
+ Any: A JSON-serializable object.
+ """
+ if row_value is None:
+ return None
+
+ if field.field_type == "RECORD":
+ return _record_field_to_json(field.fields, row_value)
+ if field.field_type == "RANGE":
+ return _range_field_to_json(field.range_element_type, row_value)
+
+ return _scalar_field_to_json(field, row_value)
+
+
+def _field_to_json(field, row_value):
+ """Convert a field into JSON-serializable values.
+
+ Args:
+ field (google.cloud.bigquery.schema.SchemaField):
+ The SchemaField to use for type conversion and field name.
+
+ row_value (Union[Sequence[List], Any]):
+ Row data to be inserted. If the SchemaField's mode is
+ REPEATED, assume this is a list. If not, the type
+ is inferred from the SchemaField's field_type.
+
+ Returns:
+ Any: A JSON-serializable object.
+ """
+ if row_value is None:
+ return None
+
+ if field.mode == "REPEATED":
+ return _repeated_field_to_json(field, row_value)
+
+ return _single_field_to_json(field, row_value)
+
+
+def _snake_to_camel_case(value):
+ """Convert snake case string to camel case."""
+ words = value.split("_")
+ return words[0] + "".join(map(str.capitalize, words[1:]))
+
+
+def _get_sub_prop(container, keys, default=None):
+ """Get a nested value from a dictionary.
+
+ This method works like ``dict.get(key)``, but for nested values.
+
+ Args:
+ container (Dict):
+ A dictionary which may contain other dictionaries as values.
+ keys (Iterable):
+ A sequence of keys to attempt to get the value for. If ``keys`` is a
+ string, it is treated as sequence containing a single string key. Each item
+ in the sequence represents a deeper nesting. The first key is for
+ the top level. If there is a dictionary there, the second key
+ attempts to get the value within that, and so on.
+ default (Optional[object]):
+ Value to returned if any of the keys are not found.
+ Defaults to ``None``.
+
+ Examples:
+ Get a top-level value (equivalent to ``container.get('key')``).
+
+ >>> _get_sub_prop({'key': 'value'}, ['key'])
+ 'value'
+
+ Get a top-level value, providing a default (equivalent to
+ ``container.get('key', default='default')``).
+
+ >>> _get_sub_prop({'nothere': 123}, ['key'], default='not found')
+ 'not found'
+
+ Get a nested value.
+
+ >>> _get_sub_prop({'key': {'subkey': 'value'}}, ['key', 'subkey'])
+ 'value'
+
+ Returns:
+ object: The value if present or the default.
+ """
+ if isinstance(keys, str):
+ keys = [keys]
+
+ sub_val = container
+ for key in keys:
+ if key not in sub_val:
+ return default
+ sub_val = sub_val[key]
+ return sub_val
+
+
+def _set_sub_prop(container, keys, value):
+ """Set a nested value in a dictionary.
+
+ Args:
+ container (Dict):
+ A dictionary which may contain other dictionaries as values.
+ keys (Iterable):
+ A sequence of keys to attempt to set the value for. If ``keys`` is a
+ string, it is treated as sequence containing a single string key. Each item
+ in the sequence represents a deeper nesting. The first key is for
+ the top level. If there is a dictionary there, the second key
+ attempts to get the value within that, and so on.
+ value (object): Value to set within the container.
+
+ Examples:
+ Set a top-level value (equivalent to ``container['key'] = 'value'``).
+
+ >>> container = {}
+ >>> _set_sub_prop(container, ['key'], 'value')
+ >>> container
+ {'key': 'value'}
+
+ Set a nested value.
+
+ >>> container = {}
+ >>> _set_sub_prop(container, ['key', 'subkey'], 'value')
+ >>> container
+ {'key': {'subkey': 'value'}}
+
+ Replace a nested value.
+
+ >>> container = {'key': {'subkey': 'prev'}}
+ >>> _set_sub_prop(container, ['key', 'subkey'], 'new')
+ >>> container
+ {'key': {'subkey': 'new'}}
+ """
+ if isinstance(keys, str):
+ keys = [keys]
+
+ sub_val = container
+ for key in keys[:-1]:
+ if key not in sub_val:
+ sub_val[key] = {}
+ sub_val = sub_val[key]
+ sub_val[keys[-1]] = value
+
+
+def _del_sub_prop(container, keys):
+ """Remove a nested key fro a dictionary.
+
+ Args:
+ container (Dict):
+ A dictionary which may contain other dictionaries as values.
+ keys (Iterable):
+ A sequence of keys to attempt to clear the value for. Each item in
+ the sequence represents a deeper nesting. The first key is for
+ the top level. If there is a dictionary there, the second key
+ attempts to get the value within that, and so on.
+
+ Examples:
+ Remove a top-level value (equivalent to ``del container['key']``).
+
+ >>> container = {'key': 'value'}
+ >>> _del_sub_prop(container, ['key'])
+ >>> container
+ {}
+
+ Remove a nested value.
+
+ >>> container = {'key': {'subkey': 'value'}}
+ >>> _del_sub_prop(container, ['key', 'subkey'])
+ >>> container
+ {'key': {}}
+ """
+ sub_val = container
+ for key in keys[:-1]:
+ if key not in sub_val:
+ sub_val[key] = {}
+ sub_val = sub_val[key]
+ if keys[-1] in sub_val:
+ del sub_val[keys[-1]]
+
+
+def _int_or_none(value):
+ """Helper: deserialize int value from JSON string."""
+ if isinstance(value, int):
+ return value
+ if value is not None:
+ return int(value)
+
+
+def _str_or_none(value):
+ """Helper: serialize value to JSON string."""
+ if value is not None:
+ return str(value)
+
+
+def _split_id(full_id):
+ """Helper: split full_id into composite parts.
+
+ Args:
+ full_id (str): Fully-qualified ID in standard SQL format.
+
+ Returns:
+ List[str]: ID's parts separated into components.
+ """
+ with_prefix = _PROJECT_PREFIX_PATTERN.match(full_id)
+ if with_prefix is None:
+ parts = full_id.split(".")
+ else:
+ parts = with_prefix.groups()
+ parts = [part for part in parts if part]
+ return parts
+
+
+def _parse_3_part_id(full_id, default_project=None, property_name="table_id"):
+ output_project_id = default_project
+ output_dataset_id = None
+ output_resource_id = None
+ parts = _split_id(full_id)
+
+ if len(parts) != 2 and len(parts) != 3:
+ raise ValueError(
+ "{property_name} must be a fully-qualified ID in "
+ 'standard SQL format, e.g., "project.dataset.{property_name}", '
+ "got {}".format(full_id, property_name=property_name)
+ )
+
+ if len(parts) == 2 and not default_project:
+ raise ValueError(
+ "When default_project is not set, {property_name} must be a "
+ "fully-qualified ID in standard SQL format, "
+ 'e.g., "project.dataset_id.{property_name}", got {}'.format(
+ full_id, property_name=property_name
+ )
+ )
+
+ if len(parts) == 2:
+ output_dataset_id, output_resource_id = parts
+ else:
+ output_project_id, output_dataset_id, output_resource_id = parts
+
+ return output_project_id, output_dataset_id, output_resource_id
+
+
+def _build_resource_from_properties(obj, filter_fields):
+ """Build a resource based on a ``_properties`` dictionary, filtered by
+ ``filter_fields``, which follow the name of the Python object.
+ """
+ partial = {}
+ for filter_field in filter_fields:
+ api_field = obj._PROPERTY_TO_API_FIELD.get(filter_field)
+ if api_field is None and filter_field not in obj._properties:
+ raise ValueError("No property %s" % filter_field)
+ elif api_field is not None:
+ partial[api_field] = obj._properties.get(api_field)
+ else:
+ # allows properties that are not defined in the library
+ # and properties that have the same name as API resource key
+ partial[filter_field] = obj._properties[filter_field]
+
+ return partial
+
+
+def _verify_job_config_type(job_config, expected_type, param_name="job_config"):
+ if not isinstance(job_config, expected_type):
+ msg = (
+ "Expected an instance of {expected_type} class for the {param_name} parameter, "
+ "but received {param_name} = {job_config}"
+ )
+ raise TypeError(
+ msg.format(
+ expected_type=expected_type.__name__,
+ param_name=param_name,
+ job_config=job_config,
+ )
+ )
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/_http.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_http.py
new file mode 100644
index 0000000000000000000000000000000000000000..7921900f81daa796740e1338643ffe3d1fb76bc3
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_http.py
@@ -0,0 +1,47 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with Google BigQuery connections."""
+
+from google.cloud import _http # type: ignore # pytype: disable=import-error
+from google.cloud.bigquery import __version__
+
+
+class Connection(_http.JSONConnection):
+ """A connection to Google BigQuery via the JSON REST API.
+
+ Args:
+ client (google.cloud.bigquery.client.Client): The client that owns the current connection.
+
+ client_info (Optional[google.api_core.client_info.ClientInfo]): Instance used to generate user agent.
+
+ api_endpoint (str): The api_endpoint to use. If None, the library will decide what endpoint to use.
+ """
+
+ DEFAULT_API_ENDPOINT = "https://bigquery.googleapis.com"
+ DEFAULT_API_MTLS_ENDPOINT = "https://bigquery.mtls.googleapis.com"
+
+ def __init__(self, client, client_info=None, api_endpoint=None):
+ super(Connection, self).__init__(client, client_info)
+ self.API_BASE_URL = api_endpoint or self.DEFAULT_API_ENDPOINT
+ self.API_BASE_MTLS_URL = self.DEFAULT_API_MTLS_ENDPOINT
+ self.ALLOW_AUTO_SWITCH_TO_MTLS_URL = api_endpoint is None
+ self._client_info.gapic_version = __version__
+ self._client_info.client_library_version = __version__
+
+ API_VERSION = "v2" # type: ignore
+ """The version of the API, used in building the API call's URL."""
+
+ API_URL_TEMPLATE = "{api_base_url}/bigquery/{api_version}{path}" # type: ignore
+ """A template for the URL of a particular API call."""
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/_job_helpers.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_job_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..e66ab2763b31aa72e6ba59d72a0ff2f873843a3a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_job_helpers.py
@@ -0,0 +1,575 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helpers for interacting with the job REST APIs from the client.
+
+For queries, there are three cases to consider:
+
+1. jobs.insert: This always returns a job resource.
+2. jobs.query, jobCreationMode=JOB_CREATION_REQUIRED:
+ This sometimes can return the results inline, but always includes a job ID.
+3. jobs.query, jobCreationMode=JOB_CREATION_OPTIONAL:
+ This sometimes doesn't create a job at all, instead returning the results.
+ For better debugging, an auto-generated query ID is included in the
+ response.
+
+Client.query() calls either (1) or (2), depending on what the user provides
+for the api_method parameter. query() always returns a QueryJob object, which
+can retry the query when the query job fails for a retriable reason.
+
+Client.query_and_wait() calls (3). This returns a RowIterator that may wrap
+local results from the response or may wrap a query job containing multiple
+pages of results. Even though query_and_wait() waits for the job to complete,
+we still need a separate job_retry object because there are different
+predicates where it is safe to generate a new query ID.
+"""
+
+import copy
+import functools
+import os
+import uuid
+from typing import Any, Dict, Optional, TYPE_CHECKING, Union
+
+import google.api_core.exceptions as core_exceptions
+from google.api_core import retry as retries
+
+from google.cloud.bigquery import job
+import google.cloud.bigquery.query
+from google.cloud.bigquery import table
+from google.cloud.bigquery.retry import POLLING_DEFAULT_VALUE
+
+# Avoid circular imports
+if TYPE_CHECKING: # pragma: NO COVER
+ from google.cloud.bigquery.client import Client
+
+
+# The purpose of _TIMEOUT_BUFFER_MILLIS is to allow the server-side timeout to
+# happen before the client-side timeout. This is not strictly necessary, as the
+# client retries client-side timeouts, but the hope by making the server-side
+# timeout slightly shorter is that it can save the server from some unncessary
+# processing time.
+#
+# 250 milliseconds is chosen arbitrarily, though should be about the right
+# order of magnitude for network latency and switching delays. It is about the
+# amount of time for light to circumnavigate the world twice.
+_TIMEOUT_BUFFER_MILLIS = 250
+
+
+def make_job_id(job_id: Optional[str] = None, prefix: Optional[str] = None) -> str:
+ """Construct an ID for a new job.
+
+ Args:
+ job_id: the user-provided job ID.
+ prefix: the user-provided prefix for a job ID.
+
+ Returns:
+ str: A job ID
+ """
+ if job_id is not None:
+ return job_id
+ elif prefix is not None:
+ return str(prefix) + str(uuid.uuid4())
+ else:
+ return str(uuid.uuid4())
+
+
+def job_config_with_defaults(
+ job_config: Optional[job.QueryJobConfig],
+ default_job_config: Optional[job.QueryJobConfig],
+) -> Optional[job.QueryJobConfig]:
+ """Create a copy of `job_config`, replacing unset values with those from
+ `default_job_config`.
+ """
+ if job_config is None:
+ return default_job_config
+
+ if default_job_config is None:
+ return job_config
+
+ # Both job_config and default_job_config are not None, so make a copy of
+ # job_config merged with default_job_config. Anything already explicitly
+ # set on job_config should not be replaced.
+ return job_config._fill_from_default(default_job_config)
+
+
+def query_jobs_insert(
+ client: "Client",
+ query: str,
+ job_config: Optional[job.QueryJobConfig],
+ job_id: Optional[str],
+ job_id_prefix: Optional[str],
+ location: Optional[str],
+ project: str,
+ retry: Optional[retries.Retry],
+ timeout: Optional[float],
+ job_retry: Optional[retries.Retry],
+) -> job.QueryJob:
+ """Initiate a query using jobs.insert.
+
+ See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert
+ """
+ job_id_given = job_id is not None
+ job_id_save = job_id
+ job_config_save = job_config
+
+ def do_query():
+ # Make a copy now, so that original doesn't get changed by the process
+ # below and to facilitate retry
+ job_config = copy.deepcopy(job_config_save)
+
+ job_id = make_job_id(job_id_save, job_id_prefix)
+ job_ref = job._JobReference(job_id, project=project, location=location)
+ query_job = job.QueryJob(job_ref, query, client=client, job_config=job_config)
+
+ try:
+ query_job._begin(retry=retry, timeout=timeout)
+ except core_exceptions.Conflict as create_exc:
+ # The thought is if someone is providing their own job IDs and they get
+ # their job ID generation wrong, this could end up returning results for
+ # the wrong query. We thus only try to recover if job ID was not given.
+ if job_id_given:
+ raise create_exc
+
+ try:
+ query_job = client.get_job(
+ job_id,
+ project=project,
+ location=location,
+ retry=retry,
+ timeout=timeout,
+ )
+ except core_exceptions.GoogleAPIError: # (includes RetryError)
+ raise
+ else:
+ return query_job
+ else:
+ return query_job
+
+ future = do_query()
+ # The future might be in a failed state now, but if it's
+ # unrecoverable, we'll find out when we ask for it's result, at which
+ # point, we may retry.
+ if not job_id_given:
+ future._retry_do_query = do_query # in case we have to retry later
+ future._job_retry = job_retry
+
+ return future
+
+
+def _validate_job_config(request_body: Dict[str, Any], invalid_key: str):
+ """Catch common mistakes, such as passing in a *JobConfig object of the
+ wrong type.
+ """
+ if invalid_key in request_body:
+ raise ValueError(f"got unexpected key {repr(invalid_key)} in job_config")
+
+
+def _to_query_request(
+ job_config: Optional[job.QueryJobConfig] = None,
+ *,
+ query: str,
+ location: Optional[str] = None,
+ timeout: Optional[float] = None,
+) -> Dict[str, Any]:
+ """Transform from Job resource to QueryRequest resource.
+
+ Most of the keys in job.configuration.query are in common with
+ QueryRequest. If any configuration property is set that is not available in
+ jobs.query, it will result in a server-side error.
+ """
+ request_body = copy.copy(job_config.to_api_repr()) if job_config else {}
+
+ _validate_job_config(request_body, job.CopyJob._JOB_TYPE)
+ _validate_job_config(request_body, job.ExtractJob._JOB_TYPE)
+ _validate_job_config(request_body, job.LoadJob._JOB_TYPE)
+
+ # Move query.* properties to top-level.
+ query_config_resource = request_body.pop("query", {})
+ request_body.update(query_config_resource)
+
+ # Default to standard SQL.
+ request_body.setdefault("useLegacySql", False)
+
+ # Since jobs.query can return results, ensure we use the lossless timestamp
+ # format. See: https://github.com/googleapis/python-bigquery/issues/395
+ request_body.setdefault("formatOptions", {})
+ request_body["formatOptions"]["useInt64Timestamp"] = True # type: ignore
+
+ if timeout is not None:
+ # Subtract a buffer for context switching, network latency, etc.
+ request_body["timeoutMs"] = max(0, int(1000 * timeout) - _TIMEOUT_BUFFER_MILLIS)
+
+ if location is not None:
+ request_body["location"] = location
+
+ request_body["query"] = query
+
+ return request_body
+
+
+def _to_query_job(
+ client: "Client",
+ query: str,
+ request_config: Optional[job.QueryJobConfig],
+ query_response: Dict[str, Any],
+) -> job.QueryJob:
+ job_ref_resource = query_response["jobReference"]
+ job_ref = job._JobReference._from_api_repr(job_ref_resource)
+ query_job = job.QueryJob(job_ref, query, client=client)
+ query_job._properties.setdefault("configuration", {})
+
+ # Not all relevant properties are in the jobs.query response. Populate some
+ # expected properties based on the job configuration.
+ if request_config is not None:
+ query_job._properties["configuration"].update(request_config.to_api_repr())
+
+ query_job._properties["configuration"].setdefault("query", {})
+ query_job._properties["configuration"]["query"]["query"] = query
+ query_job._properties["configuration"]["query"].setdefault("useLegacySql", False)
+
+ query_job._properties.setdefault("statistics", {})
+ query_job._properties["statistics"].setdefault("query", {})
+ query_job._properties["statistics"]["query"]["cacheHit"] = query_response.get(
+ "cacheHit"
+ )
+ query_job._properties["statistics"]["query"]["schema"] = query_response.get(
+ "schema"
+ )
+ query_job._properties["statistics"]["query"][
+ "totalBytesProcessed"
+ ] = query_response.get("totalBytesProcessed")
+
+ # Set errors if any were encountered.
+ query_job._properties.setdefault("status", {})
+ if "errors" in query_response:
+ # Set errors but not errorResult. If there was an error that failed
+ # the job, jobs.query behaves like jobs.getQueryResults and returns a
+ # non-success HTTP status code.
+ errors = query_response["errors"]
+ query_job._properties["status"]["errors"] = errors
+
+ # Avoid an extra call to `getQueryResults` if the query has finished.
+ job_complete = query_response.get("jobComplete")
+ if job_complete:
+ query_job._query_results = google.cloud.bigquery.query._QueryResults(
+ query_response
+ )
+
+ # We want job.result() to refresh the job state, so the conversion is
+ # always "PENDING", even if the job is finished.
+ query_job._properties["status"]["state"] = "PENDING"
+
+ return query_job
+
+
+def _to_query_path(project: str) -> str:
+ return f"/projects/{project}/queries"
+
+
+def query_jobs_query(
+ client: "Client",
+ query: str,
+ job_config: Optional[job.QueryJobConfig],
+ location: Optional[str],
+ project: str,
+ retry: retries.Retry,
+ timeout: Optional[float],
+ job_retry: retries.Retry,
+) -> job.QueryJob:
+ """Initiate a query using jobs.query with jobCreationMode=JOB_CREATION_REQUIRED.
+
+ See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query
+ """
+ path = _to_query_path(project)
+ request_body = _to_query_request(
+ query=query, job_config=job_config, location=location, timeout=timeout
+ )
+
+ def do_query():
+ request_body["requestId"] = make_job_id()
+ span_attributes = {"path": path}
+ api_response = client._call_api(
+ retry,
+ span_name="BigQuery.query",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=request_body,
+ timeout=timeout,
+ )
+ return _to_query_job(client, query, job_config, api_response)
+
+ future = do_query()
+
+ # The future might be in a failed state now, but if it's
+ # unrecoverable, we'll find out when we ask for it's result, at which
+ # point, we may retry.
+ future._retry_do_query = do_query # in case we have to retry later
+ future._job_retry = job_retry
+
+ return future
+
+
+def query_and_wait(
+ client: "Client",
+ query: str,
+ *,
+ job_config: Optional[job.QueryJobConfig],
+ location: Optional[str],
+ project: str,
+ api_timeout: Optional[float] = None,
+ wait_timeout: Optional[Union[float, object]] = POLLING_DEFAULT_VALUE,
+ retry: Optional[retries.Retry],
+ job_retry: Optional[retries.Retry],
+ page_size: Optional[int] = None,
+ max_results: Optional[int] = None,
+) -> table.RowIterator:
+ """Run the query, wait for it to finish, and return the results.
+
+ While ``jobCreationMode=JOB_CREATION_OPTIONAL`` is in preview in the
+ ``jobs.query`` REST API, use the default ``jobCreationMode`` unless
+ the environment variable ``QUERY_PREVIEW_ENABLED=true``. After
+ ``jobCreationMode`` is GA, this method will always use
+ ``jobCreationMode=JOB_CREATION_OPTIONAL``. See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query
+
+ Args:
+ client:
+ BigQuery client to make API calls.
+ query (str):
+ SQL query to be executed. Defaults to the standard SQL
+ dialect. Use the ``job_config`` parameter to change dialects.
+ job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
+ Extra configuration options for the job.
+ To override any options that were previously set in
+ the ``default_query_job_config`` given to the
+ ``Client`` constructor, manually set those options to ``None``,
+ or whatever value is preferred.
+ location (Optional[str]):
+ Location where to run the job. Must match the location of the
+ table used in the query as well as the destination table.
+ project (Optional[str]):
+ Project ID of the project of where to run the job. Defaults
+ to the client's project.
+ api_timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ wait_timeout (Optional[Union[float, object]]):
+ The number of seconds to wait for the query to finish. If the
+ query doesn't finish before this timeout, the client attempts
+ to cancel the query. If unset, the underlying Client.get_job() API
+ call has timeout, but we still wait indefinitely for the job to
+ finish.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC. This only applies to making RPC
+ calls. It isn't used to retry failed jobs. This has
+ a reasonable default that should only be overridden
+ with care.
+ job_retry (Optional[google.api_core.retry.Retry]):
+ How to retry failed jobs. The default retries
+ rate-limit-exceeded errors. Passing ``None`` disables
+ job retry. Not all jobs can be retried.
+ page_size (Optional[int]):
+ The maximum number of rows in each page of results from this
+ request. Non-positive values are ignored.
+ max_results (Optional[int]):
+ The maximum total number of rows from this request.
+
+ Returns:
+ google.cloud.bigquery.table.RowIterator:
+ Iterator of row data
+ :class:`~google.cloud.bigquery.table.Row`-s. During each
+ page, the iterator will have the ``total_rows`` attribute
+ set, which counts the total number of rows **in the result
+ set** (this is distinct from the total number of rows in the
+ current page: ``iterator.page.num_items``).
+
+ If the query is a special query that produces no results, e.g.
+ a DDL query, an ``_EmptyRowIterator`` instance is returned.
+
+ Raises:
+ TypeError:
+ If ``job_config`` is not an instance of
+ :class:`~google.cloud.bigquery.job.QueryJobConfig`
+ class.
+ """
+ request_body = _to_query_request(
+ query=query, job_config=job_config, location=location, timeout=api_timeout
+ )
+
+ # Some API parameters aren't supported by the jobs.query API. In these
+ # cases, fallback to a jobs.insert call.
+ if not _supported_by_jobs_query(request_body):
+ return _wait_or_cancel(
+ query_jobs_insert(
+ client=client,
+ query=query,
+ job_id=None,
+ job_id_prefix=None,
+ job_config=job_config,
+ location=location,
+ project=project,
+ retry=retry,
+ timeout=api_timeout,
+ job_retry=job_retry,
+ ),
+ api_timeout=api_timeout,
+ wait_timeout=wait_timeout,
+ retry=retry,
+ page_size=page_size,
+ max_results=max_results,
+ )
+
+ path = _to_query_path(project)
+
+ if page_size is not None and max_results is not None:
+ request_body["maxResults"] = min(page_size, max_results)
+ elif page_size is not None or max_results is not None:
+ request_body["maxResults"] = page_size or max_results
+
+ if os.getenv("QUERY_PREVIEW_ENABLED", "").casefold() == "true":
+ request_body["jobCreationMode"] = "JOB_CREATION_OPTIONAL"
+
+ def do_query():
+ request_body["requestId"] = make_job_id()
+ span_attributes = {"path": path}
+
+ # For easier testing, handle the retries ourselves.
+ if retry is not None:
+ response = retry(client._call_api)(
+ retry=None, # We're calling the retry decorator ourselves.
+ span_name="BigQuery.query",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=request_body,
+ timeout=api_timeout,
+ )
+ else:
+ response = client._call_api(
+ retry=None,
+ span_name="BigQuery.query",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=request_body,
+ timeout=api_timeout,
+ )
+
+ # Even if we run with JOB_CREATION_OPTIONAL, if there are more pages
+ # to fetch, there will be a job ID for jobs.getQueryResults.
+ query_results = google.cloud.bigquery.query._QueryResults.from_api_repr(
+ response
+ )
+ page_token = query_results.page_token
+ more_pages = page_token is not None
+
+ if more_pages or not query_results.complete:
+ # TODO(swast): Avoid a call to jobs.get in some cases (few
+ # remaining pages) by waiting for the query to finish and calling
+ # client._list_rows_from_query_results directly. Need to update
+ # RowIterator to fetch destination table via the job ID if needed.
+ return _wait_or_cancel(
+ _to_query_job(client, query, job_config, response),
+ api_timeout=api_timeout,
+ wait_timeout=wait_timeout,
+ retry=retry,
+ page_size=page_size,
+ max_results=max_results,
+ )
+
+ return table.RowIterator(
+ client=client,
+ api_request=functools.partial(client._call_api, retry, timeout=api_timeout),
+ path=None,
+ schema=query_results.schema,
+ max_results=max_results,
+ page_size=page_size,
+ total_rows=query_results.total_rows,
+ first_page_response=response,
+ location=query_results.location,
+ job_id=query_results.job_id,
+ query_id=query_results.query_id,
+ project=query_results.project,
+ num_dml_affected_rows=query_results.num_dml_affected_rows,
+ )
+
+ if job_retry is not None:
+ return job_retry(do_query)()
+ else:
+ return do_query()
+
+
+def _supported_by_jobs_query(request_body: Dict[str, Any]) -> bool:
+ """True if jobs.query can be used. False if jobs.insert is needed."""
+ request_keys = frozenset(request_body.keys())
+
+ # Per issue: https://github.com/googleapis/python-bigquery/issues/1867
+ # use an allowlist here instead of a denylist because the backend API allows
+ # unsupported parameters without any warning or failure. Instead, keep this
+ # set in sync with those in QueryRequest:
+ # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#QueryRequest
+ keys_allowlist = {
+ "kind",
+ "query",
+ "maxResults",
+ "defaultDataset",
+ "timeoutMs",
+ "dryRun",
+ "preserveNulls",
+ "useQueryCache",
+ "useLegacySql",
+ "parameterMode",
+ "queryParameters",
+ "location",
+ "formatOptions",
+ "connectionProperties",
+ "labels",
+ "maximumBytesBilled",
+ "requestId",
+ "createSession",
+ }
+
+ unsupported_keys = request_keys - keys_allowlist
+ return len(unsupported_keys) == 0
+
+
+def _wait_or_cancel(
+ job: job.QueryJob,
+ api_timeout: Optional[float],
+ wait_timeout: Optional[Union[object, float]],
+ retry: Optional[retries.Retry],
+ page_size: Optional[int],
+ max_results: Optional[int],
+) -> table.RowIterator:
+ """Wait for a job to complete and return the results.
+
+ If we can't return the results within the ``wait_timeout``, try to cancel
+ the job.
+ """
+ try:
+ return job.result(
+ page_size=page_size,
+ max_results=max_results,
+ retry=retry,
+ timeout=wait_timeout,
+ )
+ except Exception:
+ # Attempt to cancel the job since we can't return the results.
+ try:
+ job.cancel(retry=retry, timeout=api_timeout)
+ except Exception:
+ # Don't eat the original exception if cancel fails.
+ pass
+ raise
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/_pandas_helpers.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_pandas_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..210ab48758f010cb58901fa97c437a94969beab8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_pandas_helpers.py
@@ -0,0 +1,1026 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Shared helper functions for connecting BigQuery and pandas."""
+
+import concurrent.futures
+from datetime import datetime
+import functools
+from itertools import islice
+import logging
+import queue
+import warnings
+from typing import Any, Union
+
+
+from google.cloud.bigquery import _pyarrow_helpers
+from google.cloud.bigquery import _versions_helpers
+from google.cloud.bigquery import schema
+
+try:
+ import pandas # type: ignore
+
+ pandas_import_exception = None
+except ImportError as exc:
+ pandas = None
+ pandas_import_exception = exc
+else:
+ import numpy
+
+try:
+ import db_dtypes # type: ignore
+
+ date_dtype_name = db_dtypes.DateDtype.name
+ time_dtype_name = db_dtypes.TimeDtype.name
+ db_dtypes_import_exception = None
+except ImportError as exc:
+ db_dtypes = None
+ db_dtypes_import_exception = exc
+ date_dtype_name = time_dtype_name = "" # Use '' rather than None because pytype
+
+pyarrow = _versions_helpers.PYARROW_VERSIONS.try_import()
+
+try:
+ # _BaseGeometry is used to detect shapely objevys in `bq_to_arrow_array`
+ from shapely.geometry.base import BaseGeometry as _BaseGeometry # type: ignore
+except ImportError:
+ # No shapely, use NoneType for _BaseGeometry as a placeholder.
+ _BaseGeometry = type(None)
+else:
+ # We don't have any unit test sessions that install shapely but not pandas.
+ if pandas is not None: # pragma: NO COVER
+
+ def _to_wkb():
+ from shapely import wkb # type: ignore
+
+ write = wkb.dumps
+ notnull = pandas.notnull
+
+ def _to_wkb(v):
+ return write(v) if notnull(v) else v
+
+ return _to_wkb
+
+ _to_wkb = _to_wkb()
+
+try:
+ from google.cloud.bigquery_storage import ArrowSerializationOptions
+except ImportError:
+ _ARROW_COMPRESSION_SUPPORT = False
+else:
+ # Having BQ Storage available implies that pyarrow >=1.0.0 is available, too.
+ _ARROW_COMPRESSION_SUPPORT = True
+
+_LOGGER = logging.getLogger(__name__)
+
+_PROGRESS_INTERVAL = 0.2 # Maximum time between download status checks, in seconds.
+
+_MAX_QUEUE_SIZE_DEFAULT = object() # max queue size sentinel for BQ Storage downloads
+
+_NO_PANDAS_ERROR = "Please install the 'pandas' package to use this function."
+_NO_DB_TYPES_ERROR = "Please install the 'db-dtypes' package to use this function."
+
+_PANDAS_DTYPE_TO_BQ = {
+ "bool": "BOOLEAN",
+ "datetime64[ns, UTC]": "TIMESTAMP",
+ "datetime64[ns]": "DATETIME",
+ "float32": "FLOAT",
+ "float64": "FLOAT",
+ "int8": "INTEGER",
+ "int16": "INTEGER",
+ "int32": "INTEGER",
+ "int64": "INTEGER",
+ "uint8": "INTEGER",
+ "uint16": "INTEGER",
+ "uint32": "INTEGER",
+ "geometry": "GEOGRAPHY",
+ date_dtype_name: "DATE",
+ time_dtype_name: "TIME",
+}
+
+
+class _DownloadState(object):
+ """Flag to indicate that a thread should exit early."""
+
+ def __init__(self):
+ # No need for a lock because reading/replacing a variable is defined to
+ # be an atomic operation in the Python language definition (enforced by
+ # the global interpreter lock).
+ self.done = False
+
+
+BQ_FIELD_TYPE_TO_ARROW_FIELD_METADATA = {
+ "GEOGRAPHY": {
+ b"ARROW:extension:name": b"google:sqlType:geography",
+ b"ARROW:extension:metadata": b'{"encoding": "WKT"}',
+ },
+ "DATETIME": {b"ARROW:extension:name": b"google:sqlType:datetime"},
+}
+
+
+def bq_to_arrow_struct_data_type(field):
+ arrow_fields = []
+ for subfield in field.fields:
+ arrow_subfield = bq_to_arrow_field(subfield)
+ if arrow_subfield:
+ arrow_fields.append(arrow_subfield)
+ else:
+ # Could not determine a subfield type. Fallback to type
+ # inference.
+ return None
+ return pyarrow.struct(arrow_fields)
+
+
+def bq_to_arrow_range_data_type(field):
+ if field is None:
+ raise ValueError(
+ "Range element type cannot be None, must be one of "
+ "DATE, DATETIME, or TIMESTAMP"
+ )
+ element_type = field.element_type.upper()
+ arrow_element_type = _pyarrow_helpers.bq_to_arrow_scalars(element_type)()
+ return pyarrow.struct([("start", arrow_element_type), ("end", arrow_element_type)])
+
+
+def bq_to_arrow_data_type(field):
+ """Return the Arrow data type, corresponding to a given BigQuery column.
+
+ Returns:
+ None: if default Arrow type inspection should be used.
+ """
+ if field.mode is not None and field.mode.upper() == "REPEATED":
+ inner_type = bq_to_arrow_data_type(
+ schema.SchemaField(field.name, field.field_type, fields=field.fields)
+ )
+ if inner_type:
+ return pyarrow.list_(inner_type)
+ return None
+
+ field_type_upper = field.field_type.upper() if field.field_type else ""
+ if field_type_upper in schema._STRUCT_TYPES:
+ return bq_to_arrow_struct_data_type(field)
+
+ if field_type_upper == "RANGE":
+ return bq_to_arrow_range_data_type(field.range_element_type)
+
+ data_type_constructor = _pyarrow_helpers.bq_to_arrow_scalars(field_type_upper)
+ if data_type_constructor is None:
+ return None
+ return data_type_constructor()
+
+
+def bq_to_arrow_field(bq_field, array_type=None):
+ """Return the Arrow field, corresponding to a given BigQuery column.
+
+ Returns:
+ None: if the Arrow type cannot be determined.
+ """
+ arrow_type = bq_to_arrow_data_type(bq_field)
+ if arrow_type is not None:
+ if array_type is not None:
+ arrow_type = array_type # For GEOGRAPHY, at least initially
+ metadata = BQ_FIELD_TYPE_TO_ARROW_FIELD_METADATA.get(
+ bq_field.field_type.upper() if bq_field.field_type else ""
+ )
+ return pyarrow.field(
+ bq_field.name,
+ arrow_type,
+ # Even if the remote schema is REQUIRED, there's a chance there's
+ # local NULL values. Arrow will gladly interpret these NULL values
+ # as non-NULL and give you an arbitrary value. See:
+ # https://github.com/googleapis/python-bigquery/issues/1692
+ nullable=False if bq_field.mode.upper() == "REPEATED" else True,
+ metadata=metadata,
+ )
+
+ warnings.warn(
+ "Unable to determine Arrow type for field '{}'.".format(bq_field.name)
+ )
+ return None
+
+
+def bq_to_arrow_schema(bq_schema):
+ """Return the Arrow schema, corresponding to a given BigQuery schema.
+
+ Returns:
+ None: if any Arrow type cannot be determined.
+ """
+ arrow_fields = []
+ for bq_field in bq_schema:
+ arrow_field = bq_to_arrow_field(bq_field)
+ if arrow_field is None:
+ # Auto-detect the schema if there is an unknown field type.
+ return None
+ arrow_fields.append(arrow_field)
+ return pyarrow.schema(arrow_fields)
+
+
+def default_types_mapper(
+ date_as_object: bool = False,
+ bool_dtype: Union[Any, None] = None,
+ int_dtype: Union[Any, None] = None,
+ float_dtype: Union[Any, None] = None,
+ string_dtype: Union[Any, None] = None,
+ date_dtype: Union[Any, None] = None,
+ datetime_dtype: Union[Any, None] = None,
+ time_dtype: Union[Any, None] = None,
+ timestamp_dtype: Union[Any, None] = None,
+ range_date_dtype: Union[Any, None] = None,
+ range_datetime_dtype: Union[Any, None] = None,
+ range_timestamp_dtype: Union[Any, None] = None,
+):
+ """Create a mapping from pyarrow types to pandas types.
+
+ This overrides the pandas defaults to use null-safe extension types where
+ available.
+
+ See: https://arrow.apache.org/docs/python/api/datatypes.html for a list of
+ data types. See:
+ tests/unit/test__pandas_helpers.py::test_bq_to_arrow_data_type for
+ BigQuery to Arrow type mapping.
+
+ Note to google-cloud-bigquery developers: If you update the default dtypes,
+ also update the docs at docs/usage/pandas.rst.
+ """
+
+ def types_mapper(arrow_data_type):
+ if bool_dtype is not None and pyarrow.types.is_boolean(arrow_data_type):
+ return bool_dtype
+
+ elif int_dtype is not None and pyarrow.types.is_integer(arrow_data_type):
+ return int_dtype
+
+ elif float_dtype is not None and pyarrow.types.is_floating(arrow_data_type):
+ return float_dtype
+
+ elif string_dtype is not None and pyarrow.types.is_string(arrow_data_type):
+ return string_dtype
+
+ elif (
+ # If date_as_object is True, we know some DATE columns are
+ # out-of-bounds of what is supported by pandas.
+ date_dtype is not None
+ and not date_as_object
+ and pyarrow.types.is_date(arrow_data_type)
+ ):
+ return date_dtype
+
+ elif (
+ datetime_dtype is not None
+ and pyarrow.types.is_timestamp(arrow_data_type)
+ and arrow_data_type.tz is None
+ ):
+ return datetime_dtype
+
+ elif (
+ timestamp_dtype is not None
+ and pyarrow.types.is_timestamp(arrow_data_type)
+ and arrow_data_type.tz is not None
+ ):
+ return timestamp_dtype
+
+ elif time_dtype is not None and pyarrow.types.is_time(arrow_data_type):
+ return time_dtype
+
+ elif pyarrow.types.is_struct(arrow_data_type):
+ if range_datetime_dtype is not None and arrow_data_type.equals(
+ range_datetime_dtype.pyarrow_dtype
+ ):
+ return range_datetime_dtype
+
+ elif range_date_dtype is not None and arrow_data_type.equals(
+ range_date_dtype.pyarrow_dtype
+ ):
+ return range_date_dtype
+
+ elif range_timestamp_dtype is not None and arrow_data_type.equals(
+ range_timestamp_dtype.pyarrow_dtype
+ ):
+ return range_timestamp_dtype
+
+ return types_mapper
+
+
+def bq_to_arrow_array(series, bq_field):
+ if bq_field.field_type.upper() == "GEOGRAPHY":
+ arrow_type = None
+ first = _first_valid(series)
+ if first is not None:
+ if series.dtype.name == "geometry" or isinstance(first, _BaseGeometry):
+ arrow_type = pyarrow.binary()
+ # Convert shapey geometry to WKB binary format:
+ series = series.apply(_to_wkb)
+ elif isinstance(first, bytes):
+ arrow_type = pyarrow.binary()
+ elif series.dtype.name == "geometry":
+ # We have a GeoSeries containing all nulls, convert it to a pandas series
+ series = pandas.Series(numpy.array(series))
+
+ if arrow_type is None:
+ arrow_type = bq_to_arrow_data_type(bq_field)
+ else:
+ arrow_type = bq_to_arrow_data_type(bq_field)
+
+ field_type_upper = bq_field.field_type.upper() if bq_field.field_type else ""
+
+ try:
+ if bq_field.mode.upper() == "REPEATED":
+ return pyarrow.ListArray.from_pandas(series, type=arrow_type)
+ if field_type_upper in schema._STRUCT_TYPES:
+ return pyarrow.StructArray.from_pandas(series, type=arrow_type)
+ return pyarrow.Array.from_pandas(series, type=arrow_type)
+ except pyarrow.ArrowTypeError:
+ msg = f"""Error converting Pandas column with name: "{series.name}" and datatype: "{series.dtype}" to an appropriate pyarrow datatype: Array, ListArray, or StructArray"""
+ _LOGGER.error(msg)
+ raise pyarrow.ArrowTypeError(msg)
+
+
+def get_column_or_index(dataframe, name):
+ """Return a column or index as a pandas series."""
+ if name in dataframe.columns:
+ return dataframe[name].reset_index(drop=True)
+
+ if isinstance(dataframe.index, pandas.MultiIndex):
+ if name in dataframe.index.names:
+ return (
+ dataframe.index.get_level_values(name)
+ .to_series()
+ .reset_index(drop=True)
+ )
+ else:
+ if name == dataframe.index.name:
+ return dataframe.index.to_series().reset_index(drop=True)
+
+ raise ValueError("column or index '{}' not found.".format(name))
+
+
+def list_columns_and_indexes(dataframe):
+ """Return all index and column names with dtypes.
+
+ Returns:
+ Sequence[Tuple[str, dtype]]:
+ Returns a sorted list of indexes and column names with
+ corresponding dtypes. If an index is missing a name or has the
+ same name as a column, the index is omitted.
+ """
+ column_names = frozenset(dataframe.columns)
+ columns_and_indexes = []
+ if isinstance(dataframe.index, pandas.MultiIndex):
+ for name in dataframe.index.names:
+ if name and name not in column_names:
+ values = dataframe.index.get_level_values(name)
+ columns_and_indexes.append((name, values.dtype))
+ else:
+ if dataframe.index.name and dataframe.index.name not in column_names:
+ columns_and_indexes.append((dataframe.index.name, dataframe.index.dtype))
+
+ columns_and_indexes += zip(dataframe.columns, dataframe.dtypes)
+ return columns_and_indexes
+
+
+def _first_valid(series):
+ first_valid_index = series.first_valid_index()
+ if first_valid_index is not None:
+ return series.at[first_valid_index]
+
+
+def _first_array_valid(series):
+ """Return the first "meaningful" element from the array series.
+
+ Here, "meaningful" means the first non-None element in one of the arrays that can
+ be used for type detextion.
+ """
+ first_valid_index = series.first_valid_index()
+ if first_valid_index is None:
+ return None
+
+ valid_array = series.at[first_valid_index]
+ valid_item = next((item for item in valid_array if not pandas.isna(item)), None)
+
+ if valid_item is not None:
+ return valid_item
+
+ # Valid item is None because all items in the "valid" array are invalid. Try
+ # to find a true valid array manually.
+ for array in islice(series, first_valid_index + 1, None):
+ try:
+ array_iter = iter(array)
+ except TypeError:
+ continue # Not an array, apparently, e.g. None, thus skip.
+ valid_item = next((item for item in array_iter if not pandas.isna(item)), None)
+ if valid_item is not None:
+ break
+
+ return valid_item
+
+
+def dataframe_to_bq_schema(dataframe, bq_schema):
+ """Convert a pandas DataFrame schema to a BigQuery schema.
+
+ Args:
+ dataframe (pandas.DataFrame):
+ DataFrame for which the client determines the BigQuery schema.
+ bq_schema (Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]):
+ A BigQuery schema. Use this argument to override the autodetected
+ type for some or all of the DataFrame columns.
+
+ Returns:
+ Optional[Sequence[google.cloud.bigquery.schema.SchemaField]]:
+ The automatically determined schema. Returns None if the type of
+ any column cannot be determined.
+ """
+ if bq_schema:
+ bq_schema = schema._to_schema_fields(bq_schema)
+ bq_schema_index = {field.name: field for field in bq_schema}
+ bq_schema_unused = set(bq_schema_index.keys())
+ else:
+ bq_schema_index = {}
+ bq_schema_unused = set()
+
+ bq_schema_out = []
+ unknown_type_fields = []
+
+ for column, dtype in list_columns_and_indexes(dataframe):
+ # Use provided type from schema, if present.
+ bq_field = bq_schema_index.get(column)
+ if bq_field:
+ bq_schema_out.append(bq_field)
+ bq_schema_unused.discard(bq_field.name)
+ continue
+
+ # Otherwise, try to automatically determine the type based on the
+ # pandas dtype.
+ bq_type = _PANDAS_DTYPE_TO_BQ.get(dtype.name)
+ if bq_type is None:
+ sample_data = _first_valid(dataframe.reset_index()[column])
+ if (
+ isinstance(sample_data, _BaseGeometry)
+ and sample_data is not None # Paranoia
+ ):
+ bq_type = "GEOGRAPHY"
+ bq_field = schema.SchemaField(column, bq_type)
+ bq_schema_out.append(bq_field)
+
+ if bq_field.field_type is None:
+ unknown_type_fields.append(bq_field)
+
+ # Catch any schema mismatch. The developer explicitly asked to serialize a
+ # column, but it was not found.
+ if bq_schema_unused:
+ raise ValueError(
+ "bq_schema contains fields not present in dataframe: {}".format(
+ bq_schema_unused
+ )
+ )
+
+ # If schema detection was not successful for all columns, also try with
+ # pyarrow, if available.
+ if unknown_type_fields:
+ if not pyarrow:
+ msg = "Could not determine the type of columns: {}".format(
+ ", ".join(field.name for field in unknown_type_fields)
+ )
+ warnings.warn(msg)
+ return None # We cannot detect the schema in full.
+
+ # The augment_schema() helper itself will also issue unknown type
+ # warnings if detection still fails for any of the fields.
+ bq_schema_out = augment_schema(dataframe, bq_schema_out)
+
+ return tuple(bq_schema_out) if bq_schema_out else None
+
+
+def augment_schema(dataframe, current_bq_schema):
+ """Try to deduce the unknown field types and return an improved schema.
+
+ This function requires ``pyarrow`` to run. If all the missing types still
+ cannot be detected, ``None`` is returned. If all types are already known,
+ a shallow copy of the given schema is returned.
+
+ Args:
+ dataframe (pandas.DataFrame):
+ DataFrame for which some of the field types are still unknown.
+ current_bq_schema (Sequence[google.cloud.bigquery.schema.SchemaField]):
+ A BigQuery schema for ``dataframe``. The types of some or all of
+ the fields may be ``None``.
+ Returns:
+ Optional[Sequence[google.cloud.bigquery.schema.SchemaField]]
+ """
+ # pytype: disable=attribute-error
+ augmented_schema = []
+ unknown_type_fields = []
+ for field in current_bq_schema:
+ if field.field_type is not None:
+ augmented_schema.append(field)
+ continue
+
+ arrow_table = pyarrow.array(dataframe.reset_index()[field.name])
+
+ if pyarrow.types.is_list(arrow_table.type):
+ # `pyarrow.ListType`
+ detected_mode = "REPEATED"
+ detected_type = _pyarrow_helpers.arrow_scalar_ids_to_bq(
+ arrow_table.values.type.id
+ )
+
+ # For timezone-naive datetimes, pyarrow assumes the UTC timezone and adds
+ # it to such datetimes, causing them to be recognized as TIMESTAMP type.
+ # We thus additionally check the actual data to see if we need to overrule
+ # that and choose DATETIME instead.
+ # Note that this should only be needed for datetime values inside a list,
+ # since scalar datetime values have a proper Pandas dtype that allows
+ # distinguishing between timezone-naive and timezone-aware values before
+ # even requiring the additional schema augment logic in this method.
+ if detected_type == "TIMESTAMP":
+ valid_item = _first_array_valid(dataframe[field.name])
+ if isinstance(valid_item, datetime) and valid_item.tzinfo is None:
+ detected_type = "DATETIME"
+ else:
+ detected_mode = field.mode
+ detected_type = _pyarrow_helpers.arrow_scalar_ids_to_bq(arrow_table.type.id)
+ if detected_type == "NUMERIC" and arrow_table.type.scale > 9:
+ detected_type = "BIGNUMERIC"
+
+ if detected_type is None:
+ unknown_type_fields.append(field)
+ continue
+
+ new_field = schema.SchemaField(
+ name=field.name,
+ field_type=detected_type,
+ mode=detected_mode,
+ description=field.description,
+ fields=field.fields,
+ )
+ augmented_schema.append(new_field)
+
+ if unknown_type_fields:
+ warnings.warn(
+ "Pyarrow could not determine the type of columns: {}.".format(
+ ", ".join(field.name for field in unknown_type_fields)
+ )
+ )
+ return None
+
+ return augmented_schema
+ # pytype: enable=attribute-error
+
+
+def dataframe_to_arrow(dataframe, bq_schema):
+ """Convert pandas dataframe to Arrow table, using BigQuery schema.
+
+ Args:
+ dataframe (pandas.DataFrame):
+ DataFrame to convert to Arrow table.
+ bq_schema (Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]):
+ Desired BigQuery schema. The number of columns must match the
+ number of columns in the DataFrame.
+
+ Returns:
+ pyarrow.Table:
+ Table containing dataframe data, with schema derived from
+ BigQuery schema.
+ """
+ column_names = set(dataframe.columns)
+ column_and_index_names = set(
+ name for name, _ in list_columns_and_indexes(dataframe)
+ )
+
+ bq_schema = schema._to_schema_fields(bq_schema)
+ bq_field_names = set(field.name for field in bq_schema)
+
+ extra_fields = bq_field_names - column_and_index_names
+ if extra_fields:
+ raise ValueError(
+ "bq_schema contains fields not present in dataframe: {}".format(
+ extra_fields
+ )
+ )
+
+ # It's okay for indexes to be missing from bq_schema, but it's not okay to
+ # be missing columns.
+ missing_fields = column_names - bq_field_names
+ if missing_fields:
+ raise ValueError(
+ "bq_schema is missing fields from dataframe: {}".format(missing_fields)
+ )
+
+ arrow_arrays = []
+ arrow_names = []
+ arrow_fields = []
+ for bq_field in bq_schema:
+ arrow_names.append(bq_field.name)
+ arrow_arrays.append(
+ bq_to_arrow_array(get_column_or_index(dataframe, bq_field.name), bq_field)
+ )
+ arrow_fields.append(bq_to_arrow_field(bq_field, arrow_arrays[-1].type))
+
+ if all((field is not None for field in arrow_fields)):
+ return pyarrow.Table.from_arrays(
+ arrow_arrays, schema=pyarrow.schema(arrow_fields)
+ )
+ return pyarrow.Table.from_arrays(arrow_arrays, names=arrow_names)
+
+
+def dataframe_to_parquet(
+ dataframe,
+ bq_schema,
+ filepath,
+ parquet_compression="SNAPPY",
+ parquet_use_compliant_nested_type=True,
+):
+ """Write dataframe as a Parquet file, according to the desired BQ schema.
+
+ This function requires the :mod:`pyarrow` package. Arrow is used as an
+ intermediate format.
+
+ Args:
+ dataframe (pandas.DataFrame):
+ DataFrame to convert to Parquet file.
+ bq_schema (Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]):
+ Desired BigQuery schema. Number of columns must match number of
+ columns in the DataFrame.
+ filepath (str):
+ Path to write Parquet file to.
+ parquet_compression (Optional[str]):
+ The compression codec to use by the the ``pyarrow.parquet.write_table``
+ serializing method. Defaults to "SNAPPY".
+ https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html#pyarrow-parquet-write-table
+ parquet_use_compliant_nested_type (bool):
+ Whether the ``pyarrow.parquet.write_table`` serializing method should write
+ compliant Parquet nested type (lists). Defaults to ``True``.
+ https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#nested-types
+ https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html#pyarrow-parquet-write-table
+
+ This argument is ignored for ``pyarrow`` versions earlier than ``4.0.0``.
+ """
+ pyarrow = _versions_helpers.PYARROW_VERSIONS.try_import(raise_if_error=True)
+
+ import pyarrow.parquet # type: ignore
+
+ kwargs = (
+ {"use_compliant_nested_type": parquet_use_compliant_nested_type}
+ if _versions_helpers.PYARROW_VERSIONS.use_compliant_nested_type
+ else {}
+ )
+
+ bq_schema = schema._to_schema_fields(bq_schema)
+ arrow_table = dataframe_to_arrow(dataframe, bq_schema)
+ pyarrow.parquet.write_table(
+ arrow_table,
+ filepath,
+ compression=parquet_compression,
+ **kwargs,
+ )
+
+
+def _row_iterator_page_to_arrow(page, column_names, arrow_types):
+ # Iterate over the page to force the API request to get the page data.
+ try:
+ next(iter(page))
+ except StopIteration:
+ pass
+
+ arrays = []
+ for column_index, arrow_type in enumerate(arrow_types):
+ arrays.append(pyarrow.array(page._columns[column_index], type=arrow_type))
+
+ if isinstance(column_names, pyarrow.Schema):
+ return pyarrow.RecordBatch.from_arrays(arrays, schema=column_names)
+ return pyarrow.RecordBatch.from_arrays(arrays, names=column_names)
+
+
+def download_arrow_row_iterator(pages, bq_schema):
+ """Use HTTP JSON RowIterator to construct an iterable of RecordBatches.
+
+ Args:
+ pages (Iterator[:class:`google.api_core.page_iterator.Page`]):
+ An iterator over the result pages.
+ bq_schema (Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]):
+ A decription of the fields in result pages.
+ Yields:
+ :class:`pyarrow.RecordBatch`
+ The next page of records as a ``pyarrow`` record batch.
+ """
+ bq_schema = schema._to_schema_fields(bq_schema)
+ column_names = bq_to_arrow_schema(bq_schema) or [field.name for field in bq_schema]
+ arrow_types = [bq_to_arrow_data_type(field) for field in bq_schema]
+
+ for page in pages:
+ yield _row_iterator_page_to_arrow(page, column_names, arrow_types)
+
+
+def _row_iterator_page_to_dataframe(page, column_names, dtypes):
+ # Iterate over the page to force the API request to get the page data.
+ try:
+ next(iter(page))
+ except StopIteration:
+ pass
+
+ columns = {}
+ for column_index, column_name in enumerate(column_names):
+ dtype = dtypes.get(column_name)
+ columns[column_name] = pandas.Series(page._columns[column_index], dtype=dtype)
+
+ return pandas.DataFrame(columns, columns=column_names)
+
+
+def download_dataframe_row_iterator(pages, bq_schema, dtypes):
+ """Use HTTP JSON RowIterator to construct a DataFrame.
+
+ Args:
+ pages (Iterator[:class:`google.api_core.page_iterator.Page`]):
+ An iterator over the result pages.
+ bq_schema (Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]):
+ A decription of the fields in result pages.
+ dtypes(Mapping[str, numpy.dtype]):
+ The types of columns in result data to hint construction of the
+ resulting DataFrame. Not all column types have to be specified.
+ Yields:
+ :class:`pandas.DataFrame`
+ The next page of records as a ``pandas.DataFrame`` record batch.
+ """
+ bq_schema = schema._to_schema_fields(bq_schema)
+ column_names = [field.name for field in bq_schema]
+ for page in pages:
+ yield _row_iterator_page_to_dataframe(page, column_names, dtypes)
+
+
+def _bqstorage_page_to_arrow(page):
+ return page.to_arrow()
+
+
+def _bqstorage_page_to_dataframe(column_names, dtypes, page):
+ # page.to_dataframe() does not preserve column order in some versions
+ # of google-cloud-bigquery-storage. Access by column name to rearrange.
+ return page.to_dataframe(dtypes=dtypes)[column_names]
+
+
+def _download_table_bqstorage_stream(
+ download_state, bqstorage_client, session, stream, worker_queue, page_to_item
+):
+ reader = bqstorage_client.read_rows(stream.name)
+
+ # Avoid deprecation warnings for passing in unnecessary read session.
+ # https://github.com/googleapis/python-bigquery-storage/issues/229
+ if _versions_helpers.BQ_STORAGE_VERSIONS.is_read_session_optional:
+ rowstream = reader.rows()
+ else:
+ rowstream = reader.rows(session)
+
+ for page in rowstream.pages:
+ if download_state.done:
+ return
+ item = page_to_item(page)
+ worker_queue.put(item)
+
+
+def _nowait(futures):
+ """Separate finished and unfinished threads, much like
+ :func:`concurrent.futures.wait`, but don't wait.
+ """
+ done = []
+ not_done = []
+ for future in futures:
+ if future.done():
+ done.append(future)
+ else:
+ not_done.append(future)
+ return done, not_done
+
+
+def _download_table_bqstorage(
+ project_id,
+ table,
+ bqstorage_client,
+ preserve_order=False,
+ selected_fields=None,
+ page_to_item=None,
+ max_queue_size=_MAX_QUEUE_SIZE_DEFAULT,
+):
+ """Use (faster, but billable) BQ Storage API to construct DataFrame."""
+
+ # Passing a BQ Storage client in implies that the BigQuery Storage library
+ # is available and can be imported.
+ from google.cloud import bigquery_storage
+
+ if "$" in table.table_id:
+ raise ValueError(
+ "Reading from a specific partition is not currently supported."
+ )
+ if "@" in table.table_id:
+ raise ValueError("Reading from a specific snapshot is not currently supported.")
+
+ requested_streams = 1 if preserve_order else 0
+
+ requested_session = bigquery_storage.types.ReadSession(
+ table=table.to_bqstorage(), data_format=bigquery_storage.types.DataFormat.ARROW
+ )
+ if selected_fields is not None:
+ for field in selected_fields:
+ requested_session.read_options.selected_fields.append(field.name)
+
+ if _ARROW_COMPRESSION_SUPPORT:
+ requested_session.read_options.arrow_serialization_options.buffer_compression = (
+ ArrowSerializationOptions.CompressionCodec.LZ4_FRAME
+ )
+
+ session = bqstorage_client.create_read_session(
+ parent="projects/{}".format(project_id),
+ read_session=requested_session,
+ max_stream_count=requested_streams,
+ )
+
+ _LOGGER.debug(
+ "Started reading table '{}.{}.{}' with BQ Storage API session '{}'.".format(
+ table.project, table.dataset_id, table.table_id, session.name
+ )
+ )
+
+ # Avoid reading rows from an empty table.
+ if not session.streams:
+ return
+
+ total_streams = len(session.streams)
+
+ # Use _DownloadState to notify worker threads when to quit.
+ # See: https://stackoverflow.com/a/29237343/101923
+ download_state = _DownloadState()
+
+ # Create a queue to collect frames as they are created in each thread.
+ #
+ # The queue needs to be bounded by default, because if the user code processes the
+ # fetched result pages too slowly, while at the same time new pages are rapidly being
+ # fetched from the server, the queue can grow to the point where the process runs
+ # out of memory.
+ if max_queue_size is _MAX_QUEUE_SIZE_DEFAULT:
+ max_queue_size = total_streams
+ elif max_queue_size is None:
+ max_queue_size = 0 # unbounded
+
+ worker_queue = queue.Queue(maxsize=max_queue_size)
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=total_streams) as pool:
+ try:
+ # Manually submit jobs and wait for download to complete rather
+ # than using pool.map because pool.map continues running in the
+ # background even if there is an exception on the main thread.
+ # See: https://github.com/googleapis/google-cloud-python/pull/7698
+ not_done = [
+ pool.submit(
+ _download_table_bqstorage_stream,
+ download_state,
+ bqstorage_client,
+ session,
+ stream,
+ worker_queue,
+ page_to_item,
+ )
+ for stream in session.streams
+ ]
+
+ while not_done:
+ # Don't block on the worker threads. For performance reasons,
+ # we want to block on the queue's get method, instead. This
+ # prevents the queue from filling up, because the main thread
+ # has smaller gaps in time between calls to the queue's get
+ # method. For a detailed explaination, see:
+ # https://friendliness.dev/2019/06/18/python-nowait/
+ done, not_done = _nowait(not_done)
+ for future in done:
+ # Call result() on any finished threads to raise any
+ # exceptions encountered.
+ future.result()
+
+ try:
+ frame = worker_queue.get(timeout=_PROGRESS_INTERVAL)
+ yield frame
+ except queue.Empty: # pragma: NO COVER
+ continue
+
+ # Return any remaining values after the workers finished.
+ while True: # pragma: NO COVER
+ try:
+ frame = worker_queue.get_nowait()
+ yield frame
+ except queue.Empty: # pragma: NO COVER
+ break
+ finally:
+ # No need for a lock because reading/replacing a variable is
+ # defined to be an atomic operation in the Python language
+ # definition (enforced by the global interpreter lock).
+ download_state.done = True
+
+ # Shutdown all background threads, now that they should know to
+ # exit early.
+ pool.shutdown(wait=True)
+
+
+def download_arrow_bqstorage(
+ project_id,
+ table,
+ bqstorage_client,
+ preserve_order=False,
+ selected_fields=None,
+ max_queue_size=_MAX_QUEUE_SIZE_DEFAULT,
+):
+ return _download_table_bqstorage(
+ project_id,
+ table,
+ bqstorage_client,
+ preserve_order=preserve_order,
+ selected_fields=selected_fields,
+ page_to_item=_bqstorage_page_to_arrow,
+ max_queue_size=max_queue_size,
+ )
+
+
+def download_dataframe_bqstorage(
+ project_id,
+ table,
+ bqstorage_client,
+ column_names,
+ dtypes,
+ preserve_order=False,
+ selected_fields=None,
+ max_queue_size=_MAX_QUEUE_SIZE_DEFAULT,
+):
+ page_to_item = functools.partial(_bqstorage_page_to_dataframe, column_names, dtypes)
+ return _download_table_bqstorage(
+ project_id,
+ table,
+ bqstorage_client,
+ preserve_order=preserve_order,
+ selected_fields=selected_fields,
+ page_to_item=page_to_item,
+ max_queue_size=max_queue_size,
+ )
+
+
+def dataframe_to_json_generator(dataframe):
+ for row in dataframe.itertuples(index=False, name=None):
+ output = {}
+ for column, value in zip(dataframe.columns, row):
+ # Omit NaN values.
+ is_nan = pandas.isna(value)
+
+ # isna() can also return an array-like of bools, but the latter's boolean
+ # value is ambiguous, hence an extra check. An array-like value is *not*
+ # considered a NaN, however.
+ if isinstance(is_nan, bool) and is_nan:
+ continue
+
+ # Convert numpy types to corresponding Python types.
+ # https://stackoverflow.com/a/60441783/101923
+ if isinstance(value, numpy.bool_):
+ value = bool(value)
+ elif isinstance(
+ value,
+ (
+ numpy.int64,
+ numpy.int32,
+ numpy.int16,
+ numpy.int8,
+ numpy.uint64,
+ numpy.uint32,
+ numpy.uint16,
+ numpy.uint8,
+ ),
+ ):
+ value = int(value)
+ output[column] = value
+
+ yield output
+
+
+def verify_pandas_imports():
+ if pandas is None:
+ raise ValueError(_NO_PANDAS_ERROR) from pandas_import_exception
+ if db_dtypes is None:
+ raise ValueError(_NO_DB_TYPES_ERROR) from db_dtypes_import_exception
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/_pyarrow_helpers.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_pyarrow_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c745a611bdb264e134275fb52b14909456c94be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_pyarrow_helpers.py
@@ -0,0 +1,124 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Shared helper functions for connecting BigQuery and pyarrow."""
+
+from typing import Any
+
+from packaging import version
+
+try:
+ import pyarrow # type: ignore
+except ImportError:
+ pyarrow = None
+
+
+def pyarrow_datetime():
+ return pyarrow.timestamp("us", tz=None)
+
+
+def pyarrow_numeric():
+ return pyarrow.decimal128(38, 9)
+
+
+def pyarrow_bignumeric():
+ # 77th digit is partial.
+ # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#decimal_types
+ return pyarrow.decimal256(76, 38)
+
+
+def pyarrow_time():
+ return pyarrow.time64("us")
+
+
+def pyarrow_timestamp():
+ return pyarrow.timestamp("us", tz="UTC")
+
+
+_BQ_TO_ARROW_SCALARS = {}
+_ARROW_SCALAR_IDS_TO_BQ = {}
+
+if pyarrow:
+ # This dictionary is duplicated in bigquery_storage/test/unite/test_reader.py
+ # When modifying it be sure to update it there as well.
+ # Note(todo!!): type "BIGNUMERIC"'s matching pyarrow type is added in _pandas_helpers.py
+ _BQ_TO_ARROW_SCALARS = {
+ "BOOL": pyarrow.bool_,
+ "BOOLEAN": pyarrow.bool_,
+ "BYTES": pyarrow.binary,
+ "DATE": pyarrow.date32,
+ "DATETIME": pyarrow_datetime,
+ "FLOAT": pyarrow.float64,
+ "FLOAT64": pyarrow.float64,
+ "GEOGRAPHY": pyarrow.string,
+ "INT64": pyarrow.int64,
+ "INTEGER": pyarrow.int64,
+ "NUMERIC": pyarrow_numeric,
+ "STRING": pyarrow.string,
+ "TIME": pyarrow_time,
+ "TIMESTAMP": pyarrow_timestamp,
+ }
+
+ _ARROW_SCALAR_IDS_TO_BQ = {
+ # https://arrow.apache.org/docs/python/api/datatypes.html#type-classes
+ pyarrow.bool_().id: "BOOL",
+ pyarrow.int8().id: "INT64",
+ pyarrow.int16().id: "INT64",
+ pyarrow.int32().id: "INT64",
+ pyarrow.int64().id: "INT64",
+ pyarrow.uint8().id: "INT64",
+ pyarrow.uint16().id: "INT64",
+ pyarrow.uint32().id: "INT64",
+ pyarrow.uint64().id: "INT64",
+ pyarrow.float16().id: "FLOAT64",
+ pyarrow.float32().id: "FLOAT64",
+ pyarrow.float64().id: "FLOAT64",
+ pyarrow.time32("ms").id: "TIME",
+ pyarrow.time64("ns").id: "TIME",
+ pyarrow.timestamp("ns").id: "TIMESTAMP",
+ pyarrow.date32().id: "DATE",
+ pyarrow.date64().id: "DATETIME", # because millisecond resolution
+ pyarrow.binary().id: "BYTES",
+ pyarrow.string().id: "STRING", # also alias for pyarrow.utf8()
+ pyarrow.large_string().id: "STRING",
+ # The exact scale and precision don't matter, see below.
+ pyarrow.decimal128(38, scale=9).id: "NUMERIC",
+ }
+
+ # Adds bignumeric support only if pyarrow version >= 3.0.0
+ # Decimal256 support was added to arrow 3.0.0
+ # https://arrow.apache.org/blog/2021/01/25/3.0.0-release/
+ if version.parse(pyarrow.__version__) >= version.parse("3.0.0"):
+ _BQ_TO_ARROW_SCALARS["BIGNUMERIC"] = pyarrow_bignumeric
+ # The exact decimal's scale and precision are not important, as only
+ # the type ID matters, and it's the same for all decimal256 instances.
+ _ARROW_SCALAR_IDS_TO_BQ[pyarrow.decimal256(76, scale=38).id] = "BIGNUMERIC"
+
+
+def bq_to_arrow_scalars(bq_scalar: str):
+ """
+ Returns:
+ The Arrow scalar type that the input BigQuery scalar type maps to.
+ If it cannot find the BigQuery scalar, return None.
+ """
+ return _BQ_TO_ARROW_SCALARS.get(bq_scalar)
+
+
+def arrow_scalar_ids_to_bq(arrow_scalar: Any):
+ """
+ Returns:
+ The BigQuery scalar type that the input arrow scalar type maps to.
+ If it cannot find the arrow scalar, return None.
+ """
+ return _ARROW_SCALAR_IDS_TO_BQ.get(arrow_scalar)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/_tqdm_helpers.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_tqdm_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..22ccee9717332488ada6bef462eab9f4f6056b1e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_tqdm_helpers.py
@@ -0,0 +1,137 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Shared helper functions for tqdm progress bar."""
+
+import concurrent.futures
+import sys
+import time
+import typing
+from typing import Optional
+import warnings
+
+try:
+ import tqdm # type: ignore
+except ImportError:
+ tqdm = None
+
+try:
+ import tqdm.notebook as tqdm_notebook # type: ignore
+except ImportError:
+ tqdm_notebook = None
+
+if typing.TYPE_CHECKING: # pragma: NO COVER
+ from google.cloud.bigquery import QueryJob
+ from google.cloud.bigquery.table import RowIterator
+
+_NO_TQDM_ERROR = (
+ "A progress bar was requested, but there was an error loading the tqdm "
+ "library. Please install tqdm to use the progress bar functionality."
+)
+
+_PROGRESS_BAR_UPDATE_INTERVAL = 0.5
+
+
+def get_progress_bar(progress_bar_type, description, total, unit):
+ """Construct a tqdm progress bar object, if tqdm is installed."""
+ if tqdm is None or tqdm_notebook is None and progress_bar_type == "tqdm_notebook":
+ if progress_bar_type is not None:
+ warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3)
+ return None
+
+ try:
+ if progress_bar_type == "tqdm":
+ return tqdm.tqdm(
+ bar_format="{l_bar}{bar}|",
+ colour="green",
+ desc=description,
+ file=sys.stdout,
+ total=total,
+ unit=unit,
+ )
+ elif progress_bar_type == "tqdm_notebook":
+ return tqdm_notebook.tqdm(
+ bar_format="{l_bar}{bar}|",
+ desc=description,
+ file=sys.stdout,
+ total=total,
+ unit=unit,
+ )
+ elif progress_bar_type == "tqdm_gui":
+ return tqdm.tqdm_gui(desc=description, total=total, unit=unit)
+ except (KeyError, TypeError): # pragma: NO COVER
+ # Protect ourselves from any tqdm errors. In case of
+ # unexpected tqdm behavior, just fall back to showing
+ # no progress bar.
+ warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3)
+ return None
+
+
+def wait_for_query(
+ query_job: "QueryJob",
+ progress_bar_type: Optional[str] = None,
+ max_results: Optional[int] = None,
+) -> "RowIterator":
+ """Return query result and display a progress bar while the query running, if tqdm is installed.
+
+ Args:
+ query_job:
+ The job representing the execution of the query on the server.
+ progress_bar_type:
+ The type of progress bar to use to show query progress.
+ max_results:
+ The maximum number of rows the row iterator should return.
+
+ Returns:
+ A row iterator over the query results.
+ """
+ default_total = 1
+ current_stage = None
+ start_time = time.perf_counter()
+
+ progress_bar = get_progress_bar(
+ progress_bar_type, "Query is running", default_total, "query"
+ )
+ if progress_bar is None:
+ return query_job.result(max_results=max_results)
+
+ i = 0
+ while True:
+ if query_job.query_plan:
+ default_total = len(query_job.query_plan)
+ current_stage = query_job.query_plan[i]
+ progress_bar.total = len(query_job.query_plan)
+ progress_bar.set_description(
+ f"Query executing stage {current_stage.name} and status {current_stage.status} : {time.perf_counter() - start_time:.2f}s"
+ )
+ try:
+ query_result = query_job.result(
+ timeout=_PROGRESS_BAR_UPDATE_INTERVAL, max_results=max_results
+ )
+ progress_bar.update(default_total)
+ progress_bar.set_description(
+ f"Job ID {query_job.job_id} successfully executed",
+ )
+ break
+ except concurrent.futures.TimeoutError:
+ query_job.reload() # Refreshes the state via a GET request.
+ if current_stage:
+ if current_stage.status == "COMPLETE":
+ if i < default_total - 1:
+ progress_bar.update(i + 1)
+ i += 1
+ continue
+
+ progress_bar.close()
+ return query_result
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/_versions_helpers.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_versions_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfbf70a8edbea2b42dd5969a28f5c8bbf0f4076f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/_versions_helpers.py
@@ -0,0 +1,264 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Shared helper functions for verifying versions of installed modules."""
+
+import sys
+from typing import Any
+
+import packaging.version
+
+from google.cloud.bigquery import exceptions
+
+
+_MIN_PYARROW_VERSION = packaging.version.Version("3.0.0")
+_MIN_BQ_STORAGE_VERSION = packaging.version.Version("2.0.0")
+_BQ_STORAGE_OPTIONAL_READ_SESSION_VERSION = packaging.version.Version("2.6.0")
+_MIN_PANDAS_VERSION = packaging.version.Version("1.1.0")
+
+_MIN_PANDAS_VERSION_RANGE = packaging.version.Version("1.5.0")
+_MIN_PYARROW_VERSION_RANGE = packaging.version.Version("10.0.1")
+
+
+class PyarrowVersions:
+ """Version comparisons for pyarrow package."""
+
+ def __init__(self):
+ self._installed_version = None
+
+ @property
+ def installed_version(self) -> packaging.version.Version:
+ """Return the parsed version of pyarrow."""
+ if self._installed_version is None:
+ import pyarrow # type: ignore
+
+ self._installed_version = packaging.version.parse(
+ # Use 0.0.0, since it is earlier than any released version.
+ # Legacy versions also have the same property, but
+ # creating a LegacyVersion has been deprecated.
+ # https://github.com/pypa/packaging/issues/321
+ getattr(pyarrow, "__version__", "0.0.0")
+ )
+
+ return self._installed_version
+
+ @property
+ def use_compliant_nested_type(self) -> bool:
+ return self.installed_version.major >= 4
+
+ def try_import(self, raise_if_error: bool = False) -> Any:
+ """Verifies that a recent enough version of pyarrow extra is installed.
+
+ The function assumes that pyarrow extra is installed, and should thus
+ be used in places where this assumption holds.
+
+ Because `pip` can install an outdated version of this extra despite
+ the constraints in `setup.py`, the calling code can use this helper
+ to verify the version compatibility at runtime.
+
+ Returns:
+ The ``pyarrow`` module or ``None``.
+
+ Raises:
+ exceptions.LegacyPyarrowError:
+ If the pyarrow package is outdated and ``raise_if_error`` is
+ ``True``.
+ """
+ try:
+ import pyarrow
+ except ImportError as exc:
+ if raise_if_error:
+ raise exceptions.LegacyPyarrowError(
+ "pyarrow package not found. Install pyarrow version >="
+ f" {_MIN_PYARROW_VERSION}."
+ ) from exc
+ return None
+
+ if self.installed_version < _MIN_PYARROW_VERSION:
+ if raise_if_error:
+ msg = (
+ "Dependency pyarrow is outdated, please upgrade"
+ f" it to version >= {_MIN_PYARROW_VERSION}"
+ f" (version found: {self.installed_version})."
+ )
+ raise exceptions.LegacyPyarrowError(msg)
+ return None
+
+ return pyarrow
+
+
+PYARROW_VERSIONS = PyarrowVersions()
+
+
+class BQStorageVersions:
+ """Version comparisons for google-cloud-bigqueyr-storage package."""
+
+ def __init__(self):
+ self._installed_version = None
+
+ @property
+ def installed_version(self) -> packaging.version.Version:
+ """Return the parsed version of google-cloud-bigquery-storage."""
+ if self._installed_version is None:
+ from google.cloud import bigquery_storage
+
+ self._installed_version = packaging.version.parse(
+ # Use 0.0.0, since it is earlier than any released version.
+ # Legacy versions also have the same property, but
+ # creating a LegacyVersion has been deprecated.
+ # https://github.com/pypa/packaging/issues/321
+ getattr(bigquery_storage, "__version__", "0.0.0")
+ )
+
+ return self._installed_version # type: ignore
+
+ @property
+ def is_read_session_optional(self) -> bool:
+ """True if read_session is optional to rows().
+
+ See: https://github.com/googleapis/python-bigquery-storage/pull/228
+ """
+ return self.installed_version >= _BQ_STORAGE_OPTIONAL_READ_SESSION_VERSION
+
+ def try_import(self, raise_if_error: bool = False) -> Any:
+ """Tries to import the bigquery_storage module, and returns results
+ accordingly. It also verifies the module version is recent enough.
+
+ If the import succeeds, returns the ``bigquery_storage`` module.
+
+ If the import fails,
+ returns ``None`` when ``raise_if_error == False``,
+ raises Error when ``raise_if_error == True``.
+
+ Returns:
+ The ``bigquery_storage`` module or ``None``.
+
+ Raises:
+ exceptions.BigQueryStorageNotFoundError:
+ If google-cloud-bigquery-storage is not installed
+ exceptions.LegacyBigQueryStorageError:
+ If google-cloud-bigquery-storage package is outdated
+ """
+ try:
+ from google.cloud import bigquery_storage # type: ignore
+ except ImportError:
+ if raise_if_error:
+ msg = (
+ "Package google-cloud-bigquery-storage not found. "
+ "Install google-cloud-bigquery-storage version >= "
+ f"{_MIN_BQ_STORAGE_VERSION}."
+ )
+ raise exceptions.BigQueryStorageNotFoundError(msg)
+ return None
+
+ if self.installed_version < _MIN_BQ_STORAGE_VERSION:
+ if raise_if_error:
+ msg = (
+ "Dependency google-cloud-bigquery-storage is outdated, "
+ f"please upgrade it to version >= {_MIN_BQ_STORAGE_VERSION} "
+ f"(version found: {self.installed_version})."
+ )
+ raise exceptions.LegacyBigQueryStorageError(msg)
+ return None
+
+ return bigquery_storage
+
+
+BQ_STORAGE_VERSIONS = BQStorageVersions()
+
+
+class PandasVersions:
+ """Version comparisons for pandas package."""
+
+ def __init__(self):
+ self._installed_version = None
+
+ @property
+ def installed_version(self) -> packaging.version.Version:
+ """Return the parsed version of pandas"""
+ if self._installed_version is None:
+ import pandas # type: ignore
+
+ self._installed_version = packaging.version.parse(
+ # Use 0.0.0, since it is earlier than any released version.
+ # Legacy versions also have the same property, but
+ # creating a LegacyVersion has been deprecated.
+ # https://github.com/pypa/packaging/issues/321
+ getattr(pandas, "__version__", "0.0.0")
+ )
+
+ return self._installed_version
+
+ def try_import(self, raise_if_error: bool = False) -> Any:
+ """Verify that a recent enough version of pandas extra is installed.
+ The function assumes that pandas extra is installed, and should thus
+ be used in places where this assumption holds.
+ Because `pip` can install an outdated version of this extra despite
+ the constraints in `setup.py`, the calling code can use this helper
+ to verify the version compatibility at runtime.
+ Returns:
+ The ``pandas`` module or ``None``.
+ Raises:
+ exceptions.LegacyPandasError:
+ If the pandas package is outdated and ``raise_if_error`` is
+ ``True``.
+ """
+ try:
+ import pandas
+ except ImportError as exc:
+ if raise_if_error:
+ raise exceptions.LegacyPandasError(
+ "pandas package not found. Install pandas version >="
+ f" {_MIN_PANDAS_VERSION}"
+ ) from exc
+ return None
+
+ if self.installed_version < _MIN_PANDAS_VERSION:
+ if raise_if_error:
+ msg = (
+ "Dependency pandas is outdated, please upgrade"
+ f" it to version >= {_MIN_PANDAS_VERSION}"
+ f" (version found: {self.installed_version})."
+ )
+ raise exceptions.LegacyPandasError(msg)
+ return None
+
+ return pandas
+
+
+PANDAS_VERSIONS = PandasVersions()
+
+# Since RANGE support in pandas requires specific versions
+# of both pyarrow and pandas, we make this a separate
+# constant instead of as a property of PANDAS_VERSIONS
+# or PYARROW_VERSIONS.
+SUPPORTS_RANGE_PYARROW = (
+ PANDAS_VERSIONS.try_import() is not None
+ and PANDAS_VERSIONS.installed_version >= _MIN_PANDAS_VERSION_RANGE
+ and PYARROW_VERSIONS.try_import() is not None
+ and PYARROW_VERSIONS.installed_version >= _MIN_PYARROW_VERSION_RANGE
+)
+
+
+def extract_runtime_version():
+ # Retrieve the version information
+ version_info = sys.version_info
+
+ # Extract the major, minor, and micro components
+ major = version_info.major
+ minor = version_info.minor
+ micro = version_info.micro
+
+ # Display the version number in a clear format
+ return major, minor, micro
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/client.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c222f2ddb7c2f3346d0ee87bc61bc64880a4a8e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/client.py
@@ -0,0 +1,4388 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for interacting with the Google BigQuery API."""
+
+from __future__ import absolute_import
+from __future__ import division
+
+from collections import abc as collections_abc
+import copy
+import datetime
+import functools
+import gzip
+import io
+import itertools
+import json
+import math
+import os
+import tempfile
+import typing
+from typing import (
+ Any,
+ Dict,
+ IO,
+ Iterable,
+ Mapping,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
+import uuid
+import warnings
+
+from google import resumable_media # type: ignore
+from google.resumable_media.requests import MultipartUpload # type: ignore
+from google.resumable_media.requests import ResumableUpload
+
+import google.api_core.client_options
+import google.api_core.exceptions as core_exceptions
+from google.api_core.iam import Policy
+from google.api_core import page_iterator
+from google.api_core import retry as retries
+import google.cloud._helpers # type: ignore
+from google.cloud import exceptions # pytype: disable=import-error
+from google.cloud.client import ClientWithProject # type: ignore # pytype: disable=import-error
+
+try:
+ from google.cloud.bigquery_storage_v1.services.big_query_read.client import (
+ DEFAULT_CLIENT_INFO as DEFAULT_BQSTORAGE_CLIENT_INFO,
+ )
+except ImportError:
+ DEFAULT_BQSTORAGE_CLIENT_INFO = None # type: ignore
+
+
+from google.cloud.bigquery._http import Connection
+from google.cloud.bigquery import _job_helpers
+from google.cloud.bigquery import _pandas_helpers
+from google.cloud.bigquery import _versions_helpers
+from google.cloud.bigquery import enums
+from google.cloud.bigquery import exceptions as bq_exceptions
+from google.cloud.bigquery import job
+from google.cloud.bigquery._helpers import _get_sub_prop
+from google.cloud.bigquery._helpers import _record_field_to_json
+from google.cloud.bigquery._helpers import _str_or_none
+from google.cloud.bigquery._helpers import _verify_job_config_type
+from google.cloud.bigquery._helpers import _get_bigquery_host
+from google.cloud.bigquery._helpers import _DEFAULT_HOST
+from google.cloud.bigquery._helpers import _DEFAULT_HOST_TEMPLATE
+from google.cloud.bigquery._helpers import _DEFAULT_UNIVERSE
+from google.cloud.bigquery._helpers import _validate_universe
+from google.cloud.bigquery._helpers import _get_client_universe
+from google.cloud.bigquery._helpers import TimeoutType
+from google.cloud.bigquery._job_helpers import make_job_id as _make_job_id
+from google.cloud.bigquery.dataset import Dataset
+from google.cloud.bigquery.dataset import DatasetListItem
+from google.cloud.bigquery.dataset import DatasetReference
+from google.cloud.bigquery.enums import AutoRowIDs
+from google.cloud.bigquery.format_options import ParquetOptions
+from google.cloud.bigquery.job import (
+ CopyJob,
+ CopyJobConfig,
+ ExtractJob,
+ ExtractJobConfig,
+ LoadJob,
+ LoadJobConfig,
+ QueryJob,
+ QueryJobConfig,
+)
+from google.cloud.bigquery.model import Model
+from google.cloud.bigquery.model import ModelReference
+from google.cloud.bigquery.model import _model_arg_to_model_ref
+from google.cloud.bigquery.opentelemetry_tracing import create_span
+from google.cloud.bigquery.query import _QueryResults
+from google.cloud.bigquery.retry import (
+ DEFAULT_JOB_RETRY,
+ DEFAULT_RETRY,
+ DEFAULT_TIMEOUT,
+ DEFAULT_GET_JOB_TIMEOUT,
+ POLLING_DEFAULT_VALUE,
+)
+from google.cloud.bigquery.routine import Routine
+from google.cloud.bigquery.routine import RoutineReference
+from google.cloud.bigquery.schema import SchemaField
+from google.cloud.bigquery.table import _table_arg_to_table
+from google.cloud.bigquery.table import _table_arg_to_table_ref
+from google.cloud.bigquery.table import Table
+from google.cloud.bigquery.table import TableListItem
+from google.cloud.bigquery.table import TableReference
+from google.cloud.bigquery.table import RowIterator
+
+pyarrow = _versions_helpers.PYARROW_VERSIONS.try_import()
+pandas = (
+ _versions_helpers.PANDAS_VERSIONS.try_import()
+) # mypy check fails because pandas import is outside module, there are type: ignore comments related to this
+
+ResumableTimeoutType = Union[
+ None, float, Tuple[float, float]
+] # for resumable media methods
+
+if typing.TYPE_CHECKING: # pragma: NO COVER
+ # os.PathLike is only subscriptable in Python 3.9+, thus shielding with a condition.
+ PathType = Union[str, bytes, os.PathLike[str], os.PathLike[bytes]]
+ import requests # required by api-core
+
+_DEFAULT_CHUNKSIZE = 100 * 1024 * 1024 # 100 MB
+_MAX_MULTIPART_SIZE = 5 * 1024 * 1024
+_DEFAULT_NUM_RETRIES = 6
+_BASE_UPLOAD_TEMPLATE = "{host}/upload/bigquery/v2/projects/{project}/jobs?uploadType="
+_MULTIPART_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + "multipart"
+_RESUMABLE_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + "resumable"
+_GENERIC_CONTENT_TYPE = "*/*"
+_READ_LESS_THAN_SIZE = (
+ "Size {:d} was specified but the file-like object only had " "{:d} bytes remaining."
+)
+_NEED_TABLE_ARGUMENT = (
+ "The table argument should be a table ID string, Table, or TableReference"
+)
+_LIST_ROWS_FROM_QUERY_RESULTS_FIELDS = "jobReference,totalRows,pageToken,rows"
+
+# In microbenchmarks, it's been shown that even in ideal conditions (query
+# finished, local data), requests to getQueryResults can take 10+ seconds.
+# In less-than-ideal situations, the response can take even longer, as it must
+# be able to download a full 100+ MB row in that time. Don't let the
+# connection timeout before data can be downloaded.
+# https://github.com/googleapis/python-bigquery/issues/438
+_MIN_GET_QUERY_RESULTS_TIMEOUT = 120
+
+TIMEOUT_HEADER = "X-Server-Timeout"
+
+
+class Project(object):
+ """Wrapper for resource describing a BigQuery project.
+
+ Args:
+ project_id (str): Opaque ID of the project
+
+ numeric_id (int): Numeric ID of the project
+
+ friendly_name (str): Display name of the project
+ """
+
+ def __init__(self, project_id, numeric_id, friendly_name):
+ self.project_id = project_id
+ self.numeric_id = numeric_id
+ self.friendly_name = friendly_name
+
+ @classmethod
+ def from_api_repr(cls, resource):
+ """Factory: construct an instance from a resource dict."""
+ return cls(resource["id"], resource["numericId"], resource["friendlyName"])
+
+
+class Client(ClientWithProject):
+ """Client to bundle configuration needed for API requests.
+
+ Args:
+ project (Optional[str]):
+ Project ID for the project which the client acts on behalf of.
+ Will be passed when creating a dataset / job. If not passed,
+ falls back to the default inferred from the environment.
+ credentials (Optional[google.auth.credentials.Credentials]):
+ The OAuth2 Credentials to use for this client. If not passed
+ (and if no ``_http`` object is passed), falls back to the
+ default inferred from the environment.
+ _http (Optional[requests.Session]):
+ HTTP object to make requests. Can be any object that
+ defines ``request()`` with the same interface as
+ :meth:`requests.Session.request`. If not passed, an ``_http``
+ object is created that is bound to the ``credentials`` for the
+ current object.
+ This parameter should be considered private, and could change in
+ the future.
+ location (Optional[str]):
+ Default location for jobs / datasets / tables.
+ default_query_job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
+ Default ``QueryJobConfig``.
+ Will be merged into job configs passed into the ``query`` method.
+ default_load_job_config (Optional[google.cloud.bigquery.job.LoadJobConfig]):
+ Default ``LoadJobConfig``.
+ Will be merged into job configs passed into the ``load_table_*`` methods.
+ client_info (Optional[google.api_core.client_info.ClientInfo]):
+ The client info used to send a user-agent string along with API
+ requests. If ``None``, then default info will be used. Generally,
+ you only need to set this if you're developing your own library
+ or partner tool.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, Dict]]):
+ Client options used to set user options on the client. API Endpoint
+ should be set through client_options.
+
+ Raises:
+ google.auth.exceptions.DefaultCredentialsError:
+ Raised if ``credentials`` is not specified and the library fails
+ to acquire default credentials.
+ """
+
+ SCOPE = ("https://www.googleapis.com/auth/cloud-platform",) # type: ignore
+ """The scopes required for authenticating as a BigQuery consumer."""
+
+ def __init__(
+ self,
+ project=None,
+ credentials=None,
+ _http=None,
+ location=None,
+ default_query_job_config=None,
+ default_load_job_config=None,
+ client_info=None,
+ client_options=None,
+ ) -> None:
+ super(Client, self).__init__(
+ project=project,
+ credentials=credentials,
+ client_options=client_options,
+ _http=_http,
+ )
+
+ kw_args = {"client_info": client_info}
+ bq_host = _get_bigquery_host()
+ kw_args["api_endpoint"] = bq_host if bq_host != _DEFAULT_HOST else None
+ client_universe = None
+ if client_options is None:
+ client_options = {}
+ if isinstance(client_options, dict):
+ client_options = google.api_core.client_options.from_dict(client_options)
+ if client_options.api_endpoint:
+ api_endpoint = client_options.api_endpoint
+ kw_args["api_endpoint"] = api_endpoint
+ else:
+ client_universe = _get_client_universe(client_options)
+ if client_universe != _DEFAULT_UNIVERSE:
+ kw_args["api_endpoint"] = _DEFAULT_HOST_TEMPLATE.replace(
+ "{UNIVERSE_DOMAIN}", client_universe
+ )
+ # Ensure credentials and universe are not in conflict.
+ if hasattr(self, "_credentials") and client_universe is not None:
+ _validate_universe(client_universe, self._credentials)
+
+ self._connection = Connection(self, **kw_args)
+ self._location = location
+ self._default_load_job_config = copy.deepcopy(default_load_job_config)
+
+ # Use property setter so validation can run.
+ self.default_query_job_config = default_query_job_config
+
+ @property
+ def location(self):
+ """Default location for jobs / datasets / tables."""
+ return self._location
+
+ @property
+ def default_query_job_config(self) -> Optional[QueryJobConfig]:
+ """Default ``QueryJobConfig`` or ``None``.
+
+ Will be merged into job configs passed into the ``query`` or
+ ``query_and_wait`` methods.
+ """
+ return self._default_query_job_config
+
+ @default_query_job_config.setter
+ def default_query_job_config(self, value: Optional[QueryJobConfig]):
+ if value is not None:
+ _verify_job_config_type(
+ value, QueryJobConfig, param_name="default_query_job_config"
+ )
+ self._default_query_job_config = copy.deepcopy(value)
+
+ @property
+ def default_load_job_config(self):
+ """Default ``LoadJobConfig``.
+ Will be merged into job configs passed into the ``load_table_*`` methods.
+ """
+ return self._default_load_job_config
+
+ @default_load_job_config.setter
+ def default_load_job_config(self, value: LoadJobConfig):
+ self._default_load_job_config = copy.deepcopy(value)
+
+ def close(self):
+ """Close the underlying transport objects, releasing system resources.
+
+ .. note::
+
+ The client instance can be used for making additional requests even
+ after closing, in which case the underlying connections are
+ automatically re-created.
+ """
+ self._http._auth_request.session.close()
+ self._http.close()
+
+ def get_service_account_email(
+ self,
+ project: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> str:
+ """Get the email address of the project's BigQuery service account
+
+ Note:
+ This is the service account that BigQuery uses to manage tables
+ encrypted by a key in KMS.
+
+ Args:
+ project (Optional[str]):
+ Project ID to use for retreiving service account email.
+ Defaults to the client's project.
+ retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ str:
+ service account email address
+
+ Example:
+
+ >>> from google.cloud import bigquery
+ >>> client = bigquery.Client()
+ >>> client.get_service_account_email()
+ my_service_account@my-project.iam.gserviceaccount.com
+
+ """
+ if project is None:
+ project = self.project
+ path = "/projects/%s/serviceAccount" % (project,)
+ span_attributes = {"path": path}
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.getServiceAccountEmail",
+ span_attributes=span_attributes,
+ method="GET",
+ path=path,
+ timeout=timeout,
+ )
+ return api_response["email"]
+
+ def list_projects(
+ self,
+ max_results: Optional[int] = None,
+ page_token: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ page_size: Optional[int] = None,
+ ) -> page_iterator.Iterator:
+ """List projects for the project associated with this client.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
+
+ Args:
+ max_results (Optional[int]):
+ Maximum number of projects to return.
+ Defaults to a value set by the API.
+
+ page_token (Optional[str]):
+ Token representing a cursor into the projects. If not passed,
+ the API will return the first page of projects. The token marks
+ the beginning of the iterator to be returned and the value of
+ the ``page_token`` can be accessed at ``next_page_token`` of the
+ :class:`~google.api_core.page_iterator.HTTPIterator`.
+
+ retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
+
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ page_size (Optional[int]):
+ Maximum number of projects to return in each page.
+ Defaults to a value set by the API.
+
+ Returns:
+ google.api_core.page_iterator.Iterator:
+ Iterator of :class:`~google.cloud.bigquery.client.Project`
+ accessible to the current client.
+ """
+ span_attributes = {"path": "/projects"}
+
+ def api_request(*args, **kwargs):
+ return self._call_api(
+ retry,
+ span_name="BigQuery.listProjects",
+ span_attributes=span_attributes,
+ *args,
+ timeout=timeout,
+ **kwargs,
+ )
+
+ return page_iterator.HTTPIterator(
+ client=self,
+ api_request=api_request,
+ path="/projects",
+ item_to_value=_item_to_project,
+ items_key="projects",
+ page_token=page_token,
+ max_results=max_results,
+ page_size=page_size,
+ )
+
+ def list_datasets(
+ self,
+ project: Optional[str] = None,
+ include_all: bool = False,
+ filter: Optional[str] = None,
+ max_results: Optional[int] = None,
+ page_token: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ page_size: Optional[int] = None,
+ ) -> page_iterator.Iterator:
+ """List datasets for the project associated with this client.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
+
+ Args:
+ project (Optional[str]):
+ Project ID to use for retreiving datasets. Defaults to the
+ client's project.
+ include_all (Optional[bool]):
+ True if results include hidden datasets. Defaults to False.
+ filter (Optional[str]):
+ An expression for filtering the results by label.
+ For syntax, see
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#body.QUERY_PARAMETERS.filter
+ max_results (Optional[int]):
+ Maximum number of datasets to return.
+ page_token (Optional[str]):
+ Token representing a cursor into the datasets. If not passed,
+ the API will return the first page of datasets. The token marks
+ the beginning of the iterator to be returned and the value of
+ the ``page_token`` can be accessed at ``next_page_token`` of the
+ :class:`~google.api_core.page_iterator.HTTPIterator`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ page_size (Optional[int]):
+ Maximum number of datasets to return per page.
+
+ Returns:
+ google.api_core.page_iterator.Iterator:
+ Iterator of :class:`~google.cloud.bigquery.dataset.DatasetListItem`.
+ associated with the project.
+ """
+ extra_params: Dict[str, Any] = {}
+ if project is None:
+ project = self.project
+ if include_all:
+ extra_params["all"] = True
+ if filter:
+ # TODO: consider supporting a dict of label -> value for filter,
+ # and converting it into a string here.
+ extra_params["filter"] = filter
+ path = "/projects/%s/datasets" % (project,)
+
+ span_attributes = {"path": path}
+
+ def api_request(*args, **kwargs):
+ return self._call_api(
+ retry,
+ span_name="BigQuery.listDatasets",
+ span_attributes=span_attributes,
+ *args,
+ timeout=timeout,
+ **kwargs,
+ )
+
+ return page_iterator.HTTPIterator(
+ client=self,
+ api_request=api_request,
+ path=path,
+ item_to_value=_item_to_dataset,
+ items_key="datasets",
+ page_token=page_token,
+ max_results=max_results,
+ extra_params=extra_params,
+ page_size=page_size,
+ )
+
+ def dataset(
+ self, dataset_id: str, project: Optional[str] = None
+ ) -> DatasetReference:
+ """Deprecated: Construct a reference to a dataset.
+
+ .. deprecated:: 1.24.0
+ Construct a
+ :class:`~google.cloud.bigquery.dataset.DatasetReference` using its
+ constructor or use a string where previously a reference object
+ was used.
+
+ As of ``google-cloud-bigquery`` version 1.7.0, all client methods
+ that take a
+ :class:`~google.cloud.bigquery.dataset.DatasetReference` or
+ :class:`~google.cloud.bigquery.table.TableReference` also take a
+ string in standard SQL format, e.g. ``project.dataset_id`` or
+ ``project.dataset_id.table_id``.
+
+ Args:
+ dataset_id (str): ID of the dataset.
+
+ project (Optional[str]):
+ Project ID for the dataset (defaults to the project of the client).
+
+ Returns:
+ google.cloud.bigquery.dataset.DatasetReference:
+ a new ``DatasetReference`` instance.
+ """
+ if project is None:
+ project = self.project
+
+ warnings.warn(
+ "Client.dataset is deprecated and will be removed in a future version. "
+ "Use a string like 'my_project.my_dataset' or a "
+ "cloud.google.bigquery.DatasetReference object, instead.",
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+ return DatasetReference(project, dataset_id)
+
+ def _ensure_bqstorage_client(
+ self,
+ bqstorage_client: Optional[
+ "google.cloud.bigquery_storage.BigQueryReadClient"
+ ] = None,
+ client_options: Optional[google.api_core.client_options.ClientOptions] = None,
+ client_info: Optional[
+ "google.api_core.gapic_v1.client_info.ClientInfo"
+ ] = DEFAULT_BQSTORAGE_CLIENT_INFO,
+ ) -> Optional["google.cloud.bigquery_storage.BigQueryReadClient"]:
+ """Create a BigQuery Storage API client using this client's credentials.
+
+ Args:
+ bqstorage_client:
+ An existing BigQuery Storage client instance. If ``None``, a new
+ instance is created and returned.
+ client_options:
+ Custom options used with a new BigQuery Storage client instance
+ if one is created.
+ client_info:
+ The client info used with a new BigQuery Storage client
+ instance if one is created.
+
+ Returns:
+ A BigQuery Storage API client.
+ """
+
+ try:
+ bigquery_storage = _versions_helpers.BQ_STORAGE_VERSIONS.try_import(
+ raise_if_error=True
+ )
+ except bq_exceptions.BigQueryStorageNotFoundError:
+ warnings.warn(
+ "Cannot create BigQuery Storage client, the dependency "
+ "google-cloud-bigquery-storage is not installed."
+ )
+ return None
+ except bq_exceptions.LegacyBigQueryStorageError as exc:
+ warnings.warn(
+ "Dependency google-cloud-bigquery-storage is outdated: " + str(exc)
+ )
+ return None
+
+ if bqstorage_client is None: # pragma: NO COVER
+ bqstorage_client = bigquery_storage.BigQueryReadClient(
+ credentials=self._credentials,
+ client_options=client_options,
+ client_info=client_info, # type: ignore # (None is also accepted)
+ )
+
+ return bqstorage_client
+
+ def _dataset_from_arg(self, dataset) -> Union[Dataset, DatasetReference]:
+ if isinstance(dataset, str):
+ dataset = DatasetReference.from_string(
+ dataset, default_project=self.project
+ )
+
+ if not isinstance(dataset, (Dataset, DatasetReference)):
+ if isinstance(dataset, DatasetListItem):
+ dataset = dataset.reference
+ else:
+ raise TypeError(
+ "dataset must be a Dataset, DatasetReference, DatasetListItem,"
+ " or string"
+ )
+ return dataset
+
+ def create_dataset(
+ self,
+ dataset: Union[str, Dataset, DatasetReference, DatasetListItem],
+ exists_ok: bool = False,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Dataset:
+ """API call: create the dataset via a POST request.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
+
+ Args:
+ dataset (Union[ \
+ google.cloud.bigquery.dataset.Dataset, \
+ google.cloud.bigquery.dataset.DatasetReference, \
+ google.cloud.bigquery.dataset.DatasetListItem, \
+ str, \
+ ]):
+ A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
+ If ``dataset`` is a reference, an empty dataset is created
+ with the specified ID and client's default location.
+ exists_ok (Optional[bool]):
+ Defaults to ``False``. If ``True``, ignore "already exists"
+ errors when creating the dataset.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.dataset.Dataset:
+ A new ``Dataset`` returned from the API.
+
+ Raises:
+ google.cloud.exceptions.Conflict:
+ If the dataset already exists.
+
+ Example:
+
+ >>> from google.cloud import bigquery
+ >>> client = bigquery.Client()
+ >>> dataset = bigquery.Dataset('my_project.my_dataset')
+ >>> dataset = client.create_dataset(dataset)
+
+ """
+ dataset = self._dataset_from_arg(dataset)
+ if isinstance(dataset, DatasetReference):
+ dataset = Dataset(dataset)
+
+ path = "/projects/%s/datasets" % (dataset.project,)
+
+ data = dataset.to_api_repr()
+ if data.get("location") is None and self.location is not None:
+ data["location"] = self.location
+
+ try:
+ span_attributes = {"path": path}
+
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.createDataset",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=data,
+ timeout=timeout,
+ )
+ return Dataset.from_api_repr(api_response)
+ except core_exceptions.Conflict:
+ if not exists_ok:
+ raise
+ return self.get_dataset(dataset.reference, retry=retry)
+
+ def create_routine(
+ self,
+ routine: Routine,
+ exists_ok: bool = False,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Routine:
+ """[Beta] Create a routine via a POST request.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/insert
+
+ Args:
+ routine (google.cloud.bigquery.routine.Routine):
+ A :class:`~google.cloud.bigquery.routine.Routine` to create.
+ The dataset that the routine belongs to must already exist.
+ exists_ok (Optional[bool]):
+ Defaults to ``False``. If ``True``, ignore "already exists"
+ errors when creating the routine.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.routine.Routine:
+ A new ``Routine`` returned from the service.
+
+ Raises:
+ google.cloud.exceptions.Conflict:
+ If the routine already exists.
+ """
+ reference = routine.reference
+ path = "/projects/{}/datasets/{}/routines".format(
+ reference.project, reference.dataset_id
+ )
+ resource = routine.to_api_repr()
+ try:
+ span_attributes = {"path": path}
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.createRoutine",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=resource,
+ timeout=timeout,
+ )
+ return Routine.from_api_repr(api_response)
+ except core_exceptions.Conflict:
+ if not exists_ok:
+ raise
+ return self.get_routine(routine.reference, retry=retry)
+
+ def create_table(
+ self,
+ table: Union[str, Table, TableReference, TableListItem],
+ exists_ok: bool = False,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Table:
+ """API call: create a table via a PUT request
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ ]):
+ A :class:`~google.cloud.bigquery.table.Table` to create.
+ If ``table`` is a reference, an empty table is created
+ with the specified ID. The dataset that the table belongs to
+ must already exist.
+ exists_ok (Optional[bool]):
+ Defaults to ``False``. If ``True``, ignore "already exists"
+ errors when creating the table.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.table.Table:
+ A new ``Table`` returned from the service.
+
+ Raises:
+ google.cloud.exceptions.Conflict:
+ If the table already exists.
+ """
+ table = _table_arg_to_table(table, default_project=self.project)
+ dataset_id = table.dataset_id
+ path = "/projects/%s/datasets/%s/tables" % (table.project, dataset_id)
+ data = table.to_api_repr()
+ try:
+ span_attributes = {"path": path, "dataset_id": dataset_id}
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.createTable",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=data,
+ timeout=timeout,
+ )
+ return Table.from_api_repr(api_response)
+ except core_exceptions.Conflict:
+ if not exists_ok:
+ raise
+ return self.get_table(table.reference, retry=retry)
+
+ def _call_api(
+ self,
+ retry,
+ span_name=None,
+ span_attributes=None,
+ job_ref=None,
+ headers: Optional[Dict[str, str]] = None,
+ **kwargs,
+ ):
+ kwargs = _add_server_timeout_header(headers, kwargs)
+ call = functools.partial(self._connection.api_request, **kwargs)
+
+ if retry:
+ call = retry(call)
+
+ if span_name is not None:
+ with create_span(
+ name=span_name, attributes=span_attributes, client=self, job_ref=job_ref
+ ):
+ return call()
+
+ return call()
+
+ def get_dataset(
+ self,
+ dataset_ref: Union[DatasetReference, str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Dataset:
+ """Fetch the dataset referenced by ``dataset_ref``
+
+ Args:
+ dataset_ref (Union[ \
+ google.cloud.bigquery.dataset.DatasetReference, \
+ str, \
+ ]):
+ A reference to the dataset to fetch from the BigQuery API.
+ If a string is passed in, this method attempts to create a
+ dataset reference from a string using
+ :func:`~google.cloud.bigquery.dataset.DatasetReference.from_string`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.dataset.Dataset:
+ A ``Dataset`` instance.
+ """
+ if isinstance(dataset_ref, str):
+ dataset_ref = DatasetReference.from_string(
+ dataset_ref, default_project=self.project
+ )
+ path = dataset_ref.path
+ span_attributes = {"path": path}
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.getDataset",
+ span_attributes=span_attributes,
+ method="GET",
+ path=path,
+ timeout=timeout,
+ )
+ return Dataset.from_api_repr(api_response)
+
+ def get_iam_policy(
+ self,
+ table: Union[Table, TableReference, TableListItem, str],
+ requested_policy_version: int = 1,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Policy:
+ """Return the access control policy for a table resource.
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ ]):
+ The table to get the access control policy for.
+ If a string is passed in, this method attempts to create a
+ table reference from a string using
+ :func:`~google.cloud.bigquery.table.TableReference.from_string`.
+ requested_policy_version (int):
+ Optional. The maximum policy version that will be used to format the policy.
+
+ Only version ``1`` is currently supported.
+
+ See: https://cloud.google.com/bigquery/docs/reference/rest/v2/GetPolicyOptions
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.api_core.iam.Policy:
+ The access control policy.
+ """
+ table = _table_arg_to_table_ref(table, default_project=self.project)
+
+ if requested_policy_version != 1:
+ raise ValueError("only IAM policy version 1 is supported")
+
+ body = {"options": {"requestedPolicyVersion": 1}}
+
+ path = "{}:getIamPolicy".format(table.path)
+ span_attributes = {"path": path}
+ response = self._call_api(
+ retry,
+ span_name="BigQuery.getIamPolicy",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=body,
+ timeout=timeout,
+ )
+
+ return Policy.from_api_repr(response)
+
+ def set_iam_policy(
+ self,
+ table: Union[Table, TableReference, TableListItem, str],
+ policy: Policy,
+ updateMask: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ *,
+ fields: Sequence[str] = (),
+ ) -> Policy:
+ """Return the access control policy for a table resource.
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ ]):
+ The table to get the access control policy for.
+ If a string is passed in, this method attempts to create a
+ table reference from a string using
+ :func:`~google.cloud.bigquery.table.TableReference.from_string`.
+ policy (google.api_core.iam.Policy):
+ The access control policy to set.
+ updateMask (Optional[str]):
+ Mask as defined by
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/setIamPolicy#body.request_body.FIELDS.update_mask
+
+ Incompatible with ``fields``.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ fields (Sequence[str]):
+ Which properties to set on the policy. See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/setIamPolicy#body.request_body.FIELDS.update_mask
+
+ Incompatible with ``updateMask``.
+
+ Returns:
+ google.api_core.iam.Policy:
+ The updated access control policy.
+ """
+ if updateMask is not None and not fields:
+ update_mask = updateMask
+ elif updateMask is not None and fields:
+ raise ValueError("Cannot set both fields and updateMask")
+ elif fields:
+ update_mask = ",".join(fields)
+ else:
+ update_mask = None
+
+ table = _table_arg_to_table_ref(table, default_project=self.project)
+
+ if not isinstance(policy, (Policy)):
+ raise TypeError("policy must be a Policy")
+
+ body = {"policy": policy.to_api_repr()}
+
+ if update_mask is not None:
+ body["updateMask"] = update_mask
+
+ path = "{}:setIamPolicy".format(table.path)
+ span_attributes = {"path": path}
+
+ response = self._call_api(
+ retry,
+ span_name="BigQuery.setIamPolicy",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=body,
+ timeout=timeout,
+ )
+
+ return Policy.from_api_repr(response)
+
+ def test_iam_permissions(
+ self,
+ table: Union[Table, TableReference, TableListItem, str],
+ permissions: Sequence[str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Dict[str, Any]:
+ table = _table_arg_to_table_ref(table, default_project=self.project)
+
+ body = {"permissions": permissions}
+
+ path = "{}:testIamPermissions".format(table.path)
+ span_attributes = {"path": path}
+ response = self._call_api(
+ retry,
+ span_name="BigQuery.testIamPermissions",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=body,
+ timeout=timeout,
+ )
+
+ return response
+
+ def get_model(
+ self,
+ model_ref: Union[ModelReference, str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Model:
+ """[Beta] Fetch the model referenced by ``model_ref``.
+
+ Args:
+ model_ref (Union[ \
+ google.cloud.bigquery.model.ModelReference, \
+ str, \
+ ]):
+ A reference to the model to fetch from the BigQuery API.
+ If a string is passed in, this method attempts to create a
+ model reference from a string using
+ :func:`google.cloud.bigquery.model.ModelReference.from_string`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.model.Model: A ``Model`` instance.
+ """
+ if isinstance(model_ref, str):
+ model_ref = ModelReference.from_string(
+ model_ref, default_project=self.project
+ )
+ path = model_ref.path
+ span_attributes = {"path": path}
+
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.getModel",
+ span_attributes=span_attributes,
+ method="GET",
+ path=path,
+ timeout=timeout,
+ )
+ return Model.from_api_repr(api_response)
+
+ def get_routine(
+ self,
+ routine_ref: Union[Routine, RoutineReference, str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Routine:
+ """[Beta] Get the routine referenced by ``routine_ref``.
+
+ Args:
+ routine_ref (Union[ \
+ google.cloud.bigquery.routine.Routine, \
+ google.cloud.bigquery.routine.RoutineReference, \
+ str, \
+ ]):
+ A reference to the routine to fetch from the BigQuery API. If
+ a string is passed in, this method attempts to create a
+ reference from a string using
+ :func:`google.cloud.bigquery.routine.RoutineReference.from_string`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the API call.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.routine.Routine:
+ A ``Routine`` instance.
+ """
+ if isinstance(routine_ref, str):
+ routine_ref = RoutineReference.from_string(
+ routine_ref, default_project=self.project
+ )
+ path = routine_ref.path
+ span_attributes = {"path": path}
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.getRoutine",
+ span_attributes=span_attributes,
+ method="GET",
+ path=path,
+ timeout=timeout,
+ )
+ return Routine.from_api_repr(api_response)
+
+ def get_table(
+ self,
+ table: Union[Table, TableReference, TableListItem, str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Table:
+ """Fetch the table referenced by ``table``.
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ ]):
+ A reference to the table to fetch from the BigQuery API.
+ If a string is passed in, this method attempts to create a
+ table reference from a string using
+ :func:`google.cloud.bigquery.table.TableReference.from_string`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.table.Table:
+ A ``Table`` instance.
+ """
+ table_ref = _table_arg_to_table_ref(table, default_project=self.project)
+ path = table_ref.path
+ span_attributes = {"path": path}
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.getTable",
+ span_attributes=span_attributes,
+ method="GET",
+ path=path,
+ timeout=timeout,
+ )
+ return Table.from_api_repr(api_response)
+
+ def update_dataset(
+ self,
+ dataset: Dataset,
+ fields: Sequence[str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Dataset:
+ """Change some fields of a dataset.
+
+ Use ``fields`` to specify which fields to update. At least one field
+ must be provided. If a field is listed in ``fields`` and is ``None`` in
+ ``dataset``, it will be deleted.
+
+ If ``dataset.etag`` is not ``None``, the update will only
+ succeed if the dataset on the server has the same ETag. Thus
+ reading a dataset with ``get_dataset``, changing its fields,
+ and then passing it to ``update_dataset`` will ensure that the changes
+ will only be saved if no modifications to the dataset occurred
+ since the read.
+
+ Args:
+ dataset (google.cloud.bigquery.dataset.Dataset):
+ The dataset to update.
+ fields (Sequence[str]):
+ The properties of ``dataset`` to change. These are strings
+ corresponding to the properties of
+ :class:`~google.cloud.bigquery.dataset.Dataset`.
+
+ For example, to update the default expiration times, specify
+ both properties in the ``fields`` argument:
+
+ .. code-block:: python
+
+ bigquery_client.update_dataset(
+ dataset,
+ [
+ "default_partition_expiration_ms",
+ "default_table_expiration_ms",
+ ]
+ )
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.dataset.Dataset:
+ The modified ``Dataset`` instance.
+ """
+ partial = dataset._build_resource(fields)
+ if dataset.etag is not None:
+ headers: Optional[Dict[str, str]] = {"If-Match": dataset.etag}
+ else:
+ headers = None
+ path = dataset.path
+ span_attributes = {"path": path, "fields": fields}
+
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.updateDataset",
+ span_attributes=span_attributes,
+ method="PATCH",
+ path=path,
+ data=partial,
+ headers=headers,
+ timeout=timeout,
+ )
+ return Dataset.from_api_repr(api_response)
+
+ def update_model(
+ self,
+ model: Model,
+ fields: Sequence[str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Model:
+ """[Beta] Change some fields of a model.
+
+ Use ``fields`` to specify which fields to update. At least one field
+ must be provided. If a field is listed in ``fields`` and is ``None``
+ in ``model``, the field value will be deleted.
+
+ If ``model.etag`` is not ``None``, the update will only succeed if
+ the model on the server has the same ETag. Thus reading a model with
+ ``get_model``, changing its fields, and then passing it to
+ ``update_model`` will ensure that the changes will only be saved if
+ no modifications to the model occurred since the read.
+
+ Args:
+ model (google.cloud.bigquery.model.Model): The model to update.
+ fields (Sequence[str]):
+ The properties of ``model`` to change. These are strings
+ corresponding to the properties of
+ :class:`~google.cloud.bigquery.model.Model`.
+
+ For example, to update the descriptive properties of the model,
+ specify them in the ``fields`` argument:
+
+ .. code-block:: python
+
+ bigquery_client.update_model(
+ model, ["description", "friendly_name"]
+ )
+ retry (Optional[google.api_core.retry.Retry]):
+ A description of how to retry the API call.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.model.Model:
+ The model resource returned from the API call.
+ """
+ partial = model._build_resource(fields)
+ if model.etag:
+ headers: Optional[Dict[str, str]] = {"If-Match": model.etag}
+ else:
+ headers = None
+ path = model.path
+ span_attributes = {"path": path, "fields": fields}
+
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.updateModel",
+ span_attributes=span_attributes,
+ method="PATCH",
+ path=path,
+ data=partial,
+ headers=headers,
+ timeout=timeout,
+ )
+ return Model.from_api_repr(api_response)
+
+ def update_routine(
+ self,
+ routine: Routine,
+ fields: Sequence[str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Routine:
+ """[Beta] Change some fields of a routine.
+
+ Use ``fields`` to specify which fields to update. At least one field
+ must be provided. If a field is listed in ``fields`` and is ``None``
+ in ``routine``, the field value will be deleted.
+
+ .. warning::
+ During beta, partial updates are not supported. You must provide
+ all fields in the resource.
+
+ If :attr:`~google.cloud.bigquery.routine.Routine.etag` is not
+ ``None``, the update will only succeed if the resource on the server
+ has the same ETag. Thus reading a routine with
+ :func:`~google.cloud.bigquery.client.Client.get_routine`, changing
+ its fields, and then passing it to this method will ensure that the
+ changes will only be saved if no modifications to the resource
+ occurred since the read.
+
+ Args:
+ routine (google.cloud.bigquery.routine.Routine):
+ The routine to update.
+ fields (Sequence[str]):
+ The fields of ``routine`` to change, spelled as the
+ :class:`~google.cloud.bigquery.routine.Routine` properties.
+
+ For example, to update the description property of the routine,
+ specify it in the ``fields`` argument:
+
+ .. code-block:: python
+
+ bigquery_client.update_routine(
+ routine, ["description"]
+ )
+ retry (Optional[google.api_core.retry.Retry]):
+ A description of how to retry the API call.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.routine.Routine:
+ The routine resource returned from the API call.
+ """
+ partial = routine._build_resource(fields)
+ if routine.etag:
+ headers: Optional[Dict[str, str]] = {"If-Match": routine.etag}
+ else:
+ headers = None
+
+ # TODO: remove when routines update supports partial requests.
+ partial["routineReference"] = routine.reference.to_api_repr()
+
+ path = routine.path
+ span_attributes = {"path": path, "fields": fields}
+
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.updateRoutine",
+ span_attributes=span_attributes,
+ method="PUT",
+ path=path,
+ data=partial,
+ headers=headers,
+ timeout=timeout,
+ )
+ return Routine.from_api_repr(api_response)
+
+ def update_table(
+ self,
+ table: Table,
+ fields: Sequence[str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Table:
+ """Change some fields of a table.
+
+ Use ``fields`` to specify which fields to update. At least one field
+ must be provided. If a field is listed in ``fields`` and is ``None``
+ in ``table``, the field value will be deleted.
+
+ If ``table.etag`` is not ``None``, the update will only succeed if
+ the table on the server has the same ETag. Thus reading a table with
+ ``get_table``, changing its fields, and then passing it to
+ ``update_table`` will ensure that the changes will only be saved if
+ no modifications to the table occurred since the read.
+
+ Args:
+ table (google.cloud.bigquery.table.Table): The table to update.
+ fields (Sequence[str]):
+ The fields of ``table`` to change, spelled as the
+ :class:`~google.cloud.bigquery.table.Table` properties.
+
+ For example, to update the descriptive properties of the table,
+ specify them in the ``fields`` argument:
+
+ .. code-block:: python
+
+ bigquery_client.update_table(
+ table,
+ ["description", "friendly_name"]
+ )
+ retry (Optional[google.api_core.retry.Retry]):
+ A description of how to retry the API call.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.table.Table:
+ The table resource returned from the API call.
+ """
+ partial = table._build_resource(fields)
+ if table.etag is not None:
+ headers: Optional[Dict[str, str]] = {"If-Match": table.etag}
+ else:
+ headers = None
+
+ path = table.path
+ span_attributes = {"path": path, "fields": fields}
+
+ api_response = self._call_api(
+ retry,
+ span_name="BigQuery.updateTable",
+ span_attributes=span_attributes,
+ method="PATCH",
+ path=path,
+ data=partial,
+ headers=headers,
+ timeout=timeout,
+ )
+ return Table.from_api_repr(api_response)
+
+ def list_models(
+ self,
+ dataset: Union[Dataset, DatasetReference, DatasetListItem, str],
+ max_results: Optional[int] = None,
+ page_token: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ page_size: Optional[int] = None,
+ ) -> page_iterator.Iterator:
+ """[Beta] List models in the dataset.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/models/list
+
+ Args:
+ dataset (Union[ \
+ google.cloud.bigquery.dataset.Dataset, \
+ google.cloud.bigquery.dataset.DatasetReference, \
+ google.cloud.bigquery.dataset.DatasetListItem, \
+ str, \
+ ]):
+ A reference to the dataset whose models to list from the
+ BigQuery API. If a string is passed in, this method attempts
+ to create a dataset reference from a string using
+ :func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
+ max_results (Optional[int]):
+ Maximum number of models to return. Defaults to a
+ value set by the API.
+ page_token (Optional[str]):
+ Token representing a cursor into the models. If not passed,
+ the API will return the first page of models. The token marks
+ the beginning of the iterator to be returned and the value of
+ the ``page_token`` can be accessed at ``next_page_token`` of the
+ :class:`~google.api_core.page_iterator.HTTPIterator`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ page_size (Optional[int]):
+ Maximum number of models to return per page.
+ Defaults to a value set by the API.
+
+ Returns:
+ google.api_core.page_iterator.Iterator:
+ Iterator of
+ :class:`~google.cloud.bigquery.model.Model` contained
+ within the requested dataset.
+ """
+ dataset = self._dataset_from_arg(dataset)
+
+ path = "%s/models" % dataset.path
+ span_attributes = {"path": path}
+
+ def api_request(*args, **kwargs):
+ return self._call_api(
+ retry,
+ span_name="BigQuery.listModels",
+ span_attributes=span_attributes,
+ *args,
+ timeout=timeout,
+ **kwargs,
+ )
+
+ result = page_iterator.HTTPIterator(
+ client=self,
+ api_request=api_request,
+ path=path,
+ item_to_value=_item_to_model,
+ items_key="models",
+ page_token=page_token,
+ max_results=max_results,
+ page_size=page_size,
+ )
+ result.dataset = dataset # type: ignore
+ return result
+
+ def list_routines(
+ self,
+ dataset: Union[Dataset, DatasetReference, DatasetListItem, str],
+ max_results: Optional[int] = None,
+ page_token: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ page_size: Optional[int] = None,
+ ) -> page_iterator.Iterator:
+ """[Beta] List routines in the dataset.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/list
+
+ Args:
+ dataset (Union[ \
+ google.cloud.bigquery.dataset.Dataset, \
+ google.cloud.bigquery.dataset.DatasetReference, \
+ google.cloud.bigquery.dataset.DatasetListItem, \
+ str, \
+ ]):
+ A reference to the dataset whose routines to list from the
+ BigQuery API. If a string is passed in, this method attempts
+ to create a dataset reference from a string using
+ :func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
+ max_results (Optional[int]):
+ Maximum number of routines to return. Defaults
+ to a value set by the API.
+ page_token (Optional[str]):
+ Token representing a cursor into the routines. If not passed,
+ the API will return the first page of routines. The token marks
+ the beginning of the iterator to be returned and the value of the
+ ``page_token`` can be accessed at ``next_page_token`` of the
+ :class:`~google.api_core.page_iterator.HTTPIterator`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ page_size (Optional[int]):
+ Maximum number of routines to return per page.
+ Defaults to a value set by the API.
+
+ Returns:
+ google.api_core.page_iterator.Iterator:
+ Iterator of all
+ :class:`~google.cloud.bigquery.routine.Routine`s contained
+ within the requested dataset, limited by ``max_results``.
+ """
+ dataset = self._dataset_from_arg(dataset)
+ path = "{}/routines".format(dataset.path)
+
+ span_attributes = {"path": path}
+
+ def api_request(*args, **kwargs):
+ return self._call_api(
+ retry,
+ span_name="BigQuery.listRoutines",
+ span_attributes=span_attributes,
+ *args,
+ timeout=timeout,
+ **kwargs,
+ )
+
+ result = page_iterator.HTTPIterator(
+ client=self,
+ api_request=api_request,
+ path=path,
+ item_to_value=_item_to_routine,
+ items_key="routines",
+ page_token=page_token,
+ max_results=max_results,
+ page_size=page_size,
+ )
+ result.dataset = dataset # type: ignore
+ return result
+
+ def list_tables(
+ self,
+ dataset: Union[Dataset, DatasetReference, DatasetListItem, str],
+ max_results: Optional[int] = None,
+ page_token: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ page_size: Optional[int] = None,
+ ) -> page_iterator.Iterator:
+ """List tables in the dataset.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
+
+ Args:
+ dataset (Union[ \
+ google.cloud.bigquery.dataset.Dataset, \
+ google.cloud.bigquery.dataset.DatasetReference, \
+ google.cloud.bigquery.dataset.DatasetListItem, \
+ str, \
+ ]):
+ A reference to the dataset whose tables to list from the
+ BigQuery API. If a string is passed in, this method attempts
+ to create a dataset reference from a string using
+ :func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
+ max_results (Optional[int]):
+ Maximum number of tables to return. Defaults
+ to a value set by the API.
+ page_token (Optional[str]):
+ Token representing a cursor into the tables. If not passed,
+ the API will return the first page of tables. The token marks
+ the beginning of the iterator to be returned and the value of
+ the ``page_token`` can be accessed at ``next_page_token`` of the
+ :class:`~google.api_core.page_iterator.HTTPIterator`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ page_size (Optional[int]):
+ Maximum number of tables to return per page.
+ Defaults to a value set by the API.
+
+ Returns:
+ google.api_core.page_iterator.Iterator:
+ Iterator of
+ :class:`~google.cloud.bigquery.table.TableListItem` contained
+ within the requested dataset.
+ """
+ dataset = self._dataset_from_arg(dataset)
+ path = "%s/tables" % dataset.path
+ span_attributes = {"path": path}
+
+ def api_request(*args, **kwargs):
+ return self._call_api(
+ retry,
+ span_name="BigQuery.listTables",
+ span_attributes=span_attributes,
+ *args,
+ timeout=timeout,
+ **kwargs,
+ )
+
+ result = page_iterator.HTTPIterator(
+ client=self,
+ api_request=api_request,
+ path=path,
+ item_to_value=_item_to_table,
+ items_key="tables",
+ page_token=page_token,
+ max_results=max_results,
+ page_size=page_size,
+ )
+ result.dataset = dataset # type: ignore
+ return result
+
+ def delete_dataset(
+ self,
+ dataset: Union[Dataset, DatasetReference, DatasetListItem, str],
+ delete_contents: bool = False,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ not_found_ok: bool = False,
+ ) -> None:
+ """Delete a dataset.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete
+
+ Args:
+ dataset (Union[ \
+ google.cloud.bigquery.dataset.Dataset, \
+ google.cloud.bigquery.dataset.DatasetReference, \
+ google.cloud.bigquery.dataset.DatasetListItem, \
+ str, \
+ ]):
+ A reference to the dataset to delete. If a string is passed
+ in, this method attempts to create a dataset reference from a
+ string using
+ :func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
+ delete_contents (Optional[bool]):
+ If True, delete all the tables in the dataset. If False and
+ the dataset contains tables, the request will fail.
+ Default is False.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ not_found_ok (Optional[bool]):
+ Defaults to ``False``. If ``True``, ignore "not found" errors
+ when deleting the dataset.
+ """
+ dataset = self._dataset_from_arg(dataset)
+ params = {}
+ path = dataset.path
+ if delete_contents:
+ params["deleteContents"] = "true"
+ span_attributes = {"path": path, "deleteContents": delete_contents}
+ else:
+ span_attributes = {"path": path}
+
+ try:
+ self._call_api(
+ retry,
+ span_name="BigQuery.deleteDataset",
+ span_attributes=span_attributes,
+ method="DELETE",
+ path=path,
+ query_params=params,
+ timeout=timeout,
+ )
+ except core_exceptions.NotFound:
+ if not not_found_ok:
+ raise
+
+ def delete_model(
+ self,
+ model: Union[Model, ModelReference, str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ not_found_ok: bool = False,
+ ) -> None:
+ """[Beta] Delete a model
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/models/delete
+
+ Args:
+ model (Union[ \
+ google.cloud.bigquery.model.Model, \
+ google.cloud.bigquery.model.ModelReference, \
+ str, \
+ ]):
+ A reference to the model to delete. If a string is passed in,
+ this method attempts to create a model reference from a
+ string using
+ :func:`google.cloud.bigquery.model.ModelReference.from_string`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ not_found_ok (Optional[bool]):
+ Defaults to ``False``. If ``True``, ignore "not found" errors
+ when deleting the model.
+ """
+ if isinstance(model, str):
+ model = ModelReference.from_string(model, default_project=self.project)
+
+ if not isinstance(model, (Model, ModelReference)):
+ raise TypeError("model must be a Model or a ModelReference")
+
+ path = model.path
+ try:
+ span_attributes = {"path": path}
+ self._call_api(
+ retry,
+ span_name="BigQuery.deleteModel",
+ span_attributes=span_attributes,
+ method="DELETE",
+ path=path,
+ timeout=timeout,
+ )
+ except core_exceptions.NotFound:
+ if not not_found_ok:
+ raise
+
+ def delete_job_metadata(
+ self,
+ job_id: Union[str, LoadJob, CopyJob, ExtractJob, QueryJob],
+ project: Optional[str] = None,
+ location: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ not_found_ok: bool = False,
+ ):
+ """[Beta] Delete job metadata from job history.
+
+ Note: This does not stop a running job. Use
+ :func:`~google.cloud.bigquery.client.Client.cancel_job` instead.
+
+ Args:
+ job_id (Union[ \
+ str, \
+ LoadJob, \
+ CopyJob, \
+ ExtractJob, \
+ QueryJob \
+ ]): Job or job identifier.
+ project (Optional[str]):
+ ID of the project which owns the job (defaults to the client's project).
+ location (Optional[str]):
+ Location where the job was run. Ignored if ``job_id`` is a job
+ object.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ not_found_ok (Optional[bool]):
+ Defaults to ``False``. If ``True``, ignore "not found" errors
+ when deleting the job.
+ """
+ extra_params = {}
+
+ project, location, job_id = _extract_job_reference(
+ job_id, project=project, location=location
+ )
+
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ # Location is always required for jobs.delete()
+ extra_params["location"] = location
+
+ path = f"/projects/{project}/jobs/{job_id}/delete"
+
+ span_attributes = {"path": path, "job_id": job_id, "location": location}
+
+ try:
+ self._call_api(
+ retry,
+ span_name="BigQuery.deleteJob",
+ span_attributes=span_attributes,
+ method="DELETE",
+ path=path,
+ query_params=extra_params,
+ timeout=timeout,
+ )
+ except google.api_core.exceptions.NotFound:
+ if not not_found_ok:
+ raise
+
+ def delete_routine(
+ self,
+ routine: Union[Routine, RoutineReference, str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ not_found_ok: bool = False,
+ ) -> None:
+ """[Beta] Delete a routine.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/delete
+
+ Args:
+ routine (Union[ \
+ google.cloud.bigquery.routine.Routine, \
+ google.cloud.bigquery.routine.RoutineReference, \
+ str, \
+ ]):
+ A reference to the routine to delete. If a string is passed
+ in, this method attempts to create a routine reference from a
+ string using
+ :func:`google.cloud.bigquery.routine.RoutineReference.from_string`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ not_found_ok (Optional[bool]):
+ Defaults to ``False``. If ``True``, ignore "not found" errors
+ when deleting the routine.
+ """
+ if isinstance(routine, str):
+ routine = RoutineReference.from_string(
+ routine, default_project=self.project
+ )
+ path = routine.path
+
+ if not isinstance(routine, (Routine, RoutineReference)):
+ raise TypeError("routine must be a Routine or a RoutineReference")
+
+ try:
+ span_attributes = {"path": path}
+ self._call_api(
+ retry,
+ span_name="BigQuery.deleteRoutine",
+ span_attributes=span_attributes,
+ method="DELETE",
+ path=path,
+ timeout=timeout,
+ )
+ except core_exceptions.NotFound:
+ if not not_found_ok:
+ raise
+
+ def delete_table(
+ self,
+ table: Union[Table, TableReference, TableListItem, str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ not_found_ok: bool = False,
+ ) -> None:
+ """Delete a table
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/delete
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ ]):
+ A reference to the table to delete. If a string is passed in,
+ this method attempts to create a table reference from a
+ string using
+ :func:`google.cloud.bigquery.table.TableReference.from_string`.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ not_found_ok (Optional[bool]):
+ Defaults to ``False``. If ``True``, ignore "not found" errors
+ when deleting the table.
+ """
+ table = _table_arg_to_table_ref(table, default_project=self.project)
+ if not isinstance(table, TableReference):
+ raise TypeError("Unable to get TableReference for table '{}'".format(table))
+
+ try:
+ path = table.path
+ span_attributes = {"path": path}
+ self._call_api(
+ retry,
+ span_name="BigQuery.deleteTable",
+ span_attributes=span_attributes,
+ method="DELETE",
+ path=path,
+ timeout=timeout,
+ )
+ except core_exceptions.NotFound:
+ if not not_found_ok:
+ raise
+
+ def _get_query_results(
+ self,
+ job_id: str,
+ retry: retries.Retry,
+ project: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ location: Optional[str] = None,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ page_size: int = 0,
+ ) -> _QueryResults:
+ """Get the query results object for a query job.
+
+ Args:
+ job_id (str): Name of the query job.
+ retry (google.api_core.retry.Retry):
+ How to retry the RPC.
+ project (Optional[str]):
+ Project ID for the query job (defaults to the project of the client).
+ timeout_ms (Optional[int]):
+ Number of milliseconds the the API call should wait for the query
+ to complete before the request times out.
+ location (Optional[str]): Location of the query job.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``. If set, this connection timeout may be
+ increased to a minimum value. This prevents retries on what
+ would otherwise be a successful response.
+ page_size (int):
+ Maximum number of rows in a single response. See maxResults in
+ the jobs.getQueryResults REST API.
+
+ Returns:
+ google.cloud.bigquery.query._QueryResults:
+ A new ``_QueryResults`` instance.
+ """
+
+ extra_params: Dict[str, Any] = {"maxResults": page_size}
+
+ if timeout is not None:
+ if not isinstance(timeout, (int, float)):
+ timeout = _MIN_GET_QUERY_RESULTS_TIMEOUT
+ else:
+ timeout = max(timeout, _MIN_GET_QUERY_RESULTS_TIMEOUT)
+
+ if page_size > 0:
+ extra_params["formatOptions.useInt64Timestamp"] = True
+
+ if project is None:
+ project = self.project
+
+ if timeout_ms is not None:
+ extra_params["timeoutMs"] = timeout_ms
+
+ if location is None:
+ location = self.location
+
+ if location is not None:
+ extra_params["location"] = location
+
+ path = "/projects/{}/queries/{}".format(project, job_id)
+
+ # This call is typically made in a polling loop that checks whether the
+ # job is complete (from QueryJob.done(), called ultimately from
+ # QueryJob.result()). So we don't need to poll here.
+ span_attributes = {"path": path}
+ resource = self._call_api(
+ retry,
+ span_name="BigQuery.getQueryResults",
+ span_attributes=span_attributes,
+ method="GET",
+ path=path,
+ query_params=extra_params,
+ timeout=timeout,
+ )
+ return _QueryResults.from_api_repr(resource)
+
+ def job_from_resource(
+ self, resource: dict
+ ) -> Union[job.CopyJob, job.ExtractJob, job.LoadJob, job.QueryJob, job.UnknownJob]:
+ """Detect correct job type from resource and instantiate.
+
+ Args:
+ resource (Dict): one job resource from API response
+
+ Returns:
+ Union[job.CopyJob, job.ExtractJob, job.LoadJob, job.QueryJob, job.UnknownJob]:
+ The job instance, constructed via the resource.
+ """
+ config = resource.get("configuration", {})
+ if "load" in config:
+ return job.LoadJob.from_api_repr(resource, self)
+ elif "copy" in config:
+ return job.CopyJob.from_api_repr(resource, self)
+ elif "extract" in config:
+ return job.ExtractJob.from_api_repr(resource, self)
+ elif "query" in config:
+ return job.QueryJob.from_api_repr(resource, self)
+ return job.UnknownJob.from_api_repr(resource, self)
+
+ def create_job(
+ self,
+ job_config: dict,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Union[job.LoadJob, job.CopyJob, job.ExtractJob, job.QueryJob]:
+ """Create a new job.
+
+ Args:
+ job_config (dict): configuration job representation returned from the API.
+ retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ Union[ \
+ google.cloud.bigquery.job.LoadJob, \
+ google.cloud.bigquery.job.CopyJob, \
+ google.cloud.bigquery.job.ExtractJob, \
+ google.cloud.bigquery.job.QueryJob \
+ ]:
+ A new job instance.
+ """
+
+ if "load" in job_config:
+ load_job_config = google.cloud.bigquery.job.LoadJobConfig.from_api_repr(
+ job_config
+ )
+ destination = _get_sub_prop(job_config, ["load", "destinationTable"])
+ source_uris = _get_sub_prop(job_config, ["load", "sourceUris"])
+ destination = TableReference.from_api_repr(destination)
+ return self.load_table_from_uri(
+ source_uris,
+ destination,
+ job_config=typing.cast(LoadJobConfig, load_job_config),
+ retry=retry,
+ timeout=timeout,
+ )
+ elif "copy" in job_config:
+ copy_job_config = google.cloud.bigquery.job.CopyJobConfig.from_api_repr(
+ job_config
+ )
+ destination = _get_sub_prop(job_config, ["copy", "destinationTable"])
+ destination = TableReference.from_api_repr(destination)
+ return self.copy_table(
+ [], # Source table(s) already in job_config resource.
+ destination,
+ job_config=typing.cast(CopyJobConfig, copy_job_config),
+ retry=retry,
+ timeout=timeout,
+ )
+ elif "extract" in job_config:
+ extract_job_config = (
+ google.cloud.bigquery.job.ExtractJobConfig.from_api_repr(job_config)
+ )
+ source = _get_sub_prop(job_config, ["extract", "sourceTable"])
+ if source:
+ source_type = "Table"
+ source = TableReference.from_api_repr(source)
+ else:
+ source = _get_sub_prop(job_config, ["extract", "sourceModel"])
+ source_type = "Model"
+ source = ModelReference.from_api_repr(source)
+ destination_uris = _get_sub_prop(job_config, ["extract", "destinationUris"])
+ return self.extract_table(
+ source,
+ destination_uris,
+ job_config=typing.cast(ExtractJobConfig, extract_job_config),
+ retry=retry,
+ timeout=timeout,
+ source_type=source_type,
+ )
+ elif "query" in job_config:
+ query_job_config = google.cloud.bigquery.job.QueryJobConfig.from_api_repr(
+ job_config
+ )
+ query = _get_sub_prop(job_config, ["query", "query"])
+ return self.query(
+ query,
+ job_config=typing.cast(QueryJobConfig, query_job_config),
+ retry=retry,
+ timeout=timeout,
+ )
+ else:
+ raise TypeError("Invalid job configuration received.")
+
+ def get_job(
+ self,
+ job_id: Union[str, job.LoadJob, job.CopyJob, job.ExtractJob, job.QueryJob],
+ project: Optional[str] = None,
+ location: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_GET_JOB_TIMEOUT,
+ ) -> Union[job.LoadJob, job.CopyJob, job.ExtractJob, job.QueryJob, job.UnknownJob]:
+ """Fetch a job for the project associated with this client.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
+
+ Args:
+ job_id (Union[ \
+ str, \
+ job.LoadJob, \
+ job.CopyJob, \
+ job.ExtractJob, \
+ job.QueryJob \
+ ]):
+ Job identifier.
+ project (Optional[str]):
+ ID of the project which owns the job (defaults to the client's project).
+ location (Optional[str]):
+ Location where the job was run. Ignored if ``job_id`` is a job
+ object.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ Union[job.LoadJob, job.CopyJob, job.ExtractJob, job.QueryJob, job.UnknownJob]:
+ Job instance, based on the resource returned by the API.
+ """
+ extra_params = {"projection": "full"}
+
+ project, location, job_id = _extract_job_reference(
+ job_id, project=project, location=location
+ )
+
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ if location is not None:
+ extra_params["location"] = location
+
+ path = "/projects/{}/jobs/{}".format(project, job_id)
+
+ span_attributes = {"path": path, "job_id": job_id, "location": location}
+
+ resource = self._call_api(
+ retry,
+ span_name="BigQuery.getJob",
+ span_attributes=span_attributes,
+ method="GET",
+ path=path,
+ query_params=extra_params,
+ timeout=timeout,
+ )
+
+ return self.job_from_resource(resource)
+
+ def cancel_job(
+ self,
+ job_id: str,
+ project: Optional[str] = None,
+ location: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Union[job.LoadJob, job.CopyJob, job.ExtractJob, job.QueryJob]:
+ """Attempt to cancel a job from a job ID.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel
+
+ Args:
+ job_id (Union[ \
+ str, \
+ google.cloud.bigquery.job.LoadJob, \
+ google.cloud.bigquery.job.CopyJob, \
+ google.cloud.bigquery.job.ExtractJob, \
+ google.cloud.bigquery.job.QueryJob \
+ ]): Job identifier.
+ project (Optional[str]):
+ ID of the project which owns the job (defaults to the client's project).
+ location (Optional[str]):
+ Location where the job was run. Ignored if ``job_id`` is a job
+ object.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ Union[ \
+ google.cloud.bigquery.job.LoadJob, \
+ google.cloud.bigquery.job.CopyJob, \
+ google.cloud.bigquery.job.ExtractJob, \
+ google.cloud.bigquery.job.QueryJob, \
+ ]:
+ Job instance, based on the resource returned by the API.
+ """
+ extra_params = {"projection": "full"}
+
+ project, location, job_id = _extract_job_reference(
+ job_id, project=project, location=location
+ )
+
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ if location is not None:
+ extra_params["location"] = location
+
+ path = "/projects/{}/jobs/{}/cancel".format(project, job_id)
+
+ span_attributes = {"path": path, "job_id": job_id, "location": location}
+
+ resource = self._call_api(
+ retry,
+ span_name="BigQuery.cancelJob",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ query_params=extra_params,
+ timeout=timeout,
+ )
+
+ job_instance = self.job_from_resource(resource["job"]) # never an UnknownJob
+
+ return typing.cast(
+ Union[job.LoadJob, job.CopyJob, job.ExtractJob, job.QueryJob],
+ job_instance,
+ )
+
+ def list_jobs(
+ self,
+ project: Optional[str] = None,
+ parent_job: Optional[Union[QueryJob, str]] = None,
+ max_results: Optional[int] = None,
+ page_token: Optional[str] = None,
+ all_users: Optional[bool] = None,
+ state_filter: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ min_creation_time: Optional[datetime.datetime] = None,
+ max_creation_time: Optional[datetime.datetime] = None,
+ page_size: Optional[int] = None,
+ ) -> page_iterator.Iterator:
+ """List jobs for the project associated with this client.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/list
+
+ Args:
+ project (Optional[str]):
+ Project ID to use for retreiving datasets. Defaults
+ to the client's project.
+ parent_job (Optional[Union[ \
+ google.cloud.bigquery.job._AsyncJob, \
+ str, \
+ ]]):
+ If set, retrieve only child jobs of the specified parent.
+ max_results (Optional[int]):
+ Maximum number of jobs to return.
+ page_token (Optional[str]):
+ Opaque marker for the next "page" of jobs. If not
+ passed, the API will return the first page of jobs. The token
+ marks the beginning of the iterator to be returned and the
+ value of the ``page_token`` can be accessed at
+ ``next_page_token`` of
+ :class:`~google.api_core.page_iterator.HTTPIterator`.
+ all_users (Optional[bool]):
+ If true, include jobs owned by all users in the project.
+ Defaults to :data:`False`.
+ state_filter (Optional[str]):
+ If set, include only jobs matching the given state. One of:
+ * ``"done"``
+ * ``"pending"``
+ * ``"running"``
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ min_creation_time (Optional[datetime.datetime]):
+ Min value for job creation time. If set, only jobs created
+ after or at this timestamp are returned. If the datetime has
+ no time zone assumes UTC time.
+ max_creation_time (Optional[datetime.datetime]):
+ Max value for job creation time. If set, only jobs created
+ before or at this timestamp are returned. If the datetime has
+ no time zone assumes UTC time.
+ page_size (Optional[int]):
+ Maximum number of jobs to return per page.
+
+ Returns:
+ google.api_core.page_iterator.Iterator:
+ Iterable of job instances.
+ """
+ if isinstance(parent_job, job._AsyncJob):
+ parent_job = parent_job.job_id # pytype: disable=attribute-error
+
+ extra_params = {
+ "allUsers": all_users,
+ "stateFilter": state_filter,
+ "minCreationTime": _str_or_none(
+ google.cloud._helpers._millis_from_datetime(min_creation_time)
+ ),
+ "maxCreationTime": _str_or_none(
+ google.cloud._helpers._millis_from_datetime(max_creation_time)
+ ),
+ "projection": "full",
+ "parentJobId": parent_job,
+ }
+
+ extra_params = {
+ param: value for param, value in extra_params.items() if value is not None
+ }
+
+ if project is None:
+ project = self.project
+
+ path = "/projects/%s/jobs" % (project,)
+
+ span_attributes = {"path": path}
+
+ def api_request(*args, **kwargs):
+ return self._call_api(
+ retry,
+ span_name="BigQuery.listJobs",
+ span_attributes=span_attributes,
+ *args,
+ timeout=timeout,
+ **kwargs,
+ )
+
+ return page_iterator.HTTPIterator(
+ client=self,
+ api_request=api_request,
+ path=path,
+ item_to_value=_item_to_job,
+ items_key="jobs",
+ page_token=page_token,
+ max_results=max_results,
+ extra_params=extra_params,
+ page_size=page_size,
+ )
+
+ def load_table_from_uri(
+ self,
+ source_uris: Union[str, Sequence[str]],
+ destination: Union[Table, TableReference, TableListItem, str],
+ job_id: Optional[str] = None,
+ job_id_prefix: Optional[str] = None,
+ location: Optional[str] = None,
+ project: Optional[str] = None,
+ job_config: Optional[LoadJobConfig] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> job.LoadJob:
+ """Starts a job for loading data into a table from Cloud Storage.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload
+
+ Args:
+ source_uris (Union[str, Sequence[str]]):
+ URIs of data files to be loaded; in format
+ ``gs:///``.
+ destination (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ ]):
+ Table into which data is to be loaded. If a string is passed
+ in, this method attempts to create a table reference from a
+ string using
+ :func:`google.cloud.bigquery.table.TableReference.from_string`.
+ job_id (Optional[str]): Name of the job.
+ job_id_prefix (Optional[str]):
+ The user-provided prefix for a randomly generated job ID.
+ This parameter will be ignored if a ``job_id`` is also given.
+ location (Optional[str]):
+ Location where to run the job. Must match the location of the
+ destination table.
+ project (Optional[str]):
+ Project ID of the project of where to run the job. Defaults
+ to the client's project.
+ job_config (Optional[google.cloud.bigquery.job.LoadJobConfig]):
+ Extra configuration options for the job.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.job.LoadJob: A new load job.
+
+ Raises:
+ TypeError:
+ If ``job_config`` is not an instance of
+ :class:`~google.cloud.bigquery.job.LoadJobConfig` class.
+ """
+ job_id = _make_job_id(job_id, job_id_prefix)
+
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ job_ref = job._JobReference(job_id, project=project, location=location)
+
+ if isinstance(source_uris, str):
+ source_uris = [source_uris]
+
+ destination = _table_arg_to_table_ref(destination, default_project=self.project)
+
+ if job_config is not None:
+ _verify_job_config_type(job_config, LoadJobConfig)
+ else:
+ job_config = job.LoadJobConfig()
+
+ new_job_config = job_config._fill_from_default(self._default_load_job_config)
+
+ load_job = job.LoadJob(job_ref, source_uris, destination, self, new_job_config)
+ load_job._begin(retry=retry, timeout=timeout)
+
+ return load_job
+
+ def load_table_from_file(
+ self,
+ file_obj: IO[bytes],
+ destination: Union[Table, TableReference, TableListItem, str],
+ rewind: bool = False,
+ size: Optional[int] = None,
+ num_retries: int = _DEFAULT_NUM_RETRIES,
+ job_id: Optional[str] = None,
+ job_id_prefix: Optional[str] = None,
+ location: Optional[str] = None,
+ project: Optional[str] = None,
+ job_config: Optional[LoadJobConfig] = None,
+ timeout: ResumableTimeoutType = DEFAULT_TIMEOUT,
+ ) -> job.LoadJob:
+ """Upload the contents of this table from a file-like object.
+
+ Similar to :meth:`load_table_from_uri`, this method creates, starts and
+ returns a :class:`~google.cloud.bigquery.job.LoadJob`.
+
+ Args:
+ file_obj (IO[bytes]):
+ A file handle opened in binary mode for reading.
+ destination (Union[Table, \
+ TableReference, \
+ TableListItem, \
+ str \
+ ]):
+ Table into which data is to be loaded. If a string is passed
+ in, this method attempts to create a table reference from a
+ string using
+ :func:`google.cloud.bigquery.table.TableReference.from_string`.
+ rewind (Optional[bool]):
+ If True, seek to the beginning of the file handle before
+ reading the file. Defaults to False.
+ size (Optional[int]):
+ The number of bytes to read from the file handle. If size is
+ ``None`` or large, resumable upload will be used. Otherwise,
+ multipart upload will be used.
+ num_retries (Optional[int]): Number of upload retries. Defaults to 6.
+ job_id (Optional[str]): Name of the job.
+ job_id_prefix (Optional[str]):
+ The user-provided prefix for a randomly generated job ID.
+ This parameter will be ignored if a ``job_id`` is also given.
+ location (Optional[str]):
+ Location where to run the job. Must match the location of the
+ destination table.
+ project (Optional[str]):
+ Project ID of the project of where to run the job. Defaults
+ to the client's project.
+ job_config (Optional[LoadJobConfig]):
+ Extra configuration options for the job.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``. Depending on the retry strategy, a request
+ may be repeated several times using the same timeout each time.
+ Defaults to None.
+
+ Can also be passed as a tuple (connect_timeout, read_timeout).
+ See :meth:`requests.Session.request` documentation for details.
+
+ Returns:
+ google.cloud.bigquery.job.LoadJob: A new load job.
+
+ Raises:
+ ValueError:
+ If ``size`` is not passed in and can not be determined, or if
+ the ``file_obj`` can be detected to be a file opened in text
+ mode.
+
+ TypeError:
+ If ``job_config`` is not an instance of
+ :class:`~google.cloud.bigquery.job.LoadJobConfig` class.
+ """
+ job_id = _make_job_id(job_id, job_id_prefix)
+
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ destination = _table_arg_to_table_ref(destination, default_project=self.project)
+ job_ref = job._JobReference(job_id, project=project, location=location)
+
+ if job_config is not None:
+ _verify_job_config_type(job_config, LoadJobConfig)
+ else:
+ job_config = job.LoadJobConfig()
+
+ new_job_config = job_config._fill_from_default(self._default_load_job_config)
+
+ load_job = job.LoadJob(job_ref, None, destination, self, new_job_config)
+ job_resource = load_job.to_api_repr()
+
+ if rewind:
+ file_obj.seek(0, os.SEEK_SET)
+
+ _check_mode(file_obj)
+
+ try:
+ if size is None or size >= _MAX_MULTIPART_SIZE:
+ response = self._do_resumable_upload(
+ file_obj, job_resource, num_retries, timeout, project=project
+ )
+ else:
+ response = self._do_multipart_upload(
+ file_obj, job_resource, size, num_retries, timeout, project=project
+ )
+ except resumable_media.InvalidResponse as exc:
+ raise exceptions.from_http_response(exc.response)
+
+ return typing.cast(LoadJob, self.job_from_resource(response.json()))
+
+ def load_table_from_dataframe(
+ self,
+ dataframe: "pandas.DataFrame", # type: ignore
+ destination: Union[Table, TableReference, str],
+ num_retries: int = _DEFAULT_NUM_RETRIES,
+ job_id: Optional[str] = None,
+ job_id_prefix: Optional[str] = None,
+ location: Optional[str] = None,
+ project: Optional[str] = None,
+ job_config: Optional[LoadJobConfig] = None,
+ parquet_compression: str = "snappy",
+ timeout: ResumableTimeoutType = DEFAULT_TIMEOUT,
+ ) -> job.LoadJob:
+ """Upload the contents of a table from a pandas DataFrame.
+
+ Similar to :meth:`load_table_from_uri`, this method creates, starts and
+ returns a :class:`~google.cloud.bigquery.job.LoadJob`.
+
+ .. note::
+
+ REPEATED fields are NOT supported when using the CSV source format.
+ They are supported when using the PARQUET source format, but
+ due to the way they are encoded in the ``parquet`` file,
+ a mismatch with the existing table schema can occur, so
+ REPEATED fields are not properly supported when using ``pyarrow<4.0.0``
+ using the parquet format.
+
+ https://github.com/googleapis/python-bigquery/issues/19
+
+ Args:
+ dataframe (pandas.Dataframe):
+ A :class:`~pandas.DataFrame` containing the data to load.
+ destination (Union[ \
+ Table, \
+ TableReference, \
+ str \
+ ]):
+ The destination table to use for loading the data. If it is an
+ existing table, the schema of the :class:`~pandas.DataFrame`
+ must match the schema of the destination table. If the table
+ does not yet exist, the schema is inferred from the
+ :class:`~pandas.DataFrame`.
+
+ If a string is passed in, this method attempts to create a
+ table reference from a string using
+ :func:`google.cloud.bigquery.table.TableReference.from_string`.
+ num_retries (Optional[int]): Number of upload retries. Defaults to 6.
+ job_id (Optional[str]): Name of the job.
+ job_id_prefix (Optional[str]):
+ The user-provided prefix for a randomly generated
+ job ID. This parameter will be ignored if a ``job_id`` is
+ also given.
+ location (Optional[str]):
+ Location where to run the job. Must match the location of the
+ destination table.
+ project (Optional[str]):
+ Project ID of the project of where to run the job. Defaults
+ to the client's project.
+ job_config (Optional[LoadJobConfig]):
+ Extra configuration options for the job.
+
+ To override the default pandas data type conversions, supply
+ a value for
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.schema` with
+ column names matching those of the dataframe. The BigQuery
+ schema is used to determine the correct data type conversion.
+ Indexes are not loaded.
+
+ By default, this method uses the parquet source format. To
+ override this, supply a value for
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.source_format`
+ with the format name. Currently only
+ :attr:`~google.cloud.bigquery.job.SourceFormat.CSV` and
+ :attr:`~google.cloud.bigquery.job.SourceFormat.PARQUET` are
+ supported.
+ parquet_compression (Optional[str]):
+ [Beta] The compression method to use if intermittently
+ serializing ``dataframe`` to a parquet file.
+ Defaults to "snappy".
+
+ The argument is directly passed as the ``compression``
+ argument to the underlying ``pyarrow.parquet.write_table()``
+ method (the default value "snappy" gets converted to uppercase).
+ https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html#pyarrow-parquet-write-table
+
+ If the job config schema is missing, the argument is directly
+ passed as the ``compression`` argument to the underlying
+ ``DataFrame.to_parquet()`` method.
+ https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_parquet.html#pandas.DataFrame.to_parquet
+ timeout (Optional[flaot]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``. Depending on the retry strategy, a request may
+ be repeated several times using the same timeout each time.
+ Defaults to None.
+
+ Can also be passed as a tuple (connect_timeout, read_timeout).
+ See :meth:`requests.Session.request` documentation for details.
+
+ Returns:
+ google.cloud.bigquery.job.LoadJob: A new load job.
+
+ Raises:
+ ValueError:
+ If a usable parquet engine cannot be found. This method
+ requires :mod:`pyarrow` to be installed.
+ TypeError:
+ If ``job_config`` is not an instance of
+ :class:`~google.cloud.bigquery.job.LoadJobConfig` class.
+ """
+ job_id = _make_job_id(job_id, job_id_prefix)
+
+ if job_config is not None:
+ _verify_job_config_type(job_config, LoadJobConfig)
+ else:
+ job_config = job.LoadJobConfig()
+
+ new_job_config = job_config._fill_from_default(self._default_load_job_config)
+
+ supported_formats = {job.SourceFormat.CSV, job.SourceFormat.PARQUET}
+ if new_job_config.source_format is None:
+ # default value
+ new_job_config.source_format = job.SourceFormat.PARQUET
+
+ if (
+ new_job_config.source_format == job.SourceFormat.PARQUET
+ and new_job_config.parquet_options is None
+ ):
+ parquet_options = ParquetOptions()
+ # default value
+ parquet_options.enable_list_inference = True
+ new_job_config.parquet_options = parquet_options
+
+ if new_job_config.source_format not in supported_formats:
+ raise ValueError(
+ "Got unexpected source_format: '{}'. Currently, only PARQUET and CSV are supported".format(
+ new_job_config.source_format
+ )
+ )
+
+ if pyarrow is None and new_job_config.source_format == job.SourceFormat.PARQUET:
+ # pyarrow is now the only supported parquet engine.
+ raise ValueError("This method requires pyarrow to be installed")
+
+ if location is None:
+ location = self.location
+
+ # If table schema is not provided, we try to fetch the existing table
+ # schema, and check if dataframe schema is compatible with it - except
+ # for WRITE_TRUNCATE jobs, the existing schema does not matter then.
+ if (
+ not new_job_config.schema
+ and new_job_config.write_disposition != job.WriteDisposition.WRITE_TRUNCATE
+ ):
+ try:
+ table = self.get_table(destination)
+ except core_exceptions.NotFound:
+ pass
+ else:
+ columns_and_indexes = frozenset(
+ name
+ for name, _ in _pandas_helpers.list_columns_and_indexes(dataframe)
+ )
+ new_job_config.schema = [
+ # Field description and policy tags are not needed to
+ # serialize a data frame.
+ SchemaField(
+ field.name,
+ field.field_type,
+ mode=field.mode,
+ fields=field.fields,
+ )
+ # schema fields not present in the dataframe are not needed
+ for field in table.schema
+ if field.name in columns_and_indexes
+ ]
+
+ new_job_config.schema = _pandas_helpers.dataframe_to_bq_schema(
+ dataframe, new_job_config.schema
+ )
+
+ if not new_job_config.schema:
+ # the schema could not be fully detected
+ warnings.warn(
+ "Schema could not be detected for all columns. Loading from a "
+ "dataframe without a schema will be deprecated in the future, "
+ "please provide a schema.",
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+
+ tmpfd, tmppath = tempfile.mkstemp(
+ suffix="_job_{}.{}".format(job_id[:8], new_job_config.source_format.lower())
+ )
+ os.close(tmpfd)
+
+ try:
+ if new_job_config.source_format == job.SourceFormat.PARQUET:
+ if new_job_config.schema:
+ if parquet_compression == "snappy": # adjust the default value
+ parquet_compression = parquet_compression.upper()
+
+ _pandas_helpers.dataframe_to_parquet(
+ dataframe,
+ new_job_config.schema,
+ tmppath,
+ parquet_compression=parquet_compression,
+ parquet_use_compliant_nested_type=True,
+ )
+ else:
+ dataframe.to_parquet(
+ tmppath,
+ engine="pyarrow",
+ compression=parquet_compression,
+ **(
+ {"use_compliant_nested_type": True}
+ if _versions_helpers.PYARROW_VERSIONS.use_compliant_nested_type
+ else {}
+ ),
+ )
+
+ else:
+ dataframe.to_csv(
+ tmppath,
+ index=False,
+ header=False,
+ encoding="utf-8",
+ float_format="%.17g",
+ date_format="%Y-%m-%d %H:%M:%S.%f",
+ )
+
+ with open(tmppath, "rb") as tmpfile:
+ file_size = os.path.getsize(tmppath)
+ return self.load_table_from_file(
+ tmpfile,
+ destination,
+ num_retries=num_retries,
+ rewind=True,
+ size=file_size,
+ job_id=job_id,
+ job_id_prefix=job_id_prefix,
+ location=location,
+ project=project,
+ job_config=new_job_config,
+ timeout=timeout,
+ )
+
+ finally:
+ os.remove(tmppath)
+
+ def load_table_from_json(
+ self,
+ json_rows: Iterable[Dict[str, Any]],
+ destination: Union[Table, TableReference, TableListItem, str],
+ num_retries: int = _DEFAULT_NUM_RETRIES,
+ job_id: Optional[str] = None,
+ job_id_prefix: Optional[str] = None,
+ location: Optional[str] = None,
+ project: Optional[str] = None,
+ job_config: Optional[LoadJobConfig] = None,
+ timeout: ResumableTimeoutType = DEFAULT_TIMEOUT,
+ ) -> job.LoadJob:
+ """Upload the contents of a table from a JSON string or dict.
+
+ Args:
+ json_rows (Iterable[Dict[str, Any]]):
+ Row data to be inserted. Keys must match the table schema fields
+ and values must be JSON-compatible representations.
+
+ .. note::
+
+ If your data is already a newline-delimited JSON string,
+ it is best to wrap it into a file-like object and pass it
+ to :meth:`~google.cloud.bigquery.client.Client.load_table_from_file`::
+
+ import io
+ from google.cloud import bigquery
+
+ data = u'{"foo": "bar"}'
+ data_as_file = io.StringIO(data)
+
+ client = bigquery.Client()
+ client.load_table_from_file(data_as_file, ...)
+
+ destination (Union[ \
+ Table, \
+ TableReference, \
+ TableListItem, \
+ str \
+ ]):
+ Table into which data is to be loaded. If a string is passed
+ in, this method attempts to create a table reference from a
+ string using
+ :func:`google.cloud.bigquery.table.TableReference.from_string`.
+ num_retries (Optional[int]): Number of upload retries. Defaults to 6.
+ job_id (Optional[str]): Name of the job.
+ job_id_prefix (Optional[str]):
+ The user-provided prefix for a randomly generated job ID.
+ This parameter will be ignored if a ``job_id`` is also given.
+ location (Optional[str]):
+ Location where to run the job. Must match the location of the
+ destination table.
+ project (Optional[str]):
+ Project ID of the project of where to run the job. Defaults
+ to the client's project.
+ job_config (Optional[LoadJobConfig]):
+ Extra configuration options for the job. The ``source_format``
+ setting is always set to
+ :attr:`~google.cloud.bigquery.job.SourceFormat.NEWLINE_DELIMITED_JSON`.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``. Depending on the retry strategy, a request may
+ be repeated several times using the same timeout each time.
+ Defaults to None.
+
+ Can also be passed as a tuple (connect_timeout, read_timeout).
+ See :meth:`requests.Session.request` documentation for details.
+
+ Returns:
+ google.cloud.bigquery.job.LoadJob: A new load job.
+
+ Raises:
+ TypeError:
+ If ``job_config`` is not an instance of
+ :class:`~google.cloud.bigquery.job.LoadJobConfig` class.
+ """
+ job_id = _make_job_id(job_id, job_id_prefix)
+
+ if job_config is not None:
+ _verify_job_config_type(job_config, LoadJobConfig)
+ else:
+ job_config = job.LoadJobConfig()
+
+ new_job_config = job_config._fill_from_default(self._default_load_job_config)
+
+ new_job_config.source_format = job.SourceFormat.NEWLINE_DELIMITED_JSON
+
+ # In specific conditions, we check if the table alread exists, and/or
+ # set the autodetect value for the user. For exact conditions, see table
+ # https://github.com/googleapis/python-bigquery/issues/1228#issuecomment-1910946297
+ if new_job_config.schema is None and new_job_config.autodetect is None:
+ if new_job_config.write_disposition in (
+ job.WriteDisposition.WRITE_TRUNCATE,
+ job.WriteDisposition.WRITE_EMPTY,
+ ):
+ new_job_config.autodetect = True
+ else:
+ try:
+ self.get_table(destination)
+ except core_exceptions.NotFound:
+ new_job_config.autodetect = True
+ else:
+ new_job_config.autodetect = False
+
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ destination = _table_arg_to_table_ref(destination, default_project=self.project)
+
+ data_str = "\n".join(json.dumps(item, ensure_ascii=False) for item in json_rows)
+ encoded_str = data_str.encode()
+ data_file = io.BytesIO(encoded_str)
+ return self.load_table_from_file(
+ data_file,
+ destination,
+ size=len(encoded_str),
+ num_retries=num_retries,
+ job_id=job_id,
+ job_id_prefix=job_id_prefix,
+ location=location,
+ project=project,
+ job_config=new_job_config,
+ timeout=timeout,
+ )
+
+ def _do_resumable_upload(
+ self,
+ stream: IO[bytes],
+ metadata: Mapping[str, str],
+ num_retries: int,
+ timeout: Optional[ResumableTimeoutType],
+ project: Optional[str] = None,
+ ) -> "requests.Response":
+ """Perform a resumable upload.
+
+ Args:
+ stream (IO[bytes]): A bytes IO object open for reading.
+ metadata (Mapping[str, str]): The metadata associated with the upload.
+ num_retries (int):
+ Number of upload retries. (Deprecated: This
+ argument will be removed in a future release.)
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``. Depending on the retry strategy, a request may
+ be repeated several times using the same timeout each time.
+
+ Can also be passed as a tuple (connect_timeout, read_timeout).
+ See :meth:`requests.Session.request` documentation for details.
+ project (Optional[str]):
+ Project ID of the project of where to run the upload. Defaults
+ to the client's project.
+
+ Returns:
+ The "200 OK" response object returned after the final chunk
+ is uploaded.
+ """
+ upload, transport = self._initiate_resumable_upload(
+ stream, metadata, num_retries, timeout, project=project
+ )
+
+ while not upload.finished:
+ response = upload.transmit_next_chunk(transport, timeout=timeout)
+
+ return response
+
+ def _initiate_resumable_upload(
+ self,
+ stream: IO[bytes],
+ metadata: Mapping[str, str],
+ num_retries: int,
+ timeout: Optional[ResumableTimeoutType],
+ project: Optional[str] = None,
+ ):
+ """Initiate a resumable upload.
+
+ Args:
+ stream (IO[bytes]): A bytes IO object open for reading.
+ metadata (Mapping[str, str]): The metadata associated with the upload.
+ num_retries (int):
+ Number of upload retries. (Deprecated: This
+ argument will be removed in a future release.)
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``. Depending on the retry strategy, a request may
+ be repeated several times using the same timeout each time.
+
+ Can also be passed as a tuple (connect_timeout, read_timeout).
+ See :meth:`requests.Session.request` documentation for details.
+ project (Optional[str]):
+ Project ID of the project of where to run the upload. Defaults
+ to the client's project.
+
+ Returns:
+ Tuple:
+ Pair of
+
+ * The :class:`~google.resumable_media.requests.ResumableUpload`
+ that was created
+ * The ``transport`` used to initiate the upload.
+ """
+ chunk_size = _DEFAULT_CHUNKSIZE
+ transport = self._http
+ headers = _get_upload_headers(self._connection.user_agent)
+
+ if project is None:
+ project = self.project
+ # TODO: Increase the minimum version of google-cloud-core to 1.6.0
+ # and remove this logic. See:
+ # https://github.com/googleapis/python-bigquery/issues/509
+ hostname = (
+ self._connection.API_BASE_URL
+ if not hasattr(self._connection, "get_api_base_url_for_mtls")
+ else self._connection.get_api_base_url_for_mtls()
+ )
+ upload_url = _RESUMABLE_URL_TEMPLATE.format(host=hostname, project=project)
+
+ # TODO: modify ResumableUpload to take a retry.Retry object
+ # that it can use for the initial RPC.
+ upload = ResumableUpload(upload_url, chunk_size, headers=headers)
+
+ if num_retries is not None:
+ upload._retry_strategy = resumable_media.RetryStrategy(
+ max_retries=num_retries
+ )
+
+ upload.initiate(
+ transport,
+ stream,
+ metadata,
+ _GENERIC_CONTENT_TYPE,
+ stream_final=False,
+ timeout=timeout,
+ )
+
+ return upload, transport
+
+ def _do_multipart_upload(
+ self,
+ stream: IO[bytes],
+ metadata: Mapping[str, str],
+ size: int,
+ num_retries: int,
+ timeout: Optional[ResumableTimeoutType],
+ project: Optional[str] = None,
+ ):
+ """Perform a multipart upload.
+
+ Args:
+ stream (IO[bytes]): A bytes IO object open for reading.
+ metadata (Mapping[str, str]): The metadata associated with the upload.
+ size (int):
+ The number of bytes to be uploaded (which will be read
+ from ``stream``). If not provided, the upload will be
+ concluded once ``stream`` is exhausted (or :data:`None`).
+ num_retries (int):
+ Number of upload retries. (Deprecated: This
+ argument will be removed in a future release.)
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``. Depending on the retry strategy, a request may
+ be repeated several times using the same timeout each time.
+
+ Can also be passed as a tuple (connect_timeout, read_timeout).
+ See :meth:`requests.Session.request` documentation for details.
+ project (Optional[str]):
+ Project ID of the project of where to run the upload. Defaults
+ to the client's project.
+
+ Returns:
+ requests.Response:
+ The "200 OK" response object returned after the multipart
+ upload request.
+
+ Raises:
+ ValueError:
+ if the ``stream`` has fewer than ``size``
+ bytes remaining.
+ """
+ data = stream.read(size)
+ if len(data) < size:
+ msg = _READ_LESS_THAN_SIZE.format(size, len(data))
+ raise ValueError(msg)
+
+ headers = _get_upload_headers(self._connection.user_agent)
+
+ if project is None:
+ project = self.project
+
+ # TODO: Increase the minimum version of google-cloud-core to 1.6.0
+ # and remove this logic. See:
+ # https://github.com/googleapis/python-bigquery/issues/509
+ hostname = (
+ self._connection.API_BASE_URL
+ if not hasattr(self._connection, "get_api_base_url_for_mtls")
+ else self._connection.get_api_base_url_for_mtls()
+ )
+ upload_url = _MULTIPART_URL_TEMPLATE.format(host=hostname, project=project)
+ upload = MultipartUpload(upload_url, headers=headers)
+
+ if num_retries is not None:
+ upload._retry_strategy = resumable_media.RetryStrategy(
+ max_retries=num_retries
+ )
+
+ response = upload.transmit(
+ self._http, data, metadata, _GENERIC_CONTENT_TYPE, timeout=timeout
+ )
+
+ return response
+
+ def copy_table(
+ self,
+ sources: Union[
+ Table,
+ TableReference,
+ TableListItem,
+ str,
+ Sequence[Union[Table, TableReference, TableListItem, str]],
+ ],
+ destination: Union[Table, TableReference, TableListItem, str],
+ job_id: Optional[str] = None,
+ job_id_prefix: Optional[str] = None,
+ location: Optional[str] = None,
+ project: Optional[str] = None,
+ job_config: Optional[CopyJobConfig] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> job.CopyJob:
+ """Copy one or more tables to another table.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationtablecopy
+
+ Args:
+ sources (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ Sequence[ \
+ Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ ] \
+ ], \
+ ]):
+ Table or tables to be copied.
+ destination (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ ]):
+ Table into which data is to be copied.
+ job_id (Optional[str]): The ID of the job.
+ job_id_prefix (Optional[str]):
+ The user-provided prefix for a randomly generated job ID.
+ This parameter will be ignored if a ``job_id`` is also given.
+ location (Optional[str]):
+ Location where to run the job. Must match the location of any
+ source table as well as the destination table.
+ project (Optional[str]):
+ Project ID of the project of where to run the job. Defaults
+ to the client's project.
+ job_config (Optional[google.cloud.bigquery.job.CopyJobConfig]):
+ Extra configuration options for the job.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ google.cloud.bigquery.job.CopyJob: A new copy job instance.
+
+ Raises:
+ TypeError:
+ If ``job_config`` is not an instance of :class:`~google.cloud.bigquery.job.CopyJobConfig`
+ class.
+ """
+ job_id = _make_job_id(job_id, job_id_prefix)
+
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ job_ref = job._JobReference(job_id, project=project, location=location)
+
+ # sources can be one of many different input types. (string, Table,
+ # TableReference, or a sequence of any of those.) Convert them all to a
+ # list of TableReferences.
+ #
+ # _table_arg_to_table_ref leaves lists unmodified.
+ sources = _table_arg_to_table_ref(sources, default_project=self.project)
+
+ if not isinstance(sources, collections_abc.Sequence):
+ sources = [sources]
+
+ sources = [
+ _table_arg_to_table_ref(source, default_project=self.project)
+ for source in sources
+ ]
+
+ destination = _table_arg_to_table_ref(destination, default_project=self.project)
+
+ if job_config:
+ _verify_job_config_type(job_config, google.cloud.bigquery.job.CopyJobConfig)
+ job_config = copy.deepcopy(job_config)
+
+ copy_job = job.CopyJob(
+ job_ref, sources, destination, client=self, job_config=job_config
+ )
+ copy_job._begin(retry=retry, timeout=timeout)
+
+ return copy_job
+
+ def extract_table(
+ self,
+ source: Union[Table, TableReference, TableListItem, Model, ModelReference, str],
+ destination_uris: Union[str, Sequence[str]],
+ job_id: Optional[str] = None,
+ job_id_prefix: Optional[str] = None,
+ location: Optional[str] = None,
+ project: Optional[str] = None,
+ job_config: Optional[ExtractJobConfig] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ source_type: str = "Table",
+ ) -> job.ExtractJob:
+ """Start a job to extract a table into Cloud Storage files.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationextract
+
+ Args:
+ source (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ google.cloud.bigquery.model.Model, \
+ google.cloud.bigquery.model.ModelReference, \
+ src, \
+ ]):
+ Table or Model to be extracted.
+ destination_uris (Union[str, Sequence[str]]):
+ URIs of Cloud Storage file(s) into which table data is to be
+ extracted; in format
+ ``gs:///``.
+ job_id (Optional[str]): The ID of the job.
+ job_id_prefix (Optional[str]):
+ The user-provided prefix for a randomly generated job ID.
+ This parameter will be ignored if a ``job_id`` is also given.
+ location (Optional[str]):
+ Location where to run the job. Must match the location of the
+ source table.
+ project (Optional[str]):
+ Project ID of the project of where to run the job. Defaults
+ to the client's project.
+ job_config (Optional[google.cloud.bigquery.job.ExtractJobConfig]):
+ Extra configuration options for the job.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ source_type (Optional[str]):
+ Type of source to be extracted.``Table`` or ``Model``. Defaults to ``Table``.
+ Returns:
+ google.cloud.bigquery.job.ExtractJob: A new extract job instance.
+
+ Raises:
+ TypeError:
+ If ``job_config`` is not an instance of :class:`~google.cloud.bigquery.job.ExtractJobConfig`
+ class.
+ ValueError:
+ If ``source_type`` is not among ``Table``,``Model``.
+ """
+ job_id = _make_job_id(job_id, job_id_prefix)
+
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ job_ref = job._JobReference(job_id, project=project, location=location)
+ src = source_type.lower()
+ if src == "table":
+ source = _table_arg_to_table_ref(source, default_project=self.project)
+ elif src == "model":
+ source = _model_arg_to_model_ref(source, default_project=self.project)
+ else:
+ raise ValueError(
+ "Cannot pass `{}` as a ``source_type``, pass Table or Model".format(
+ source_type
+ )
+ )
+
+ if isinstance(destination_uris, str):
+ destination_uris = [destination_uris]
+
+ if job_config:
+ _verify_job_config_type(
+ job_config, google.cloud.bigquery.job.ExtractJobConfig
+ )
+ job_config = copy.deepcopy(job_config)
+
+ extract_job = job.ExtractJob(
+ job_ref, source, destination_uris, client=self, job_config=job_config
+ )
+ extract_job._begin(retry=retry, timeout=timeout)
+
+ return extract_job
+
+ def query(
+ self,
+ query: str,
+ job_config: Optional[QueryJobConfig] = None,
+ job_id: Optional[str] = None,
+ job_id_prefix: Optional[str] = None,
+ location: Optional[str] = None,
+ project: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ job_retry: retries.Retry = DEFAULT_JOB_RETRY,
+ api_method: Union[str, enums.QueryApiMethod] = enums.QueryApiMethod.INSERT,
+ ) -> job.QueryJob:
+ """Run a SQL query.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationquery
+
+ Args:
+ query (str):
+ SQL query to be executed. Defaults to the standard SQL
+ dialect. Use the ``job_config`` parameter to change dialects.
+ job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
+ Extra configuration options for the job.
+ To override any options that were previously set in
+ the ``default_query_job_config`` given to the
+ ``Client`` constructor, manually set those options to ``None``,
+ or whatever value is preferred.
+ job_id (Optional[str]): ID to use for the query job.
+ job_id_prefix (Optional[str]):
+ The prefix to use for a randomly generated job ID. This parameter
+ will be ignored if a ``job_id`` is also given.
+ location (Optional[str]):
+ Location where to run the job. Must match the location of the
+ table used in the query as well as the destination table.
+ project (Optional[str]):
+ Project ID of the project of where to run the job. Defaults
+ to the client's project.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC. This only applies to making RPC
+ calls. It isn't used to retry failed jobs. This has
+ a reasonable default that should only be overridden
+ with care.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ job_retry (Optional[google.api_core.retry.Retry]):
+ How to retry failed jobs. The default retries
+ rate-limit-exceeded errors. Passing ``None`` disables
+ job retry.
+
+ Not all jobs can be retried. If ``job_id`` is
+ provided, then the job returned by the query will not
+ be retryable, and an exception will be raised if a
+ non-``None`` (and non-default) value for ``job_retry``
+ is also provided.
+
+ Note that errors aren't detected until ``result()`` is
+ called on the job returned. The ``job_retry``
+ specified here becomes the default ``job_retry`` for
+ ``result()``, where it can also be specified.
+ api_method (Union[str, enums.QueryApiMethod]):
+ Method with which to start the query job.
+
+ See :class:`google.cloud.bigquery.enums.QueryApiMethod` for
+ details on the difference between the query start methods.
+
+ Returns:
+ google.cloud.bigquery.job.QueryJob: A new query job instance.
+
+ Raises:
+ TypeError:
+ If ``job_config`` is not an instance of
+ :class:`~google.cloud.bigquery.job.QueryJobConfig`
+ class, or if both ``job_id`` and non-``None`` non-default
+ ``job_retry`` are provided.
+ """
+ job_id_given = job_id is not None
+ if (
+ job_id_given
+ and job_retry is not None
+ and job_retry is not DEFAULT_JOB_RETRY
+ ):
+ raise TypeError(
+ "`job_retry` was provided, but the returned job is"
+ " not retryable, because a custom `job_id` was"
+ " provided."
+ )
+
+ if job_id_given and api_method == enums.QueryApiMethod.QUERY:
+ raise TypeError(
+ "`job_id` was provided, but the 'QUERY' `api_method` was requested."
+ )
+
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ if job_config is not None:
+ _verify_job_config_type(job_config, QueryJobConfig)
+
+ job_config = _job_helpers.job_config_with_defaults(
+ job_config, self._default_query_job_config
+ )
+
+ # Note that we haven't modified the original job_config (or
+ # _default_query_job_config) up to this point.
+ if api_method == enums.QueryApiMethod.QUERY:
+ return _job_helpers.query_jobs_query(
+ self,
+ query,
+ job_config,
+ location,
+ project,
+ retry,
+ timeout,
+ job_retry,
+ )
+ elif api_method == enums.QueryApiMethod.INSERT:
+ return _job_helpers.query_jobs_insert(
+ self,
+ query,
+ job_config,
+ job_id,
+ job_id_prefix,
+ location,
+ project,
+ retry,
+ timeout,
+ job_retry,
+ )
+ else:
+ raise ValueError(f"Got unexpected value for api_method: {repr(api_method)}")
+
+ def query_and_wait(
+ self,
+ query,
+ *,
+ job_config: Optional[QueryJobConfig] = None,
+ location: Optional[str] = None,
+ project: Optional[str] = None,
+ api_timeout: TimeoutType = DEFAULT_TIMEOUT,
+ wait_timeout: Union[Optional[float], object] = POLLING_DEFAULT_VALUE,
+ retry: retries.Retry = DEFAULT_RETRY,
+ job_retry: retries.Retry = DEFAULT_JOB_RETRY,
+ page_size: Optional[int] = None,
+ max_results: Optional[int] = None,
+ ) -> RowIterator:
+ """Run the query, wait for it to finish, and return the results.
+
+ While ``jobCreationMode=JOB_CREATION_OPTIONAL`` is in preview in the
+ ``jobs.query`` REST API, use the default ``jobCreationMode`` unless
+ the environment variable ``QUERY_PREVIEW_ENABLED=true``. After
+ ``jobCreationMode`` is GA, this method will always use
+ ``jobCreationMode=JOB_CREATION_OPTIONAL``. See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query
+
+ Args:
+ query (str):
+ SQL query to be executed. Defaults to the standard SQL
+ dialect. Use the ``job_config`` parameter to change dialects.
+ job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
+ Extra configuration options for the job.
+ To override any options that were previously set in
+ the ``default_query_job_config`` given to the
+ ``Client`` constructor, manually set those options to ``None``,
+ or whatever value is preferred.
+ location (Optional[str]):
+ Location where to run the job. Must match the location of the
+ table used in the query as well as the destination table.
+ project (Optional[str]):
+ Project ID of the project of where to run the job. Defaults
+ to the client's project.
+ api_timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ wait_timeout (Optional[Union[float, object]]):
+ The number of seconds to wait for the query to finish. If the
+ query doesn't finish before this timeout, the client attempts
+ to cancel the query. If unset, the underlying REST API calls
+ have timeouts, but we still wait indefinitely for the job to
+ finish.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC. This only applies to making RPC
+ calls. It isn't used to retry failed jobs. This has
+ a reasonable default that should only be overridden
+ with care.
+ job_retry (Optional[google.api_core.retry.Retry]):
+ How to retry failed jobs. The default retries
+ rate-limit-exceeded errors. Passing ``None`` disables
+ job retry. Not all jobs can be retried.
+ page_size (Optional[int]):
+ The maximum number of rows in each page of results from this
+ request. Non-positive values are ignored.
+ max_results (Optional[int]):
+ The maximum total number of rows from this request.
+
+ Returns:
+ google.cloud.bigquery.table.RowIterator:
+ Iterator of row data
+ :class:`~google.cloud.bigquery.table.Row`-s. During each
+ page, the iterator will have the ``total_rows`` attribute
+ set, which counts the total number of rows **in the result
+ set** (this is distinct from the total number of rows in the
+ current page: ``iterator.page.num_items``).
+
+ If the query is a special query that produces no results, e.g.
+ a DDL query, an ``_EmptyRowIterator`` instance is returned.
+
+ Raises:
+ TypeError:
+ If ``job_config`` is not an instance of
+ :class:`~google.cloud.bigquery.job.QueryJobConfig`
+ class.
+ """
+ if project is None:
+ project = self.project
+
+ if location is None:
+ location = self.location
+
+ if job_config is not None:
+ _verify_job_config_type(job_config, QueryJobConfig)
+
+ job_config = _job_helpers.job_config_with_defaults(
+ job_config, self._default_query_job_config
+ )
+
+ return _job_helpers.query_and_wait(
+ self,
+ query,
+ job_config=job_config,
+ location=location,
+ project=project,
+ api_timeout=api_timeout,
+ wait_timeout=wait_timeout,
+ retry=retry,
+ job_retry=job_retry,
+ page_size=page_size,
+ max_results=max_results,
+ )
+
+ def insert_rows(
+ self,
+ table: Union[Table, TableReference, str],
+ rows: Union[Iterable[Tuple], Iterable[Mapping[str, Any]]],
+ selected_fields: Optional[Sequence[SchemaField]] = None,
+ **kwargs,
+ ) -> Sequence[Dict[str, Any]]:
+ """Insert rows into a table via the streaming API.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
+
+ BigQuery will reject insertAll payloads that exceed a defined limit (10MB).
+ Additionally, if a payload vastly exceeds this limit, the request is rejected
+ by the intermediate architecture, which returns a 413 (Payload Too Large) status code.
+
+
+ See
+ https://cloud.google.com/bigquery/quotas#streaming_inserts
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ str, \
+ ]):
+ The destination table for the row data, or a reference to it.
+ rows (Union[Sequence[Tuple], Sequence[Dict]]):
+ Row data to be inserted. If a list of tuples is given, each
+ tuple should contain data for each schema field on the
+ current table and in the same order as the schema fields. If
+ a list of dictionaries is given, the keys must include all
+ required fields in the schema. Keys which do not correspond
+ to a field in the schema are ignored.
+ selected_fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
+ The fields to return. Required if ``table`` is a
+ :class:`~google.cloud.bigquery.table.TableReference`.
+ kwargs (dict):
+ Keyword arguments to
+ :meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
+
+ Returns:
+ Sequence[Mappings]:
+ One mapping per row with insert errors: the "index" key
+ identifies the row, and the "errors" key contains a list of
+ the mappings describing one or more problems with the row.
+
+ Raises:
+ ValueError: if table's schema is not set or `rows` is not a `Sequence`.
+ """
+ if not isinstance(rows, (collections_abc.Sequence, collections_abc.Iterator)):
+ raise TypeError("rows argument should be a sequence of dicts or tuples")
+
+ table = _table_arg_to_table(table, default_project=self.project)
+
+ if not isinstance(table, Table):
+ raise TypeError(_NEED_TABLE_ARGUMENT)
+
+ schema = table.schema
+
+ # selected_fields can override the table schema.
+ if selected_fields is not None:
+ schema = selected_fields
+
+ if len(schema) == 0:
+ raise ValueError(
+ (
+ "Could not determine schema for table '{}'. Call client.get_table() "
+ "or pass in a list of schema fields to the selected_fields argument."
+ ).format(table)
+ )
+
+ json_rows = [_record_field_to_json(schema, row) for row in rows]
+
+ return self.insert_rows_json(table, json_rows, **kwargs)
+
+ def insert_rows_from_dataframe(
+ self,
+ table: Union[Table, TableReference, str],
+ dataframe,
+ selected_fields: Optional[Sequence[SchemaField]] = None,
+ chunk_size: int = 500,
+ **kwargs: Dict,
+ ) -> Sequence[Sequence[dict]]:
+ """Insert rows into a table from a dataframe via the streaming API.
+
+ BigQuery will reject insertAll payloads that exceed a defined limit (10MB).
+ Additionally, if a payload vastly exceeds this limit, the request is rejected
+ by the intermediate architecture, which returns a 413 (Payload Too Large) status code.
+
+ See
+ https://cloud.google.com/bigquery/quotas#streaming_inserts
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ str, \
+ ]):
+ The destination table for the row data, or a reference to it.
+ dataframe (pandas.DataFrame):
+ A :class:`~pandas.DataFrame` containing the data to load. Any
+ ``NaN`` values present in the dataframe are omitted from the
+ streaming API request(s).
+ selected_fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
+ The fields to return. Required if ``table`` is a
+ :class:`~google.cloud.bigquery.table.TableReference`.
+ chunk_size (int):
+ The number of rows to stream in a single chunk. Must be positive.
+ kwargs (Dict):
+ Keyword arguments to
+ :meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
+
+ Returns:
+ Sequence[Sequence[Mappings]]:
+ A list with insert errors for each insert chunk. Each element
+ is a list containing one mapping per row with insert errors:
+ the "index" key identifies the row, and the "errors" key
+ contains a list of the mappings describing one or more problems
+ with the row.
+
+ Raises:
+ ValueError: if table's schema is not set
+ """
+ insert_results = []
+
+ chunk_count = int(math.ceil(len(dataframe) / chunk_size))
+ rows_iter = _pandas_helpers.dataframe_to_json_generator(dataframe)
+
+ for _ in range(chunk_count):
+ rows_chunk = itertools.islice(rows_iter, chunk_size)
+ result = self.insert_rows(table, rows_chunk, selected_fields, **kwargs)
+ insert_results.append(result)
+
+ return insert_results
+
+ def insert_rows_json(
+ self,
+ table: Union[Table, TableReference, TableListItem, str],
+ json_rows: Sequence[Mapping[str, Any]],
+ row_ids: Union[
+ Iterable[Optional[str]], AutoRowIDs, None
+ ] = AutoRowIDs.GENERATE_UUID,
+ skip_invalid_rows: Optional[bool] = None,
+ ignore_unknown_values: Optional[bool] = None,
+ template_suffix: Optional[str] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Sequence[dict]:
+ """Insert rows into a table without applying local type conversions.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
+
+ BigQuery will reject insertAll payloads that exceed a defined limit (10MB).
+ Additionally, if a payload vastly exceeds this limit, the request is rejected
+ by the intermediate architecture, which returns a 413 (Payload Too Large) status code.
+
+ See
+ https://cloud.google.com/bigquery/quotas#streaming_inserts
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str \
+ ]):
+ The destination table for the row data, or a reference to it.
+ json_rows (Sequence[Dict]):
+ Row data to be inserted. Keys must match the table schema fields
+ and values must be JSON-compatible representations.
+ row_ids (Union[Iterable[str], AutoRowIDs, None]):
+ Unique IDs, one per row being inserted. An ID can also be
+ ``None``, indicating that an explicit insert ID should **not**
+ be used for that row. If the argument is omitted altogether,
+ unique IDs are created automatically.
+
+ .. versionchanged:: 2.21.0
+ Can also be an iterable, not just a sequence, or an
+ :class:`AutoRowIDs` enum member.
+
+ .. deprecated:: 2.21.0
+ Passing ``None`` to explicitly request autogenerating insert IDs is
+ deprecated, use :attr:`AutoRowIDs.GENERATE_UUID` instead.
+
+ skip_invalid_rows (Optional[bool]):
+ Insert all valid rows of a request, even if invalid rows exist.
+ The default value is ``False``, which causes the entire request
+ to fail if any invalid rows exist.
+ ignore_unknown_values (Optional[bool]):
+ Accept rows that contain values that do not match the schema.
+ The unknown values are ignored. Default is ``False``, which
+ treats unknown values as errors.
+ template_suffix (Optional[str]):
+ Treat ``name`` as a template table and provide a suffix.
+ BigQuery will create the table `` + ``
+ based on the schema of the template table. See
+ https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ Sequence[Mappings]:
+ One mapping per row with insert errors: the "index" key
+ identifies the row, and the "errors" key contains a list of
+ the mappings describing one or more problems with the row.
+
+ Raises:
+ TypeError: if `json_rows` is not a `Sequence`.
+ """
+ if not isinstance(
+ json_rows, (collections_abc.Sequence, collections_abc.Iterator)
+ ):
+ raise TypeError("json_rows argument should be a sequence of dicts")
+ # Convert table to just a reference because unlike insert_rows,
+ # insert_rows_json doesn't need the table schema. It's not doing any
+ # type conversions.
+ table = _table_arg_to_table_ref(table, default_project=self.project)
+ rows_info: List[Any] = []
+ data: Dict[str, Any] = {"rows": rows_info}
+
+ if row_ids is None:
+ warnings.warn(
+ "Passing None for row_ids is deprecated. To explicitly request "
+ "autogenerated insert IDs, use AutoRowIDs.GENERATE_UUID instead",
+ category=DeprecationWarning,
+ )
+ row_ids = AutoRowIDs.GENERATE_UUID
+
+ if not isinstance(row_ids, AutoRowIDs):
+ try:
+ row_ids_iter = iter(row_ids)
+ except TypeError:
+ msg = "row_ids is neither an iterable nor an AutoRowIDs enum member"
+ raise TypeError(msg)
+
+ for i, row in enumerate(json_rows):
+ info: Dict[str, Any] = {"json": row}
+
+ if row_ids is AutoRowIDs.GENERATE_UUID:
+ info["insertId"] = str(uuid.uuid4())
+ elif row_ids is AutoRowIDs.DISABLED:
+ info["insertId"] = None
+ else:
+ try:
+ insert_id = next(row_ids_iter)
+ except StopIteration:
+ msg = f"row_ids did not generate enough IDs, error at index {i}"
+ raise ValueError(msg)
+ else:
+ info["insertId"] = insert_id
+
+ rows_info.append(info)
+
+ if skip_invalid_rows is not None:
+ data["skipInvalidRows"] = skip_invalid_rows
+
+ if ignore_unknown_values is not None:
+ data["ignoreUnknownValues"] = ignore_unknown_values
+
+ if template_suffix is not None:
+ data["templateSuffix"] = template_suffix
+
+ path = "%s/insertAll" % table.path
+ # We can always retry, because every row has an insert ID.
+ span_attributes = {"path": path}
+ response = self._call_api(
+ retry,
+ span_name="BigQuery.insertRowsJson",
+ span_attributes=span_attributes,
+ method="POST",
+ path=path,
+ data=data,
+ timeout=timeout,
+ )
+ errors = []
+
+ for error in response.get("insertErrors", ()):
+ errors.append({"index": int(error["index"]), "errors": error["errors"]})
+
+ return errors
+
+ def list_partitions(
+ self,
+ table: Union[Table, TableReference, TableListItem, str],
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> Sequence[str]:
+ """List the partitions in a table.
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.table.TableListItem, \
+ str, \
+ ]):
+ The table or reference from which to get partition info
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ If multiple requests are made under the hood, ``timeout``
+ applies to each individual request.
+
+ Returns:
+ List[str]:
+ A list of the partition ids present in the partitioned table
+ """
+ table = _table_arg_to_table_ref(table, default_project=self.project)
+ meta_table = self.get_table(
+ TableReference(
+ DatasetReference(table.project, table.dataset_id),
+ "%s$__PARTITIONS_SUMMARY__" % table.table_id,
+ ),
+ retry=retry,
+ timeout=timeout,
+ )
+
+ subset = [col for col in meta_table.schema if col.name == "partition_id"]
+ return [
+ row[0]
+ for row in self.list_rows(
+ meta_table, selected_fields=subset, retry=retry, timeout=timeout
+ )
+ ]
+
+ def list_rows(
+ self,
+ table: Union[Table, TableListItem, TableReference, str],
+ selected_fields: Optional[Sequence[SchemaField]] = None,
+ max_results: Optional[int] = None,
+ page_token: Optional[str] = None,
+ start_index: Optional[int] = None,
+ page_size: Optional[int] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ ) -> RowIterator:
+ """List the rows of the table.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list
+
+ .. note::
+
+ This method assumes that the provided schema is up-to-date with the
+ schema as defined on the back-end: if the two schemas are not
+ identical, the values returned may be incomplete. To ensure that the
+ local copy of the schema is up-to-date, call ``client.get_table``.
+
+ Args:
+ table (Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableListItem, \
+ google.cloud.bigquery.table.TableReference, \
+ str, \
+ ]):
+ The table to list, or a reference to it. When the table
+ object does not contain a schema and ``selected_fields`` is
+ not supplied, this method calls ``get_table`` to fetch the
+ table schema.
+ selected_fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
+ The fields to return. If not supplied, data for all columns
+ are downloaded.
+ max_results (Optional[int]):
+ Maximum number of rows to return.
+ page_token (Optional[str]):
+ Token representing a cursor into the table's rows.
+ If not passed, the API will return the first page of the
+ rows. The token marks the beginning of the iterator to be
+ returned and the value of the ``page_token`` can be accessed
+ at ``next_page_token`` of the
+ :class:`~google.cloud.bigquery.table.RowIterator`.
+ start_index (Optional[int]):
+ The zero-based index of the starting row to read.
+ page_size (Optional[int]):
+ The maximum number of rows in each page of results from this request.
+ Non-positive values are ignored. Defaults to a sensible value set by the API.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ If multiple requests are made under the hood, ``timeout``
+ applies to each individual request.
+
+ Returns:
+ google.cloud.bigquery.table.RowIterator:
+ Iterator of row data
+ :class:`~google.cloud.bigquery.table.Row`-s. During each
+ page, the iterator will have the ``total_rows`` attribute
+ set, which counts the total number of rows **in the table**
+ (this is distinct from the total number of rows in the
+ current page: ``iterator.page.num_items``).
+ """
+ table = _table_arg_to_table(table, default_project=self.project)
+
+ if not isinstance(table, Table):
+ raise TypeError(_NEED_TABLE_ARGUMENT)
+
+ schema = table.schema
+
+ # selected_fields can override the table schema.
+ if selected_fields is not None:
+ schema = selected_fields
+
+ # No schema, but no selected_fields. Assume the developer wants all
+ # columns, so get the table resource for them rather than failing.
+ elif len(schema) == 0:
+ table = self.get_table(table.reference, retry=retry, timeout=timeout)
+ schema = table.schema
+
+ params: Dict[str, Any] = {}
+ if selected_fields is not None:
+ params["selectedFields"] = ",".join(field.name for field in selected_fields)
+ if start_index is not None:
+ params["startIndex"] = start_index
+
+ params["formatOptions.useInt64Timestamp"] = True
+ row_iterator = RowIterator(
+ client=self,
+ api_request=functools.partial(self._call_api, retry, timeout=timeout),
+ path="%s/data" % (table.path,),
+ schema=schema,
+ page_token=page_token,
+ max_results=max_results,
+ page_size=page_size,
+ extra_params=params,
+ table=table,
+ # Pass in selected_fields separately from schema so that full
+ # tables can be fetched without a column filter.
+ selected_fields=selected_fields,
+ total_rows=getattr(table, "num_rows", None),
+ project=table.project,
+ location=table.location,
+ )
+ return row_iterator
+
+ def _list_rows_from_query_results(
+ self,
+ job_id: str,
+ location: str,
+ project: str,
+ schema: Sequence[SchemaField],
+ total_rows: Optional[int] = None,
+ destination: Optional[Union[Table, TableReference, TableListItem, str]] = None,
+ max_results: Optional[int] = None,
+ start_index: Optional[int] = None,
+ page_size: Optional[int] = None,
+ retry: retries.Retry = DEFAULT_RETRY,
+ timeout: TimeoutType = DEFAULT_TIMEOUT,
+ query_id: Optional[str] = None,
+ first_page_response: Optional[Dict[str, Any]] = None,
+ num_dml_affected_rows: Optional[int] = None,
+ ) -> RowIterator:
+ """List the rows of a completed query.
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults
+ Args:
+ job_id (str):
+ ID of a query job.
+ location (str): Location of the query job.
+ project (str):
+ ID of the project where the query job was run.
+ schema (Sequence[google.cloud.bigquery.schema.SchemaField]):
+ The fields expected in these query results. Used to convert
+ from JSON to expected Python types.
+ total_rows (Optional[int]):
+ Total number of rows in the query results.
+ destination (Optional[Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableListItem, \
+ google.cloud.bigquery.table.TableReference, \
+ str, \
+ ]]):
+ Destination table reference. Used to fetch the query results
+ with the BigQuery Storage API.
+ max_results (Optional[int]):
+ Maximum number of rows to return across the whole iterator.
+ start_index (Optional[int]):
+ The zero-based index of the starting row to read.
+ page_size (Optional[int]):
+ The maximum number of rows in each page of results from this request.
+ Non-positive values are ignored. Defaults to a sensible value set by the API.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``. If set, this connection timeout may be
+ increased to a minimum value. This prevents retries on what
+ would otherwise be a successful response.
+ If multiple requests are made under the hood, ``timeout``
+ applies to each individual request.
+ query_id (Optional[str]):
+ [Preview] ID of a completed query. This ID is auto-generated
+ and not guaranteed to be populated.
+ first_page_response (Optional[dict]):
+ API response for the first page of results (if available).
+ num_dml_affected_rows (Optional[int]):
+ If this RowIterator is the result of a DML query, the number of
+ rows that were affected.
+
+ Returns:
+ google.cloud.bigquery.table.RowIterator:
+ Iterator of row data
+ :class:`~google.cloud.bigquery.table.Row`-s.
+ """
+ params: Dict[str, Any] = {
+ "fields": _LIST_ROWS_FROM_QUERY_RESULTS_FIELDS,
+ "location": location,
+ }
+
+ if timeout is not None:
+ if not isinstance(timeout, (int, float)):
+ timeout = _MIN_GET_QUERY_RESULTS_TIMEOUT
+ else:
+ timeout = max(timeout, _MIN_GET_QUERY_RESULTS_TIMEOUT)
+
+ if start_index is not None:
+ params["startIndex"] = start_index
+
+ params["formatOptions.useInt64Timestamp"] = True
+ row_iterator = RowIterator(
+ client=self,
+ api_request=functools.partial(self._call_api, retry, timeout=timeout),
+ path=f"/projects/{project}/queries/{job_id}",
+ schema=schema,
+ max_results=max_results,
+ page_size=page_size,
+ table=destination,
+ extra_params=params,
+ total_rows=total_rows,
+ project=project,
+ location=location,
+ job_id=job_id,
+ query_id=query_id,
+ first_page_response=first_page_response,
+ num_dml_affected_rows=num_dml_affected_rows,
+ )
+ return row_iterator
+
+ def _schema_from_json_file_object(self, file_obj):
+ """Helper function for schema_from_json that takes a
+ file object that describes a table schema.
+
+ Returns:
+ List of schema field objects.
+ """
+ json_data = json.load(file_obj)
+ return [SchemaField.from_api_repr(field) for field in json_data]
+
+ def _schema_to_json_file_object(self, schema_list, file_obj):
+ """Helper function for schema_to_json that takes a schema list and file
+ object and writes the schema list to the file object with json.dump
+ """
+ json.dump(schema_list, file_obj, indent=2, sort_keys=True)
+
+ def schema_from_json(self, file_or_path: "PathType") -> List[SchemaField]:
+ """Takes a file object or file path that contains json that describes
+ a table schema.
+
+ Returns:
+ List[SchemaField]:
+ List of :class:`~google.cloud.bigquery.schema.SchemaField` objects.
+ """
+ if isinstance(file_or_path, io.IOBase):
+ return self._schema_from_json_file_object(file_or_path)
+
+ with open(file_or_path) as file_obj:
+ return self._schema_from_json_file_object(file_obj)
+
+ def schema_to_json(
+ self, schema_list: Sequence[SchemaField], destination: "PathType"
+ ):
+ """Takes a list of schema field objects.
+
+ Serializes the list of schema field objects as json to a file.
+
+ Destination is a file path or a file object.
+ """
+ json_schema_list = [f.to_api_repr() for f in schema_list]
+
+ if isinstance(destination, io.IOBase):
+ return self._schema_to_json_file_object(json_schema_list, destination)
+
+ with open(destination, mode="w") as file_obj:
+ return self._schema_to_json_file_object(json_schema_list, file_obj)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+
+# pylint: disable=unused-argument
+def _item_to_project(iterator, resource):
+ """Convert a JSON project to the native object.
+
+ Args:
+ iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
+
+ resource (Dict): An item to be converted to a project.
+
+ Returns:
+ google.cloud.bigquery.client.Project: The next project in the page.
+ """
+ return Project.from_api_repr(resource)
+
+
+# pylint: enable=unused-argument
+
+
+def _item_to_dataset(iterator, resource):
+ """Convert a JSON dataset to the native object.
+
+ Args:
+ iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
+
+ resource (Dict): An item to be converted to a dataset.
+
+ Returns:
+ google.cloud.bigquery.dataset.DatasetListItem: The next dataset in the page.
+ """
+ return DatasetListItem(resource)
+
+
+def _item_to_job(iterator, resource):
+ """Convert a JSON job to the native object.
+
+ Args:
+ iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
+
+ resource (Dict): An item to be converted to a job.
+
+ Returns:
+ job instance: The next job in the page.
+ """
+ return iterator.client.job_from_resource(resource)
+
+
+def _item_to_model(iterator, resource):
+ """Convert a JSON model to the native object.
+
+ Args:
+ iterator (google.api_core.page_iterator.Iterator):
+ The iterator that is currently in use.
+ resource (Dict): An item to be converted to a model.
+
+ Returns:
+ google.cloud.bigquery.model.Model: The next model in the page.
+ """
+ return Model.from_api_repr(resource)
+
+
+def _item_to_routine(iterator, resource):
+ """Convert a JSON model to the native object.
+
+ Args:
+ iterator (google.api_core.page_iterator.Iterator):
+ The iterator that is currently in use.
+ resource (Dict): An item to be converted to a routine.
+
+ Returns:
+ google.cloud.bigquery.routine.Routine: The next routine in the page.
+ """
+ return Routine.from_api_repr(resource)
+
+
+def _item_to_table(iterator, resource):
+ """Convert a JSON table to the native object.
+
+ Args:
+ iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
+
+ resource (Dict): An item to be converted to a table.
+
+ Returns:
+ google.cloud.bigquery.table.Table: The next table in the page.
+ """
+ return TableListItem(resource)
+
+
+def _extract_job_reference(job, project=None, location=None):
+ """Extract fully-qualified job reference from a job-like object.
+
+ Args:
+ job_id (Union[ \
+ str, \
+ google.cloud.bigquery.job.LoadJob, \
+ google.cloud.bigquery.job.CopyJob, \
+ google.cloud.bigquery.job.ExtractJob, \
+ google.cloud.bigquery.job.QueryJob \
+ ]): Job identifier.
+ project (Optional[str]):
+ Project where the job was run. Ignored if ``job_id`` is a job
+ object.
+ location (Optional[str]):
+ Location where the job was run. Ignored if ``job_id`` is a job
+ object.
+
+ Returns:
+ Tuple[str, str, str]: ``(project, location, job_id)``
+ """
+ if hasattr(job, "job_id"):
+ project = job.project
+ job_id = job.job_id
+ location = job.location
+ else:
+ job_id = job
+
+ return (project, location, job_id)
+
+
+def _check_mode(stream):
+ """Check that a stream was opened in read-binary mode.
+
+ Args:
+ stream (IO[bytes]): A bytes IO object open for reading.
+
+ Raises:
+ ValueError:
+ if the ``stream.mode`` is a valid attribute
+ and is not among ``rb``, ``r+b`` or ``rb+``.
+ """
+ mode = getattr(stream, "mode", None)
+
+ if isinstance(stream, gzip.GzipFile):
+ if mode != gzip.READ: # pytype: disable=module-attr
+ raise ValueError(
+ "Cannot upload gzip files opened in write mode: use "
+ "gzip.GzipFile(filename, mode='rb')"
+ )
+ else:
+ if mode is not None and mode not in ("rb", "r+b", "rb+"):
+ raise ValueError(
+ "Cannot upload files opened in text mode: use "
+ "open(filename, mode='rb') or open(filename, mode='r+b')"
+ )
+
+
+def _get_upload_headers(user_agent):
+ """Get the headers for an upload request.
+
+ Args:
+ user_agent (str): The user-agent for requests.
+
+ Returns:
+ Dict: The headers to be used for the request.
+ """
+ return {
+ "Accept": "application/json",
+ "Accept-Encoding": "gzip, deflate",
+ "User-Agent": user_agent,
+ "content-type": "application/json; charset=UTF-8",
+ }
+
+
+def _add_server_timeout_header(headers: Optional[Dict[str, str]], kwargs):
+ timeout = kwargs.get("timeout")
+ if timeout is not None:
+ if headers is None:
+ headers = {}
+ headers[TIMEOUT_HEADER] = str(timeout)
+
+ if headers:
+ kwargs["headers"] = headers
+
+ return kwargs
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/dataset.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..c49a52faf2174d0919b14e3472a036add0b644b0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dataset.py
@@ -0,0 +1,1028 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Datasets."""
+
+from __future__ import absolute_import
+
+import copy
+
+import typing
+
+import google.cloud._helpers # type: ignore
+
+from google.cloud.bigquery import _helpers
+from google.cloud.bigquery.model import ModelReference
+from google.cloud.bigquery.routine import Routine, RoutineReference
+from google.cloud.bigquery.table import Table, TableReference
+from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
+
+from typing import Optional, List, Dict, Any, Union
+
+
+def _get_table_reference(self, table_id: str) -> TableReference:
+ """Constructs a TableReference.
+
+ Args:
+ table_id (str): The ID of the table.
+
+ Returns:
+ google.cloud.bigquery.table.TableReference:
+ A table reference for a table in this dataset.
+ """
+ return TableReference(self, table_id)
+
+
+def _get_model_reference(self, model_id):
+ """Constructs a ModelReference.
+
+ Args:
+ model_id (str): the ID of the model.
+
+ Returns:
+ google.cloud.bigquery.model.ModelReference:
+ A ModelReference for a model in this dataset.
+ """
+ return ModelReference.from_api_repr(
+ {"projectId": self.project, "datasetId": self.dataset_id, "modelId": model_id}
+ )
+
+
+def _get_routine_reference(self, routine_id):
+ """Constructs a RoutineReference.
+
+ Args:
+ routine_id (str): the ID of the routine.
+
+ Returns:
+ google.cloud.bigquery.routine.RoutineReference:
+ A RoutineReference for a routine in this dataset.
+ """
+ return RoutineReference.from_api_repr(
+ {
+ "projectId": self.project,
+ "datasetId": self.dataset_id,
+ "routineId": routine_id,
+ }
+ )
+
+
+class DatasetReference(object):
+ """DatasetReferences are pointers to datasets.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#datasetreference
+
+ Args:
+ project (str): The ID of the project
+ dataset_id (str): The ID of the dataset
+
+ Raises:
+ ValueError: If either argument is not of type ``str``.
+ """
+
+ def __init__(self, project: str, dataset_id: str):
+ if not isinstance(project, str):
+ raise ValueError("Pass a string for project")
+ if not isinstance(dataset_id, str):
+ raise ValueError("Pass a string for dataset_id")
+ self._project = project
+ self._dataset_id = dataset_id
+
+ @property
+ def project(self):
+ """str: Project ID of the dataset."""
+ return self._project
+
+ @property
+ def dataset_id(self):
+ """str: Dataset ID."""
+ return self._dataset_id
+
+ @property
+ def path(self):
+ """str: URL path for the dataset based on project and dataset ID."""
+ return "/projects/%s/datasets/%s" % (self.project, self.dataset_id)
+
+ table = _get_table_reference
+
+ model = _get_model_reference
+
+ routine = _get_routine_reference
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "DatasetReference":
+ """Factory: construct a dataset reference given its API representation
+
+ Args:
+ resource (Dict[str, str]):
+ Dataset reference resource representation returned from the API
+
+ Returns:
+ google.cloud.bigquery.dataset.DatasetReference:
+ Dataset reference parsed from ``resource``.
+ """
+ project = resource["projectId"]
+ dataset_id = resource["datasetId"]
+ return cls(project, dataset_id)
+
+ @classmethod
+ def from_string(
+ cls, dataset_id: str, default_project: Optional[str] = None
+ ) -> "DatasetReference":
+ """Construct a dataset reference from dataset ID string.
+
+ Args:
+ dataset_id (str):
+ A dataset ID in standard SQL format. If ``default_project``
+ is not specified, this must include both the project ID and
+ the dataset ID, separated by ``.``.
+ default_project (Optional[str]):
+ The project ID to use when ``dataset_id`` does not include a
+ project ID.
+
+ Returns:
+ DatasetReference:
+ Dataset reference parsed from ``dataset_id``.
+
+ Examples:
+ >>> DatasetReference.from_string('my-project-id.some_dataset')
+ DatasetReference('my-project-id', 'some_dataset')
+
+ Raises:
+ ValueError:
+ If ``dataset_id`` is not a fully-qualified dataset ID in
+ standard SQL format.
+ """
+ output_dataset_id = dataset_id
+ parts = _helpers._split_id(dataset_id)
+
+ if len(parts) == 1:
+ if default_project is not None:
+ output_project_id = default_project
+ else:
+ raise ValueError(
+ "When default_project is not set, dataset_id must be a "
+ "fully-qualified dataset ID in standard SQL format, "
+ 'e.g., "project.dataset_id" got {}'.format(dataset_id)
+ )
+ elif len(parts) == 2:
+ output_project_id, output_dataset_id = parts
+ else:
+ raise ValueError(
+ "Too many parts in dataset_id. Expected a fully-qualified "
+ "dataset ID in standard SQL format, "
+ 'e.g. "project.dataset_id", got {}'.format(dataset_id)
+ )
+
+ return cls(output_project_id, output_dataset_id)
+
+ def to_api_repr(self) -> dict:
+ """Construct the API resource representation of this dataset reference
+
+ Returns:
+ Dict[str, str]: dataset reference represented as an API resource
+ """
+ return {"projectId": self._project, "datasetId": self._dataset_id}
+
+ def _key(self):
+ """A tuple key that uniquely describes this field.
+
+ Used to compute this instance's hashcode and evaluate equality.
+
+ Returns:
+ Tuple[str]: The contents of this :class:`.DatasetReference`.
+ """
+ return (self._project, self._dataset_id)
+
+ def __eq__(self, other):
+ if not isinstance(other, DatasetReference):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def __str__(self):
+ return f"{self.project}.{self._dataset_id}"
+
+ def __repr__(self):
+ return "DatasetReference{}".format(self._key())
+
+
+class AccessEntry(object):
+ """Represents grant of an access role to an entity.
+
+ An entry must have exactly one of the allowed
+ :class:`google.cloud.bigquery.enums.EntityTypes`. If anything but ``view``, ``routine``,
+ or ``dataset`` are set, a ``role`` is also required. ``role`` is omitted for ``view``,
+ ``routine``, ``dataset``, because they are always read-only.
+
+ See https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets.
+
+ Args:
+ role:
+ Role granted to the entity. The following string values are
+ supported: `'READER'`, `'WRITER'`, `'OWNER'`. It may also be
+ :data:`None` if the ``entity_type`` is ``view``, ``routine``, or ``dataset``.
+
+ entity_type:
+ Type of entity being granted the role. See
+ :class:`google.cloud.bigquery.enums.EntityTypes` for supported types.
+
+ entity_id:
+ If the ``entity_type`` is not 'view', 'routine', or 'dataset', the
+ ``entity_id`` is the ``str`` ID of the entity being granted the role. If
+ the ``entity_type`` is 'view' or 'routine', the ``entity_id`` is a ``dict``
+ representing the view or routine from a different dataset to grant access
+ to in the following format for views::
+
+ {
+ 'projectId': string,
+ 'datasetId': string,
+ 'tableId': string
+ }
+
+ For routines::
+
+ {
+ 'projectId': string,
+ 'datasetId': string,
+ 'routineId': string
+ }
+
+ If the ``entity_type`` is 'dataset', the ``entity_id`` is a ``dict`` that includes
+ a 'dataset' field with a ``dict`` representing the dataset and a 'target_types'
+ field with a ``str`` value of the dataset's resource type::
+
+ {
+ 'dataset': {
+ 'projectId': string,
+ 'datasetId': string,
+ },
+ 'target_types: 'VIEWS'
+ }
+
+ Raises:
+ ValueError:
+ If a ``view``, ``routine``, or ``dataset`` has ``role`` set, or a non ``view``,
+ non ``routine``, and non ``dataset`` **does not** have a ``role`` set.
+
+ Examples:
+ >>> entry = AccessEntry('OWNER', 'userByEmail', 'user@example.com')
+
+ >>> view = {
+ ... 'projectId': 'my-project',
+ ... 'datasetId': 'my_dataset',
+ ... 'tableId': 'my_table'
+ ... }
+ >>> entry = AccessEntry(None, 'view', view)
+ """
+
+ def __init__(
+ self,
+ role: Optional[str] = None,
+ entity_type: Optional[str] = None,
+ entity_id: Optional[Union[Dict[str, Any], str]] = None,
+ ):
+ self._properties = {}
+ if entity_type is not None:
+ self._properties[entity_type] = entity_id
+ self._properties["role"] = role
+ self._entity_type = entity_type
+
+ @property
+ def role(self) -> Optional[str]:
+ """The role of the entry."""
+ return typing.cast(Optional[str], self._properties.get("role"))
+
+ @role.setter
+ def role(self, value):
+ self._properties["role"] = value
+
+ @property
+ def dataset(self) -> Optional[DatasetReference]:
+ """API resource representation of a dataset reference."""
+ value = _helpers._get_sub_prop(self._properties, ["dataset", "dataset"])
+ return DatasetReference.from_api_repr(value) if value else None
+
+ @dataset.setter
+ def dataset(self, value):
+ if self.role is not None:
+ raise ValueError(
+ "Role must be None for a dataset. Current " "role: %r" % (self.role)
+ )
+
+ if isinstance(value, str):
+ value = DatasetReference.from_string(value).to_api_repr()
+
+ if isinstance(value, (Dataset, DatasetListItem)):
+ value = value.reference.to_api_repr()
+
+ _helpers._set_sub_prop(self._properties, ["dataset", "dataset"], value)
+ _helpers._set_sub_prop(
+ self._properties,
+ ["dataset", "targetTypes"],
+ self._properties.get("targetTypes"),
+ )
+
+ @property
+ def dataset_target_types(self) -> Optional[List[str]]:
+ """Which resources that the dataset in this entry applies to."""
+ return typing.cast(
+ Optional[List[str]],
+ _helpers._get_sub_prop(self._properties, ["dataset", "targetTypes"]),
+ )
+
+ @dataset_target_types.setter
+ def dataset_target_types(self, value):
+ self._properties.setdefault("dataset", {})
+ _helpers._set_sub_prop(self._properties, ["dataset", "targetTypes"], value)
+
+ @property
+ def routine(self) -> Optional[RoutineReference]:
+ """API resource representation of a routine reference."""
+ value = typing.cast(Optional[Dict], self._properties.get("routine"))
+ return RoutineReference.from_api_repr(value) if value else None
+
+ @routine.setter
+ def routine(self, value):
+ if self.role is not None:
+ raise ValueError(
+ "Role must be None for a routine. Current " "role: %r" % (self.role)
+ )
+
+ if isinstance(value, str):
+ value = RoutineReference.from_string(value).to_api_repr()
+
+ if isinstance(value, RoutineReference):
+ value = value.to_api_repr()
+
+ if isinstance(value, Routine):
+ value = value.reference.to_api_repr()
+
+ self._properties["routine"] = value
+
+ @property
+ def view(self) -> Optional[TableReference]:
+ """API resource representation of a view reference."""
+ value = typing.cast(Optional[Dict], self._properties.get("view"))
+ return TableReference.from_api_repr(value) if value else None
+
+ @view.setter
+ def view(self, value):
+ if self.role is not None:
+ raise ValueError(
+ "Role must be None for a view. Current " "role: %r" % (self.role)
+ )
+
+ if isinstance(value, str):
+ value = TableReference.from_string(value).to_api_repr()
+
+ if isinstance(value, TableReference):
+ value = value.to_api_repr()
+
+ if isinstance(value, Table):
+ value = value.reference.to_api_repr()
+
+ self._properties["view"] = value
+
+ @property
+ def group_by_email(self) -> Optional[str]:
+ """An email address of a Google Group to grant access to."""
+ return typing.cast(Optional[str], self._properties.get("groupByEmail"))
+
+ @group_by_email.setter
+ def group_by_email(self, value):
+ self._properties["groupByEmail"] = value
+
+ @property
+ def user_by_email(self) -> Optional[str]:
+ """An email address of a user to grant access to."""
+ return typing.cast(Optional[str], self._properties.get("userByEmail"))
+
+ @user_by_email.setter
+ def user_by_email(self, value):
+ self._properties["userByEmail"] = value
+
+ @property
+ def domain(self) -> Optional[str]:
+ """A domain to grant access to."""
+ return typing.cast(Optional[str], self._properties.get("domain"))
+
+ @domain.setter
+ def domain(self, value):
+ self._properties["domain"] = value
+
+ @property
+ def special_group(self) -> Optional[str]:
+ """A special group to grant access to."""
+ return typing.cast(Optional[str], self._properties.get("specialGroup"))
+
+ @special_group.setter
+ def special_group(self, value):
+ self._properties["specialGroup"] = value
+
+ @property
+ def entity_type(self) -> Optional[str]:
+ """The entity_type of the entry."""
+ return self._entity_type
+
+ @property
+ def entity_id(self) -> Optional[Union[Dict[str, Any], str]]:
+ """The entity_id of the entry."""
+ return self._properties.get(self._entity_type) if self._entity_type else None
+
+ def __eq__(self, other):
+ if not isinstance(other, AccessEntry):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return f""
+
+ def _key(self):
+ """A tuple key that uniquely describes this field.
+ Used to compute this instance's hashcode and evaluate equality.
+ Returns:
+ Tuple: The contents of this :class:`~google.cloud.bigquery.dataset.AccessEntry`.
+ """
+ properties = self._properties.copy()
+ prop_tup = tuple(sorted(properties.items()))
+ return (self.role, self._entity_type, self.entity_id, prop_tup)
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def to_api_repr(self):
+ """Construct the API resource representation of this access entry
+
+ Returns:
+ Dict[str, object]: Access entry represented as an API resource
+ """
+ resource = copy.deepcopy(self._properties)
+ return resource
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "AccessEntry":
+ """Factory: construct an access entry given its API representation
+
+ Args:
+ resource (Dict[str, object]):
+ Access entry resource representation returned from the API
+
+ Returns:
+ google.cloud.bigquery.dataset.AccessEntry:
+ Access entry parsed from ``resource``.
+
+ Raises:
+ ValueError:
+ If the resource has more keys than ``role`` and one additional
+ key.
+ """
+ entry = resource.copy()
+ role = entry.pop("role", None)
+ entity_type, entity_id = entry.popitem()
+ if len(entry) != 0:
+ raise ValueError("Entry has unexpected keys remaining.", entry)
+
+ return cls(role, entity_type, entity_id)
+
+
+class Dataset(object):
+ """Datasets are containers for tables.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource-dataset
+
+ Args:
+ dataset_ref (Union[google.cloud.bigquery.dataset.DatasetReference, str]):
+ A pointer to a dataset. If ``dataset_ref`` is a string, it must
+ include both the project ID and the dataset ID, separated by
+ ``.``.
+ """
+
+ _PROPERTY_TO_API_FIELD = {
+ "access_entries": "access",
+ "created": "creationTime",
+ "default_partition_expiration_ms": "defaultPartitionExpirationMs",
+ "default_table_expiration_ms": "defaultTableExpirationMs",
+ "friendly_name": "friendlyName",
+ "default_encryption_configuration": "defaultEncryptionConfiguration",
+ "is_case_insensitive": "isCaseInsensitive",
+ "storage_billing_model": "storageBillingModel",
+ "max_time_travel_hours": "maxTimeTravelHours",
+ "default_rounding_mode": "defaultRoundingMode",
+ }
+
+ def __init__(self, dataset_ref) -> None:
+ if isinstance(dataset_ref, str):
+ dataset_ref = DatasetReference.from_string(dataset_ref)
+ self._properties = {"datasetReference": dataset_ref.to_api_repr(), "labels": {}}
+
+ @property
+ def max_time_travel_hours(self):
+ """
+ Optional[int]: Defines the time travel window in hours. The value can
+ be from 48 to 168 hours (2 to 7 days), and in multiple of 24 hours
+ (48, 72, 96, 120, 144, 168).
+ The default value is 168 hours if this is not set.
+ """
+ return self._properties.get("maxTimeTravelHours")
+
+ @max_time_travel_hours.setter
+ def max_time_travel_hours(self, hours):
+ if not isinstance(hours, int):
+ raise ValueError(f"max_time_travel_hours must be an integer. Got {hours}")
+ if hours < 2 * 24 or hours > 7 * 24:
+ raise ValueError(
+ "Time Travel Window should be from 48 to 168 hours (2 to 7 days)"
+ )
+ if hours % 24 != 0:
+ raise ValueError("Time Travel Window should be multiple of 24")
+ self._properties["maxTimeTravelHours"] = hours
+
+ @property
+ def default_rounding_mode(self):
+ """Union[str, None]: defaultRoundingMode of the dataset as set by the user
+ (defaults to :data:`None`).
+
+ Set the value to one of ``'ROUND_HALF_AWAY_FROM_ZERO'``, ``'ROUND_HALF_EVEN'``, or
+ ``'ROUNDING_MODE_UNSPECIFIED'``.
+
+ See `default rounding mode
+ `_
+ in REST API docs and `updating the default rounding model
+ `_
+ guide.
+
+ Raises:
+ ValueError: for invalid value types.
+ """
+ return self._properties.get("defaultRoundingMode")
+
+ @default_rounding_mode.setter
+ def default_rounding_mode(self, value):
+ possible_values = [
+ "ROUNDING_MODE_UNSPECIFIED",
+ "ROUND_HALF_AWAY_FROM_ZERO",
+ "ROUND_HALF_EVEN",
+ ]
+ if not isinstance(value, str) and value is not None:
+ raise ValueError("Pass a string, or None")
+ if value is None:
+ self._properties["defaultRoundingMode"] = "ROUNDING_MODE_UNSPECIFIED"
+ if value not in possible_values and value is not None:
+ raise ValueError(
+ f'rounding mode needs to be one of {",".join(possible_values)}'
+ )
+ if value:
+ self._properties["defaultRoundingMode"] = value
+
+ @property
+ def project(self):
+ """str: Project ID of the project bound to the dataset."""
+ return self._properties["datasetReference"]["projectId"]
+
+ @property
+ def path(self):
+ """str: URL path for the dataset based on project and dataset ID."""
+ return "/projects/%s/datasets/%s" % (self.project, self.dataset_id)
+
+ @property
+ def access_entries(self):
+ """List[google.cloud.bigquery.dataset.AccessEntry]: Dataset's access
+ entries.
+
+ ``role`` augments the entity type and must be present **unless** the
+ entity type is ``view`` or ``routine``.
+
+ Raises:
+ TypeError: If 'value' is not a sequence
+ ValueError:
+ If any item in the sequence is not an
+ :class:`~google.cloud.bigquery.dataset.AccessEntry`.
+ """
+ entries = self._properties.get("access", [])
+ return [AccessEntry.from_api_repr(entry) for entry in entries]
+
+ @access_entries.setter
+ def access_entries(self, value):
+ if not all(isinstance(field, AccessEntry) for field in value):
+ raise ValueError("Values must be AccessEntry instances")
+ entries = [entry.to_api_repr() for entry in value]
+ self._properties["access"] = entries
+
+ @property
+ def created(self):
+ """Union[datetime.datetime, None]: Datetime at which the dataset was
+ created (:data:`None` until set from the server).
+ """
+ creation_time = self._properties.get("creationTime")
+ if creation_time is not None:
+ # creation_time will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(creation_time)
+ )
+
+ @property
+ def dataset_id(self):
+ """str: Dataset ID."""
+ return self._properties["datasetReference"]["datasetId"]
+
+ @property
+ def full_dataset_id(self):
+ """Union[str, None]: ID for the dataset resource (:data:`None` until
+ set from the server)
+
+ In the format ``project_id:dataset_id``.
+ """
+ return self._properties.get("id")
+
+ @property
+ def reference(self):
+ """google.cloud.bigquery.dataset.DatasetReference: A reference to this
+ dataset.
+ """
+ return DatasetReference(self.project, self.dataset_id)
+
+ @property
+ def etag(self):
+ """Union[str, None]: ETag for the dataset resource (:data:`None` until
+ set from the server).
+ """
+ return self._properties.get("etag")
+
+ @property
+ def modified(self):
+ """Union[datetime.datetime, None]: Datetime at which the dataset was
+ last modified (:data:`None` until set from the server).
+ """
+ modified_time = self._properties.get("lastModifiedTime")
+ if modified_time is not None:
+ # modified_time will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(modified_time)
+ )
+
+ @property
+ def self_link(self):
+ """Union[str, None]: URL for the dataset resource (:data:`None` until
+ set from the server).
+ """
+ return self._properties.get("selfLink")
+
+ @property
+ def default_partition_expiration_ms(self):
+ """Optional[int]: The default partition expiration for all
+ partitioned tables in the dataset, in milliseconds.
+
+ Once this property is set, all newly-created partitioned tables in
+ the dataset will have an ``time_paritioning.expiration_ms`` property
+ set to this value, and changing the value will only affect new
+ tables, not existing ones. The storage in a partition will have an
+ expiration time of its partition time plus this value.
+
+ Setting this property overrides the use of
+ ``default_table_expiration_ms`` for partitioned tables: only one of
+ ``default_table_expiration_ms`` and
+ ``default_partition_expiration_ms`` will be used for any new
+ partitioned table. If you provide an explicit
+ ``time_partitioning.expiration_ms`` when creating or updating a
+ partitioned table, that value takes precedence over the default
+ partition expiration time indicated by this property.
+ """
+ return _helpers._int_or_none(
+ self._properties.get("defaultPartitionExpirationMs")
+ )
+
+ @default_partition_expiration_ms.setter
+ def default_partition_expiration_ms(self, value):
+ self._properties["defaultPartitionExpirationMs"] = _helpers._str_or_none(value)
+
+ @property
+ def default_table_expiration_ms(self):
+ """Union[int, None]: Default expiration time for tables in the dataset
+ (defaults to :data:`None`).
+
+ Raises:
+ ValueError: For invalid value types.
+ """
+ return _helpers._int_or_none(self._properties.get("defaultTableExpirationMs"))
+
+ @default_table_expiration_ms.setter
+ def default_table_expiration_ms(self, value):
+ if not isinstance(value, int) and value is not None:
+ raise ValueError("Pass an integer, or None")
+ self._properties["defaultTableExpirationMs"] = _helpers._str_or_none(value)
+
+ @property
+ def description(self):
+ """Optional[str]: Description of the dataset as set by the user
+ (defaults to :data:`None`).
+
+ Raises:
+ ValueError: for invalid value types.
+ """
+ return self._properties.get("description")
+
+ @description.setter
+ def description(self, value):
+ if not isinstance(value, str) and value is not None:
+ raise ValueError("Pass a string, or None")
+ self._properties["description"] = value
+
+ @property
+ def friendly_name(self):
+ """Union[str, None]: Title of the dataset as set by the user
+ (defaults to :data:`None`).
+
+ Raises:
+ ValueError: for invalid value types.
+ """
+ return self._properties.get("friendlyName")
+
+ @friendly_name.setter
+ def friendly_name(self, value):
+ if not isinstance(value, str) and value is not None:
+ raise ValueError("Pass a string, or None")
+ self._properties["friendlyName"] = value
+
+ @property
+ def location(self):
+ """Union[str, None]: Location in which the dataset is hosted as set by
+ the user (defaults to :data:`None`).
+
+ Raises:
+ ValueError: for invalid value types.
+ """
+ return self._properties.get("location")
+
+ @location.setter
+ def location(self, value):
+ if not isinstance(value, str) and value is not None:
+ raise ValueError("Pass a string, or None")
+ self._properties["location"] = value
+
+ @property
+ def labels(self):
+ """Dict[str, str]: Labels for the dataset.
+
+ This method always returns a dict. To change a dataset's labels,
+ modify the dict, then call
+ :meth:`google.cloud.bigquery.client.Client.update_dataset`. To delete
+ a label, set its value to :data:`None` before updating.
+
+ Raises:
+ ValueError: for invalid value types.
+ """
+ return self._properties.setdefault("labels", {})
+
+ @labels.setter
+ def labels(self, value):
+ if not isinstance(value, dict):
+ raise ValueError("Pass a dict")
+ self._properties["labels"] = value
+
+ @property
+ def default_encryption_configuration(self):
+ """google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
+ encryption configuration for all tables in the dataset.
+
+ Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
+ if using default encryption.
+
+ See `protecting data with Cloud KMS keys
+ `_
+ in the BigQuery documentation.
+ """
+ prop = self._properties.get("defaultEncryptionConfiguration")
+ if prop:
+ prop = EncryptionConfiguration.from_api_repr(prop)
+ return prop
+
+ @default_encryption_configuration.setter
+ def default_encryption_configuration(self, value):
+ api_repr = value
+ if value:
+ api_repr = value.to_api_repr()
+ self._properties["defaultEncryptionConfiguration"] = api_repr
+
+ @property
+ def is_case_insensitive(self):
+ """Optional[bool]: True if the dataset and its table names are case-insensitive, otherwise False.
+ By default, this is False, which means the dataset and its table names are case-sensitive.
+ This field does not affect routine references.
+
+ Raises:
+ ValueError: for invalid value types.
+ """
+ return self._properties.get("isCaseInsensitive") or False
+
+ @is_case_insensitive.setter
+ def is_case_insensitive(self, value):
+ if not isinstance(value, bool) and value is not None:
+ raise ValueError("Pass a boolean value, or None")
+ if value is None:
+ value = False
+ self._properties["isCaseInsensitive"] = value
+
+ @property
+ def storage_billing_model(self):
+ """Union[str, None]: StorageBillingModel of the dataset as set by the user
+ (defaults to :data:`None`).
+
+ Set the value to one of ``'LOGICAL'``, ``'PHYSICAL'``, or
+ ``'STORAGE_BILLING_MODEL_UNSPECIFIED'``. This change takes 24 hours to
+ take effect and you must wait 14 days before you can change the storage
+ billing model again.
+
+ See `storage billing model
+ `_
+ in REST API docs and `updating the storage billing model
+ `_
+ guide.
+
+ Raises:
+ ValueError: for invalid value types.
+ """
+ return self._properties.get("storageBillingModel")
+
+ @storage_billing_model.setter
+ def storage_billing_model(self, value):
+ if not isinstance(value, str) and value is not None:
+ raise ValueError(
+ "storage_billing_model must be a string (e.g. 'LOGICAL',"
+ " 'PHYSICAL', 'STORAGE_BILLING_MODEL_UNSPECIFIED'), or None."
+ f" Got {repr(value)}."
+ )
+ self._properties["storageBillingModel"] = value
+
+ @classmethod
+ def from_string(cls, full_dataset_id: str) -> "Dataset":
+ """Construct a dataset from fully-qualified dataset ID.
+
+ Args:
+ full_dataset_id (str):
+ A fully-qualified dataset ID in standard SQL format. Must
+ include both the project ID and the dataset ID, separated by
+ ``.``.
+
+ Returns:
+ Dataset: Dataset parsed from ``full_dataset_id``.
+
+ Examples:
+ >>> Dataset.from_string('my-project-id.some_dataset')
+ Dataset(DatasetReference('my-project-id', 'some_dataset'))
+
+ Raises:
+ ValueError:
+ If ``full_dataset_id`` is not a fully-qualified dataset ID in
+ standard SQL format.
+ """
+ return cls(DatasetReference.from_string(full_dataset_id))
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "Dataset":
+ """Factory: construct a dataset given its API representation
+
+ Args:
+ resource (Dict[str: object]):
+ Dataset resource representation returned from the API
+
+ Returns:
+ google.cloud.bigquery.dataset.Dataset:
+ Dataset parsed from ``resource``.
+ """
+ if (
+ "datasetReference" not in resource
+ or "datasetId" not in resource["datasetReference"]
+ ):
+ raise KeyError(
+ "Resource lacks required identity information:"
+ '["datasetReference"]["datasetId"]'
+ )
+ project_id = resource["datasetReference"]["projectId"]
+ dataset_id = resource["datasetReference"]["datasetId"]
+ dataset = cls(DatasetReference(project_id, dataset_id))
+ dataset._properties = copy.deepcopy(resource)
+ return dataset
+
+ def to_api_repr(self) -> dict:
+ """Construct the API resource representation of this dataset
+
+ Returns:
+ Dict[str, object]: The dataset represented as an API resource
+ """
+ return copy.deepcopy(self._properties)
+
+ def _build_resource(self, filter_fields):
+ """Generate a resource for ``update``."""
+ return _helpers._build_resource_from_properties(self, filter_fields)
+
+ table = _get_table_reference
+
+ model = _get_model_reference
+
+ routine = _get_routine_reference
+
+ def __repr__(self):
+ return "Dataset({})".format(repr(self.reference))
+
+
+class DatasetListItem(object):
+ """A read-only dataset resource from a list operation.
+
+ For performance reasons, the BigQuery API only includes some of the
+ dataset properties when listing datasets. Notably,
+ :attr:`~google.cloud.bigquery.dataset.Dataset.access_entries` is missing.
+
+ For a full list of the properties that the BigQuery API returns, see the
+ `REST documentation for datasets.list
+ `_.
+
+
+ Args:
+ resource (Dict[str, str]):
+ A dataset-like resource object from a dataset list response. A
+ ``datasetReference`` property is required.
+
+ Raises:
+ ValueError:
+ If ``datasetReference`` or one of its required members is missing
+ from ``resource``.
+ """
+
+ def __init__(self, resource):
+ if "datasetReference" not in resource:
+ raise ValueError("resource must contain a datasetReference value")
+ if "projectId" not in resource["datasetReference"]:
+ raise ValueError(
+ "resource['datasetReference'] must contain a projectId value"
+ )
+ if "datasetId" not in resource["datasetReference"]:
+ raise ValueError(
+ "resource['datasetReference'] must contain a datasetId value"
+ )
+ self._properties = resource
+
+ @property
+ def project(self):
+ """str: Project bound to the dataset."""
+ return self._properties["datasetReference"]["projectId"]
+
+ @property
+ def dataset_id(self):
+ """str: Dataset ID."""
+ return self._properties["datasetReference"]["datasetId"]
+
+ @property
+ def full_dataset_id(self):
+ """Union[str, None]: ID for the dataset resource (:data:`None` until
+ set from the server)
+
+ In the format ``project_id:dataset_id``.
+ """
+ return self._properties.get("id")
+
+ @property
+ def friendly_name(self):
+ """Union[str, None]: Title of the dataset as set by the user
+ (defaults to :data:`None`).
+ """
+ return self._properties.get("friendlyName")
+
+ @property
+ def labels(self):
+ """Dict[str, str]: Labels for the dataset."""
+ return self._properties.setdefault("labels", {})
+
+ @property
+ def reference(self):
+ """google.cloud.bigquery.dataset.DatasetReference: A reference to this
+ dataset.
+ """
+ return DatasetReference(self.project, self.dataset_id)
+
+ table = _get_table_reference
+
+ model = _get_model_reference
+
+ routine = _get_routine_reference
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/__init__.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1a723949b104e355b0ba2cd334beef9a91793a0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/__init__.py
@@ -0,0 +1,87 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google BigQuery implementation of the Database API Specification v2.0.
+
+This module implements the `Python Database API Specification v2.0 (DB-API)`_
+for Google BigQuery.
+
+.. _Python Database API Specification v2.0 (DB-API):
+ https://www.python.org/dev/peps/pep-0249/
+"""
+
+from google.cloud.bigquery.dbapi.connection import connect
+from google.cloud.bigquery.dbapi.connection import Connection
+from google.cloud.bigquery.dbapi.cursor import Cursor
+from google.cloud.bigquery.dbapi.exceptions import Warning
+from google.cloud.bigquery.dbapi.exceptions import Error
+from google.cloud.bigquery.dbapi.exceptions import InterfaceError
+from google.cloud.bigquery.dbapi.exceptions import DatabaseError
+from google.cloud.bigquery.dbapi.exceptions import DataError
+from google.cloud.bigquery.dbapi.exceptions import OperationalError
+from google.cloud.bigquery.dbapi.exceptions import IntegrityError
+from google.cloud.bigquery.dbapi.exceptions import InternalError
+from google.cloud.bigquery.dbapi.exceptions import ProgrammingError
+from google.cloud.bigquery.dbapi.exceptions import NotSupportedError
+from google.cloud.bigquery.dbapi.types import Binary
+from google.cloud.bigquery.dbapi.types import Date
+from google.cloud.bigquery.dbapi.types import DateFromTicks
+from google.cloud.bigquery.dbapi.types import Time
+from google.cloud.bigquery.dbapi.types import TimeFromTicks
+from google.cloud.bigquery.dbapi.types import Timestamp
+from google.cloud.bigquery.dbapi.types import TimestampFromTicks
+from google.cloud.bigquery.dbapi.types import BINARY
+from google.cloud.bigquery.dbapi.types import DATETIME
+from google.cloud.bigquery.dbapi.types import NUMBER
+from google.cloud.bigquery.dbapi.types import ROWID
+from google.cloud.bigquery.dbapi.types import STRING
+
+
+apilevel = "2.0"
+
+# Threads may share the module and connections, but not cursors.
+threadsafety = 2
+
+paramstyle = "pyformat"
+
+__all__ = [
+ "apilevel",
+ "threadsafety",
+ "paramstyle",
+ "connect",
+ "Connection",
+ "Cursor",
+ "Warning",
+ "Error",
+ "InterfaceError",
+ "DatabaseError",
+ "DataError",
+ "OperationalError",
+ "IntegrityError",
+ "InternalError",
+ "ProgrammingError",
+ "NotSupportedError",
+ "Binary",
+ "Date",
+ "DateFromTicks",
+ "Time",
+ "TimeFromTicks",
+ "Timestamp",
+ "TimestampFromTicks",
+ "BINARY",
+ "DATETIME",
+ "NUMBER",
+ "ROWID",
+ "STRING",
+]
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/_helpers.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4ab05ce82a397fa8f004a44b94378e0e624445d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/_helpers.py
@@ -0,0 +1,522 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from collections import abc as collections_abc
+import datetime
+import decimal
+import functools
+import numbers
+import re
+import typing
+
+from google.cloud import bigquery
+from google.cloud.bigquery import table, query
+from google.cloud.bigquery.dbapi import exceptions
+
+
+_NUMERIC_SERVER_MIN = decimal.Decimal("-9.9999999999999999999999999999999999999E+28")
+_NUMERIC_SERVER_MAX = decimal.Decimal("9.9999999999999999999999999999999999999E+28")
+
+type_parameters_re = re.compile(
+ r"""
+ \(
+ \s*[0-9]+\s*
+ (,
+ \s*[0-9]+\s*
+ )*
+ \)
+ """,
+ re.VERBOSE,
+)
+
+
+def _parameter_type(name, value, query_parameter_type=None, value_doc=""):
+ if query_parameter_type:
+ # Strip type parameters
+ query_parameter_type = type_parameters_re.sub("", query_parameter_type)
+ try:
+ parameter_type = getattr(
+ query.SqlParameterScalarTypes, query_parameter_type.upper()
+ )._type
+ except AttributeError:
+ raise exceptions.ProgrammingError(
+ f"The given parameter type, {query_parameter_type},"
+ f" for {name} is not a valid BigQuery scalar type."
+ )
+ else:
+ parameter_type = bigquery_scalar_type(value)
+ if parameter_type is None:
+ raise exceptions.ProgrammingError(
+ f"Encountered parameter {name} with "
+ f"{value_doc} value {value} of unexpected type."
+ )
+ return parameter_type
+
+
+def scalar_to_query_parameter(value, name=None, query_parameter_type=None):
+ """Convert a scalar value into a query parameter.
+
+ Args:
+ value (Any):
+ A scalar value to convert into a query parameter.
+
+ name (str):
+ (Optional) Name of the query parameter.
+ query_parameter_type (Optional[str]): Given type for the parameter.
+
+ Returns:
+ google.cloud.bigquery.ScalarQueryParameter:
+ A query parameter corresponding with the type and value of the plain
+ Python object.
+
+ Raises:
+ google.cloud.bigquery.dbapi.exceptions.ProgrammingError:
+ if the type cannot be determined.
+ """
+ return bigquery.ScalarQueryParameter(
+ name, _parameter_type(name, value, query_parameter_type), value
+ )
+
+
+def array_to_query_parameter(value, name=None, query_parameter_type=None):
+ """Convert an array-like value into a query parameter.
+
+ Args:
+ value (Sequence[Any]): The elements of the array (should not be a
+ string-like Sequence).
+ name (Optional[str]): Name of the query parameter.
+ query_parameter_type (Optional[str]): Given type for the parameter.
+
+ Returns:
+ A query parameter corresponding with the type and value of the plain
+ Python object.
+
+ Raises:
+ google.cloud.bigquery.dbapi.exceptions.ProgrammingError:
+ if the type of array elements cannot be determined.
+ """
+ if not array_like(value):
+ raise exceptions.ProgrammingError(
+ "The value of parameter {} must be a sequence that is "
+ "not string-like.".format(name)
+ )
+
+ if query_parameter_type or value:
+ array_type = _parameter_type(
+ name,
+ value[0] if value else None,
+ query_parameter_type,
+ value_doc="array element ",
+ )
+ else:
+ raise exceptions.ProgrammingError(
+ "Encountered an empty array-like value of parameter {}, cannot "
+ "determine array elements type.".format(name)
+ )
+
+ return bigquery.ArrayQueryParameter(name, array_type, value)
+
+
+def _parse_struct_fields(
+ fields,
+ base,
+ parse_struct_field=re.compile(
+ r"""
+ (?:(\w+)\s+) # field name
+ ([A-Z0-9<> ,()]+) # Field type
+ $""",
+ re.VERBOSE | re.IGNORECASE,
+ ).match,
+):
+ # Split a string of struct fields. They're defined by commas, but
+ # we have to avoid splitting on commas internal to fields. For
+ # example:
+ # name string, children array>
+ #
+ # only has 2 top-level fields.
+ fields = fields.split(",")
+ fields = list(reversed(fields)) # in the off chance that there are very many
+ while fields:
+ field = fields.pop()
+ while fields and field.count("<") != field.count(">"):
+ field += "," + fields.pop()
+
+ m = parse_struct_field(field.strip())
+ if not m:
+ raise exceptions.ProgrammingError(
+ f"Invalid struct field, {field}, in {base}"
+ )
+ yield m.group(1, 2)
+
+
+SCALAR, ARRAY, STRUCT = ("s", "a", "r")
+
+
+def _parse_type(
+ type_,
+ name,
+ base,
+ complex_query_parameter_parse=re.compile(
+ r"""
+ \s*
+ (ARRAY|STRUCT|RECORD) # Type
+ \s*
+ <([A-Z0-9_<> ,()]+)> # Subtype(s)
+ \s*$
+ """,
+ re.IGNORECASE | re.VERBOSE,
+ ).match,
+):
+ if "<" not in type_:
+ # Scalar
+
+ # Strip type parameters
+ type_ = type_parameters_re.sub("", type_).strip()
+ try:
+ type_ = getattr(query.SqlParameterScalarTypes, type_.upper())
+ except AttributeError:
+ raise exceptions.ProgrammingError(
+ f"The given parameter type, {type_},"
+ f"{' for ' + name if name else ''}"
+ f" is not a valid BigQuery scalar type, in {base}."
+ )
+ if name:
+ type_ = type_.with_name(name)
+ return SCALAR, type_
+
+ m = complex_query_parameter_parse(type_)
+ if not m:
+ raise exceptions.ProgrammingError(f"Invalid parameter type, {type_}")
+ tname, sub = m.group(1, 2)
+ if tname.upper() == "ARRAY":
+ sub_type = complex_query_parameter_type(None, sub, base)
+ if isinstance(sub_type, query.ArrayQueryParameterType):
+ raise exceptions.ProgrammingError(f"Array can't contain an array in {base}")
+ sub_type._complex__src = sub
+ return ARRAY, sub_type
+ else:
+ return STRUCT, _parse_struct_fields(sub, base)
+
+
+def complex_query_parameter_type(name: typing.Optional[str], type_: str, base: str):
+ """Construct a parameter type (`StructQueryParameterType`) for a complex type
+
+ or a non-complex type that's part of a complex type.
+
+ Examples:
+
+ array>
+
+ struct>>
+
+ This is used for computing array types.
+ """
+
+ type_type, sub_type = _parse_type(type_, name, base)
+ if type_type == SCALAR:
+ result_type = sub_type
+ elif type_type == ARRAY:
+ result_type = query.ArrayQueryParameterType(sub_type, name=name)
+ elif type_type == STRUCT:
+ fields = [
+ complex_query_parameter_type(field_name, field_type, base)
+ for field_name, field_type in sub_type
+ ]
+ result_type = query.StructQueryParameterType(*fields, name=name)
+ else: # pragma: NO COVER
+ raise AssertionError("Bad type_type", type_type) # Can't happen :)
+
+ return result_type
+
+
+def complex_query_parameter(
+ name: typing.Optional[str], value, type_: str, base: typing.Optional[str] = None
+):
+ """
+ Construct a query parameter for a complex type (array or struct record)
+
+ or for a subtype, which may not be complex
+
+ Examples:
+
+ array>
+
+ struct>>
+
+ """
+ param: typing.Union[
+ query.ScalarQueryParameter,
+ query.ArrayQueryParameter,
+ query.StructQueryParameter,
+ ]
+
+ base = base or type_
+
+ type_type, sub_type = _parse_type(type_, name, base)
+
+ if type_type == SCALAR:
+ param = query.ScalarQueryParameter(name, sub_type._type, value)
+ elif type_type == ARRAY:
+ if not array_like(value):
+ raise exceptions.ProgrammingError(
+ f"Array type with non-array-like value"
+ f" with type {type(value).__name__}"
+ )
+ param = query.ArrayQueryParameter(
+ name,
+ sub_type,
+ (
+ value
+ if isinstance(sub_type, query.ScalarQueryParameterType)
+ else [
+ complex_query_parameter(None, v, sub_type._complex__src, base)
+ for v in value
+ ]
+ ),
+ )
+ elif type_type == STRUCT:
+ if not isinstance(value, collections_abc.Mapping):
+ raise exceptions.ProgrammingError(f"Non-mapping value for type {type_}")
+ value_keys = set(value)
+ fields = []
+ for field_name, field_type in sub_type:
+ if field_name not in value:
+ raise exceptions.ProgrammingError(
+ f"No field value for {field_name} in {type_}"
+ )
+ value_keys.remove(field_name)
+ fields.append(
+ complex_query_parameter(field_name, value[field_name], field_type, base)
+ )
+ if value_keys:
+ raise exceptions.ProgrammingError(f"Extra data keys for {type_}")
+
+ param = query.StructQueryParameter(name, *fields)
+ else: # pragma: NO COVER
+ raise AssertionError("Bad type_type", type_type) # Can't happen :)
+
+ return param
+
+
+def _dispatch_parameter(type_, value, name=None):
+ if type_ is not None and "<" in type_:
+ param = complex_query_parameter(name, value, type_)
+ elif isinstance(value, collections_abc.Mapping):
+ raise NotImplementedError(
+ f"STRUCT-like parameter values are not supported"
+ f"{' (parameter ' + name + ')' if name else ''},"
+ f" unless an explicit type is give in the parameter placeholder"
+ f" (e.g. '%({name if name else ''}:struct<...>)s')."
+ )
+ elif array_like(value):
+ param = array_to_query_parameter(value, name, type_)
+ else:
+ param = scalar_to_query_parameter(value, name, type_)
+
+ return param
+
+
+def to_query_parameters_list(parameters, parameter_types):
+ """Converts a sequence of parameter values into query parameters.
+
+ Args:
+ parameters (Sequence[Any]): Sequence of query parameter values.
+ parameter_types:
+ A list of parameter types, one for each parameter.
+ Unknown types are provided as None.
+
+ Returns:
+ List[google.cloud.bigquery.query._AbstractQueryParameter]:
+ A list of query parameters.
+ """
+ return [
+ _dispatch_parameter(type_, value)
+ for value, type_ in zip(parameters, parameter_types)
+ ]
+
+
+def to_query_parameters_dict(parameters, query_parameter_types):
+ """Converts a dictionary of parameter values into query parameters.
+
+ Args:
+ parameters (Mapping[str, Any]): Dictionary of query parameter values.
+ parameter_types:
+ A dictionary of parameter types. It needn't have a key for each
+ parameter.
+
+ Returns:
+ List[google.cloud.bigquery.query._AbstractQueryParameter]:
+ A list of named query parameters.
+ """
+ return [
+ _dispatch_parameter(query_parameter_types.get(name), value, name)
+ for name, value in parameters.items()
+ ]
+
+
+def to_query_parameters(parameters, parameter_types):
+ """Converts DB-API parameter values into query parameters.
+
+ Args:
+ parameters (Union[Mapping[str, Any], Sequence[Any]]):
+ A dictionary or sequence of query parameter values.
+ parameter_types (Union[Mapping[str, str], Sequence[str]]):
+ A dictionary or list of parameter types.
+
+ If parameters is a mapping, then this must be a dictionary
+ of parameter types. It needn't have a key for each
+ parameter.
+
+ If parameters is a sequence, then this must be a list of
+ parameter types, one for each paramater. Unknown types
+ are provided as None.
+
+ Returns:
+ List[google.cloud.bigquery.query._AbstractQueryParameter]:
+ A list of query parameters.
+ """
+ if parameters is None:
+ return []
+
+ if isinstance(parameters, collections_abc.Mapping):
+ return to_query_parameters_dict(parameters, parameter_types)
+ else:
+ return to_query_parameters_list(parameters, parameter_types)
+
+
+def bigquery_scalar_type(value):
+ """Return a BigQuery name of the scalar type that matches the given value.
+
+ If the scalar type name could not be determined (e.g. for non-scalar
+ values), ``None`` is returned.
+
+ Args:
+ value (Any)
+
+ Returns:
+ Optional[str]: The BigQuery scalar type name.
+ """
+ if isinstance(value, bool):
+ return "BOOL"
+ elif isinstance(value, numbers.Integral):
+ return "INT64"
+ elif isinstance(value, numbers.Real):
+ return "FLOAT64"
+ elif isinstance(value, decimal.Decimal):
+ vtuple = value.as_tuple()
+ # NUMERIC values have precision of 38 (number of digits) and scale of 9 (number
+ # of fractional digits), and their max absolute value must be strictly smaller
+ # than 1.0E+29.
+ # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#decimal_types
+ if (
+ len(vtuple.digits) <= 38 # max precision: 38
+ and vtuple.exponent >= -9 # max scale: 9
+ and _NUMERIC_SERVER_MIN <= value <= _NUMERIC_SERVER_MAX
+ ):
+ return "NUMERIC"
+ else:
+ return "BIGNUMERIC"
+
+ elif isinstance(value, str):
+ return "STRING"
+ elif isinstance(value, bytes):
+ return "BYTES"
+ elif isinstance(value, datetime.datetime):
+ return "DATETIME" if value.tzinfo is None else "TIMESTAMP"
+ elif isinstance(value, datetime.date):
+ return "DATE"
+ elif isinstance(value, datetime.time):
+ return "TIME"
+
+ return None
+
+
+def array_like(value):
+ """Determine if the given value is array-like.
+
+ Examples of array-like values (as interpreted by this function) are
+ sequences such as ``list`` and ``tuple``, but not strings and other
+ iterables such as sets.
+
+ Args:
+ value (Any)
+
+ Returns:
+ bool: ``True`` if the value is considered array-like, ``False`` otherwise.
+ """
+ return isinstance(value, collections_abc.Sequence) and not isinstance(
+ value, (str, bytes, bytearray)
+ )
+
+
+def to_bq_table_rows(rows_iterable):
+ """Convert table rows to BigQuery table Row instances.
+
+ Args:
+ rows_iterable (Iterable[Mapping]):
+ An iterable of row data items to convert to ``Row`` instances.
+
+ Returns:
+ Iterable[google.cloud.bigquery.table.Row]
+ """
+
+ def to_table_row(row):
+ # NOTE: We fetch ARROW values, thus we need to convert them to Python
+ # objects with as_py().
+ values = tuple(value.as_py() for value in row.values())
+ keys_to_index = {key: i for i, key in enumerate(row.keys())}
+ return table.Row(values, keys_to_index)
+
+ return (to_table_row(row_data) for row_data in rows_iterable)
+
+
+def raise_on_closed(
+ exc_msg, exc_class=exceptions.ProgrammingError, closed_attr_name="_closed"
+):
+ """Make public instance methods raise an error if the instance is closed."""
+
+ def _raise_on_closed(method):
+ """Make a non-static method raise an error if its containing instance is closed."""
+
+ def with_closed_check(self, *args, **kwargs):
+ if getattr(self, closed_attr_name):
+ raise exc_class(exc_msg)
+ return method(self, *args, **kwargs)
+
+ functools.update_wrapper(with_closed_check, method)
+ return with_closed_check
+
+ def decorate_public_methods(klass):
+ """Apply ``_raise_on_closed()`` decorator to public instance methods."""
+ for name in dir(klass):
+ if name.startswith("_") and name != "__iter__":
+ continue
+
+ member = getattr(klass, name)
+ if not callable(member):
+ continue
+
+ # We need to check for class/static methods directly in the instance
+ # __dict__, not via the retrieved attribute (`member`), as the
+ # latter is already a callable *produced* by one of these descriptors.
+ if isinstance(klass.__dict__[name], (staticmethod, classmethod)):
+ continue
+
+ member = _raise_on_closed(member)
+ setattr(klass, name, member)
+
+ return klass
+
+ return decorate_public_methods
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/connection.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1a69b8fec90074afb01e459c19c3ce0a7ac9fdb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/connection.py
@@ -0,0 +1,128 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Connection for the Google BigQuery DB-API."""
+
+import weakref
+
+from google.cloud import bigquery
+from google.cloud.bigquery.dbapi import cursor
+from google.cloud.bigquery.dbapi import _helpers
+
+
+@_helpers.raise_on_closed("Operating on a closed connection.")
+class Connection(object):
+ """DB-API Connection to Google BigQuery.
+
+ Args:
+ client (Optional[google.cloud.bigquery.Client]):
+ A REST API client used to connect to BigQuery. If not passed, a
+ client is created using default options inferred from the environment.
+ bqstorage_client(\
+ Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \
+ ):
+ A client that uses the faster BigQuery Storage API to fetch rows from
+ BigQuery. If not passed, it is created using the same credentials
+ as ``client`` (provided that BigQuery Storage dependencies are installed).
+ prefer_bqstorage_client (Optional[bool]):
+ Prefer the BigQuery Storage client over the REST client. If Storage
+ client isn't available, fall back to the REST client. Defaults to
+ ``True``.
+ """
+
+ def __init__(
+ self,
+ client=None,
+ bqstorage_client=None,
+ prefer_bqstorage_client=True,
+ ):
+ if client is None:
+ client = bigquery.Client()
+ self._owns_client = True
+ else:
+ self._owns_client = False
+
+ # A warning is already raised by the BQ Storage client factory factory if
+ # instantiation fails, or if the given BQ Storage client instance is outdated.
+ if not prefer_bqstorage_client:
+ bqstorage_client = None
+ self._owns_bqstorage_client = False
+ elif bqstorage_client is None:
+ bqstorage_client = client._ensure_bqstorage_client()
+ self._owns_bqstorage_client = bqstorage_client is not None
+ else:
+ self._owns_bqstorage_client = False
+ bqstorage_client = client._ensure_bqstorage_client(bqstorage_client)
+
+ self._client = client
+ self._bqstorage_client = bqstorage_client
+
+ self._closed = False
+ self._cursors_created = weakref.WeakSet()
+
+ def close(self):
+ """Close the connection and any cursors created from it.
+
+ Any BigQuery clients explicitly passed to the constructor are *not*
+ closed, only those created by the connection instance itself.
+ """
+ self._closed = True
+
+ if self._owns_client:
+ self._client.close()
+
+ if self._owns_bqstorage_client:
+ # There is no close() on the BQ Storage client itself.
+ self._bqstorage_client._transport.grpc_channel.close()
+
+ for cursor_ in self._cursors_created:
+ if not cursor_._closed:
+ cursor_.close()
+
+ def commit(self):
+ """No-op, but for consistency raise an error if connection is closed."""
+
+ def cursor(self):
+ """Return a new cursor object.
+
+ Returns:
+ google.cloud.bigquery.dbapi.Cursor: A DB-API cursor that uses this connection.
+ """
+ new_cursor = cursor.Cursor(self)
+ self._cursors_created.add(new_cursor)
+ return new_cursor
+
+
+def connect(client=None, bqstorage_client=None, prefer_bqstorage_client=True):
+ """Construct a DB-API connection to Google BigQuery.
+
+ Args:
+ client (Optional[google.cloud.bigquery.Client]):
+ A REST API client used to connect to BigQuery. If not passed, a
+ client is created using default options inferred from the environment.
+ bqstorage_client(\
+ Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \
+ ):
+ A client that uses the faster BigQuery Storage API to fetch rows from
+ BigQuery. If not passed, it is created using the same credentials
+ as ``client`` (provided that BigQuery Storage dependencies are installed).
+ prefer_bqstorage_client (Optional[bool]):
+ Prefer the BigQuery Storage client over the REST client. If Storage
+ client isn't available, fall back to the REST client. Defaults to
+ ``True``.
+
+ Returns:
+ google.cloud.bigquery.dbapi.Connection: A new DB-API connection to BigQuery.
+ """
+ return Connection(client, bqstorage_client, prefer_bqstorage_client)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/cursor.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/cursor.py
new file mode 100644
index 0000000000000000000000000000000000000000..014a6825ea6b17f0ed85d42c9559aa969c10e57d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/cursor.py
@@ -0,0 +1,586 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cursor for the Google BigQuery DB-API."""
+
+from __future__ import annotations
+
+import collections
+from collections import abc as collections_abc
+import re
+from typing import Optional
+
+try:
+ from google.cloud.bigquery_storage import ArrowSerializationOptions
+except ImportError:
+ _ARROW_COMPRESSION_SUPPORT = False
+else:
+ # Having BQ Storage available implies that pyarrow >=1.0.0 is available, too.
+ _ARROW_COMPRESSION_SUPPORT = True
+
+from google.cloud.bigquery import job
+from google.cloud.bigquery.dbapi import _helpers
+from google.cloud.bigquery.dbapi import exceptions
+import google.cloud.exceptions # type: ignore
+
+
+# Per PEP 249: A 7-item sequence containing information describing one result
+# column. The first two items (name and type_code) are mandatory, the other
+# five are optional and are set to None if no meaningful values can be
+# provided.
+Column = collections.namedtuple(
+ "Column",
+ [
+ "name",
+ "type_code",
+ "display_size",
+ "internal_size",
+ "precision",
+ "scale",
+ "null_ok",
+ ],
+)
+
+
+@_helpers.raise_on_closed("Operating on a closed cursor.")
+class Cursor(object):
+ """DB-API Cursor to Google BigQuery.
+
+ Args:
+ connection (google.cloud.bigquery.dbapi.Connection):
+ A DB-API connection to Google BigQuery.
+ """
+
+ def __init__(self, connection):
+ self.connection = connection
+ self.description = None
+ # Per PEP 249: The attribute is -1 in case no .execute*() has been
+ # performed on the cursor or the rowcount of the last operation
+ # cannot be determined by the interface.
+ self.rowcount = -1
+ # Per PEP 249: The arraysize attribute defaults to 1, meaning to fetch
+ # a single row at a time. However, we deviate from that, and set the
+ # default to None, allowing the backend to automatically determine the
+ # most appropriate size.
+ self.arraysize = None
+ self._query_data = None
+ self._query_rows = None
+ self._closed = False
+
+ @property
+ def query_job(self) -> Optional[job.QueryJob]:
+ """google.cloud.bigquery.job.query.QueryJob | None: The query job
+ created by the last ``execute*()`` call, if a query job was created.
+
+ .. note::
+ If the last ``execute*()`` call was ``executemany()``, this is the
+ last job created by ``executemany()``."""
+ rows = self._query_rows
+
+ if rows is None:
+ return None
+
+ job_id = rows.job_id
+ project = rows.project
+ location = rows.location
+ client = self.connection._client
+
+ if job_id is None:
+ return None
+
+ return client.get_job(job_id, location=location, project=project)
+
+ def close(self):
+ """Mark the cursor as closed, preventing its further use."""
+ self._closed = True
+
+ def _set_description(self, schema):
+ """Set description from schema.
+
+ Args:
+ schema (Sequence[google.cloud.bigquery.schema.SchemaField]):
+ A description of fields in the schema.
+ """
+ if schema is None:
+ self.description = None
+ return
+
+ self.description = tuple(
+ Column(
+ name=field.name,
+ type_code=field.field_type,
+ display_size=None,
+ internal_size=None,
+ precision=None,
+ scale=None,
+ null_ok=field.is_nullable,
+ )
+ for field in schema
+ )
+
+ def _set_rowcount(self, rows):
+ """Set the rowcount from a RowIterator.
+
+ Normally, this sets rowcount to the number of rows returned by the
+ query, but if it was a DML statement, it sets rowcount to the number
+ of modified rows.
+
+ Args:
+ query_results (google.cloud.bigquery.query._QueryResults):
+ Results of a query.
+ """
+ total_rows = 0
+ num_dml_affected_rows = rows.num_dml_affected_rows
+
+ if rows.total_rows is not None and rows.total_rows > 0:
+ total_rows = rows.total_rows
+ if num_dml_affected_rows is not None and num_dml_affected_rows > 0:
+ total_rows = num_dml_affected_rows
+ self.rowcount = total_rows
+
+ def execute(self, operation, parameters=None, job_id=None, job_config=None):
+ """Prepare and execute a database operation.
+
+ .. note::
+ When setting query parameters, values which are "text"
+ (``unicode`` in Python2, ``str`` in Python3) will use
+ the 'STRING' BigQuery type. Values which are "bytes" (``str`` in
+ Python2, ``bytes`` in Python3), will use using the 'BYTES' type.
+
+ A `~datetime.datetime` parameter without timezone information uses
+ the 'DATETIME' BigQuery type (example: Global Pi Day Celebration
+ March 14, 2017 at 1:59pm). A `~datetime.datetime` parameter with
+ timezone information uses the 'TIMESTAMP' BigQuery type (example:
+ a wedding on April 29, 2011 at 11am, British Summer Time).
+
+ For more information about BigQuery data types, see:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
+
+ ``STRUCT``/``RECORD`` and ``REPEATED`` query parameters are not
+ yet supported. See:
+ https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3524
+
+ Args:
+ operation (str): A Google BigQuery query string.
+
+ parameters (Union[Mapping[str, Any], Sequence[Any]]):
+ (Optional) dictionary or sequence of parameter values.
+
+ job_id (str | None):
+ (Optional and discouraged) The job ID to use when creating
+ the query job. For best performance and reliability, manually
+ setting a job ID is discouraged.
+
+ job_config (google.cloud.bigquery.job.QueryJobConfig):
+ (Optional) Extra configuration options for the query job.
+ """
+ formatted_operation, parameter_types = _format_operation(operation, parameters)
+ self._execute(
+ formatted_operation, parameters, job_id, job_config, parameter_types
+ )
+
+ def _execute(
+ self, formatted_operation, parameters, job_id, job_config, parameter_types
+ ):
+ self._query_data = None
+ self._query_results = None
+ client = self.connection._client
+
+ # The DB-API uses the pyformat formatting, since the way BigQuery does
+ # query parameters was not one of the standard options. Convert both
+ # the query and the parameters to the format expected by the client
+ # libraries.
+ query_parameters = _helpers.to_query_parameters(parameters, parameter_types)
+
+ config = job_config or job.QueryJobConfig()
+ config.query_parameters = query_parameters
+
+ # Start the query and wait for the query to finish.
+ try:
+ if job_id is not None:
+ rows = client.query(
+ formatted_operation,
+ job_config=job_config,
+ job_id=job_id,
+ ).result(
+ page_size=self.arraysize,
+ )
+ else:
+ rows = client.query_and_wait(
+ formatted_operation,
+ job_config=config,
+ page_size=self.arraysize,
+ )
+ except google.cloud.exceptions.GoogleCloudError as exc:
+ raise exceptions.DatabaseError(exc)
+
+ self._query_rows = rows
+ self._set_description(rows.schema)
+
+ if config.dry_run:
+ self.rowcount = 0
+ else:
+ self._set_rowcount(rows)
+
+ def executemany(self, operation, seq_of_parameters):
+ """Prepare and execute a database operation multiple times.
+
+ Args:
+ operation (str): A Google BigQuery query string.
+
+ seq_of_parameters (Union[Sequence[Mapping[str, Any], Sequence[Any]]]):
+ Sequence of many sets of parameter values.
+ """
+ if seq_of_parameters:
+ rowcount = 0
+ # There's no reason to format the line more than once, as
+ # the operation only barely depends on the parameters. So
+ # we just use the first set of parameters. If there are
+ # different numbers or types of parameters, we'll error
+ # anyway.
+ formatted_operation, parameter_types = _format_operation(
+ operation, seq_of_parameters[0]
+ )
+ for parameters in seq_of_parameters:
+ self._execute(
+ formatted_operation, parameters, None, None, parameter_types
+ )
+ rowcount += self.rowcount
+
+ self.rowcount = rowcount
+
+ def _try_fetch(self, size=None):
+ """Try to start fetching data, if not yet started.
+
+ Mutates self to indicate that iteration has started.
+ """
+ if self._query_data is not None:
+ # Already started fetching the data.
+ return
+
+ rows = self._query_rows
+ if rows is None:
+ raise exceptions.InterfaceError(
+ "No query results: execute() must be called before fetch."
+ )
+
+ bqstorage_client = self.connection._bqstorage_client
+ if rows._should_use_bqstorage(
+ bqstorage_client,
+ create_bqstorage_client=False,
+ ):
+ rows_iterable = self._bqstorage_fetch(bqstorage_client)
+ self._query_data = _helpers.to_bq_table_rows(rows_iterable)
+ return
+
+ self._query_data = iter(rows)
+
+ def _bqstorage_fetch(self, bqstorage_client):
+ """Start fetching data with the BigQuery Storage API.
+
+ The method assumes that the data about the relevant query job already
+ exists internally.
+
+ Args:
+ bqstorage_client(\
+ google.cloud.bigquery_storage_v1.BigQueryReadClient \
+ ):
+ A client tha know how to talk to the BigQuery Storage API.
+
+ Returns:
+ Iterable[Mapping]:
+ A sequence of rows, represented as dictionaries.
+ """
+ # Hitting this code path with a BQ Storage client instance implies that
+ # bigquery_storage can indeed be imported here without errors.
+ from google.cloud import bigquery_storage
+
+ table_reference = self._query_rows._table
+
+ requested_session = bigquery_storage.types.ReadSession(
+ table=table_reference.to_bqstorage(),
+ data_format=bigquery_storage.types.DataFormat.ARROW,
+ )
+
+ if _ARROW_COMPRESSION_SUPPORT:
+ requested_session.read_options.arrow_serialization_options.buffer_compression = (
+ ArrowSerializationOptions.CompressionCodec.LZ4_FRAME
+ )
+
+ read_session = bqstorage_client.create_read_session(
+ parent="projects/{}".format(table_reference.project),
+ read_session=requested_session,
+ # a single stream only, as DB API is not well-suited for multithreading
+ max_stream_count=1,
+ )
+
+ if not read_session.streams:
+ return iter([]) # empty table, nothing to read
+
+ stream_name = read_session.streams[0].name
+ read_rows_stream = bqstorage_client.read_rows(stream_name)
+
+ rows_iterable = read_rows_stream.rows(read_session)
+ return rows_iterable
+
+ def fetchone(self):
+ """Fetch a single row from the results of the last ``execute*()`` call.
+
+ .. note::
+ If a dry run query was executed, no rows are returned.
+
+ Returns:
+ Tuple:
+ A tuple representing a row or ``None`` if no more data is
+ available.
+
+ Raises:
+ google.cloud.bigquery.dbapi.InterfaceError: if called before ``execute()``.
+ """
+ self._try_fetch()
+ try:
+ return next(self._query_data)
+ except StopIteration:
+ return None
+
+ def fetchmany(self, size=None):
+ """Fetch multiple results from the last ``execute*()`` call.
+
+ .. note::
+ If a dry run query was executed, no rows are returned.
+
+ .. note::
+ The size parameter is not used for the request/response size.
+ Set the ``arraysize`` attribute before calling ``execute()`` to
+ set the batch size.
+
+ Args:
+ size (int):
+ (Optional) Maximum number of rows to return. Defaults to the
+ ``arraysize`` property value. If ``arraysize`` is not set, it
+ defaults to ``1``.
+
+ Returns:
+ List[Tuple]: A list of rows.
+
+ Raises:
+ google.cloud.bigquery.dbapi.InterfaceError: if called before ``execute()``.
+ """
+ if size is None:
+ # Since self.arraysize can be None (a deviation from PEP 249),
+ # use an actual PEP 249 default of 1 in such case (*some* number
+ # is needed here).
+ size = self.arraysize if self.arraysize else 1
+
+ self._try_fetch(size=size)
+ rows = []
+
+ for row in self._query_data:
+ rows.append(row)
+ if len(rows) >= size:
+ break
+
+ return rows
+
+ def fetchall(self):
+ """Fetch all remaining results from the last ``execute*()`` call.
+
+ .. note::
+ If a dry run query was executed, no rows are returned.
+
+ Returns:
+ List[Tuple]: A list of all the rows in the results.
+
+ Raises:
+ google.cloud.bigquery.dbapi.InterfaceError: if called before ``execute()``.
+ """
+ self._try_fetch()
+ return list(self._query_data)
+
+ def setinputsizes(self, sizes):
+ """No-op, but for consistency raise an error if cursor is closed."""
+
+ def setoutputsize(self, size, column=None):
+ """No-op, but for consistency raise an error if cursor is closed."""
+
+ def __iter__(self):
+ self._try_fetch()
+ return iter(self._query_data)
+
+
+def _format_operation_list(operation, parameters):
+ """Formats parameters in operation in the way BigQuery expects.
+
+ The input operation will be a query like ``SELECT %s`` and the output
+ will be a query like ``SELECT ?``.
+
+ Args:
+ operation (str): A Google BigQuery query string.
+
+ parameters (Sequence[Any]): Sequence of parameter values.
+
+ Returns:
+ str: A formatted query string.
+
+ Raises:
+ google.cloud.bigquery.dbapi.ProgrammingError:
+ if a parameter used in the operation is not found in the
+ ``parameters`` argument.
+ """
+ formatted_params = ["?" for _ in parameters]
+
+ try:
+ return operation % tuple(formatted_params)
+ except (TypeError, ValueError) as exc:
+ raise exceptions.ProgrammingError(exc)
+
+
+def _format_operation_dict(operation, parameters):
+ """Formats parameters in operation in the way BigQuery expects.
+
+ The input operation will be a query like ``SELECT %(namedparam)s`` and
+ the output will be a query like ``SELECT @namedparam``.
+
+ Args:
+ operation (str): A Google BigQuery query string.
+
+ parameters (Mapping[str, Any]): Dictionary of parameter values.
+
+ Returns:
+ str: A formatted query string.
+
+ Raises:
+ google.cloud.bigquery.dbapi.ProgrammingError:
+ if a parameter used in the operation is not found in the
+ ``parameters`` argument.
+ """
+ formatted_params = {}
+ for name in parameters:
+ escaped_name = name.replace("`", r"\`")
+ formatted_params[name] = "@`{}`".format(escaped_name)
+
+ try:
+ return operation % formatted_params
+ except (KeyError, ValueError, TypeError) as exc:
+ raise exceptions.ProgrammingError(exc)
+
+
+def _format_operation(operation, parameters):
+ """Formats parameters in operation in way BigQuery expects.
+
+ Args:
+ operation (str): A Google BigQuery query string.
+
+ parameters (Union[Mapping[str, Any], Sequence[Any]]):
+ Optional parameter values.
+
+ Returns:
+ str: A formatted query string.
+
+ Raises:
+ google.cloud.bigquery.dbapi.ProgrammingError:
+ if a parameter used in the operation is not found in the
+ ``parameters`` argument.
+ """
+ if parameters is None or len(parameters) == 0:
+ return operation.replace("%%", "%"), None # Still do percent de-escaping.
+
+ operation, parameter_types = _extract_types(operation)
+ if parameter_types is None:
+ raise exceptions.ProgrammingError(
+ f"Parameters were provided, but {repr(operation)} has no placeholders."
+ )
+
+ if isinstance(parameters, collections_abc.Mapping):
+ return _format_operation_dict(operation, parameters), parameter_types
+
+ return _format_operation_list(operation, parameters), parameter_types
+
+
+def _extract_types(
+ operation,
+ extra_type_sub=re.compile(
+ r"""
+ (%*) # Extra %s. We'll deal with these in the replacement code
+
+ % # Beginning of replacement, %s, %(...)s
+
+ (?:\( # Begin of optional name and/or type
+ ([^:)]*) # name
+ (?:: # ':' introduces type
+ ( # start of type group
+ [a-zA-Z0-9_<>, ]+ # First part, no parens
+
+ (?: # start sets of parens + non-paren text
+ \([0-9 ,]+\) # comma-separated groups of digits in parens
+ # (e.g. string(10))
+ (?=[, >)]) # Must be followed by ,>) or space
+ [a-zA-Z0-9<>, ]* # Optional non-paren chars
+ )* # Can be zero or more of parens and following text
+ ) # end of type group
+ )? # close type clause ":type"
+ \))? # End of optional name and/or type
+
+ s # End of replacement
+ """,
+ re.VERBOSE,
+ ).sub,
+):
+ """Remove type information from parameter placeholders.
+
+ For every parameter of the form %(name:type)s, replace with %(name)s and add the
+ item name->type to dict that's returned.
+
+ Returns operation without type information and a dictionary of names and types.
+ """
+ parameter_types = None
+
+ def repl(m):
+ nonlocal parameter_types
+ prefix, name, type_ = m.groups()
+ if len(prefix) % 2:
+ # The prefix has an odd number of %s, the last of which
+ # escapes the % we're looking for, so we don't want to
+ # change anything.
+ return m.group(0)
+
+ try:
+ if name:
+ if not parameter_types:
+ parameter_types = {}
+ if type_:
+ if name in parameter_types:
+ if type_ != parameter_types[name]:
+ raise exceptions.ProgrammingError(
+ f"Conflicting types for {name}: "
+ f"{parameter_types[name]} and {type_}."
+ )
+ else:
+ parameter_types[name] = type_
+ else:
+ if not isinstance(parameter_types, dict):
+ raise TypeError()
+
+ return f"{prefix}%({name})s"
+ else:
+ if parameter_types is None:
+ parameter_types = []
+ parameter_types.append(type_)
+ return f"{prefix}%s"
+ except (AttributeError, TypeError):
+ raise exceptions.ProgrammingError(
+ f"{repr(operation)} mixes named and unamed parameters."
+ )
+
+ return extra_type_sub(repl, operation), parameter_types
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/exceptions.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..37f7129d06186c0fcfdc9d75ed55168fece90d2c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/exceptions.py
@@ -0,0 +1,58 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Exceptions used in the Google BigQuery DB-API."""
+
+
+class Warning(Exception):
+ """Exception raised for important DB-API warnings."""
+
+
+class Error(Exception):
+ """Exception representing all non-warning DB-API errors."""
+
+
+class InterfaceError(Error):
+ """DB-API error related to the database interface."""
+
+
+class DatabaseError(Error):
+ """DB-API error related to the database."""
+
+
+class DataError(DatabaseError):
+ """DB-API error due to problems with the processed data."""
+
+
+class OperationalError(DatabaseError):
+ """DB-API error related to the database operation.
+
+ These errors are not necessarily under the control of the programmer.
+ """
+
+
+class IntegrityError(DatabaseError):
+ """DB-API error when integrity of the database is affected."""
+
+
+class InternalError(DatabaseError):
+ """DB-API error when the database encounters an internal error."""
+
+
+class ProgrammingError(DatabaseError):
+ """DB-API exception raised for programming errors."""
+
+
+class NotSupportedError(DatabaseError):
+ """DB-API error for operations not supported by the database or API."""
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/types.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..717593ae1336d949cbec93ddfbc2abfc51d81011
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/dbapi/types.py
@@ -0,0 +1,96 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Types used in the Google BigQuery DB-API.
+
+See `PEP-249`_ for details.
+
+.. _PEP-249:
+ https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors
+"""
+
+import datetime
+
+
+Date = datetime.date
+Time = datetime.time
+Timestamp = datetime.datetime
+DateFromTicks = datetime.date.fromtimestamp
+TimestampFromTicks = datetime.datetime.fromtimestamp
+
+
+def Binary(data):
+ """Contruct a DB-API binary value.
+
+ Args:
+ data (bytes-like): An object containing binary data and that
+ can be converted to bytes with the `bytes` builtin.
+
+ Returns:
+ bytes: The binary data as a bytes object.
+ """
+ if isinstance(data, int):
+ # This is not the conversion we're looking for, because it
+ # will simply create a bytes object of the given size.
+ raise TypeError("cannot convert `int` object to binary")
+
+ try:
+ return bytes(data)
+ except TypeError:
+ if isinstance(data, str):
+ return data.encode("utf-8")
+ else:
+ raise
+
+
+def TimeFromTicks(ticks, tz=None):
+ """Construct a DB-API time value from the given ticks value.
+
+ Args:
+ ticks (float):
+ a number of seconds since the epoch; see the documentation of the
+ standard Python time module for details.
+
+ tz (datetime.tzinfo): (Optional) time zone to use for conversion
+
+ Returns:
+ datetime.time: time represented by ticks.
+ """
+ dt = datetime.datetime.fromtimestamp(ticks, tz=tz)
+ return dt.timetz()
+
+
+class _DBAPITypeObject(object):
+ """DB-API type object which compares equal to many different strings.
+
+ See `PEP-249`_ for details.
+
+ .. _PEP-249:
+ https://www.python.org/dev/peps/pep-0249/#implementation-hints-for-module-authors
+ """
+
+ def __init__(self, *values):
+ self.values = values
+
+ def __eq__(self, other):
+ return other in self.values
+
+
+STRING = "STRING"
+BINARY = _DBAPITypeObject("BYTES", "RECORD", "STRUCT")
+NUMBER = _DBAPITypeObject(
+ "INTEGER", "INT64", "FLOAT", "FLOAT64", "NUMERIC", "BIGNUMERIC", "BOOLEAN", "BOOL"
+)
+DATETIME = _DBAPITypeObject("TIMESTAMP", "DATE", "TIME", "DATETIME")
+ROWID = "ROWID"
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/encryption_configuration.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/encryption_configuration.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0b6f36774c6f91143afba7e34a789e7763be1e1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/encryption_configuration.py
@@ -0,0 +1,84 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define class for the custom encryption configuration."""
+
+import copy
+
+
+class EncryptionConfiguration(object):
+ """Custom encryption configuration (e.g., Cloud KMS keys).
+
+ Args:
+ kms_key_name (str): resource ID of Cloud KMS key used for encryption
+ """
+
+ def __init__(self, kms_key_name=None) -> None:
+ self._properties = {}
+ if kms_key_name is not None:
+ self._properties["kmsKeyName"] = kms_key_name
+
+ @property
+ def kms_key_name(self):
+ """str: Resource ID of Cloud KMS key
+
+ Resource ID of Cloud KMS key or :data:`None` if using default
+ encryption.
+ """
+ return self._properties.get("kmsKeyName")
+
+ @kms_key_name.setter
+ def kms_key_name(self, value):
+ self._properties["kmsKeyName"] = value
+
+ @classmethod
+ def from_api_repr(cls, resource):
+ """Construct an encryption configuration from its API representation
+
+ Args:
+ resource (Dict[str, object]):
+ An encryption configuration representation as returned from
+ the API.
+
+ Returns:
+ google.cloud.bigquery.table.EncryptionConfiguration:
+ An encryption configuration parsed from ``resource``.
+ """
+ config = cls()
+ config._properties = copy.deepcopy(resource)
+ return config
+
+ def to_api_repr(self):
+ """Construct the API resource representation of this encryption
+ configuration.
+
+ Returns:
+ Dict[str, object]:
+ Encryption configuration as represented as an API resource
+ """
+ return copy.deepcopy(self._properties)
+
+ def __eq__(self, other):
+ if not isinstance(other, EncryptionConfiguration):
+ return NotImplemented
+ return self.kms_key_name == other.kms_key_name
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(self.kms_key_name)
+
+ def __repr__(self):
+ return "EncryptionConfiguration({})".format(self.kms_key_name)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/enums.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/enums.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8cbe99691b18209d0adca88210b601012d75bc5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/enums.py
@@ -0,0 +1,346 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum
+
+
+class AutoRowIDs(enum.Enum):
+ """How to handle automatic insert IDs when inserting rows as a stream."""
+
+ DISABLED = enum.auto()
+ GENERATE_UUID = enum.auto()
+
+
+class Compression(str, enum.Enum):
+ """The compression type to use for exported files. The default value is
+ :attr:`NONE`.
+
+ :attr:`DEFLATE` and :attr:`SNAPPY` are
+ only supported for Avro.
+ """
+
+ GZIP = "GZIP"
+ """Specifies GZIP format."""
+
+ DEFLATE = "DEFLATE"
+ """Specifies DEFLATE format."""
+
+ SNAPPY = "SNAPPY"
+ """Specifies SNAPPY format."""
+
+ ZSTD = "ZSTD"
+ """Specifies ZSTD format."""
+
+ NONE = "NONE"
+ """Specifies no compression."""
+
+
+class DecimalTargetType:
+ """The data types that could be used as a target type when converting decimal values.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#DecimalTargetType
+
+ .. versionadded:: 2.21.0
+ """
+
+ NUMERIC = "NUMERIC"
+ """Decimal values could be converted to NUMERIC type."""
+
+ BIGNUMERIC = "BIGNUMERIC"
+ """Decimal values could be converted to BIGNUMERIC type."""
+
+ STRING = "STRING"
+ """Decimal values could be converted to STRING type."""
+
+
+class CreateDisposition(object):
+ """Specifies whether the job is allowed to create new tables. The default
+ value is :attr:`CREATE_IF_NEEDED`.
+
+ Creation, truncation and append actions occur as one atomic update
+ upon job completion.
+ """
+
+ CREATE_IF_NEEDED = "CREATE_IF_NEEDED"
+ """If the table does not exist, BigQuery creates the table."""
+
+ CREATE_NEVER = "CREATE_NEVER"
+ """The table must already exist. If it does not, a 'notFound' error is
+ returned in the job result."""
+
+
+class DefaultPandasDTypes(enum.Enum):
+ """Default Pandas DataFrem DTypes to convert BigQuery data. These
+ Sentinel values are used instead of None to maintain backward compatibility,
+ and allow Pandas package is not available. For more information:
+ https://stackoverflow.com/a/60605919/101923
+ """
+
+ BOOL_DTYPE = object()
+ """Specifies default bool dtype"""
+
+ INT_DTYPE = object()
+ """Specifies default integer dtype"""
+
+ DATE_DTYPE = object()
+ """Specifies default date dtype"""
+
+ TIME_DTYPE = object()
+ """Specifies default time dtype"""
+
+ RANGE_DATE_DTYPE = object()
+ """Specifies default range date dtype"""
+
+ RANGE_DATETIME_DTYPE = object()
+ """Specifies default range datetime dtype"""
+
+ RANGE_TIMESTAMP_DTYPE = object()
+ """Specifies default range timestamp dtype"""
+
+
+class DestinationFormat(object):
+ """The exported file format. The default value is :attr:`CSV`.
+
+ Tables with nested or repeated fields cannot be exported as CSV.
+ """
+
+ CSV = "CSV"
+ """Specifies CSV format."""
+
+ NEWLINE_DELIMITED_JSON = "NEWLINE_DELIMITED_JSON"
+ """Specifies newline delimited JSON format."""
+
+ AVRO = "AVRO"
+ """Specifies Avro format."""
+
+ PARQUET = "PARQUET"
+ """Specifies Parquet format."""
+
+
+class Encoding(object):
+ """The character encoding of the data. The default is :attr:`UTF_8`.
+
+ BigQuery decodes the data after the raw, binary data has been
+ split using the values of the quote and fieldDelimiter properties.
+ """
+
+ UTF_8 = "UTF-8"
+ """Specifies UTF-8 encoding."""
+
+ ISO_8859_1 = "ISO-8859-1"
+ """Specifies ISO-8859-1 encoding."""
+
+
+class QueryPriority(object):
+ """Specifies a priority for the query. The default value is
+ :attr:`INTERACTIVE`.
+ """
+
+ INTERACTIVE = "INTERACTIVE"
+ """Specifies interactive priority."""
+
+ BATCH = "BATCH"
+ """Specifies batch priority."""
+
+
+class QueryApiMethod(str, enum.Enum):
+ """API method used to start the query. The default value is
+ :attr:`INSERT`.
+ """
+
+ INSERT = "INSERT"
+ """Submit a query job by using the `jobs.insert REST API method
+ `_.
+
+ This supports all job configuration options.
+ """
+
+ QUERY = "QUERY"
+ """Submit a query job by using the `jobs.query REST API method
+ `_.
+
+ Differences from ``INSERT``:
+
+ * Many parameters and job configuration options, including job ID and
+ destination table, cannot be used
+ with this API method. See the `jobs.query REST API documentation
+ `_ for
+ the complete list of supported configuration options.
+
+ * API blocks up to a specified timeout, waiting for the query to
+ finish.
+
+ * The full job resource (including job statistics) may not be available.
+ Call :meth:`~google.cloud.bigquery.job.QueryJob.reload` or
+ :meth:`~google.cloud.bigquery.client.Client.get_job` to get full job
+ statistics and configuration.
+
+ * :meth:`~google.cloud.bigquery.Client.query` can raise API exceptions if
+ the query fails, whereas the same errors don't appear until calling
+ :meth:`~google.cloud.bigquery.job.QueryJob.result` when the ``INSERT``
+ API method is used.
+ """
+
+
+class SchemaUpdateOption(object):
+ """Specifies an update to the destination table schema as a side effect of
+ a load job.
+ """
+
+ ALLOW_FIELD_ADDITION = "ALLOW_FIELD_ADDITION"
+ """Allow adding a nullable field to the schema."""
+
+ ALLOW_FIELD_RELAXATION = "ALLOW_FIELD_RELAXATION"
+ """Allow relaxing a required field in the original schema to nullable."""
+
+
+class SourceFormat(object):
+ """The format of the data files. The default value is :attr:`CSV`.
+
+ Note that the set of allowed values for loading data is different
+ than the set used for external data sources (see
+ :class:`~google.cloud.bigquery.external_config.ExternalSourceFormat`).
+ """
+
+ CSV = "CSV"
+ """Specifies CSV format."""
+
+ DATASTORE_BACKUP = "DATASTORE_BACKUP"
+ """Specifies datastore backup format"""
+
+ NEWLINE_DELIMITED_JSON = "NEWLINE_DELIMITED_JSON"
+ """Specifies newline delimited JSON format."""
+
+ AVRO = "AVRO"
+ """Specifies Avro format."""
+
+ PARQUET = "PARQUET"
+ """Specifies Parquet format."""
+
+ ORC = "ORC"
+ """Specifies Orc format."""
+
+
+class KeyResultStatementKind:
+ """Determines which statement in the script represents the "key result".
+
+ The "key result" is used to populate the schema and query results of the script job.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#keyresultstatementkind
+ """
+
+ KEY_RESULT_STATEMENT_KIND_UNSPECIFIED = "KEY_RESULT_STATEMENT_KIND_UNSPECIFIED"
+ LAST = "LAST"
+ FIRST_SELECT = "FIRST_SELECT"
+
+
+class StandardSqlTypeNames(str, enum.Enum):
+ def _generate_next_value_(name, start, count, last_values):
+ return name
+
+ TYPE_KIND_UNSPECIFIED = enum.auto()
+ INT64 = enum.auto()
+ BOOL = enum.auto()
+ FLOAT64 = enum.auto()
+ STRING = enum.auto()
+ BYTES = enum.auto()
+ TIMESTAMP = enum.auto()
+ DATE = enum.auto()
+ TIME = enum.auto()
+ DATETIME = enum.auto()
+ INTERVAL = enum.auto()
+ GEOGRAPHY = enum.auto()
+ NUMERIC = enum.auto()
+ BIGNUMERIC = enum.auto()
+ JSON = enum.auto()
+ ARRAY = enum.auto()
+ STRUCT = enum.auto()
+ RANGE = enum.auto()
+
+
+class EntityTypes(str, enum.Enum):
+ """Enum of allowed entity type names in AccessEntry"""
+
+ USER_BY_EMAIL = "userByEmail"
+ GROUP_BY_EMAIL = "groupByEmail"
+ DOMAIN = "domain"
+ DATASET = "dataset"
+ SPECIAL_GROUP = "specialGroup"
+ VIEW = "view"
+ IAM_MEMBER = "iamMember"
+ ROUTINE = "routine"
+
+
+# See also: https://cloud.google.com/bigquery/data-types#legacy_sql_data_types
+# and https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
+class SqlTypeNames(str, enum.Enum):
+ """Enum of allowed SQL type names in schema.SchemaField."""
+
+ STRING = "STRING"
+ BYTES = "BYTES"
+ INTEGER = "INTEGER"
+ INT64 = "INTEGER"
+ FLOAT = "FLOAT"
+ FLOAT64 = "FLOAT"
+ DECIMAL = NUMERIC = "NUMERIC"
+ BIGDECIMAL = BIGNUMERIC = "BIGNUMERIC"
+ BOOLEAN = "BOOLEAN"
+ BOOL = "BOOLEAN"
+ GEOGRAPHY = "GEOGRAPHY" # NOTE: not available in legacy types
+ RECORD = "RECORD"
+ STRUCT = "RECORD"
+ TIMESTAMP = "TIMESTAMP"
+ DATE = "DATE"
+ TIME = "TIME"
+ DATETIME = "DATETIME"
+ INTERVAL = "INTERVAL" # NOTE: not available in legacy types
+ RANGE = "RANGE" # NOTE: not available in legacy types
+
+
+class WriteDisposition(object):
+ """Specifies the action that occurs if destination table already exists.
+
+ The default value is :attr:`WRITE_APPEND`.
+
+ Each action is atomic and only occurs if BigQuery is able to complete
+ the job successfully. Creation, truncation and append actions occur as one
+ atomic update upon job completion.
+ """
+
+ WRITE_APPEND = "WRITE_APPEND"
+ """If the table already exists, BigQuery appends the data to the table."""
+
+ WRITE_TRUNCATE = "WRITE_TRUNCATE"
+ """If the table already exists, BigQuery overwrites the table data."""
+
+ WRITE_EMPTY = "WRITE_EMPTY"
+ """If the table already exists and contains data, a 'duplicate' error is
+ returned in the job result."""
+
+
+class DeterminismLevel:
+ """Specifies determinism level for JavaScript user-defined functions (UDFs).
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#DeterminismLevel
+ """
+
+ DETERMINISM_LEVEL_UNSPECIFIED = "DETERMINISM_LEVEL_UNSPECIFIED"
+ """The determinism of the UDF is unspecified."""
+
+ DETERMINISTIC = "DETERMINISTIC"
+ """The UDF is deterministic, meaning that 2 function calls with the same inputs
+ always produce the same result, even across 2 query runs."""
+
+ NOT_DETERMINISTIC = "NOT_DETERMINISTIC"
+ """The UDF is not deterministic."""
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/exceptions.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..62e0d540c91c930b52cdf1342e2dc224d69177c5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/exceptions.py
@@ -0,0 +1,35 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class BigQueryError(Exception):
+ """Base class for all custom exceptions defined by the BigQuery client."""
+
+
+class LegacyBigQueryStorageError(BigQueryError):
+ """Raised when too old a version of BigQuery Storage extra is detected at runtime."""
+
+
+class LegacyPyarrowError(BigQueryError):
+ """Raised when too old a version of pyarrow package is detected at runtime."""
+
+
+class BigQueryStorageNotFoundError(BigQueryError):
+ """Raised when BigQuery Storage extra is not installed when trying to
+ import it.
+ """
+
+
+class LegacyPandasError(BigQueryError):
+ """Raised when too old a version of pandas package is detected at runtime."""
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/external_config.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/external_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..a891bc2327abb98932b9e388125b87471e50056a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/external_config.py
@@ -0,0 +1,1005 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define classes that describe external data sources.
+
+ These are used for both Table.externalDataConfiguration and
+ Job.configuration.query.tableDefinitions.
+"""
+
+from __future__ import absolute_import
+
+import base64
+import copy
+from typing import Any, Dict, FrozenSet, Iterable, Optional, Union
+
+from google.cloud.bigquery._helpers import _to_bytes
+from google.cloud.bigquery._helpers import _bytes_to_json
+from google.cloud.bigquery._helpers import _int_or_none
+from google.cloud.bigquery._helpers import _str_or_none
+from google.cloud.bigquery.format_options import AvroOptions, ParquetOptions
+from google.cloud.bigquery.schema import SchemaField
+
+
+class ExternalSourceFormat(object):
+ """The format for external data files.
+
+ Note that the set of allowed values for external data sources is different
+ than the set used for loading data (see
+ :class:`~google.cloud.bigquery.job.SourceFormat`).
+ """
+
+ CSV = "CSV"
+ """Specifies CSV format."""
+
+ GOOGLE_SHEETS = "GOOGLE_SHEETS"
+ """Specifies Google Sheets format."""
+
+ NEWLINE_DELIMITED_JSON = "NEWLINE_DELIMITED_JSON"
+ """Specifies newline delimited JSON format."""
+
+ AVRO = "AVRO"
+ """Specifies Avro format."""
+
+ DATASTORE_BACKUP = "DATASTORE_BACKUP"
+ """Specifies datastore backup format"""
+
+ ORC = "ORC"
+ """Specifies ORC format."""
+
+ PARQUET = "PARQUET"
+ """Specifies Parquet format."""
+
+ BIGTABLE = "BIGTABLE"
+ """Specifies Bigtable format."""
+
+
+class BigtableColumn(object):
+ """Options for a Bigtable column."""
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def encoding(self):
+ """str: The encoding of the values when the type is not `STRING`
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumn.FIELDS.encoding
+ """
+ return self._properties.get("encoding")
+
+ @encoding.setter
+ def encoding(self, value):
+ self._properties["encoding"] = value
+
+ @property
+ def field_name(self):
+ """str: An identifier to use if the qualifier is not a valid BigQuery
+ field identifier
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumn.FIELDS.field_name
+ """
+ return self._properties.get("fieldName")
+
+ @field_name.setter
+ def field_name(self, value):
+ self._properties["fieldName"] = value
+
+ @property
+ def only_read_latest(self):
+ """bool: If this is set, only the latest version of value in this
+ column are exposed.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumn.FIELDS.only_read_latest
+ """
+ return self._properties.get("onlyReadLatest")
+
+ @only_read_latest.setter
+ def only_read_latest(self, value):
+ self._properties["onlyReadLatest"] = value
+
+ @property
+ def qualifier_encoded(self):
+ """Union[str, bytes]: The qualifier encoded in binary.
+
+ The type is ``str`` (Python 2.x) or ``bytes`` (Python 3.x). The module
+ will handle base64 encoding for you.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumn.FIELDS.qualifier_encoded
+ """
+ prop = self._properties.get("qualifierEncoded")
+ if prop is None:
+ return None
+ return base64.standard_b64decode(_to_bytes(prop))
+
+ @qualifier_encoded.setter
+ def qualifier_encoded(self, value):
+ self._properties["qualifierEncoded"] = _bytes_to_json(value)
+
+ @property
+ def qualifier_string(self):
+ """str: A valid UTF-8 string qualifier
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumn.FIELDS.qualifier_string
+ """
+ return self._properties.get("qualifierString")
+
+ @qualifier_string.setter
+ def qualifier_string(self, value):
+ self._properties["qualifierString"] = value
+
+ @property
+ def type_(self):
+ """str: The type to convert the value in cells of this column.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumn.FIELDS.type
+ """
+ return self._properties.get("type")
+
+ @type_.setter
+ def type_(self, value):
+ self._properties["type"] = value
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of this object.
+
+ Returns:
+ Dict[str, Any]:
+ A dictionary in the format used by the BigQuery API.
+ """
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "BigtableColumn":
+ """Factory: construct a :class:`~.external_config.BigtableColumn`
+ instance given its API representation.
+
+ Args:
+ resource (Dict[str, Any]):
+ Definition of a :class:`~.external_config.BigtableColumn`
+ instance in the same representation as is returned from the
+ API.
+
+ Returns:
+ external_config.BigtableColumn: Configuration parsed from ``resource``.
+ """
+ config = cls()
+ config._properties = copy.deepcopy(resource)
+ return config
+
+
+class BigtableColumnFamily(object):
+ """Options for a Bigtable column family."""
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def encoding(self):
+ """str: The encoding of the values when the type is not `STRING`
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumnFamily.FIELDS.encoding
+ """
+ return self._properties.get("encoding")
+
+ @encoding.setter
+ def encoding(self, value):
+ self._properties["encoding"] = value
+
+ @property
+ def family_id(self):
+ """str: Identifier of the column family.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumnFamily.FIELDS.family_id
+ """
+ return self._properties.get("familyId")
+
+ @family_id.setter
+ def family_id(self, value):
+ self._properties["familyId"] = value
+
+ @property
+ def only_read_latest(self):
+ """bool: If this is set only the latest version of value are exposed
+ for all columns in this column family.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumnFamily.FIELDS.only_read_latest
+ """
+ return self._properties.get("onlyReadLatest")
+
+ @only_read_latest.setter
+ def only_read_latest(self, value):
+ self._properties["onlyReadLatest"] = value
+
+ @property
+ def type_(self):
+ """str: The type to convert the value in cells of this column family.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumnFamily.FIELDS.type
+ """
+ return self._properties.get("type")
+
+ @type_.setter
+ def type_(self, value):
+ self._properties["type"] = value
+
+ @property
+ def columns(self):
+ """List[BigtableColumn]: Lists of columns
+ that should be exposed as individual fields.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableColumnFamily.FIELDS.columns
+ """
+ prop = self._properties.get("columns", [])
+ return [BigtableColumn.from_api_repr(col) for col in prop]
+
+ @columns.setter
+ def columns(self, value):
+ self._properties["columns"] = [col.to_api_repr() for col in value]
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of this object.
+
+ Returns:
+ Dict[str, Any]:
+ A dictionary in the format used by the BigQuery API.
+ """
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "BigtableColumnFamily":
+ """Factory: construct a :class:`~.external_config.BigtableColumnFamily`
+ instance given its API representation.
+
+ Args:
+ resource (Dict[str, Any]):
+ Definition of a :class:`~.external_config.BigtableColumnFamily`
+ instance in the same representation as is returned from the
+ API.
+
+ Returns:
+ :class:`~.external_config.BigtableColumnFamily`:
+ Configuration parsed from ``resource``.
+ """
+ config = cls()
+ config._properties = copy.deepcopy(resource)
+ return config
+
+
+class BigtableOptions(object):
+ """Options that describe how to treat Bigtable tables as BigQuery tables."""
+
+ _SOURCE_FORMAT = "BIGTABLE"
+ _RESOURCE_NAME = "bigtableOptions"
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def ignore_unspecified_column_families(self):
+ """bool: If :data:`True`, ignore columns not specified in
+ :attr:`column_families` list. Defaults to :data:`False`.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableOptions.FIELDS.ignore_unspecified_column_families
+ """
+ return self._properties.get("ignoreUnspecifiedColumnFamilies")
+
+ @ignore_unspecified_column_families.setter
+ def ignore_unspecified_column_families(self, value):
+ self._properties["ignoreUnspecifiedColumnFamilies"] = value
+
+ @property
+ def read_rowkey_as_string(self):
+ """bool: If :data:`True`, rowkey column families will be read and
+ converted to string. Defaults to :data:`False`.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableOptions.FIELDS.read_rowkey_as_string
+ """
+ return self._properties.get("readRowkeyAsString")
+
+ @read_rowkey_as_string.setter
+ def read_rowkey_as_string(self, value):
+ self._properties["readRowkeyAsString"] = value
+
+ @property
+ def column_families(self):
+ """List[:class:`~.external_config.BigtableColumnFamily`]: List of
+ column families to expose in the table schema along with their types.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#BigtableOptions.FIELDS.column_families
+ """
+ prop = self._properties.get("columnFamilies", [])
+ return [BigtableColumnFamily.from_api_repr(cf) for cf in prop]
+
+ @column_families.setter
+ def column_families(self, value):
+ self._properties["columnFamilies"] = [cf.to_api_repr() for cf in value]
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of this object.
+
+ Returns:
+ Dict[str, Any]:
+ A dictionary in the format used by the BigQuery API.
+ """
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "BigtableOptions":
+ """Factory: construct a :class:`~.external_config.BigtableOptions`
+ instance given its API representation.
+
+ Args:
+ resource (Dict[str, Any]):
+ Definition of a :class:`~.external_config.BigtableOptions`
+ instance in the same representation as is returned from the
+ API.
+
+ Returns:
+ BigtableOptions: Configuration parsed from ``resource``.
+ """
+ config = cls()
+ config._properties = copy.deepcopy(resource)
+ return config
+
+
+class CSVOptions(object):
+ """Options that describe how to treat CSV files as BigQuery tables."""
+
+ _SOURCE_FORMAT = "CSV"
+ _RESOURCE_NAME = "csvOptions"
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def allow_jagged_rows(self):
+ """bool: If :data:`True`, BigQuery treats missing trailing columns as
+ null values. Defaults to :data:`False`.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#CsvOptions.FIELDS.allow_jagged_rows
+ """
+ return self._properties.get("allowJaggedRows")
+
+ @allow_jagged_rows.setter
+ def allow_jagged_rows(self, value):
+ self._properties["allowJaggedRows"] = value
+
+ @property
+ def allow_quoted_newlines(self):
+ """bool: If :data:`True`, quoted data sections that contain newline
+ characters in a CSV file are allowed. Defaults to :data:`False`.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#CsvOptions.FIELDS.allow_quoted_newlines
+ """
+ return self._properties.get("allowQuotedNewlines")
+
+ @allow_quoted_newlines.setter
+ def allow_quoted_newlines(self, value):
+ self._properties["allowQuotedNewlines"] = value
+
+ @property
+ def encoding(self):
+ """str: The character encoding of the data.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#CsvOptions.FIELDS.encoding
+ """
+ return self._properties.get("encoding")
+
+ @encoding.setter
+ def encoding(self, value):
+ self._properties["encoding"] = value
+
+ @property
+ def preserve_ascii_control_characters(self):
+ """bool: Indicates if the embedded ASCII control characters
+ (the first 32 characters in the ASCII-table, from '\x00' to '\x1F') are preserved.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#CsvOptions.FIELDS.preserve_ascii_control_characters
+ """
+ return self._properties.get("preserveAsciiControlCharacters")
+
+ @preserve_ascii_control_characters.setter
+ def preserve_ascii_control_characters(self, value):
+ self._properties["preserveAsciiControlCharacters"] = value
+
+ @property
+ def field_delimiter(self):
+ """str: The separator for fields in a CSV file. Defaults to comma (',').
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#CsvOptions.FIELDS.field_delimiter
+ """
+ return self._properties.get("fieldDelimiter")
+
+ @field_delimiter.setter
+ def field_delimiter(self, value):
+ self._properties["fieldDelimiter"] = value
+
+ @property
+ def quote_character(self):
+ """str: The value that is used to quote data sections in a CSV file.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#CsvOptions.FIELDS.quote
+ """
+ return self._properties.get("quote")
+
+ @quote_character.setter
+ def quote_character(self, value):
+ self._properties["quote"] = value
+
+ @property
+ def skip_leading_rows(self):
+ """int: The number of rows at the top of a CSV file.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#CsvOptions.FIELDS.skip_leading_rows
+ """
+ return _int_or_none(self._properties.get("skipLeadingRows"))
+
+ @skip_leading_rows.setter
+ def skip_leading_rows(self, value):
+ self._properties["skipLeadingRows"] = str(value)
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of this object.
+
+ Returns:
+ Dict[str, Any]: A dictionary in the format used by the BigQuery API.
+ """
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "CSVOptions":
+ """Factory: construct a :class:`~.external_config.CSVOptions` instance
+ given its API representation.
+
+ Args:
+ resource (Dict[str, Any]):
+ Definition of a :class:`~.external_config.CSVOptions`
+ instance in the same representation as is returned from the
+ API.
+
+ Returns:
+ CSVOptions: Configuration parsed from ``resource``.
+ """
+ config = cls()
+ config._properties = copy.deepcopy(resource)
+ return config
+
+
+class GoogleSheetsOptions(object):
+ """Options that describe how to treat Google Sheets as BigQuery tables."""
+
+ _SOURCE_FORMAT = "GOOGLE_SHEETS"
+ _RESOURCE_NAME = "googleSheetsOptions"
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def skip_leading_rows(self):
+ """int: The number of rows at the top of a sheet that BigQuery will
+ skip when reading the data.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#GoogleSheetsOptions.FIELDS.skip_leading_rows
+ """
+ return _int_or_none(self._properties.get("skipLeadingRows"))
+
+ @skip_leading_rows.setter
+ def skip_leading_rows(self, value):
+ self._properties["skipLeadingRows"] = str(value)
+
+ @property
+ def range(self):
+ """str: The range of a sheet that BigQuery will query from.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#GoogleSheetsOptions.FIELDS.range
+ """
+ return _str_or_none(self._properties.get("range"))
+
+ @range.setter
+ def range(self, value):
+ self._properties["range"] = value
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of this object.
+
+ Returns:
+ Dict[str, Any]: A dictionary in the format used by the BigQuery API.
+ """
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "GoogleSheetsOptions":
+ """Factory: construct a :class:`~.external_config.GoogleSheetsOptions`
+ instance given its API representation.
+
+ Args:
+ resource (Dict[str, Any]):
+ Definition of a :class:`~.external_config.GoogleSheetsOptions`
+ instance in the same representation as is returned from the
+ API.
+
+ Returns:
+ GoogleSheetsOptions: Configuration parsed from ``resource``.
+ """
+ config = cls()
+ config._properties = copy.deepcopy(resource)
+ return config
+
+
+_OPTION_CLASSES = (
+ AvroOptions,
+ BigtableOptions,
+ CSVOptions,
+ GoogleSheetsOptions,
+ ParquetOptions,
+)
+
+OptionsType = Union[
+ AvroOptions,
+ BigtableOptions,
+ CSVOptions,
+ GoogleSheetsOptions,
+ ParquetOptions,
+]
+
+
+class HivePartitioningOptions(object):
+ """[Beta] Options that configure hive partitioning.
+
+ .. note::
+ **Experimental**. This feature is experimental and might change or
+ have limited support.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#HivePartitioningOptions
+ """
+
+ def __init__(self) -> None:
+ self._properties: Dict[str, Any] = {}
+
+ @property
+ def mode(self):
+ """Optional[str]: When set, what mode of hive partitioning to use when reading data.
+
+ Two modes are supported: "AUTO" and "STRINGS".
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#HivePartitioningOptions.FIELDS.mode
+ """
+ return self._properties.get("mode")
+
+ @mode.setter
+ def mode(self, value):
+ self._properties["mode"] = value
+
+ @property
+ def source_uri_prefix(self):
+ """Optional[str]: When hive partition detection is requested, a common prefix for
+ all source URIs is required.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#HivePartitioningOptions.FIELDS.source_uri_prefix
+ """
+ return self._properties.get("sourceUriPrefix")
+
+ @source_uri_prefix.setter
+ def source_uri_prefix(self, value):
+ self._properties["sourceUriPrefix"] = value
+
+ @property
+ def require_partition_filter(self):
+ """Optional[bool]: If set to true, queries over the partitioned table require a
+ partition filter that can be used for partition elimination to be
+ specified.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#HivePartitioningOptions.FIELDS.mode
+ """
+ return self._properties.get("requirePartitionFilter")
+
+ @require_partition_filter.setter
+ def require_partition_filter(self, value):
+ self._properties["requirePartitionFilter"] = value
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of this object.
+
+ Returns:
+ Dict[str, Any]: A dictionary in the format used by the BigQuery API.
+ """
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "HivePartitioningOptions":
+ """Factory: construct a :class:`~.external_config.HivePartitioningOptions`
+ instance given its API representation.
+
+ Args:
+ resource (Dict[str, Any]):
+ Definition of a :class:`~.external_config.HivePartitioningOptions`
+ instance in the same representation as is returned from the
+ API.
+
+ Returns:
+ HivePartitioningOptions: Configuration parsed from ``resource``.
+ """
+ config = cls()
+ config._properties = copy.deepcopy(resource)
+ return config
+
+
+class ExternalConfig(object):
+ """Description of an external data source.
+
+ Args:
+ source_format (ExternalSourceFormat):
+ See :attr:`source_format`.
+ """
+
+ def __init__(self, source_format) -> None:
+ self._properties = {"sourceFormat": source_format}
+
+ @property
+ def source_format(self):
+ """:class:`~.external_config.ExternalSourceFormat`:
+ Format of external source.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.source_format
+ """
+ return self._properties["sourceFormat"]
+
+ @property
+ def options(self) -> Optional[OptionsType]:
+ """Source-specific options."""
+ for optcls in _OPTION_CLASSES:
+ # The code below is too much magic for mypy to handle.
+ if self.source_format == optcls._SOURCE_FORMAT: # type: ignore
+ options: OptionsType = optcls() # type: ignore
+ options._properties = self._properties.setdefault(
+ optcls._RESOURCE_NAME, {} # type: ignore
+ )
+ return options
+
+ # No matching source format found.
+ return None
+
+ @property
+ def autodetect(self):
+ """bool: If :data:`True`, try to detect schema and format options
+ automatically.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.autodetect
+ """
+ return self._properties.get("autodetect")
+
+ @autodetect.setter
+ def autodetect(self, value):
+ self._properties["autodetect"] = value
+
+ @property
+ def compression(self):
+ """str: The compression type of the data source.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.compression
+ """
+ return self._properties.get("compression")
+
+ @compression.setter
+ def compression(self, value):
+ self._properties["compression"] = value
+
+ @property
+ def decimal_target_types(self) -> Optional[FrozenSet[str]]:
+ """Possible SQL data types to which the source decimal values are converted.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.decimal_target_types
+
+ .. versionadded:: 2.21.0
+ """
+ prop = self._properties.get("decimalTargetTypes")
+ if prop is not None:
+ prop = frozenset(prop)
+ return prop
+
+ @decimal_target_types.setter
+ def decimal_target_types(self, value: Optional[Iterable[str]]):
+ if value is not None:
+ self._properties["decimalTargetTypes"] = list(value)
+ else:
+ if "decimalTargetTypes" in self._properties:
+ del self._properties["decimalTargetTypes"]
+
+ @property
+ def hive_partitioning(self):
+ """Optional[:class:`~.external_config.HivePartitioningOptions`]: [Beta] When set, \
+ it configures hive partitioning support.
+
+ .. note::
+ **Experimental**. This feature is experimental and might change or
+ have limited support.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.hive_partitioning_options
+ """
+ prop = self._properties.get("hivePartitioningOptions")
+ if prop is None:
+ return None
+ return HivePartitioningOptions.from_api_repr(prop)
+
+ @hive_partitioning.setter
+ def hive_partitioning(self, value):
+ prop = value.to_api_repr() if value is not None else None
+ self._properties["hivePartitioningOptions"] = prop
+
+ @property
+ def reference_file_schema_uri(self):
+ """Optional[str]:
+ When creating an external table, the user can provide a reference file with the
+ table schema. This is enabled for the following formats:
+
+ AVRO, PARQUET, ORC
+ """
+ return self._properties.get("referenceFileSchemaUri")
+
+ @reference_file_schema_uri.setter
+ def reference_file_schema_uri(self, value):
+ self._properties["referenceFileSchemaUri"] = value
+
+ @property
+ def ignore_unknown_values(self):
+ """bool: If :data:`True`, extra values that are not represented in the
+ table schema are ignored. Defaults to :data:`False`.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.ignore_unknown_values
+ """
+ return self._properties.get("ignoreUnknownValues")
+
+ @ignore_unknown_values.setter
+ def ignore_unknown_values(self, value):
+ self._properties["ignoreUnknownValues"] = value
+
+ @property
+ def max_bad_records(self):
+ """int: The maximum number of bad records that BigQuery can ignore when
+ reading data.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.max_bad_records
+ """
+ return self._properties.get("maxBadRecords")
+
+ @max_bad_records.setter
+ def max_bad_records(self, value):
+ self._properties["maxBadRecords"] = value
+
+ @property
+ def source_uris(self):
+ """List[str]: URIs that point to your data in Google Cloud.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.source_uris
+ """
+ return self._properties.get("sourceUris", [])
+
+ @source_uris.setter
+ def source_uris(self, value):
+ self._properties["sourceUris"] = value
+
+ @property
+ def schema(self):
+ """List[:class:`~google.cloud.bigquery.schema.SchemaField`]: The schema
+ for the data.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.schema
+ """
+ prop = self._properties.get("schema", {})
+ return [SchemaField.from_api_repr(field) for field in prop.get("fields", [])]
+
+ @schema.setter
+ def schema(self, value):
+ prop = value
+ if value is not None:
+ prop = {"fields": [field.to_api_repr() for field in value]}
+ self._properties["schema"] = prop
+
+ @property
+ def connection_id(self):
+ """Optional[str]: [Experimental] ID of a BigQuery Connection API
+ resource.
+
+ .. WARNING::
+
+ This feature is experimental. Pre-GA features may have limited
+ support, and changes to pre-GA features may not be compatible with
+ other pre-GA versions.
+ """
+ return self._properties.get("connectionId")
+
+ @connection_id.setter
+ def connection_id(self, value):
+ self._properties["connectionId"] = value
+
+ @property
+ def avro_options(self) -> Optional[AvroOptions]:
+ """Additional properties to set if ``sourceFormat`` is set to AVRO.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.avro_options
+ """
+ if self.source_format == ExternalSourceFormat.AVRO:
+ self._properties.setdefault(AvroOptions._RESOURCE_NAME, {})
+ resource = self._properties.get(AvroOptions._RESOURCE_NAME)
+ if resource is None:
+ return None
+ options = AvroOptions()
+ options._properties = resource
+ return options
+
+ @avro_options.setter
+ def avro_options(self, value):
+ if self.source_format != ExternalSourceFormat.AVRO:
+ msg = f"Cannot set Avro options, source format is {self.source_format}"
+ raise TypeError(msg)
+ self._properties[AvroOptions._RESOURCE_NAME] = value._properties
+
+ @property
+ def bigtable_options(self) -> Optional[BigtableOptions]:
+ """Additional properties to set if ``sourceFormat`` is set to BIGTABLE.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.bigtable_options
+ """
+ if self.source_format == ExternalSourceFormat.BIGTABLE:
+ self._properties.setdefault(BigtableOptions._RESOURCE_NAME, {})
+ resource = self._properties.get(BigtableOptions._RESOURCE_NAME)
+ if resource is None:
+ return None
+ options = BigtableOptions()
+ options._properties = resource
+ return options
+
+ @bigtable_options.setter
+ def bigtable_options(self, value):
+ if self.source_format != ExternalSourceFormat.BIGTABLE:
+ msg = f"Cannot set Bigtable options, source format is {self.source_format}"
+ raise TypeError(msg)
+ self._properties[BigtableOptions._RESOURCE_NAME] = value._properties
+
+ @property
+ def csv_options(self) -> Optional[CSVOptions]:
+ """Additional properties to set if ``sourceFormat`` is set to CSV.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.csv_options
+ """
+ if self.source_format == ExternalSourceFormat.CSV:
+ self._properties.setdefault(CSVOptions._RESOURCE_NAME, {})
+ resource = self._properties.get(CSVOptions._RESOURCE_NAME)
+ if resource is None:
+ return None
+ options = CSVOptions()
+ options._properties = resource
+ return options
+
+ @csv_options.setter
+ def csv_options(self, value):
+ if self.source_format != ExternalSourceFormat.CSV:
+ msg = f"Cannot set CSV options, source format is {self.source_format}"
+ raise TypeError(msg)
+ self._properties[CSVOptions._RESOURCE_NAME] = value._properties
+
+ @property
+ def google_sheets_options(self) -> Optional[GoogleSheetsOptions]:
+ """Additional properties to set if ``sourceFormat`` is set to
+ GOOGLE_SHEETS.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.google_sheets_options
+ """
+ if self.source_format == ExternalSourceFormat.GOOGLE_SHEETS:
+ self._properties.setdefault(GoogleSheetsOptions._RESOURCE_NAME, {})
+ resource = self._properties.get(GoogleSheetsOptions._RESOURCE_NAME)
+ if resource is None:
+ return None
+ options = GoogleSheetsOptions()
+ options._properties = resource
+ return options
+
+ @google_sheets_options.setter
+ def google_sheets_options(self, value):
+ if self.source_format != ExternalSourceFormat.GOOGLE_SHEETS:
+ msg = f"Cannot set Google Sheets options, source format is {self.source_format}"
+ raise TypeError(msg)
+ self._properties[GoogleSheetsOptions._RESOURCE_NAME] = value._properties
+
+ @property
+ def parquet_options(self) -> Optional[ParquetOptions]:
+ """Additional properties to set if ``sourceFormat`` is set to PARQUET.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ExternalDataConfiguration.FIELDS.parquet_options
+ """
+ if self.source_format == ExternalSourceFormat.PARQUET:
+ self._properties.setdefault(ParquetOptions._RESOURCE_NAME, {})
+ resource = self._properties.get(ParquetOptions._RESOURCE_NAME)
+ if resource is None:
+ return None
+ options = ParquetOptions()
+ options._properties = resource
+ return options
+
+ @parquet_options.setter
+ def parquet_options(self, value):
+ if self.source_format != ExternalSourceFormat.PARQUET:
+ msg = f"Cannot set Parquet options, source format is {self.source_format}"
+ raise TypeError(msg)
+ self._properties[ParquetOptions._RESOURCE_NAME] = value._properties
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of this object.
+
+ Returns:
+ Dict[str, Any]:
+ A dictionary in the format used by the BigQuery API.
+ """
+ config = copy.deepcopy(self._properties)
+ return config
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "ExternalConfig":
+ """Factory: construct an :class:`~.external_config.ExternalConfig`
+ instance given its API representation.
+
+ Args:
+ resource (Dict[str, Any]):
+ Definition of an :class:`~.external_config.ExternalConfig`
+ instance in the same representation as is returned from the
+ API.
+
+ Returns:
+ ExternalConfig: Configuration parsed from ``resource``.
+ """
+ config = cls(resource["sourceFormat"])
+ config._properties = copy.deepcopy(resource)
+ return config
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/format_options.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/format_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad5591b1c9b62295535bba409a96b792794ebf12
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/format_options.py
@@ -0,0 +1,147 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from typing import Dict, Optional
+
+
+class AvroOptions:
+ """Options if source format is set to AVRO."""
+
+ _SOURCE_FORMAT = "AVRO"
+ _RESOURCE_NAME = "avroOptions"
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def use_avro_logical_types(self) -> Optional[bool]:
+ """[Optional] If sourceFormat is set to 'AVRO', indicates whether to
+ interpret logical types as the corresponding BigQuery data type (for
+ example, TIMESTAMP), instead of using the raw type (for example,
+ INTEGER).
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#AvroOptions.FIELDS.use_avro_logical_types
+ """
+ return self._properties.get("useAvroLogicalTypes")
+
+ @use_avro_logical_types.setter
+ def use_avro_logical_types(self, value):
+ self._properties["useAvroLogicalTypes"] = value
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, bool]) -> "AvroOptions":
+ """Factory: construct an instance from a resource dict.
+
+ Args:
+ resource (Dict[str, bool]):
+ Definition of a :class:`~.format_options.AvroOptions` instance in
+ the same representation as is returned from the API.
+
+ Returns:
+ :class:`~.format_options.AvroOptions`:
+ Configuration parsed from ``resource``.
+ """
+ config = cls()
+ config._properties = copy.deepcopy(resource)
+ return config
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of this object.
+
+ Returns:
+ Dict[str, bool]:
+ A dictionary in the format used by the BigQuery API.
+ """
+ return copy.deepcopy(self._properties)
+
+
+class ParquetOptions:
+ """Additional options if the PARQUET source format is used."""
+
+ _SOURCE_FORMAT = "PARQUET"
+ _RESOURCE_NAME = "parquetOptions"
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def enum_as_string(self) -> bool:
+ """Indicates whether to infer Parquet ENUM logical type as STRING instead of
+ BYTES by default.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ParquetOptions.FIELDS.enum_as_string
+ """
+ return self._properties.get("enumAsString")
+
+ @enum_as_string.setter
+ def enum_as_string(self, value: bool) -> None:
+ self._properties["enumAsString"] = value
+
+ @property
+ def enable_list_inference(self) -> bool:
+ """Indicates whether to use schema inference specifically for Parquet LIST
+ logical type.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ParquetOptions.FIELDS.enable_list_inference
+ """
+ return self._properties.get("enableListInference")
+
+ @enable_list_inference.setter
+ def enable_list_inference(self, value: bool) -> None:
+ self._properties["enableListInference"] = value
+
+ @property
+ def map_target_type(self) -> str:
+ """Indicates whether to simplify the representation of parquet maps to only show keys and values."""
+
+ return self._properties.get("mapTargetType")
+
+ @map_target_type.setter
+ def map_target_type(self, value: str) -> None:
+ """Sets the map target type.
+
+ Args:
+ value: The map target type (eg ARRAY_OF_STRUCT).
+ """
+ self._properties["mapTargetType"] = value
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, bool]) -> "ParquetOptions":
+ """Factory: construct an instance from a resource dict.
+
+ Args:
+ resource (Dict[str, bool]):
+ Definition of a :class:`~.format_options.ParquetOptions` instance in
+ the same representation as is returned from the API.
+
+ Returns:
+ :class:`~.format_options.ParquetOptions`:
+ Configuration parsed from ``resource``.
+ """
+ config = cls()
+ config._properties = copy.deepcopy(resource)
+ return config
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of this object.
+
+ Returns:
+ Dict[str, bool]:
+ A dictionary in the format used by the BigQuery API.
+ """
+ return copy.deepcopy(self._properties)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/iam.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/iam.py
new file mode 100644
index 0000000000000000000000000000000000000000..df9db36b756dd8b1f1c8bd0ca552533277af7272
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/iam.py
@@ -0,0 +1,38 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""BigQuery API IAM policy definitions
+
+For all allowed roles and permissions, see:
+
+https://cloud.google.com/bigquery/docs/access-control
+"""
+
+# BigQuery-specific IAM roles available for tables and views
+
+BIGQUERY_DATA_EDITOR_ROLE = "roles/bigquery.dataEditor"
+"""When applied to a table or view, this role provides permissions to
+read and update data and metadata for the table or view."""
+
+BIGQUERY_DATA_OWNER_ROLE = "roles/bigquery.dataOwner"
+"""When applied to a table or view, this role provides permissions to
+read and update data and metadata for the table or view, share the
+table/view, and delete the table/view."""
+
+BIGQUERY_DATA_VIEWER_ROLE = "roles/bigquery.dataViewer"
+"""When applied to a table or view, this role provides permissions to
+read data and metadata from the table or view."""
+
+BIGQUERY_METADATA_VIEWER_ROLE = "roles/bigquery.metadataViewer"
+"""When applied to a table or view, this role provides persmissions to
+read metadata from the table or view."""
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/__init__.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f51311b0bb012f496d92b55a04df5e00366fc61d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/__init__.py
@@ -0,0 +1,87 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Jobs."""
+
+from google.cloud.bigquery.job.base import _AsyncJob
+from google.cloud.bigquery.job.base import _error_result_to_exception
+from google.cloud.bigquery.job.base import _DONE_STATE
+from google.cloud.bigquery.job.base import _JobConfig
+from google.cloud.bigquery.job.base import _JobReference
+from google.cloud.bigquery.job.base import ReservationUsage
+from google.cloud.bigquery.job.base import ScriptStatistics
+from google.cloud.bigquery.job.base import ScriptStackFrame
+from google.cloud.bigquery.job.base import TransactionInfo
+from google.cloud.bigquery.job.base import UnknownJob
+from google.cloud.bigquery.job.copy_ import CopyJob
+from google.cloud.bigquery.job.copy_ import CopyJobConfig
+from google.cloud.bigquery.job.copy_ import OperationType
+from google.cloud.bigquery.job.extract import ExtractJob
+from google.cloud.bigquery.job.extract import ExtractJobConfig
+from google.cloud.bigquery.job.load import LoadJob
+from google.cloud.bigquery.job.load import LoadJobConfig
+from google.cloud.bigquery.job.query import _contains_order_by
+from google.cloud.bigquery.job.query import DmlStats
+from google.cloud.bigquery.job.query import QueryJob
+from google.cloud.bigquery.job.query import QueryJobConfig
+from google.cloud.bigquery.job.query import QueryPlanEntry
+from google.cloud.bigquery.job.query import QueryPlanEntryStep
+from google.cloud.bigquery.job.query import ScriptOptions
+from google.cloud.bigquery.job.query import TimelineEntry
+from google.cloud.bigquery.enums import Compression
+from google.cloud.bigquery.enums import CreateDisposition
+from google.cloud.bigquery.enums import DestinationFormat
+from google.cloud.bigquery.enums import Encoding
+from google.cloud.bigquery.enums import QueryPriority
+from google.cloud.bigquery.enums import SchemaUpdateOption
+from google.cloud.bigquery.enums import SourceFormat
+from google.cloud.bigquery.enums import WriteDisposition
+
+
+# Include classes previously in job.py for backwards compatibility.
+__all__ = [
+ "_AsyncJob",
+ "_error_result_to_exception",
+ "_DONE_STATE",
+ "_JobConfig",
+ "_JobReference",
+ "ReservationUsage",
+ "ScriptStatistics",
+ "ScriptStackFrame",
+ "UnknownJob",
+ "CopyJob",
+ "CopyJobConfig",
+ "OperationType",
+ "ExtractJob",
+ "ExtractJobConfig",
+ "LoadJob",
+ "LoadJobConfig",
+ "_contains_order_by",
+ "DmlStats",
+ "QueryJob",
+ "QueryJobConfig",
+ "QueryPlanEntry",
+ "QueryPlanEntryStep",
+ "ScriptOptions",
+ "TimelineEntry",
+ "Compression",
+ "CreateDisposition",
+ "DestinationFormat",
+ "Encoding",
+ "QueryPriority",
+ "SchemaUpdateOption",
+ "SourceFormat",
+ "TransactionInfo",
+ "WriteDisposition",
+]
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/base.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5f68c8437d9dbc8fbccc60afde5a53693a10bf7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/base.py
@@ -0,0 +1,1114 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base classes and helpers for job classes."""
+
+from collections import namedtuple
+import copy
+import http
+import threading
+import typing
+from typing import ClassVar, Dict, Optional, Sequence
+
+from google.api_core import retry as retries
+from google.api_core import exceptions
+import google.api_core.future.polling
+
+from google.cloud.bigquery import _helpers
+from google.cloud.bigquery._helpers import _int_or_none
+from google.cloud.bigquery.retry import (
+ DEFAULT_GET_JOB_TIMEOUT,
+ DEFAULT_RETRY,
+)
+
+
+_DONE_STATE = "DONE"
+_STOPPED_REASON = "stopped"
+_ERROR_REASON_TO_EXCEPTION = {
+ "accessDenied": http.client.FORBIDDEN,
+ "backendError": http.client.INTERNAL_SERVER_ERROR,
+ "billingNotEnabled": http.client.FORBIDDEN,
+ "billingTierLimitExceeded": http.client.BAD_REQUEST,
+ "blocked": http.client.FORBIDDEN,
+ "duplicate": http.client.CONFLICT,
+ "internalError": http.client.INTERNAL_SERVER_ERROR,
+ "invalid": http.client.BAD_REQUEST,
+ "invalidQuery": http.client.BAD_REQUEST,
+ "notFound": http.client.NOT_FOUND,
+ "notImplemented": http.client.NOT_IMPLEMENTED,
+ "policyViolation": http.client.FORBIDDEN,
+ "quotaExceeded": http.client.FORBIDDEN,
+ "rateLimitExceeded": http.client.TOO_MANY_REQUESTS,
+ "resourceInUse": http.client.BAD_REQUEST,
+ "resourcesExceeded": http.client.BAD_REQUEST,
+ "responseTooLarge": http.client.FORBIDDEN,
+ "stopped": http.client.OK,
+ "tableUnavailable": http.client.BAD_REQUEST,
+}
+
+
+def _error_result_to_exception(error_result, errors=None):
+ """Maps BigQuery error reasons to an exception.
+
+ The reasons and their matching HTTP status codes are documented on
+ the `troubleshooting errors`_ page.
+
+ .. _troubleshooting errors: https://cloud.google.com/bigquery\
+ /troubleshooting-errors
+
+ Args:
+ error_result (Mapping[str, str]): The error result from BigQuery.
+ errors (Union[Iterable[str], None]): The detailed error messages.
+
+ Returns:
+ google.cloud.exceptions.GoogleAPICallError: The mapped exception.
+ """
+ reason = error_result.get("reason")
+ status_code = _ERROR_REASON_TO_EXCEPTION.get(
+ reason, http.client.INTERNAL_SERVER_ERROR
+ )
+ # Manually create error message to preserve both error_result and errors.
+ # Can be removed once b/310544564 and b/318889899 are resolved.
+ concatenated_errors = ""
+ if errors:
+ concatenated_errors = "; "
+ for err in errors:
+ concatenated_errors += ", ".join(
+ [f"{key}: {value}" for key, value in err.items()]
+ )
+ concatenated_errors += "; "
+
+ # strips off the last unneeded semicolon and space
+ concatenated_errors = concatenated_errors[:-2]
+
+ error_message = error_result.get("message", "") + concatenated_errors
+
+ return exceptions.from_http_status(
+ status_code, error_message, errors=[error_result]
+ )
+
+
+ReservationUsage = namedtuple("ReservationUsage", "name slot_ms")
+ReservationUsage.__doc__ = "Job resource usage for a reservation."
+ReservationUsage.name.__doc__ = (
+ 'Reservation name or "unreserved" for on-demand resources usage.'
+)
+ReservationUsage.slot_ms.__doc__ = (
+ "Total slot milliseconds used by the reservation for a particular job."
+)
+
+
+class TransactionInfo(typing.NamedTuple):
+ """[Alpha] Information of a multi-statement transaction.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#TransactionInfo
+
+ .. versionadded:: 2.24.0
+ """
+
+ transaction_id: str
+ """Output only. ID of the transaction."""
+
+ @classmethod
+ def from_api_repr(cls, transaction_info: Dict[str, str]) -> "TransactionInfo":
+ return cls(transaction_info["transactionId"])
+
+
+class _JobReference(object):
+ """A reference to a job.
+
+ Args:
+ job_id (str): ID of the job to run.
+ project (str): ID of the project where the job runs.
+ location (str): Location of where the job runs.
+ """
+
+ def __init__(self, job_id, project, location):
+ self._properties = {"jobId": job_id, "projectId": project}
+ # The location field must not be populated if it is None.
+ if location:
+ self._properties["location"] = location
+
+ @property
+ def job_id(self):
+ """str: ID of the job."""
+ return self._properties.get("jobId")
+
+ @property
+ def project(self):
+ """str: ID of the project where the job runs."""
+ return self._properties.get("projectId")
+
+ @property
+ def location(self):
+ """str: Location where the job runs."""
+ return self._properties.get("location")
+
+ def _to_api_repr(self):
+ """Returns the API resource representation of the job reference."""
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def _from_api_repr(cls, resource):
+ """Returns a job reference for an API resource representation."""
+ job_id = resource.get("jobId")
+ project = resource.get("projectId")
+ location = resource.get("location")
+ job_ref = cls(job_id, project, location)
+ return job_ref
+
+
+class _JobConfig(object):
+ """Abstract base class for job configuration objects.
+
+ Args:
+ job_type (str): The key to use for the job configuration.
+ """
+
+ def __init__(self, job_type, **kwargs):
+ self._job_type = job_type
+ self._properties = {job_type: {}}
+ for prop, val in kwargs.items():
+ setattr(self, prop, val)
+
+ def __setattr__(self, name, value):
+ """Override to be able to raise error if an unknown property is being set"""
+ if not name.startswith("_") and not hasattr(type(self), name):
+ raise AttributeError(
+ "Property {} is unknown for {}.".format(name, type(self))
+ )
+ super(_JobConfig, self).__setattr__(name, value)
+
+ @property
+ def job_timeout_ms(self):
+ """Optional parameter. Job timeout in milliseconds. If this time limit is exceeded, BigQuery might attempt to stop the job.
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfiguration.FIELDS.job_timeout_ms
+ e.g.
+
+ job_config = bigquery.QueryJobConfig( job_timeout_ms = 5000 )
+ or
+ job_config.job_timeout_ms = 5000
+
+ Raises:
+ ValueError: If ``value`` type is invalid.
+ """
+
+ # None as this is an optional parameter.
+ if self._properties.get("jobTimeoutMs"):
+ return self._properties["jobTimeoutMs"]
+ return None
+
+ @job_timeout_ms.setter
+ def job_timeout_ms(self, value):
+ try:
+ value = _int_or_none(value)
+ except ValueError as err:
+ raise ValueError("Pass an int for jobTimeoutMs, e.g. 5000").with_traceback(
+ err.__traceback__
+ )
+
+ if value is not None:
+ # docs indicate a string is expected by the API
+ self._properties["jobTimeoutMs"] = str(value)
+ else:
+ self._properties.pop("jobTimeoutMs", None)
+
+ @property
+ def labels(self):
+ """Dict[str, str]: Labels for the job.
+
+ This method always returns a dict. Once a job has been created on the
+ server, its labels cannot be modified anymore.
+
+ Raises:
+ ValueError: If ``value`` type is invalid.
+ """
+ return self._properties.setdefault("labels", {})
+
+ @labels.setter
+ def labels(self, value):
+ if not isinstance(value, dict):
+ raise ValueError("Pass a dict")
+ self._properties["labels"] = value
+
+ def _get_sub_prop(self, key, default=None):
+ """Get a value in the ``self._properties[self._job_type]`` dictionary.
+
+ Most job properties are inside the dictionary related to the job type
+ (e.g. 'copy', 'extract', 'load', 'query'). Use this method to access
+ those properties::
+
+ self._get_sub_prop('destinationTable')
+
+ This is equivalent to using the ``_helpers._get_sub_prop`` function::
+
+ _helpers._get_sub_prop(
+ self._properties, ['query', 'destinationTable'])
+
+ Args:
+ key (str):
+ Key for the value to get in the
+ ``self._properties[self._job_type]`` dictionary.
+ default (Optional[object]):
+ Default value to return if the key is not found.
+ Defaults to :data:`None`.
+
+ Returns:
+ object: The value if present or the default.
+ """
+ return _helpers._get_sub_prop(
+ self._properties, [self._job_type, key], default=default
+ )
+
+ def _set_sub_prop(self, key, value):
+ """Set a value in the ``self._properties[self._job_type]`` dictionary.
+
+ Most job properties are inside the dictionary related to the job type
+ (e.g. 'copy', 'extract', 'load', 'query'). Use this method to set
+ those properties::
+
+ self._set_sub_prop('useLegacySql', False)
+
+ This is equivalent to using the ``_helper._set_sub_prop`` function::
+
+ _helper._set_sub_prop(
+ self._properties, ['query', 'useLegacySql'], False)
+
+ Args:
+ key (str):
+ Key to set in the ``self._properties[self._job_type]``
+ dictionary.
+ value (object): Value to set.
+ """
+ _helpers._set_sub_prop(self._properties, [self._job_type, key], value)
+
+ def _del_sub_prop(self, key):
+ """Remove ``key`` from the ``self._properties[self._job_type]`` dict.
+
+ Most job properties are inside the dictionary related to the job type
+ (e.g. 'copy', 'extract', 'load', 'query'). Use this method to clear
+ those properties::
+
+ self._del_sub_prop('useLegacySql')
+
+ This is equivalent to using the ``_helper._del_sub_prop`` function::
+
+ _helper._del_sub_prop(
+ self._properties, ['query', 'useLegacySql'])
+
+ Args:
+ key (str):
+ Key to remove in the ``self._properties[self._job_type]``
+ dictionary.
+ """
+ _helpers._del_sub_prop(self._properties, [self._job_type, key])
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of the job config.
+
+ Returns:
+ Dict: A dictionary in the format used by the BigQuery API.
+ """
+ return copy.deepcopy(self._properties)
+
+ def _fill_from_default(self, default_job_config=None):
+ """Merge this job config with a default job config.
+
+ The keys in this object take precedence over the keys in the default
+ config. The merge is done at the top-level as well as for keys one
+ level below the job type.
+
+ Args:
+ default_job_config (google.cloud.bigquery.job._JobConfig):
+ The default job config that will be used to fill in self.
+
+ Returns:
+ google.cloud.bigquery.job._JobConfig: A new (merged) job config.
+ """
+ if not default_job_config:
+ new_job_config = copy.deepcopy(self)
+ return new_job_config
+
+ if self._job_type != default_job_config._job_type:
+ raise TypeError(
+ "attempted to merge two incompatible job types: "
+ + repr(self._job_type)
+ + ", "
+ + repr(default_job_config._job_type)
+ )
+
+ # cls is one of the job config subclasses that provides the job_type argument to
+ # this base class on instantiation, thus missing-parameter warning is a false
+ # positive here.
+ new_job_config = self.__class__() # pytype: disable=missing-parameter
+
+ default_job_properties = copy.deepcopy(default_job_config._properties)
+ for key in self._properties:
+ if key != self._job_type:
+ default_job_properties[key] = self._properties[key]
+
+ default_job_properties[self._job_type].update(self._properties[self._job_type])
+ new_job_config._properties = default_job_properties
+
+ return new_job_config
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "_JobConfig":
+ """Factory: construct a job configuration given its API representation
+
+ Args:
+ resource (Dict):
+ A job configuration in the same representation as is returned
+ from the API.
+
+ Returns:
+ google.cloud.bigquery.job._JobConfig: Configuration parsed from ``resource``.
+ """
+ # cls is one of the job config subclasses that provides the job_type argument to
+ # this base class on instantiation, thus missing-parameter warning is a false
+ # positive here.
+ job_config = cls() # type: ignore # pytype: disable=missing-parameter
+ job_config._properties = resource
+ return job_config
+
+
+class _AsyncJob(google.api_core.future.polling.PollingFuture):
+ """Base class for asynchronous jobs.
+
+ Args:
+ job_id (Union[str, _JobReference]):
+ Job's ID in the project associated with the client or a
+ fully-qualified job reference.
+ client (google.cloud.bigquery.client.Client):
+ Client which holds credentials and project configuration.
+ """
+
+ _JOB_TYPE = "unknown"
+ _CONFIG_CLASS: ClassVar
+
+ def __init__(self, job_id, client):
+ super(_AsyncJob, self).__init__()
+
+ # The job reference can be either a plain job ID or the full resource.
+ # Populate the properties dictionary consistently depending on what has
+ # been passed in.
+ job_ref = job_id
+ if not isinstance(job_id, _JobReference):
+ job_ref = _JobReference(job_id, client.project, None)
+ self._properties = {"jobReference": job_ref._to_api_repr()}
+
+ self._client = client
+ self._result_set = False
+ self._completion_lock = threading.Lock()
+
+ @property
+ def configuration(self) -> _JobConfig:
+ """Job-type specific configurtion."""
+ configuration = self._CONFIG_CLASS()
+ configuration._properties = self._properties.setdefault("configuration", {})
+ return configuration
+
+ @property
+ def job_id(self):
+ """str: ID of the job."""
+ return _helpers._get_sub_prop(self._properties, ["jobReference", "jobId"])
+
+ @property
+ def parent_job_id(self):
+ """Return the ID of the parent job.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics.FIELDS.parent_job_id
+
+ Returns:
+ Optional[str]: parent job id.
+ """
+ return _helpers._get_sub_prop(self._properties, ["statistics", "parentJobId"])
+
+ @property
+ def script_statistics(self) -> Optional["ScriptStatistics"]:
+ """Statistics for a child job of a script."""
+ resource = _helpers._get_sub_prop(
+ self._properties, ["statistics", "scriptStatistics"]
+ )
+ if resource is None:
+ return None
+ return ScriptStatistics(resource)
+
+ @property
+ def session_info(self) -> Optional["SessionInfo"]:
+ """[Preview] Information of the session if this job is part of one.
+
+ .. versionadded:: 2.29.0
+ """
+ resource = _helpers._get_sub_prop(
+ self._properties, ["statistics", "sessionInfo"]
+ )
+ if resource is None:
+ return None
+ return SessionInfo(resource)
+
+ @property
+ def num_child_jobs(self):
+ """The number of child jobs executed.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics.FIELDS.num_child_jobs
+
+ Returns:
+ int
+ """
+ count = _helpers._get_sub_prop(self._properties, ["statistics", "numChildJobs"])
+ return int(count) if count is not None else 0
+
+ @property
+ def project(self):
+ """Project bound to the job.
+
+ Returns:
+ str: the project (derived from the client).
+ """
+ return _helpers._get_sub_prop(self._properties, ["jobReference", "projectId"])
+
+ @property
+ def location(self):
+ """str: Location where the job runs."""
+ return _helpers._get_sub_prop(self._properties, ["jobReference", "location"])
+
+ def _require_client(self, client):
+ """Check client or verify over-ride.
+
+ Args:
+ client (Optional[google.cloud.bigquery.client.Client]):
+ the client to use. If not passed, falls back to the
+ ``client`` stored on the current dataset.
+
+ Returns:
+ google.cloud.bigquery.client.Client:
+ The client passed in or the currently bound client.
+ """
+ if client is None:
+ client = self._client
+ return client
+
+ @property
+ def job_type(self):
+ """Type of job.
+
+ Returns:
+ str: one of 'load', 'copy', 'extract', 'query'.
+ """
+ return self._JOB_TYPE
+
+ @property
+ def path(self):
+ """URL path for the job's APIs.
+
+ Returns:
+ str: the path based on project and job ID.
+ """
+ return "/projects/%s/jobs/%s" % (self.project, self.job_id)
+
+ @property
+ def labels(self):
+ """Dict[str, str]: Labels for the job."""
+ return self._properties.setdefault("configuration", {}).setdefault("labels", {})
+
+ @property
+ def etag(self):
+ """ETag for the job resource.
+
+ Returns:
+ Optional[str]: the ETag (None until set from the server).
+ """
+ return self._properties.get("etag")
+
+ @property
+ def self_link(self):
+ """URL for the job resource.
+
+ Returns:
+ Optional[str]: the URL (None until set from the server).
+ """
+ return self._properties.get("selfLink")
+
+ @property
+ def user_email(self):
+ """E-mail address of user who submitted the job.
+
+ Returns:
+ Optional[str]: the URL (None until set from the server).
+ """
+ return self._properties.get("user_email")
+
+ @property
+ def created(self):
+ """Datetime at which the job was created.
+
+ Returns:
+ Optional[datetime.datetime]:
+ the creation time (None until set from the server).
+ """
+ millis = _helpers._get_sub_prop(
+ self._properties, ["statistics", "creationTime"]
+ )
+ if millis is not None:
+ return _helpers._datetime_from_microseconds(millis * 1000.0)
+
+ @property
+ def started(self):
+ """Datetime at which the job was started.
+
+ Returns:
+ Optional[datetime.datetime]:
+ the start time (None until set from the server).
+ """
+ millis = _helpers._get_sub_prop(self._properties, ["statistics", "startTime"])
+ if millis is not None:
+ return _helpers._datetime_from_microseconds(millis * 1000.0)
+
+ @property
+ def ended(self):
+ """Datetime at which the job finished.
+
+ Returns:
+ Optional[datetime.datetime]:
+ the end time (None until set from the server).
+ """
+ millis = _helpers._get_sub_prop(self._properties, ["statistics", "endTime"])
+ if millis is not None:
+ return _helpers._datetime_from_microseconds(millis * 1000.0)
+
+ def _job_statistics(self):
+ """Helper for job-type specific statistics-based properties."""
+ statistics = self._properties.get("statistics", {})
+ return statistics.get(self._JOB_TYPE, {})
+
+ @property
+ def reservation_usage(self):
+ """Job resource usage breakdown by reservation.
+
+ Returns:
+ List[google.cloud.bigquery.job.ReservationUsage]:
+ Reservation usage stats. Can be empty if not set from the server.
+ """
+ usage_stats_raw = _helpers._get_sub_prop(
+ self._properties, ["statistics", "reservationUsage"], default=()
+ )
+ return [
+ ReservationUsage(name=usage["name"], slot_ms=int(usage["slotMs"]))
+ for usage in usage_stats_raw
+ ]
+
+ @property
+ def transaction_info(self) -> Optional[TransactionInfo]:
+ """Information of the multi-statement transaction if this job is part of one.
+
+ Since a scripting query job can execute multiple transactions, this
+ property is only expected on child jobs. Use the
+ :meth:`google.cloud.bigquery.client.Client.list_jobs` method with the
+ ``parent_job`` parameter to iterate over child jobs.
+
+ .. versionadded:: 2.24.0
+ """
+ info = self._properties.get("statistics", {}).get("transactionInfo")
+ if info is None:
+ return None
+ else:
+ return TransactionInfo.from_api_repr(info)
+
+ @property
+ def error_result(self):
+ """Error information about the job as a whole.
+
+ Returns:
+ Optional[Mapping]: the error information (None until set from the server).
+ """
+ status = self._properties.get("status")
+ if status is not None:
+ return status.get("errorResult")
+
+ @property
+ def errors(self):
+ """Information about individual errors generated by the job.
+
+ Returns:
+ Optional[List[Mapping]]:
+ the error information (None until set from the server).
+ """
+ status = self._properties.get("status")
+ if status is not None:
+ return status.get("errors")
+
+ @property
+ def state(self):
+ """Status of the job.
+
+ Returns:
+ Optional[str]:
+ the state (None until set from the server).
+ """
+ status = self._properties.get("status", {})
+ return status.get("state")
+
+ def _set_properties(self, api_response):
+ """Update properties from resource in body of ``api_response``
+
+ Args:
+ api_response (Dict): response returned from an API call.
+ """
+ cleaned = api_response.copy()
+ statistics = cleaned.setdefault("statistics", {})
+ if "creationTime" in statistics:
+ statistics["creationTime"] = float(statistics["creationTime"])
+ if "startTime" in statistics:
+ statistics["startTime"] = float(statistics["startTime"])
+ if "endTime" in statistics:
+ statistics["endTime"] = float(statistics["endTime"])
+
+ self._properties = cleaned
+
+ # For Future interface
+ self._set_future_result()
+
+ @classmethod
+ def _check_resource_config(cls, resource):
+ """Helper for :meth:`from_api_repr`
+
+ Args:
+ resource (Dict): resource for the job.
+
+ Raises:
+ KeyError:
+ If the resource has no identifier, or
+ is missing the appropriate configuration.
+ """
+ if "jobReference" not in resource or "jobId" not in resource["jobReference"]:
+ raise KeyError(
+ "Resource lacks required identity information: "
+ '["jobReference"]["jobId"]'
+ )
+ if (
+ "configuration" not in resource
+ or cls._JOB_TYPE not in resource["configuration"]
+ ):
+ raise KeyError(
+ "Resource lacks required configuration: "
+ '["configuration"]["%s"]' % cls._JOB_TYPE
+ )
+
+ def to_api_repr(self):
+ """Generate a resource for the job."""
+ return copy.deepcopy(self._properties)
+
+ _build_resource = to_api_repr # backward-compatibility alias
+
+ def _begin(self, client=None, retry=DEFAULT_RETRY, timeout=None):
+ """API call: begin the job via a POST request
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert
+
+ Args:
+ client (Optional[google.cloud.bigquery.client.Client]):
+ The client to use. If not passed, falls back to the ``client``
+ associated with the job object or``NoneType``
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Raises:
+ ValueError:
+ If the job has already begun.
+ """
+ if self.state is not None:
+ raise ValueError("Job already begun.")
+
+ client = self._require_client(client)
+ path = "/projects/%s/jobs" % (self.project,)
+
+ # jobs.insert is idempotent because we ensure that every new
+ # job has an ID.
+ span_attributes = {"path": path}
+ api_response = client._call_api(
+ retry,
+ span_name="BigQuery.job.begin",
+ span_attributes=span_attributes,
+ job_ref=self,
+ method="POST",
+ path=path,
+ data=self.to_api_repr(),
+ timeout=timeout,
+ )
+ self._set_properties(api_response)
+
+ def exists(
+ self,
+ client=None,
+ retry: "retries.Retry" = DEFAULT_RETRY,
+ timeout: Optional[float] = None,
+ ) -> bool:
+ """API call: test for the existence of the job via a GET request
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
+
+ Args:
+ client (Optional[google.cloud.bigquery.client.Client]):
+ the client to use. If not passed, falls back to the
+ ``client`` stored on the current dataset.
+
+ retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Returns:
+ bool: Boolean indicating existence of the job.
+ """
+ client = self._require_client(client)
+
+ extra_params = {"fields": "id"}
+ if self.location:
+ extra_params["location"] = self.location
+
+ try:
+ span_attributes = {"path": self.path}
+
+ client._call_api(
+ retry,
+ span_name="BigQuery.job.exists",
+ span_attributes=span_attributes,
+ job_ref=self,
+ method="GET",
+ path=self.path,
+ query_params=extra_params,
+ timeout=timeout,
+ )
+ except exceptions.NotFound:
+ return False
+ else:
+ return True
+
+ def reload(
+ self,
+ client=None,
+ retry: "retries.Retry" = DEFAULT_RETRY,
+ timeout: Optional[float] = DEFAULT_GET_JOB_TIMEOUT,
+ ):
+ """API call: refresh job properties via a GET request.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
+
+ Args:
+ client (Optional[google.cloud.bigquery.client.Client]):
+ the client to use. If not passed, falls back to the
+ ``client`` stored on the current dataset.
+
+ retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ """
+ client = self._require_client(client)
+
+ got_job = client.get_job(
+ self,
+ project=self.project,
+ location=self.location,
+ retry=retry,
+ timeout=timeout,
+ )
+ self._set_properties(got_job._properties)
+
+ def cancel(
+ self,
+ client=None,
+ retry: Optional[retries.Retry] = DEFAULT_RETRY,
+ timeout: Optional[float] = None,
+ ) -> bool:
+ """API call: cancel job via a POST request
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel
+
+ Args:
+ client (Optional[google.cloud.bigquery.client.Client]):
+ the client to use. If not passed, falls back to the
+ ``client`` stored on the current dataset.
+ retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``
+
+ Returns:
+ bool: Boolean indicating that the cancel request was sent.
+ """
+ client = self._require_client(client)
+
+ extra_params = {}
+ if self.location:
+ extra_params["location"] = self.location
+
+ path = "{}/cancel".format(self.path)
+ span_attributes = {"path": path}
+
+ api_response = client._call_api(
+ retry,
+ span_name="BigQuery.job.cancel",
+ span_attributes=span_attributes,
+ job_ref=self,
+ method="POST",
+ path=path,
+ query_params=extra_params,
+ timeout=timeout,
+ )
+ self._set_properties(api_response["job"])
+ # The Future interface requires that we return True if the *attempt*
+ # to cancel was successful.
+ return True
+
+ # The following methods implement the PollingFuture interface. Note that
+ # the methods above are from the pre-Future interface and are left for
+ # compatibility. The only "overloaded" method is :meth:`cancel`, which
+ # satisfies both interfaces.
+
+ def _set_future_result(self):
+ """Set the result or exception from the job if it is complete."""
+ # This must be done in a lock to prevent the polling thread
+ # and main thread from both executing the completion logic
+ # at the same time.
+ with self._completion_lock:
+ # If the operation isn't complete or if the result has already been
+ # set, do not call set_result/set_exception again.
+ # Note: self._result_set is set to True in set_result and
+ # set_exception, in case those methods are invoked directly.
+ if not self.done(reload=False) or self._result_set:
+ return
+
+ if self.error_result is not None:
+ exception = _error_result_to_exception(
+ self.error_result, self.errors or ()
+ )
+ self.set_exception(exception)
+ else:
+ self.set_result(self)
+
+ def done(
+ self,
+ retry: "retries.Retry" = DEFAULT_RETRY,
+ timeout: Optional[float] = DEFAULT_GET_JOB_TIMEOUT,
+ reload: bool = True,
+ ) -> bool:
+ """Checks if the job is complete.
+
+ Args:
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC. If the job state is ``DONE``, retrying is aborted
+ early, as the job will not change anymore.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ reload (Optional[bool]):
+ If ``True``, make an API call to refresh the job state of
+ unfinished jobs before checking. Default ``True``.
+
+ Returns:
+ bool: True if the job is complete, False otherwise.
+ """
+ # Do not refresh is the state is already done, as the job will not
+ # change once complete.
+ if self.state != _DONE_STATE and reload:
+ self.reload(retry=retry, timeout=timeout)
+ return self.state == _DONE_STATE
+
+ def result( # type: ignore # (incompatible with supertype)
+ self,
+ retry: Optional[retries.Retry] = DEFAULT_RETRY,
+ timeout: Optional[float] = None,
+ ) -> "_AsyncJob":
+ """Start the job and wait for it to complete and get the result.
+
+ Args:
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC. If the job state is ``DONE``, retrying is aborted
+ early, as the job will not change anymore.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ If multiple requests are made under the hood, ``timeout``
+ applies to each individual request.
+
+ Returns:
+ _AsyncJob: This instance.
+
+ Raises:
+ google.cloud.exceptions.GoogleAPICallError:
+ if the job failed.
+ concurrent.futures.TimeoutError:
+ if the job did not complete in the given timeout.
+ """
+ if self.state is None:
+ self._begin(retry=retry, timeout=timeout)
+
+ kwargs = {} if retry is DEFAULT_RETRY else {"retry": retry}
+ return super(_AsyncJob, self).result(timeout=timeout, **kwargs)
+
+ def cancelled(self):
+ """Check if the job has been cancelled.
+
+ This always returns False. It's not possible to check if a job was
+ cancelled in the API. This method is here to satisfy the interface
+ for :class:`google.api_core.future.Future`.
+
+ Returns:
+ bool: False
+ """
+ return (
+ self.error_result is not None
+ and self.error_result.get("reason") == _STOPPED_REASON
+ )
+
+ def __repr__(self):
+ result = (
+ f"{self.__class__.__name__}<"
+ f"project={self.project}, location={self.location}, id={self.job_id}"
+ ">"
+ )
+ return result
+
+
+class ScriptStackFrame(object):
+ """Stack frame showing the line/column/procedure name where the current
+ evaluation happened.
+
+ Args:
+ resource (Map[str, Any]): JSON representation of object.
+ """
+
+ def __init__(self, resource):
+ self._properties = resource
+
+ @property
+ def procedure_id(self):
+ """Optional[str]: Name of the active procedure.
+
+ Omitted if in a top-level script.
+ """
+ return self._properties.get("procedureId")
+
+ @property
+ def text(self):
+ """str: Text of the current statement/expression."""
+ return self._properties.get("text")
+
+ @property
+ def start_line(self):
+ """int: One-based start line."""
+ return _helpers._int_or_none(self._properties.get("startLine"))
+
+ @property
+ def start_column(self):
+ """int: One-based start column."""
+ return _helpers._int_or_none(self._properties.get("startColumn"))
+
+ @property
+ def end_line(self):
+ """int: One-based end line."""
+ return _helpers._int_or_none(self._properties.get("endLine"))
+
+ @property
+ def end_column(self):
+ """int: One-based end column."""
+ return _helpers._int_or_none(self._properties.get("endColumn"))
+
+
+class ScriptStatistics(object):
+ """Statistics for a child job of a script.
+
+ Args:
+ resource (Map[str, Any]): JSON representation of object.
+ """
+
+ def __init__(self, resource):
+ self._properties = resource
+
+ @property
+ def stack_frames(self) -> Sequence[ScriptStackFrame]:
+ """Stack trace where the current evaluation happened.
+
+ Shows line/column/procedure name of each frame on the stack at the
+ point where the current evaluation happened.
+
+ The leaf frame is first, the primary script is last.
+ """
+ return [
+ ScriptStackFrame(frame) for frame in self._properties.get("stackFrames", [])
+ ]
+
+ @property
+ def evaluation_kind(self) -> Optional[str]:
+ """str: Indicates the type of child job.
+
+ Possible values include ``STATEMENT`` and ``EXPRESSION``.
+ """
+ return self._properties.get("evaluationKind")
+
+
+class SessionInfo:
+ """[Preview] Information of the session if this job is part of one.
+
+ .. versionadded:: 2.29.0
+
+ Args:
+ resource (Map[str, Any]): JSON representation of object.
+ """
+
+ def __init__(self, resource):
+ self._properties = resource
+
+ @property
+ def session_id(self) -> Optional[str]:
+ """The ID of the session."""
+ return self._properties.get("sessionId")
+
+
+class UnknownJob(_AsyncJob):
+ """A job whose type cannot be determined."""
+
+ @classmethod
+ def from_api_repr(cls, resource: dict, client) -> "UnknownJob":
+ """Construct an UnknownJob from the JSON representation.
+
+ Args:
+ resource (Dict): JSON representation of a job.
+ client (google.cloud.bigquery.client.Client):
+ Client connected to BigQuery API.
+
+ Returns:
+ UnknownJob: Job corresponding to the resource.
+ """
+ job_ref_properties = resource.get(
+ "jobReference", {"projectId": client.project, "jobId": None}
+ )
+ job_ref = _JobReference._from_api_repr(job_ref_properties)
+ job = cls(job_ref, client)
+ # Populate the job reference with the project, even if it has been
+ # redacted, because we know it should equal that of the request.
+ resource["jobReference"] = job_ref_properties
+ job._properties = resource
+ return job
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/copy_.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/copy_.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c52aeed64707225702e459dd6cf555bba3aeae4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/copy_.py
@@ -0,0 +1,282 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes for copy jobs."""
+
+import typing
+from typing import Optional
+
+from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
+from google.cloud.bigquery import _helpers
+from google.cloud.bigquery.table import TableReference
+
+from google.cloud.bigquery.job.base import _AsyncJob
+from google.cloud.bigquery.job.base import _JobConfig
+from google.cloud.bigquery.job.base import _JobReference
+
+
+class OperationType:
+ """Different operation types supported in table copy job.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#operationtype
+ """
+
+ OPERATION_TYPE_UNSPECIFIED = "OPERATION_TYPE_UNSPECIFIED"
+ """Unspecified operation type."""
+
+ COPY = "COPY"
+ """The source and destination table have the same table type."""
+
+ SNAPSHOT = "SNAPSHOT"
+ """The source table type is TABLE and the destination table type is SNAPSHOT."""
+
+ CLONE = "CLONE"
+ """The source table type is TABLE and the destination table type is CLONE."""
+
+ RESTORE = "RESTORE"
+ """The source table type is SNAPSHOT and the destination table type is TABLE."""
+
+
+class CopyJobConfig(_JobConfig):
+ """Configuration options for copy jobs.
+
+ All properties in this class are optional. Values which are :data:`None` ->
+ server defaults. Set properties on the constructed configuration by using
+ the property name as the name of a keyword argument.
+ """
+
+ def __init__(self, **kwargs) -> None:
+ super(CopyJobConfig, self).__init__("copy", **kwargs)
+
+ @property
+ def create_disposition(self):
+ """google.cloud.bigquery.job.CreateDisposition: Specifies behavior
+ for creating tables.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.create_disposition
+ """
+ return self._get_sub_prop("createDisposition")
+
+ @create_disposition.setter
+ def create_disposition(self, value):
+ self._set_sub_prop("createDisposition", value)
+
+ @property
+ def write_disposition(self):
+ """google.cloud.bigquery.job.WriteDisposition: Action that occurs if
+ the destination table already exists.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.write_disposition
+ """
+ return self._get_sub_prop("writeDisposition")
+
+ @write_disposition.setter
+ def write_disposition(self, value):
+ self._set_sub_prop("writeDisposition", value)
+
+ @property
+ def destination_encryption_configuration(self):
+ """google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
+ encryption configuration for the destination table.
+
+ Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
+ if using default encryption.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.destination_encryption_configuration
+ """
+ prop = self._get_sub_prop("destinationEncryptionConfiguration")
+ if prop is not None:
+ prop = EncryptionConfiguration.from_api_repr(prop)
+ return prop
+
+ @destination_encryption_configuration.setter
+ def destination_encryption_configuration(self, value):
+ api_repr = value
+ if value is not None:
+ api_repr = value.to_api_repr()
+ self._set_sub_prop("destinationEncryptionConfiguration", api_repr)
+
+ @property
+ def operation_type(self) -> str:
+ """The operation to perform with this copy job.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.operation_type
+ """
+ return self._get_sub_prop(
+ "operationType", OperationType.OPERATION_TYPE_UNSPECIFIED
+ )
+
+ @operation_type.setter
+ def operation_type(self, value: Optional[str]):
+ if value is None:
+ value = OperationType.OPERATION_TYPE_UNSPECIFIED
+ self._set_sub_prop("operationType", value)
+
+ @property
+ def destination_expiration_time(self) -> str:
+ """google.cloud.bigquery.job.DestinationExpirationTime: The time when the
+ destination table expires. Expired tables will be deleted and their storage reclaimed.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationTableCopy.FIELDS.destination_expiration_time
+ """
+ return self._get_sub_prop("destinationExpirationTime")
+
+ @destination_expiration_time.setter
+ def destination_expiration_time(self, value: str):
+ self._set_sub_prop("destinationExpirationTime", value)
+
+
+class CopyJob(_AsyncJob):
+ """Asynchronous job: copy data into a table from other tables.
+
+ Args:
+ job_id (str): the job's ID, within the project belonging to ``client``.
+
+ sources (List[google.cloud.bigquery.table.TableReference]): Table from which data is to be loaded.
+
+ destination (google.cloud.bigquery.table.TableReference): Table into which data is to be loaded.
+
+ client (google.cloud.bigquery.client.Client):
+ A client which holds credentials and project configuration
+ for the dataset (which requires a project).
+
+ job_config (Optional[google.cloud.bigquery.job.CopyJobConfig]):
+ Extra configuration options for the copy job.
+ """
+
+ _JOB_TYPE = "copy"
+ _CONFIG_CLASS = CopyJobConfig
+
+ def __init__(self, job_id, sources, destination, client, job_config=None):
+ super(CopyJob, self).__init__(job_id, client)
+
+ if job_config is not None:
+ self._properties["configuration"] = job_config._properties
+
+ if destination:
+ _helpers._set_sub_prop(
+ self._properties,
+ ["configuration", "copy", "destinationTable"],
+ destination.to_api_repr(),
+ )
+
+ if sources:
+ source_resources = [source.to_api_repr() for source in sources]
+ _helpers._set_sub_prop(
+ self._properties,
+ ["configuration", "copy", "sourceTables"],
+ source_resources,
+ )
+
+ @property
+ def configuration(self) -> CopyJobConfig:
+ """The configuration for this copy job."""
+ return typing.cast(CopyJobConfig, super().configuration)
+
+ @property
+ def destination(self):
+ """google.cloud.bigquery.table.TableReference: Table into which data
+ is to be loaded.
+ """
+ return TableReference.from_api_repr(
+ _helpers._get_sub_prop(
+ self._properties, ["configuration", "copy", "destinationTable"]
+ )
+ )
+
+ @property
+ def sources(self):
+ """List[google.cloud.bigquery.table.TableReference]): Table(s) from
+ which data is to be loaded.
+ """
+ source_configs = _helpers._get_sub_prop(
+ self._properties, ["configuration", "copy", "sourceTables"]
+ )
+ if source_configs is None:
+ single = _helpers._get_sub_prop(
+ self._properties, ["configuration", "copy", "sourceTable"]
+ )
+ if single is None:
+ raise KeyError("Resource missing 'sourceTables' / 'sourceTable'")
+ source_configs = [single]
+
+ sources = []
+ for source_config in source_configs:
+ table_ref = TableReference.from_api_repr(source_config)
+ sources.append(table_ref)
+ return sources
+
+ @property
+ def create_disposition(self):
+ """See
+ :attr:`google.cloud.bigquery.job.CopyJobConfig.create_disposition`.
+ """
+ return self.configuration.create_disposition
+
+ @property
+ def write_disposition(self):
+ """See
+ :attr:`google.cloud.bigquery.job.CopyJobConfig.write_disposition`.
+ """
+ return self.configuration.write_disposition
+
+ @property
+ def destination_encryption_configuration(self):
+ """google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
+ encryption configuration for the destination table.
+
+ Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
+ if using default encryption.
+
+ See
+ :attr:`google.cloud.bigquery.job.CopyJobConfig.destination_encryption_configuration`.
+ """
+ return self.configuration.destination_encryption_configuration
+
+ def to_api_repr(self):
+ """Generate a resource for :meth:`_begin`."""
+ # Exclude statistics, if set.
+ return {
+ "jobReference": self._properties["jobReference"],
+ "configuration": self._properties["configuration"],
+ }
+
+ @classmethod
+ def from_api_repr(cls, resource, client):
+ """Factory: construct a job given its API representation
+
+ .. note::
+
+ This method assumes that the project found in the resource matches
+ the client's project.
+
+ Args:
+ resource (Dict): dataset job representation returned from the API
+ client (google.cloud.bigquery.client.Client):
+ Client which holds credentials and project
+ configuration for the dataset.
+
+ Returns:
+ google.cloud.bigquery.job.CopyJob: Job parsed from ``resource``.
+ """
+ cls._check_resource_config(resource)
+ job_ref = _JobReference._from_api_repr(resource["jobReference"])
+ job = cls(job_ref, None, None, client=client)
+ job._set_properties(resource)
+ return job
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/extract.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/extract.py
new file mode 100644
index 0000000000000000000000000000000000000000..64ec39b7603cef0f9b850397f94ebf2d85ebe50e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/extract.py
@@ -0,0 +1,271 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes for extract (export) jobs."""
+
+import typing
+
+from google.cloud.bigquery import _helpers
+from google.cloud.bigquery.model import ModelReference
+from google.cloud.bigquery.table import Table
+from google.cloud.bigquery.table import TableListItem
+from google.cloud.bigquery.table import TableReference
+from google.cloud.bigquery.job.base import _AsyncJob
+from google.cloud.bigquery.job.base import _JobConfig
+from google.cloud.bigquery.job.base import _JobReference
+
+
+class ExtractJobConfig(_JobConfig):
+ """Configuration options for extract jobs.
+
+ All properties in this class are optional. Values which are :data:`None` ->
+ server defaults. Set properties on the constructed configuration by using
+ the property name as the name of a keyword argument.
+ """
+
+ def __init__(self, **kwargs):
+ super(ExtractJobConfig, self).__init__("extract", **kwargs)
+
+ @property
+ def compression(self):
+ """google.cloud.bigquery.job.Compression: Compression type to use for
+ exported files.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationExtract.FIELDS.compression
+ """
+ return self._get_sub_prop("compression")
+
+ @compression.setter
+ def compression(self, value):
+ self._set_sub_prop("compression", value)
+
+ @property
+ def destination_format(self):
+ """google.cloud.bigquery.job.DestinationFormat: Exported file format.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationExtract.FIELDS.destination_format
+ """
+ return self._get_sub_prop("destinationFormat")
+
+ @destination_format.setter
+ def destination_format(self, value):
+ self._set_sub_prop("destinationFormat", value)
+
+ @property
+ def field_delimiter(self):
+ """str: Delimiter to use between fields in the exported data.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationExtract.FIELDS.field_delimiter
+ """
+ return self._get_sub_prop("fieldDelimiter")
+
+ @field_delimiter.setter
+ def field_delimiter(self, value):
+ self._set_sub_prop("fieldDelimiter", value)
+
+ @property
+ def print_header(self):
+ """bool: Print a header row in the exported data.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationExtract.FIELDS.print_header
+ """
+ return self._get_sub_prop("printHeader")
+
+ @print_header.setter
+ def print_header(self, value):
+ self._set_sub_prop("printHeader", value)
+
+ @property
+ def use_avro_logical_types(self):
+ """bool: For loads of Avro data, governs whether Avro logical types are
+ converted to their corresponding BigQuery types (e.g. TIMESTAMP) rather than
+ raw types (e.g. INTEGER).
+ """
+ return self._get_sub_prop("useAvroLogicalTypes")
+
+ @use_avro_logical_types.setter
+ def use_avro_logical_types(self, value):
+ self._set_sub_prop("useAvroLogicalTypes", bool(value))
+
+
+class ExtractJob(_AsyncJob):
+ """Asynchronous job: extract data from a table into Cloud Storage.
+
+ Args:
+ job_id (str): the job's ID.
+
+ source (Union[ \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.model.ModelReference \
+ ]):
+ Table or Model from which data is to be loaded or extracted.
+
+ destination_uris (List[str]):
+ URIs describing where the extracted data will be written in Cloud
+ Storage, using the format ``gs:///``.
+
+ client (google.cloud.bigquery.client.Client):
+ A client which holds credentials and project configuration.
+
+ job_config (Optional[google.cloud.bigquery.job.ExtractJobConfig]):
+ Extra configuration options for the extract job.
+ """
+
+ _JOB_TYPE = "extract"
+ _CONFIG_CLASS = ExtractJobConfig
+
+ def __init__(self, job_id, source, destination_uris, client, job_config=None):
+ super(ExtractJob, self).__init__(job_id, client)
+
+ if job_config is not None:
+ self._properties["configuration"] = job_config._properties
+
+ if source:
+ source_ref = {"projectId": source.project, "datasetId": source.dataset_id}
+
+ if isinstance(source, (Table, TableListItem, TableReference)):
+ source_ref["tableId"] = source.table_id
+ source_key = "sourceTable"
+ else:
+ source_ref["modelId"] = source.model_id
+ source_key = "sourceModel"
+
+ _helpers._set_sub_prop(
+ self._properties, ["configuration", "extract", source_key], source_ref
+ )
+
+ if destination_uris:
+ _helpers._set_sub_prop(
+ self._properties,
+ ["configuration", "extract", "destinationUris"],
+ destination_uris,
+ )
+
+ @property
+ def configuration(self) -> ExtractJobConfig:
+ """The configuration for this extract job."""
+ return typing.cast(ExtractJobConfig, super().configuration)
+
+ @property
+ def source(self):
+ """Union[ \
+ google.cloud.bigquery.table.TableReference, \
+ google.cloud.bigquery.model.ModelReference \
+ ]: Table or Model from which data is to be loaded or extracted.
+ """
+ source_config = _helpers._get_sub_prop(
+ self._properties, ["configuration", "extract", "sourceTable"]
+ )
+ if source_config:
+ return TableReference.from_api_repr(source_config)
+ else:
+ source_config = _helpers._get_sub_prop(
+ self._properties, ["configuration", "extract", "sourceModel"]
+ )
+ return ModelReference.from_api_repr(source_config)
+
+ @property
+ def destination_uris(self):
+ """List[str]: URIs describing where the extracted data will be
+ written in Cloud Storage, using the format
+ ``gs:///``.
+ """
+ return _helpers._get_sub_prop(
+ self._properties, ["configuration", "extract", "destinationUris"]
+ )
+
+ @property
+ def compression(self):
+ """See
+ :attr:`google.cloud.bigquery.job.ExtractJobConfig.compression`.
+ """
+ return self.configuration.compression
+
+ @property
+ def destination_format(self):
+ """See
+ :attr:`google.cloud.bigquery.job.ExtractJobConfig.destination_format`.
+ """
+ return self.configuration.destination_format
+
+ @property
+ def field_delimiter(self):
+ """See
+ :attr:`google.cloud.bigquery.job.ExtractJobConfig.field_delimiter`.
+ """
+ return self.configuration.field_delimiter
+
+ @property
+ def print_header(self):
+ """See
+ :attr:`google.cloud.bigquery.job.ExtractJobConfig.print_header`.
+ """
+ return self.configuration.print_header
+
+ @property
+ def destination_uri_file_counts(self):
+ """Return file counts from job statistics, if present.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics4.FIELDS.destination_uri_file_counts
+
+ Returns:
+ List[int]:
+ A list of integer counts, each representing the number of files
+ per destination URI or URI pattern specified in the extract
+ configuration. These values will be in the same order as the URIs
+ specified in the 'destinationUris' field. Returns None if job is
+ not yet complete.
+ """
+ counts = self._job_statistics().get("destinationUriFileCounts")
+ if counts is not None:
+ return [int(count) for count in counts]
+ return None
+
+ def to_api_repr(self):
+ """Generate a resource for :meth:`_begin`."""
+ # Exclude statistics, if set.
+ return {
+ "jobReference": self._properties["jobReference"],
+ "configuration": self._properties["configuration"],
+ }
+
+ @classmethod
+ def from_api_repr(cls, resource: dict, client) -> "ExtractJob":
+ """Factory: construct a job given its API representation
+
+ .. note::
+
+ This method assumes that the project found in the resource matches
+ the client's project.
+
+ Args:
+ resource (Dict): dataset job representation returned from the API
+
+ client (google.cloud.bigquery.client.Client):
+ Client which holds credentials and project
+ configuration for the dataset.
+
+ Returns:
+ google.cloud.bigquery.job.ExtractJob: Job parsed from ``resource``.
+ """
+ cls._check_resource_config(resource)
+ job_ref = _JobReference._from_api_repr(resource["jobReference"])
+ job = cls(job_ref, None, None, client=client)
+ job._set_properties(resource)
+ return job
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/load.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/load.py
new file mode 100644
index 0000000000000000000000000000000000000000..e56ce16f04a5c063f0ff441d26b23e70e0be58bc
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/load.py
@@ -0,0 +1,985 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes for load jobs."""
+
+import typing
+from typing import FrozenSet, List, Iterable, Optional
+
+from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
+from google.cloud.bigquery.external_config import HivePartitioningOptions
+from google.cloud.bigquery.format_options import ParquetOptions
+from google.cloud.bigquery import _helpers
+from google.cloud.bigquery.schema import SchemaField
+from google.cloud.bigquery.schema import _to_schema_fields
+from google.cloud.bigquery.table import RangePartitioning
+from google.cloud.bigquery.table import TableReference
+from google.cloud.bigquery.table import TimePartitioning
+from google.cloud.bigquery.job.base import _AsyncJob
+from google.cloud.bigquery.job.base import _JobConfig
+from google.cloud.bigquery.job.base import _JobReference
+from google.cloud.bigquery.query import ConnectionProperty
+
+
+class ColumnNameCharacterMap:
+ """Indicates the character map used for column names.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#columnnamecharactermap
+ """
+
+ COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED = "COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED"
+ """Unspecified column name character map."""
+
+ STRICT = "STRICT"
+ """Support flexible column name and reject invalid column names."""
+
+ V1 = "V1"
+ """ Support alphanumeric + underscore characters and names must start with
+ a letter or underscore. Invalid column names will be normalized."""
+
+ V2 = "V2"
+ """Support flexible column name. Invalid column names will be normalized."""
+
+
+class LoadJobConfig(_JobConfig):
+ """Configuration options for load jobs.
+
+ Set properties on the constructed configuration by using the property name
+ as the name of a keyword argument. Values which are unset or :data:`None`
+ use the BigQuery REST API default values. See the `BigQuery REST API
+ reference documentation
+ `_
+ for a list of default values.
+
+ Required options differ based on the
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.source_format` value.
+ For example, the BigQuery API's default value for
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.source_format` is ``"CSV"``.
+ When loading a CSV file, either
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.schema` must be set or
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.autodetect` must be set to
+ :data:`True`.
+ """
+
+ def __init__(self, **kwargs) -> None:
+ super(LoadJobConfig, self).__init__("load", **kwargs)
+
+ @property
+ def allow_jagged_rows(self):
+ """Optional[bool]: Allow missing trailing optional columns (CSV only).
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.allow_jagged_rows
+ """
+ return self._get_sub_prop("allowJaggedRows")
+
+ @allow_jagged_rows.setter
+ def allow_jagged_rows(self, value):
+ self._set_sub_prop("allowJaggedRows", value)
+
+ @property
+ def allow_quoted_newlines(self):
+ """Optional[bool]: Allow quoted data containing newline characters (CSV only).
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.allow_quoted_newlines
+ """
+ return self._get_sub_prop("allowQuotedNewlines")
+
+ @allow_quoted_newlines.setter
+ def allow_quoted_newlines(self, value):
+ self._set_sub_prop("allowQuotedNewlines", value)
+
+ @property
+ def autodetect(self):
+ """Optional[bool]: Automatically infer the schema from a sample of the data.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.autodetect
+ """
+ return self._get_sub_prop("autodetect")
+
+ @autodetect.setter
+ def autodetect(self, value):
+ self._set_sub_prop("autodetect", value)
+
+ @property
+ def clustering_fields(self):
+ """Optional[List[str]]: Fields defining clustering for the table
+
+ (Defaults to :data:`None`).
+
+ Clustering fields are immutable after table creation.
+
+ .. note::
+
+ BigQuery supports clustering for both partitioned and
+ non-partitioned tables.
+ """
+ prop = self._get_sub_prop("clustering")
+ if prop is not None:
+ return list(prop.get("fields", ()))
+
+ @clustering_fields.setter
+ def clustering_fields(self, value):
+ """Optional[List[str]]: Fields defining clustering for the table
+
+ (Defaults to :data:`None`).
+ """
+ if value is not None:
+ self._set_sub_prop("clustering", {"fields": value})
+ else:
+ self._del_sub_prop("clustering")
+
+ @property
+ def connection_properties(self) -> List[ConnectionProperty]:
+ """Connection properties.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.connection_properties
+
+ .. versionadded:: 3.7.0
+ """
+ resource = self._get_sub_prop("connectionProperties", [])
+ return [ConnectionProperty.from_api_repr(prop) for prop in resource]
+
+ @connection_properties.setter
+ def connection_properties(self, value: Iterable[ConnectionProperty]):
+ self._set_sub_prop(
+ "connectionProperties",
+ [prop.to_api_repr() for prop in value],
+ )
+
+ @property
+ def create_disposition(self):
+ """Optional[google.cloud.bigquery.job.CreateDisposition]: Specifies behavior
+ for creating tables.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.create_disposition
+ """
+ return self._get_sub_prop("createDisposition")
+
+ @create_disposition.setter
+ def create_disposition(self, value):
+ self._set_sub_prop("createDisposition", value)
+
+ @property
+ def create_session(self) -> Optional[bool]:
+ """[Preview] If :data:`True`, creates a new session, where
+ :attr:`~google.cloud.bigquery.job.LoadJob.session_info` will contain a
+ random server generated session id.
+
+ If :data:`False`, runs load job with an existing ``session_id`` passed in
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.connection_properties`,
+ otherwise runs load job in non-session mode.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.create_session
+
+ .. versionadded:: 3.7.0
+ """
+ return self._get_sub_prop("createSession")
+
+ @create_session.setter
+ def create_session(self, value: Optional[bool]):
+ self._set_sub_prop("createSession", value)
+
+ @property
+ def decimal_target_types(self) -> Optional[FrozenSet[str]]:
+ """Possible SQL data types to which the source decimal values are converted.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.decimal_target_types
+
+ .. versionadded:: 2.21.0
+ """
+ prop = self._get_sub_prop("decimalTargetTypes")
+ if prop is not None:
+ prop = frozenset(prop)
+ return prop
+
+ @decimal_target_types.setter
+ def decimal_target_types(self, value: Optional[Iterable[str]]):
+ if value is not None:
+ self._set_sub_prop("decimalTargetTypes", list(value))
+ else:
+ self._del_sub_prop("decimalTargetTypes")
+
+ @property
+ def destination_encryption_configuration(self):
+ """Optional[google.cloud.bigquery.encryption_configuration.EncryptionConfiguration]: Custom
+ encryption configuration for the destination table.
+
+ Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
+ if using default encryption.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.destination_encryption_configuration
+ """
+ prop = self._get_sub_prop("destinationEncryptionConfiguration")
+ if prop is not None:
+ prop = EncryptionConfiguration.from_api_repr(prop)
+ return prop
+
+ @destination_encryption_configuration.setter
+ def destination_encryption_configuration(self, value):
+ api_repr = value
+ if value is not None:
+ api_repr = value.to_api_repr()
+ self._set_sub_prop("destinationEncryptionConfiguration", api_repr)
+ else:
+ self._del_sub_prop("destinationEncryptionConfiguration")
+
+ @property
+ def destination_table_description(self):
+ """Optional[str]: Description of the destination table.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#DestinationTableProperties.FIELDS.description
+ """
+ prop = self._get_sub_prop("destinationTableProperties")
+ if prop is not None:
+ return prop["description"]
+
+ @destination_table_description.setter
+ def destination_table_description(self, value):
+ keys = [self._job_type, "destinationTableProperties", "description"]
+ if value is not None:
+ _helpers._set_sub_prop(self._properties, keys, value)
+ else:
+ _helpers._del_sub_prop(self._properties, keys)
+
+ @property
+ def destination_table_friendly_name(self):
+ """Optional[str]: Name given to destination table.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#DestinationTableProperties.FIELDS.friendly_name
+ """
+ prop = self._get_sub_prop("destinationTableProperties")
+ if prop is not None:
+ return prop["friendlyName"]
+
+ @destination_table_friendly_name.setter
+ def destination_table_friendly_name(self, value):
+ keys = [self._job_type, "destinationTableProperties", "friendlyName"]
+ if value is not None:
+ _helpers._set_sub_prop(self._properties, keys, value)
+ else:
+ _helpers._del_sub_prop(self._properties, keys)
+
+ @property
+ def encoding(self):
+ """Optional[google.cloud.bigquery.job.Encoding]: The character encoding of the
+ data.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.encoding
+ """
+ return self._get_sub_prop("encoding")
+
+ @encoding.setter
+ def encoding(self, value):
+ self._set_sub_prop("encoding", value)
+
+ @property
+ def field_delimiter(self):
+ """Optional[str]: The separator for fields in a CSV file.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.field_delimiter
+ """
+ return self._get_sub_prop("fieldDelimiter")
+
+ @field_delimiter.setter
+ def field_delimiter(self, value):
+ self._set_sub_prop("fieldDelimiter", value)
+
+ @property
+ def hive_partitioning(self):
+ """Optional[:class:`~.external_config.HivePartitioningOptions`]: [Beta] When set, \
+ it configures hive partitioning support.
+
+ .. note::
+ **Experimental**. This feature is experimental and might change or
+ have limited support.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.hive_partitioning_options
+ """
+ prop = self._get_sub_prop("hivePartitioningOptions")
+ if prop is None:
+ return None
+ return HivePartitioningOptions.from_api_repr(prop)
+
+ @hive_partitioning.setter
+ def hive_partitioning(self, value):
+ if value is not None:
+ if isinstance(value, HivePartitioningOptions):
+ value = value.to_api_repr()
+ else:
+ raise TypeError("Expected a HivePartitioningOptions instance or None.")
+
+ self._set_sub_prop("hivePartitioningOptions", value)
+
+ @property
+ def ignore_unknown_values(self):
+ """Optional[bool]: Ignore extra values not represented in the table schema.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.ignore_unknown_values
+ """
+ return self._get_sub_prop("ignoreUnknownValues")
+
+ @ignore_unknown_values.setter
+ def ignore_unknown_values(self, value):
+ self._set_sub_prop("ignoreUnknownValues", value)
+
+ @property
+ def json_extension(self):
+ """Optional[str]: The extension to use for writing JSON data to BigQuery. Only supports GeoJSON currently.
+
+ See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.json_extension
+
+ """
+ return self._get_sub_prop("jsonExtension")
+
+ @json_extension.setter
+ def json_extension(self, value):
+ self._set_sub_prop("jsonExtension", value)
+
+ @property
+ def max_bad_records(self):
+ """Optional[int]: Number of invalid rows to ignore.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.max_bad_records
+ """
+ return _helpers._int_or_none(self._get_sub_prop("maxBadRecords"))
+
+ @max_bad_records.setter
+ def max_bad_records(self, value):
+ self._set_sub_prop("maxBadRecords", value)
+
+ @property
+ def null_marker(self):
+ """Optional[str]: Represents a null value (CSV only).
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.null_marker
+ """
+ return self._get_sub_prop("nullMarker")
+
+ @null_marker.setter
+ def null_marker(self, value):
+ self._set_sub_prop("nullMarker", value)
+
+ @property
+ def preserve_ascii_control_characters(self):
+ """Optional[bool]: Preserves the embedded ASCII control characters when sourceFormat is set to CSV.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.preserve_ascii_control_characters
+ """
+ return self._get_sub_prop("preserveAsciiControlCharacters")
+
+ @preserve_ascii_control_characters.setter
+ def preserve_ascii_control_characters(self, value):
+ self._set_sub_prop("preserveAsciiControlCharacters", bool(value))
+
+ @property
+ def projection_fields(self) -> Optional[List[str]]:
+ """Optional[List[str]]: If
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.source_format` is set to
+ "DATASTORE_BACKUP", indicates which entity properties to load into
+ BigQuery from a Cloud Datastore backup.
+
+ Property names are case sensitive and must be top-level properties. If
+ no properties are specified, BigQuery loads all properties. If any
+ named property isn't found in the Cloud Datastore backup, an invalid
+ error is returned in the job result.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.projection_fields
+ """
+ return self._get_sub_prop("projectionFields")
+
+ @projection_fields.setter
+ def projection_fields(self, value: Optional[List[str]]):
+ self._set_sub_prop("projectionFields", value)
+
+ @property
+ def quote_character(self):
+ """Optional[str]: Character used to quote data sections (CSV only).
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.quote
+ """
+ return self._get_sub_prop("quote")
+
+ @quote_character.setter
+ def quote_character(self, value):
+ self._set_sub_prop("quote", value)
+
+ @property
+ def range_partitioning(self):
+ """Optional[google.cloud.bigquery.table.RangePartitioning]:
+ Configures range-based partitioning for destination table.
+
+ .. note::
+ **Beta**. The integer range partitioning feature is in a
+ pre-release state and might change or have limited support.
+
+ Only specify at most one of
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.time_partitioning` or
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.range_partitioning`.
+
+ Raises:
+ ValueError:
+ If the value is not
+ :class:`~google.cloud.bigquery.table.RangePartitioning` or
+ :data:`None`.
+ """
+ resource = self._get_sub_prop("rangePartitioning")
+ if resource is not None:
+ return RangePartitioning(_properties=resource)
+
+ @range_partitioning.setter
+ def range_partitioning(self, value):
+ resource = value
+ if isinstance(value, RangePartitioning):
+ resource = value._properties
+ elif value is not None:
+ raise ValueError(
+ "Expected value to be RangePartitioning or None, got {}.".format(value)
+ )
+ self._set_sub_prop("rangePartitioning", resource)
+
+ @property
+ def reference_file_schema_uri(self):
+ """Optional[str]:
+ When creating an external table, the user can provide a reference file with the
+ table schema. This is enabled for the following formats:
+
+ AVRO, PARQUET, ORC
+ """
+ return self._get_sub_prop("referenceFileSchemaUri")
+
+ @reference_file_schema_uri.setter
+ def reference_file_schema_uri(self, value):
+ return self._set_sub_prop("referenceFileSchemaUri", value)
+
+ @property
+ def schema(self):
+ """Optional[Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]]: Schema of the destination table.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.schema
+ """
+ schema = _helpers._get_sub_prop(self._properties, ["load", "schema", "fields"])
+ if schema is None:
+ return
+ return [SchemaField.from_api_repr(field) for field in schema]
+
+ @schema.setter
+ def schema(self, value):
+ if value is None:
+ self._del_sub_prop("schema")
+ return
+
+ value = _to_schema_fields(value)
+
+ _helpers._set_sub_prop(
+ self._properties,
+ ["load", "schema", "fields"],
+ [field.to_api_repr() for field in value],
+ )
+
+ @property
+ def schema_update_options(self):
+ """Optional[List[google.cloud.bigquery.job.SchemaUpdateOption]]: Specifies
+ updates to the destination table schema to allow as a side effect of
+ the load job.
+ """
+ return self._get_sub_prop("schemaUpdateOptions")
+
+ @schema_update_options.setter
+ def schema_update_options(self, values):
+ self._set_sub_prop("schemaUpdateOptions", values)
+
+ @property
+ def skip_leading_rows(self):
+ """Optional[int]: Number of rows to skip when reading data (CSV only).
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.skip_leading_rows
+ """
+ return _helpers._int_or_none(self._get_sub_prop("skipLeadingRows"))
+
+ @skip_leading_rows.setter
+ def skip_leading_rows(self, value):
+ self._set_sub_prop("skipLeadingRows", str(value))
+
+ @property
+ def source_format(self):
+ """Optional[google.cloud.bigquery.job.SourceFormat]: File format of the data.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.source_format
+ """
+ return self._get_sub_prop("sourceFormat")
+
+ @source_format.setter
+ def source_format(self, value):
+ self._set_sub_prop("sourceFormat", value)
+
+ @property
+ def time_partitioning(self):
+ """Optional[google.cloud.bigquery.table.TimePartitioning]: Specifies time-based
+ partitioning for the destination table.
+
+ Only specify at most one of
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.time_partitioning` or
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.range_partitioning`.
+ """
+ prop = self._get_sub_prop("timePartitioning")
+ if prop is not None:
+ prop = TimePartitioning.from_api_repr(prop)
+ return prop
+
+ @time_partitioning.setter
+ def time_partitioning(self, value):
+ api_repr = value
+ if value is not None:
+ api_repr = value.to_api_repr()
+ self._set_sub_prop("timePartitioning", api_repr)
+ else:
+ self._del_sub_prop("timePartitioning")
+
+ @property
+ def use_avro_logical_types(self):
+ """Optional[bool]: For loads of Avro data, governs whether Avro logical types are
+ converted to their corresponding BigQuery types (e.g. TIMESTAMP) rather than
+ raw types (e.g. INTEGER).
+ """
+ return self._get_sub_prop("useAvroLogicalTypes")
+
+ @use_avro_logical_types.setter
+ def use_avro_logical_types(self, value):
+ self._set_sub_prop("useAvroLogicalTypes", bool(value))
+
+ @property
+ def write_disposition(self):
+ """Optional[google.cloud.bigquery.job.WriteDisposition]: Action that occurs if
+ the destination table already exists.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.write_disposition
+ """
+ return self._get_sub_prop("writeDisposition")
+
+ @write_disposition.setter
+ def write_disposition(self, value):
+ self._set_sub_prop("writeDisposition", value)
+
+ @property
+ def parquet_options(self):
+ """Optional[google.cloud.bigquery.format_options.ParquetOptions]: Additional
+ properties to set if ``sourceFormat`` is set to PARQUET.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.parquet_options
+ """
+ prop = self._get_sub_prop("parquetOptions")
+ if prop is not None:
+ prop = ParquetOptions.from_api_repr(prop)
+ return prop
+
+ @parquet_options.setter
+ def parquet_options(self, value):
+ if value is not None:
+ self._set_sub_prop("parquetOptions", value.to_api_repr())
+ else:
+ self._del_sub_prop("parquetOptions")
+
+ @property
+ def column_name_character_map(self) -> str:
+ """Optional[google.cloud.bigquery.job.ColumnNameCharacterMap]:
+ Character map supported for column names in CSV/Parquet loads. Defaults
+ to STRICT and can be overridden by Project Config Service. Using this
+ option with unsupported load formats will result in an error.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.column_name_character_map
+ """
+ return self._get_sub_prop(
+ "columnNameCharacterMap",
+ ColumnNameCharacterMap.COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED,
+ )
+
+ @column_name_character_map.setter
+ def column_name_character_map(self, value: Optional[str]):
+ if value is None:
+ value = ColumnNameCharacterMap.COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED
+ self._set_sub_prop("columnNameCharacterMap", value)
+
+
+class LoadJob(_AsyncJob):
+ """Asynchronous job for loading data into a table.
+
+ Can load from Google Cloud Storage URIs or from a file.
+
+ Args:
+ job_id (str): the job's ID
+
+ source_uris (Optional[Sequence[str]]):
+ URIs of one or more data files to be loaded. See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.source_uris
+ for supported URI formats. Pass None for jobs that load from a file.
+
+ destination (google.cloud.bigquery.table.TableReference): reference to table into which data is to be loaded.
+
+ client (google.cloud.bigquery.client.Client):
+ A client which holds credentials and project configuration
+ for the dataset (which requires a project).
+ """
+
+ _JOB_TYPE = "load"
+ _CONFIG_CLASS = LoadJobConfig
+
+ def __init__(self, job_id, source_uris, destination, client, job_config=None):
+ super(LoadJob, self).__init__(job_id, client)
+
+ if job_config is not None:
+ self._properties["configuration"] = job_config._properties
+
+ if source_uris is not None:
+ _helpers._set_sub_prop(
+ self._properties, ["configuration", "load", "sourceUris"], source_uris
+ )
+
+ if destination is not None:
+ _helpers._set_sub_prop(
+ self._properties,
+ ["configuration", "load", "destinationTable"],
+ destination.to_api_repr(),
+ )
+
+ @property
+ def configuration(self) -> LoadJobConfig:
+ """The configuration for this load job."""
+ return typing.cast(LoadJobConfig, super().configuration)
+
+ @property
+ def destination(self):
+ """google.cloud.bigquery.table.TableReference: table where loaded rows are written
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.destination_table
+ """
+ dest_config = _helpers._get_sub_prop(
+ self._properties, ["configuration", "load", "destinationTable"]
+ )
+ return TableReference.from_api_repr(dest_config)
+
+ @property
+ def source_uris(self):
+ """Optional[Sequence[str]]: URIs of data files to be loaded. See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.source_uris
+ for supported URI formats. None for jobs that load from a file.
+ """
+ return _helpers._get_sub_prop(
+ self._properties, ["configuration", "load", "sourceUris"]
+ )
+
+ @property
+ def allow_jagged_rows(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.allow_jagged_rows`.
+ """
+ return self.configuration.allow_jagged_rows
+
+ @property
+ def allow_quoted_newlines(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.allow_quoted_newlines`.
+ """
+ return self.configuration.allow_quoted_newlines
+
+ @property
+ def autodetect(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.autodetect`.
+ """
+ return self.configuration.autodetect
+
+ @property
+ def connection_properties(self) -> List[ConnectionProperty]:
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.connection_properties`.
+
+ .. versionadded:: 3.7.0
+ """
+ return self.configuration.connection_properties
+
+ @property
+ def create_disposition(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.create_disposition`.
+ """
+ return self.configuration.create_disposition
+
+ @property
+ def create_session(self) -> Optional[bool]:
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.create_session`.
+
+ .. versionadded:: 3.7.0
+ """
+ return self.configuration.create_session
+
+ @property
+ def encoding(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.encoding`.
+ """
+ return self.configuration.encoding
+
+ @property
+ def field_delimiter(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.field_delimiter`.
+ """
+ return self.configuration.field_delimiter
+
+ @property
+ def ignore_unknown_values(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.ignore_unknown_values`.
+ """
+ return self.configuration.ignore_unknown_values
+
+ @property
+ def max_bad_records(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.max_bad_records`.
+ """
+ return self.configuration.max_bad_records
+
+ @property
+ def null_marker(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.null_marker`.
+ """
+ return self.configuration.null_marker
+
+ @property
+ def quote_character(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.quote_character`.
+ """
+ return self.configuration.quote_character
+
+ @property
+ def reference_file_schema_uri(self):
+ """See:
+ attr:`google.cloud.bigquery.job.LoadJobConfig.reference_file_schema_uri`.
+ """
+ return self.configuration.reference_file_schema_uri
+
+ @property
+ def skip_leading_rows(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.skip_leading_rows`.
+ """
+ return self.configuration.skip_leading_rows
+
+ @property
+ def source_format(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.source_format`.
+ """
+ return self.configuration.source_format
+
+ @property
+ def write_disposition(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.write_disposition`.
+ """
+ return self.configuration.write_disposition
+
+ @property
+ def schema(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.schema`.
+ """
+ return self.configuration.schema
+
+ @property
+ def destination_encryption_configuration(self):
+ """google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
+ encryption configuration for the destination table.
+
+ Custom encryption configuration (e.g., Cloud KMS keys)
+ or :data:`None` if using default encryption.
+
+ See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.destination_encryption_configuration`.
+ """
+ return self.configuration.destination_encryption_configuration
+
+ @property
+ def destination_table_description(self):
+ """Optional[str] name given to destination table.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#DestinationTableProperties.FIELDS.description
+ """
+ return self.configuration.destination_table_description
+
+ @property
+ def destination_table_friendly_name(self):
+ """Optional[str] name given to destination table.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#DestinationTableProperties.FIELDS.friendly_name
+ """
+ return self.configuration.destination_table_friendly_name
+
+ @property
+ def range_partitioning(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.range_partitioning`.
+ """
+ return self.configuration.range_partitioning
+
+ @property
+ def time_partitioning(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.time_partitioning`.
+ """
+ return self.configuration.time_partitioning
+
+ @property
+ def use_avro_logical_types(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.use_avro_logical_types`.
+ """
+ return self.configuration.use_avro_logical_types
+
+ @property
+ def clustering_fields(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.clustering_fields`.
+ """
+ return self.configuration.clustering_fields
+
+ @property
+ def schema_update_options(self):
+ """See
+ :attr:`google.cloud.bigquery.job.LoadJobConfig.schema_update_options`.
+ """
+ return self.configuration.schema_update_options
+
+ @property
+ def input_file_bytes(self):
+ """Count of bytes loaded from source files.
+
+ Returns:
+ Optional[int]: the count (None until set from the server).
+
+ Raises:
+ ValueError: for invalid value types.
+ """
+ return _helpers._int_or_none(
+ _helpers._get_sub_prop(
+ self._properties, ["statistics", "load", "inputFileBytes"]
+ )
+ )
+
+ @property
+ def input_files(self):
+ """Count of source files.
+
+ Returns:
+ Optional[int]: the count (None until set from the server).
+ """
+ return _helpers._int_or_none(
+ _helpers._get_sub_prop(
+ self._properties, ["statistics", "load", "inputFiles"]
+ )
+ )
+
+ @property
+ def output_bytes(self):
+ """Count of bytes saved to destination table.
+
+ Returns:
+ Optional[int]: the count (None until set from the server).
+ """
+ return _helpers._int_or_none(
+ _helpers._get_sub_prop(
+ self._properties, ["statistics", "load", "outputBytes"]
+ )
+ )
+
+ @property
+ def output_rows(self):
+ """Count of rows saved to destination table.
+
+ Returns:
+ Optional[int]: the count (None until set from the server).
+ """
+ return _helpers._int_or_none(
+ _helpers._get_sub_prop(
+ self._properties, ["statistics", "load", "outputRows"]
+ )
+ )
+
+ def to_api_repr(self):
+ """Generate a resource for :meth:`_begin`."""
+ # Exclude statistics, if set.
+ return {
+ "jobReference": self._properties["jobReference"],
+ "configuration": self._properties["configuration"],
+ }
+
+ @classmethod
+ def from_api_repr(cls, resource: dict, client) -> "LoadJob":
+ """Factory: construct a job given its API representation
+
+ .. note::
+
+ This method assumes that the project found in the resource matches
+ the client's project.
+
+ Args:
+ resource (Dict): dataset job representation returned from the API
+
+ client (google.cloud.bigquery.client.Client):
+ Client which holds credentials and project
+ configuration for the dataset.
+
+ Returns:
+ google.cloud.bigquery.job.LoadJob: Job parsed from ``resource``.
+ """
+ cls._check_resource_config(resource)
+ job_ref = _JobReference._from_api_repr(resource["jobReference"])
+ job = cls(job_ref, None, None, client)
+ job._set_properties(resource)
+ return job
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/query.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/query.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca2448eaacd8ffdebc768ac06f3d9b9277b45932
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/job/query.py
@@ -0,0 +1,2496 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes for query jobs."""
+
+import concurrent.futures
+import copy
+import re
+import time
+import typing
+from typing import Any, Dict, Iterable, List, Optional, Union
+
+from google.api_core import exceptions
+from google.api_core import retry as retries
+import requests
+
+from google.cloud.bigquery.dataset import Dataset
+from google.cloud.bigquery.dataset import DatasetListItem
+from google.cloud.bigquery.dataset import DatasetReference
+from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
+from google.cloud.bigquery.enums import KeyResultStatementKind, DefaultPandasDTypes
+from google.cloud.bigquery.external_config import ExternalConfig
+from google.cloud.bigquery import _helpers
+from google.cloud.bigquery.query import (
+ _query_param_from_api_repr,
+ ArrayQueryParameter,
+ ConnectionProperty,
+ ScalarQueryParameter,
+ StructQueryParameter,
+ UDFResource,
+)
+from google.cloud.bigquery.retry import (
+ DEFAULT_RETRY,
+ DEFAULT_JOB_RETRY,
+ POLLING_DEFAULT_VALUE,
+)
+from google.cloud.bigquery.routine import RoutineReference
+from google.cloud.bigquery.schema import SchemaField
+from google.cloud.bigquery.table import _EmptyRowIterator
+from google.cloud.bigquery.table import RangePartitioning
+from google.cloud.bigquery.table import _table_arg_to_table_ref
+from google.cloud.bigquery.table import TableReference
+from google.cloud.bigquery.table import TimePartitioning
+from google.cloud.bigquery._tqdm_helpers import wait_for_query
+
+from google.cloud.bigquery.job.base import _AsyncJob
+from google.cloud.bigquery.job.base import _JobConfig
+from google.cloud.bigquery.job.base import _JobReference
+
+try:
+ import pandas # type: ignore
+except ImportError:
+ pandas = None
+
+if typing.TYPE_CHECKING: # pragma: NO COVER
+ # Assumption: type checks are only used by library developers and CI environments
+ # that have all optional dependencies installed, thus no conditional imports.
+ import pandas # type: ignore
+ import geopandas # type: ignore
+ import pyarrow # type: ignore
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery.client import Client
+ from google.cloud.bigquery.table import RowIterator
+
+
+_CONTAINS_ORDER_BY = re.compile(r"ORDER\s+BY", re.IGNORECASE)
+_EXCEPTION_FOOTER_TEMPLATE = "{message}\n\nLocation: {location}\nJob ID: {job_id}\n"
+_TIMEOUT_BUFFER_SECS = 0.1
+
+
+def _contains_order_by(query):
+ """Do we need to preserve the order of the query results?
+
+ This function has known false positives, such as with ordered window
+ functions:
+
+ .. code-block:: sql
+
+ SELECT SUM(x) OVER (
+ window_name
+ PARTITION BY...
+ ORDER BY...
+ window_frame_clause)
+ FROM ...
+
+ This false positive failure case means the behavior will be correct, but
+ downloading results with the BigQuery Storage API may be slower than it
+ otherwise would. This is preferable to the false negative case, where
+ results are expected to be in order but are not (due to parallel reads).
+ """
+ return query and _CONTAINS_ORDER_BY.search(query)
+
+
+def _from_api_repr_query_parameters(resource):
+ return [_query_param_from_api_repr(mapping) for mapping in resource]
+
+
+def _to_api_repr_query_parameters(value):
+ return [query_parameter.to_api_repr() for query_parameter in value]
+
+
+def _from_api_repr_udf_resources(resource):
+ udf_resources = []
+ for udf_mapping in resource:
+ for udf_type, udf_value in udf_mapping.items():
+ udf_resources.append(UDFResource(udf_type, udf_value))
+ return udf_resources
+
+
+def _to_api_repr_udf_resources(value):
+ return [{udf_resource.udf_type: udf_resource.value} for udf_resource in value]
+
+
+def _from_api_repr_table_defs(resource):
+ return {k: ExternalConfig.from_api_repr(v) for k, v in resource.items()}
+
+
+def _to_api_repr_table_defs(value):
+ return {k: ExternalConfig.to_api_repr(v) for k, v in value.items()}
+
+
+class BiEngineReason(typing.NamedTuple):
+ """Reason for BI Engine acceleration failure
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#bienginereason
+ """
+
+ code: str = "CODE_UNSPECIFIED"
+
+ reason: str = ""
+
+ @classmethod
+ def from_api_repr(cls, reason: Dict[str, str]) -> "BiEngineReason":
+ return cls(reason.get("code", "CODE_UNSPECIFIED"), reason.get("message", ""))
+
+
+class BiEngineStats(typing.NamedTuple):
+ """Statistics for a BI Engine query
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#bienginestatistics
+ """
+
+ mode: str = "ACCELERATION_MODE_UNSPECIFIED"
+ """ Specifies which mode of BI Engine acceleration was performed (if any)
+ """
+
+ reasons: List[BiEngineReason] = []
+ """ Contains explanatory messages in case of DISABLED / PARTIAL acceleration
+ """
+
+ @classmethod
+ def from_api_repr(cls, stats: Dict[str, Any]) -> "BiEngineStats":
+ mode = stats.get("biEngineMode", "ACCELERATION_MODE_UNSPECIFIED")
+ reasons = [
+ BiEngineReason.from_api_repr(r) for r in stats.get("biEngineReasons", [])
+ ]
+ return cls(mode, reasons)
+
+
+class DmlStats(typing.NamedTuple):
+ """Detailed statistics for DML statements.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/DmlStats
+ """
+
+ inserted_row_count: int = 0
+ """Number of inserted rows. Populated by DML INSERT and MERGE statements."""
+
+ deleted_row_count: int = 0
+ """Number of deleted rows. populated by DML DELETE, MERGE and TRUNCATE statements.
+ """
+
+ updated_row_count: int = 0
+ """Number of updated rows. Populated by DML UPDATE and MERGE statements."""
+
+ @classmethod
+ def from_api_repr(cls, stats: Dict[str, str]) -> "DmlStats":
+ # NOTE: The field order here must match the order of fields set at the
+ # class level.
+ api_fields = ("insertedRowCount", "deletedRowCount", "updatedRowCount")
+
+ args = (
+ int(stats.get(api_field, default_val))
+ for api_field, default_val in zip(api_fields, cls.__new__.__defaults__) # type: ignore
+ )
+ return cls(*args)
+
+
+class IndexUnusedReason(typing.NamedTuple):
+ """Reason about why no search index was used in the search query (or sub-query).
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#indexunusedreason
+ """
+
+ code: Optional[str] = None
+ """Specifies the high-level reason for the scenario when no search index was used.
+ """
+
+ message: Optional[str] = None
+ """Free form human-readable reason for the scenario when no search index was used.
+ """
+
+ baseTable: Optional[TableReference] = None
+ """Specifies the base table involved in the reason that no search index was used.
+ """
+
+ indexName: Optional[str] = None
+ """Specifies the name of the unused search index, if available."""
+
+ @classmethod
+ def from_api_repr(cls, reason):
+ code = reason.get("code")
+ message = reason.get("message")
+ baseTable = reason.get("baseTable")
+ indexName = reason.get("indexName")
+
+ return cls(code, message, baseTable, indexName)
+
+
+class SearchStats(typing.NamedTuple):
+ """Statistics related to Search Queries. Populated as part of JobStatistics2.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#searchstatistics
+ """
+
+ mode: Optional[str] = None
+ """Indicates the type of search index usage in the entire search query."""
+
+ reason: List[IndexUnusedReason] = []
+ """Reason about why no search index was used in the search query (or sub-query)"""
+
+ @classmethod
+ def from_api_repr(cls, stats: Dict[str, Any]):
+ mode = stats.get("indexUsageMode", None)
+ reason = [
+ IndexUnusedReason.from_api_repr(r)
+ for r in stats.get("indexUnusedReasons", [])
+ ]
+ return cls(mode, reason)
+
+
+class ScriptOptions:
+ """Options controlling the execution of scripts.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#ScriptOptions
+ """
+
+ def __init__(
+ self,
+ statement_timeout_ms: Optional[int] = None,
+ statement_byte_budget: Optional[int] = None,
+ key_result_statement: Optional[KeyResultStatementKind] = None,
+ ):
+ self._properties: Dict[str, Any] = {}
+ self.statement_timeout_ms = statement_timeout_ms
+ self.statement_byte_budget = statement_byte_budget
+ self.key_result_statement = key_result_statement
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, Any]) -> "ScriptOptions":
+ """Factory: construct instance from the JSON repr.
+
+ Args:
+ resource(Dict[str: Any]):
+ ScriptOptions representation returned from API.
+
+ Returns:
+ google.cloud.bigquery.ScriptOptions:
+ ScriptOptions sample parsed from ``resource``.
+ """
+ entry = cls()
+ entry._properties = copy.deepcopy(resource)
+ return entry
+
+ def to_api_repr(self) -> Dict[str, Any]:
+ """Construct the API resource representation."""
+ return copy.deepcopy(self._properties)
+
+ @property
+ def statement_timeout_ms(self) -> Union[int, None]:
+ """Timeout period for each statement in a script."""
+ return _helpers._int_or_none(self._properties.get("statementTimeoutMs"))
+
+ @statement_timeout_ms.setter
+ def statement_timeout_ms(self, value: Union[int, None]):
+ new_value = None if value is None else str(value)
+ self._properties["statementTimeoutMs"] = new_value
+
+ @property
+ def statement_byte_budget(self) -> Union[int, None]:
+ """Limit on the number of bytes billed per statement.
+
+ Exceeding this budget results in an error.
+ """
+ return _helpers._int_or_none(self._properties.get("statementByteBudget"))
+
+ @statement_byte_budget.setter
+ def statement_byte_budget(self, value: Union[int, None]):
+ new_value = None if value is None else str(value)
+ self._properties["statementByteBudget"] = new_value
+
+ @property
+ def key_result_statement(self) -> Union[KeyResultStatementKind, None]:
+ """Determines which statement in the script represents the "key result".
+
+ This is used to populate the schema and query results of the script job.
+ Default is ``KeyResultStatementKind.LAST``.
+ """
+ return self._properties.get("keyResultStatement")
+
+ @key_result_statement.setter
+ def key_result_statement(self, value: Union[KeyResultStatementKind, None]):
+ self._properties["keyResultStatement"] = value
+
+
+class QueryJobConfig(_JobConfig):
+ """Configuration options for query jobs.
+
+ All properties in this class are optional. Values which are :data:`None` ->
+ server defaults. Set properties on the constructed configuration by using
+ the property name as the name of a keyword argument.
+ """
+
+ def __init__(self, **kwargs) -> None:
+ super(QueryJobConfig, self).__init__("query", **kwargs)
+
+ @property
+ def destination_encryption_configuration(self):
+ """google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
+ encryption configuration for the destination table.
+
+ Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
+ if using default encryption.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.destination_encryption_configuration
+ """
+ prop = self._get_sub_prop("destinationEncryptionConfiguration")
+ if prop is not None:
+ prop = EncryptionConfiguration.from_api_repr(prop)
+ return prop
+
+ @destination_encryption_configuration.setter
+ def destination_encryption_configuration(self, value):
+ api_repr = value
+ if value is not None:
+ api_repr = value.to_api_repr()
+ self._set_sub_prop("destinationEncryptionConfiguration", api_repr)
+
+ @property
+ def allow_large_results(self):
+ """bool: Allow large query results tables (legacy SQL, only)
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.allow_large_results
+ """
+ return self._get_sub_prop("allowLargeResults")
+
+ @allow_large_results.setter
+ def allow_large_results(self, value):
+ self._set_sub_prop("allowLargeResults", value)
+
+ @property
+ def connection_properties(self) -> List[ConnectionProperty]:
+ """Connection properties.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.connection_properties
+
+ .. versionadded:: 2.29.0
+ """
+ resource = self._get_sub_prop("connectionProperties", [])
+ return [ConnectionProperty.from_api_repr(prop) for prop in resource]
+
+ @connection_properties.setter
+ def connection_properties(self, value: Iterable[ConnectionProperty]):
+ self._set_sub_prop(
+ "connectionProperties",
+ [prop.to_api_repr() for prop in value],
+ )
+
+ @property
+ def create_disposition(self):
+ """google.cloud.bigquery.job.CreateDisposition: Specifies behavior
+ for creating tables.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.create_disposition
+ """
+ return self._get_sub_prop("createDisposition")
+
+ @create_disposition.setter
+ def create_disposition(self, value):
+ self._set_sub_prop("createDisposition", value)
+
+ @property
+ def create_session(self) -> Optional[bool]:
+ """[Preview] If :data:`True`, creates a new session, where
+ :attr:`~google.cloud.bigquery.job.QueryJob.session_info` will contain a
+ random server generated session id.
+
+ If :data:`False`, runs query with an existing ``session_id`` passed in
+ :attr:`~google.cloud.bigquery.job.QueryJobConfig.connection_properties`,
+ otherwise runs query in non-session mode.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.create_session
+
+ .. versionadded:: 2.29.0
+ """
+ return self._get_sub_prop("createSession")
+
+ @create_session.setter
+ def create_session(self, value: Optional[bool]):
+ self._set_sub_prop("createSession", value)
+
+ @property
+ def default_dataset(self):
+ """google.cloud.bigquery.dataset.DatasetReference: the default dataset
+ to use for unqualified table names in the query or :data:`None` if not
+ set.
+
+ The ``default_dataset`` setter accepts:
+
+ - a :class:`~google.cloud.bigquery.dataset.Dataset`, or
+ - a :class:`~google.cloud.bigquery.dataset.DatasetReference`, or
+ - a :class:`str` of the fully-qualified dataset ID in standard SQL
+ format. The value must included a project ID and dataset ID
+ separated by ``.``. For example: ``your-project.your_dataset``.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.default_dataset
+ """
+ prop = self._get_sub_prop("defaultDataset")
+ if prop is not None:
+ prop = DatasetReference.from_api_repr(prop)
+ return prop
+
+ @default_dataset.setter
+ def default_dataset(self, value):
+ if value is None:
+ self._set_sub_prop("defaultDataset", None)
+ return
+
+ if isinstance(value, str):
+ value = DatasetReference.from_string(value)
+
+ if isinstance(value, (Dataset, DatasetListItem)):
+ value = value.reference
+
+ resource = value.to_api_repr()
+ self._set_sub_prop("defaultDataset", resource)
+
+ @property
+ def destination(self):
+ """google.cloud.bigquery.table.TableReference: table where results are
+ written or :data:`None` if not set.
+
+ The ``destination`` setter accepts:
+
+ - a :class:`~google.cloud.bigquery.table.Table`, or
+ - a :class:`~google.cloud.bigquery.table.TableReference`, or
+ - a :class:`str` of the fully-qualified table ID in standard SQL
+ format. The value must included a project ID, dataset ID, and table
+ ID, each separated by ``.``. For example:
+ ``your-project.your_dataset.your_table``.
+
+ .. note::
+
+ Only table ID is passed to the backend, so any configuration
+ in `~google.cloud.bigquery.table.Table` is discarded.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.destination_table
+ """
+ prop = self._get_sub_prop("destinationTable")
+ if prop is not None:
+ prop = TableReference.from_api_repr(prop)
+ return prop
+
+ @destination.setter
+ def destination(self, value):
+ if value is None:
+ self._set_sub_prop("destinationTable", None)
+ return
+
+ value = _table_arg_to_table_ref(value)
+ resource = value.to_api_repr()
+ self._set_sub_prop("destinationTable", resource)
+
+ @property
+ def dry_run(self):
+ """bool: :data:`True` if this query should be a dry run to estimate
+ costs.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfiguration.FIELDS.dry_run
+ """
+ return self._properties.get("dryRun")
+
+ @dry_run.setter
+ def dry_run(self, value):
+ self._properties["dryRun"] = value
+
+ @property
+ def flatten_results(self):
+ """bool: Flatten nested/repeated fields in results. (Legacy SQL only)
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.flatten_results
+ """
+ return self._get_sub_prop("flattenResults")
+
+ @flatten_results.setter
+ def flatten_results(self, value):
+ self._set_sub_prop("flattenResults", value)
+
+ @property
+ def maximum_billing_tier(self):
+ """int: Deprecated. Changes the billing tier to allow high-compute
+ queries.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.maximum_billing_tier
+ """
+ return self._get_sub_prop("maximumBillingTier")
+
+ @maximum_billing_tier.setter
+ def maximum_billing_tier(self, value):
+ self._set_sub_prop("maximumBillingTier", value)
+
+ @property
+ def maximum_bytes_billed(self):
+ """int: Maximum bytes to be billed for this job or :data:`None` if not set.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.maximum_bytes_billed
+ """
+ return _helpers._int_or_none(self._get_sub_prop("maximumBytesBilled"))
+
+ @maximum_bytes_billed.setter
+ def maximum_bytes_billed(self, value):
+ self._set_sub_prop("maximumBytesBilled", str(value))
+
+ @property
+ def priority(self):
+ """google.cloud.bigquery.job.QueryPriority: Priority of the query.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.priority
+ """
+ return self._get_sub_prop("priority")
+
+ @priority.setter
+ def priority(self, value):
+ self._set_sub_prop("priority", value)
+
+ @property
+ def query_parameters(self):
+ """List[Union[google.cloud.bigquery.query.ArrayQueryParameter, \
+ google.cloud.bigquery.query.ScalarQueryParameter, \
+ google.cloud.bigquery.query.StructQueryParameter]]: list of parameters
+ for parameterized query (empty by default)
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.query_parameters
+ """
+ prop = self._get_sub_prop("queryParameters", default=[])
+ return _from_api_repr_query_parameters(prop)
+
+ @query_parameters.setter
+ def query_parameters(self, values):
+ self._set_sub_prop("queryParameters", _to_api_repr_query_parameters(values))
+
+ @property
+ def range_partitioning(self):
+ """Optional[google.cloud.bigquery.table.RangePartitioning]:
+ Configures range-based partitioning for destination table.
+
+ .. note::
+ **Beta**. The integer range partitioning feature is in a
+ pre-release state and might change or have limited support.
+
+ Only specify at most one of
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.time_partitioning` or
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.range_partitioning`.
+
+ Raises:
+ ValueError:
+ If the value is not
+ :class:`~google.cloud.bigquery.table.RangePartitioning` or
+ :data:`None`.
+ """
+ resource = self._get_sub_prop("rangePartitioning")
+ if resource is not None:
+ return RangePartitioning(_properties=resource)
+
+ @range_partitioning.setter
+ def range_partitioning(self, value):
+ resource = value
+ if isinstance(value, RangePartitioning):
+ resource = value._properties
+ elif value is not None:
+ raise ValueError(
+ "Expected value to be RangePartitioning or None, got {}.".format(value)
+ )
+ self._set_sub_prop("rangePartitioning", resource)
+
+ @property
+ def udf_resources(self):
+ """List[google.cloud.bigquery.query.UDFResource]: user
+ defined function resources (empty by default)
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.user_defined_function_resources
+ """
+ prop = self._get_sub_prop("userDefinedFunctionResources", default=[])
+ return _from_api_repr_udf_resources(prop)
+
+ @udf_resources.setter
+ def udf_resources(self, values):
+ self._set_sub_prop(
+ "userDefinedFunctionResources", _to_api_repr_udf_resources(values)
+ )
+
+ @property
+ def use_legacy_sql(self):
+ """bool: Use legacy SQL syntax.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.use_legacy_sql
+ """
+ return self._get_sub_prop("useLegacySql")
+
+ @use_legacy_sql.setter
+ def use_legacy_sql(self, value):
+ self._set_sub_prop("useLegacySql", value)
+
+ @property
+ def use_query_cache(self):
+ """bool: Look for the query result in the cache.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.use_query_cache
+ """
+ return self._get_sub_prop("useQueryCache")
+
+ @use_query_cache.setter
+ def use_query_cache(self, value):
+ self._set_sub_prop("useQueryCache", value)
+
+ @property
+ def write_disposition(self):
+ """google.cloud.bigquery.job.WriteDisposition: Action that occurs if
+ the destination table already exists.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.write_disposition
+ """
+ return self._get_sub_prop("writeDisposition")
+
+ @write_disposition.setter
+ def write_disposition(self, value):
+ self._set_sub_prop("writeDisposition", value)
+
+ @property
+ def table_definitions(self):
+ """Dict[str, google.cloud.bigquery.external_config.ExternalConfig]:
+ Definitions for external tables or :data:`None` if not set.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.external_table_definitions
+ """
+ prop = self._get_sub_prop("tableDefinitions")
+ if prop is not None:
+ prop = _from_api_repr_table_defs(prop)
+ return prop
+
+ @table_definitions.setter
+ def table_definitions(self, values):
+ self._set_sub_prop("tableDefinitions", _to_api_repr_table_defs(values))
+
+ @property
+ def time_partitioning(self):
+ """Optional[google.cloud.bigquery.table.TimePartitioning]: Specifies
+ time-based partitioning for the destination table.
+
+ Only specify at most one of
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.time_partitioning` or
+ :attr:`~google.cloud.bigquery.job.LoadJobConfig.range_partitioning`.
+
+ Raises:
+ ValueError:
+ If the value is not
+ :class:`~google.cloud.bigquery.table.TimePartitioning` or
+ :data:`None`.
+ """
+ prop = self._get_sub_prop("timePartitioning")
+ if prop is not None:
+ prop = TimePartitioning.from_api_repr(prop)
+ return prop
+
+ @time_partitioning.setter
+ def time_partitioning(self, value):
+ api_repr = value
+ if value is not None:
+ api_repr = value.to_api_repr()
+ self._set_sub_prop("timePartitioning", api_repr)
+
+ @property
+ def clustering_fields(self):
+ """Optional[List[str]]: Fields defining clustering for the table
+
+ (Defaults to :data:`None`).
+
+ Clustering fields are immutable after table creation.
+
+ .. note::
+
+ BigQuery supports clustering for both partitioned and
+ non-partitioned tables.
+ """
+ prop = self._get_sub_prop("clustering")
+ if prop is not None:
+ return list(prop.get("fields", ()))
+
+ @clustering_fields.setter
+ def clustering_fields(self, value):
+ """Optional[List[str]]: Fields defining clustering for the table
+
+ (Defaults to :data:`None`).
+ """
+ if value is not None:
+ self._set_sub_prop("clustering", {"fields": value})
+ else:
+ self._del_sub_prop("clustering")
+
+ @property
+ def schema_update_options(self):
+ """List[google.cloud.bigquery.job.SchemaUpdateOption]: Specifies
+ updates to the destination table schema to allow as a side effect of
+ the query job.
+ """
+ return self._get_sub_prop("schemaUpdateOptions")
+
+ @schema_update_options.setter
+ def schema_update_options(self, values):
+ self._set_sub_prop("schemaUpdateOptions", values)
+
+ @property
+ def script_options(self) -> ScriptOptions:
+ """Options controlling the execution of scripts.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#scriptoptions
+ """
+ prop = self._get_sub_prop("scriptOptions")
+ if prop is not None:
+ prop = ScriptOptions.from_api_repr(prop)
+ return prop
+
+ @script_options.setter
+ def script_options(self, value: Union[ScriptOptions, None]):
+ new_value = None if value is None else value.to_api_repr()
+ self._set_sub_prop("scriptOptions", new_value)
+
+ def to_api_repr(self) -> dict:
+ """Build an API representation of the query job config.
+
+ Returns:
+ Dict: A dictionary in the format used by the BigQuery API.
+ """
+ resource = copy.deepcopy(self._properties)
+ # Query parameters have an addition property associated with them
+ # to indicate if the query is using named or positional parameters.
+ query_parameters = resource.get("query", {}).get("queryParameters")
+ if query_parameters:
+ if query_parameters[0].get("name") is None:
+ resource["query"]["parameterMode"] = "POSITIONAL"
+ else:
+ resource["query"]["parameterMode"] = "NAMED"
+
+ return resource
+
+
+class QueryJob(_AsyncJob):
+ """Asynchronous job: query tables.
+
+ Args:
+ job_id (str): the job's ID, within the project belonging to ``client``.
+
+ query (str): SQL query string.
+
+ client (google.cloud.bigquery.client.Client):
+ A client which holds credentials and project configuration
+ for the dataset (which requires a project).
+
+ job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
+ Extra configuration options for the query job.
+ """
+
+ _JOB_TYPE = "query"
+ _UDF_KEY = "userDefinedFunctionResources"
+ _CONFIG_CLASS = QueryJobConfig
+
+ def __init__(self, job_id, query, client, job_config=None):
+ super(QueryJob, self).__init__(job_id, client)
+
+ if job_config is not None:
+ self._properties["configuration"] = job_config._properties
+ if self.configuration.use_legacy_sql is None:
+ self.configuration.use_legacy_sql = False
+
+ if query:
+ _helpers._set_sub_prop(
+ self._properties, ["configuration", "query", "query"], query
+ )
+ self._query_results = None
+ self._done_timeout = None
+ self._transport_timeout = None
+
+ @property
+ def allow_large_results(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.allow_large_results`.
+ """
+ return self.configuration.allow_large_results
+
+ @property
+ def configuration(self) -> QueryJobConfig:
+ """The configuration for this query job."""
+ return typing.cast(QueryJobConfig, super().configuration)
+
+ @property
+ def connection_properties(self) -> List[ConnectionProperty]:
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.connection_properties`.
+
+ .. versionadded:: 2.29.0
+ """
+ return self.configuration.connection_properties
+
+ @property
+ def create_disposition(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.create_disposition`.
+ """
+ return self.configuration.create_disposition
+
+ @property
+ def create_session(self) -> Optional[bool]:
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.create_session`.
+
+ .. versionadded:: 2.29.0
+ """
+ return self.configuration.create_session
+
+ @property
+ def default_dataset(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.default_dataset`.
+ """
+ return self.configuration.default_dataset
+
+ @property
+ def destination(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.destination`.
+ """
+ return self.configuration.destination
+
+ @property
+ def destination_encryption_configuration(self):
+ """google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
+ encryption configuration for the destination table.
+
+ Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
+ if using default encryption.
+
+ See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.destination_encryption_configuration`.
+ """
+ return self.configuration.destination_encryption_configuration
+
+ @property
+ def dry_run(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.dry_run`.
+ """
+ return self.configuration.dry_run
+
+ @property
+ def flatten_results(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.flatten_results`.
+ """
+ return self.configuration.flatten_results
+
+ @property
+ def priority(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.priority`.
+ """
+ return self.configuration.priority
+
+ @property
+ def search_stats(self) -> Optional[SearchStats]:
+ """Returns a SearchStats object."""
+
+ stats = self._job_statistics().get("searchStatistics")
+ if stats is not None:
+ return SearchStats.from_api_repr(stats)
+ return None
+
+ @property
+ def query(self):
+ """str: The query text used in this query job.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.query
+ """
+ return _helpers._get_sub_prop(
+ self._properties, ["configuration", "query", "query"]
+ )
+
+ @property
+ def query_id(self) -> Optional[str]:
+ """[Preview] ID of a completed query.
+
+ This ID is auto-generated and not guaranteed to be populated.
+ """
+ query_results = self._query_results
+ return query_results.query_id if query_results is not None else None
+
+ @property
+ def query_parameters(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.query_parameters`.
+ """
+ return self.configuration.query_parameters
+
+ @property
+ def udf_resources(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.udf_resources`.
+ """
+ return self.configuration.udf_resources
+
+ @property
+ def use_legacy_sql(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.use_legacy_sql`.
+ """
+ return self.configuration.use_legacy_sql
+
+ @property
+ def use_query_cache(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.use_query_cache`.
+ """
+ return self.configuration.use_query_cache
+
+ @property
+ def write_disposition(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.write_disposition`.
+ """
+ return self.configuration.write_disposition
+
+ @property
+ def maximum_billing_tier(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_billing_tier`.
+ """
+ return self.configuration.maximum_billing_tier
+
+ @property
+ def maximum_bytes_billed(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_bytes_billed`.
+ """
+ return self.configuration.maximum_bytes_billed
+
+ @property
+ def range_partitioning(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.range_partitioning`.
+ """
+ return self.configuration.range_partitioning
+
+ @property
+ def table_definitions(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.table_definitions`.
+ """
+ return self.configuration.table_definitions
+
+ @property
+ def time_partitioning(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.time_partitioning`.
+ """
+ return self.configuration.time_partitioning
+
+ @property
+ def clustering_fields(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.clustering_fields`.
+ """
+ return self.configuration.clustering_fields
+
+ @property
+ def schema_update_options(self):
+ """See
+ :attr:`google.cloud.bigquery.job.QueryJobConfig.schema_update_options`.
+ """
+ return self.configuration.schema_update_options
+
+ def to_api_repr(self):
+ """Generate a resource for :meth:`_begin`."""
+ # Use to_api_repr to allow for some configuration properties to be set
+ # automatically.
+ configuration = self.configuration.to_api_repr()
+ return {
+ "jobReference": self._properties["jobReference"],
+ "configuration": configuration,
+ }
+
+ @classmethod
+ def from_api_repr(cls, resource: dict, client: "Client") -> "QueryJob":
+ """Factory: construct a job given its API representation
+
+ Args:
+ resource (Dict): dataset job representation returned from the API
+
+ client (google.cloud.bigquery.client.Client):
+ Client which holds credentials and project
+ configuration for the dataset.
+
+ Returns:
+ google.cloud.bigquery.job.QueryJob: Job parsed from ``resource``.
+ """
+ job_ref_properties = resource.setdefault(
+ "jobReference", {"projectId": client.project, "jobId": None}
+ )
+ job_ref = _JobReference._from_api_repr(job_ref_properties)
+ job = cls(job_ref, None, client=client)
+ job._set_properties(resource)
+ return job
+
+ @property
+ def query_plan(self):
+ """Return query plan from job statistics, if present.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.query_plan
+
+ Returns:
+ List[google.cloud.bigquery.job.QueryPlanEntry]:
+ mappings describing the query plan, or an empty list
+ if the query has not yet completed.
+ """
+ plan_entries = self._job_statistics().get("queryPlan", ())
+ return [QueryPlanEntry.from_api_repr(entry) for entry in plan_entries]
+
+ @property
+ def schema(self) -> Optional[List[SchemaField]]:
+ """The schema of the results.
+
+ Present only for successful dry run of non-legacy SQL queries.
+ """
+ resource = self._job_statistics().get("schema")
+ if resource is None:
+ return None
+ fields = resource.get("fields", [])
+ return [SchemaField.from_api_repr(field) for field in fields]
+
+ @property
+ def timeline(self):
+ """List(TimelineEntry): Return the query execution timeline
+ from job statistics.
+ """
+ raw = self._job_statistics().get("timeline", ())
+ return [TimelineEntry.from_api_repr(entry) for entry in raw]
+
+ @property
+ def total_bytes_processed(self):
+ """Return total bytes processed from job statistics, if present.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.total_bytes_processed
+
+ Returns:
+ Optional[int]:
+ Total bytes processed by the job, or None if job is not
+ yet complete.
+ """
+ result = self._job_statistics().get("totalBytesProcessed")
+ if result is not None:
+ result = int(result)
+ return result
+
+ @property
+ def total_bytes_billed(self):
+ """Return total bytes billed from job statistics, if present.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.total_bytes_billed
+
+ Returns:
+ Optional[int]:
+ Total bytes processed by the job, or None if job is not
+ yet complete.
+ """
+ result = self._job_statistics().get("totalBytesBilled")
+ if result is not None:
+ result = int(result)
+ return result
+
+ @property
+ def billing_tier(self):
+ """Return billing tier from job statistics, if present.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.billing_tier
+
+ Returns:
+ Optional[int]:
+ Billing tier used by the job, or None if job is not
+ yet complete.
+ """
+ return self._job_statistics().get("billingTier")
+
+ @property
+ def cache_hit(self):
+ """Return whether or not query results were served from cache.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.cache_hit
+
+ Returns:
+ Optional[bool]:
+ whether the query results were returned from cache, or None
+ if job is not yet complete.
+ """
+ return self._job_statistics().get("cacheHit")
+
+ @property
+ def ddl_operation_performed(self):
+ """Optional[str]: Return the DDL operation performed.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.ddl_operation_performed
+
+ """
+ return self._job_statistics().get("ddlOperationPerformed")
+
+ @property
+ def ddl_target_routine(self):
+ """Optional[google.cloud.bigquery.routine.RoutineReference]: Return the DDL target routine, present
+ for CREATE/DROP FUNCTION/PROCEDURE queries.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.ddl_target_routine
+ """
+ prop = self._job_statistics().get("ddlTargetRoutine")
+ if prop is not None:
+ prop = RoutineReference.from_api_repr(prop)
+ return prop
+
+ @property
+ def ddl_target_table(self):
+ """Optional[google.cloud.bigquery.table.TableReference]: Return the DDL target table, present
+ for CREATE/DROP TABLE/VIEW queries.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.ddl_target_table
+ """
+ prop = self._job_statistics().get("ddlTargetTable")
+ if prop is not None:
+ prop = TableReference.from_api_repr(prop)
+ return prop
+
+ @property
+ def num_dml_affected_rows(self) -> Optional[int]:
+ """Return the number of DML rows affected by the job.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.num_dml_affected_rows
+
+ Returns:
+ Optional[int]:
+ number of DML rows affected by the job, or None if job is not
+ yet complete.
+ """
+ result = self._job_statistics().get("numDmlAffectedRows")
+ if result is not None:
+ result = int(result)
+ return result
+
+ @property
+ def slot_millis(self):
+ """Union[int, None]: Slot-milliseconds used by this query job."""
+ return _helpers._int_or_none(self._job_statistics().get("totalSlotMs"))
+
+ @property
+ def statement_type(self):
+ """Return statement type from job statistics, if present.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.statement_type
+
+ Returns:
+ Optional[str]:
+ type of statement used by the job, or None if job is not
+ yet complete.
+ """
+ return self._job_statistics().get("statementType")
+
+ @property
+ def referenced_tables(self):
+ """Return referenced tables from job statistics, if present.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.referenced_tables
+
+ Returns:
+ List[Dict]:
+ mappings describing the query plan, or an empty list
+ if the query has not yet completed.
+ """
+ tables = []
+ datasets_by_project_name = {}
+
+ for table in self._job_statistics().get("referencedTables", ()):
+ t_project = table["projectId"]
+
+ ds_id = table["datasetId"]
+ t_dataset = datasets_by_project_name.get((t_project, ds_id))
+ if t_dataset is None:
+ t_dataset = DatasetReference(t_project, ds_id)
+ datasets_by_project_name[(t_project, ds_id)] = t_dataset
+
+ t_name = table["tableId"]
+ tables.append(t_dataset.table(t_name))
+
+ return tables
+
+ @property
+ def undeclared_query_parameters(self):
+ """Return undeclared query parameters from job statistics, if present.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.undeclared_query_parameters
+
+ Returns:
+ List[Union[ \
+ google.cloud.bigquery.query.ArrayQueryParameter, \
+ google.cloud.bigquery.query.ScalarQueryParameter, \
+ google.cloud.bigquery.query.StructQueryParameter \
+ ]]:
+ Undeclared parameters, or an empty list if the query has
+ not yet completed.
+ """
+ parameters = []
+ undeclared = self._job_statistics().get("undeclaredQueryParameters", ())
+
+ for parameter in undeclared:
+ p_type = parameter["parameterType"]
+
+ if "arrayType" in p_type:
+ klass = ArrayQueryParameter
+ elif "structTypes" in p_type:
+ klass = StructQueryParameter
+ else:
+ klass = ScalarQueryParameter
+
+ parameters.append(klass.from_api_repr(parameter))
+
+ return parameters
+
+ @property
+ def estimated_bytes_processed(self):
+ """Return the estimated number of bytes processed by the query.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.estimated_bytes_processed
+
+ Returns:
+ Optional[int]:
+ number of DML rows affected by the job, or None if job is not
+ yet complete.
+ """
+ result = self._job_statistics().get("estimatedBytesProcessed")
+ if result is not None:
+ result = int(result)
+ return result
+
+ @property
+ def dml_stats(self) -> Optional[DmlStats]:
+ stats = self._job_statistics().get("dmlStats")
+ if stats is None:
+ return None
+ else:
+ return DmlStats.from_api_repr(stats)
+
+ @property
+ def bi_engine_stats(self) -> Optional[BiEngineStats]:
+ stats = self._job_statistics().get("biEngineStatistics")
+
+ if stats is None:
+ return None
+ else:
+ return BiEngineStats.from_api_repr(stats)
+
+ def _blocking_poll(self, timeout=None, **kwargs):
+ self._done_timeout = timeout
+ self._transport_timeout = timeout
+ super(QueryJob, self)._blocking_poll(timeout=timeout, **kwargs)
+
+ @staticmethod
+ def _format_for_exception(message: str, query: str):
+ """Format a query for the output in exception message.
+
+ Args:
+ message (str): The original exception message.
+ query (str): The SQL query to format.
+
+ Returns:
+ str: A formatted query text.
+ """
+ template = "{message}\n\n{header}\n\n{ruler}\n{body}\n{ruler}"
+
+ lines = query.splitlines() if query is not None else [""]
+ max_line_len = max(len(line) for line in lines)
+
+ header = "-----Query Job SQL Follows-----"
+ header = "{:^{total_width}}".format(header, total_width=max_line_len + 5)
+
+ # Print out a "ruler" above and below the SQL so we can judge columns.
+ # Left pad for the line numbers (4 digits plus ":").
+ ruler = " |" + " . |" * (max_line_len // 10)
+
+ # Put line numbers next to the SQL.
+ body = "\n".join(
+ "{:4}:{}".format(n, line) for n, line in enumerate(lines, start=1)
+ )
+
+ return template.format(message=message, header=header, ruler=ruler, body=body)
+
+ def _begin(self, client=None, retry=DEFAULT_RETRY, timeout=None):
+ """API call: begin the job via a POST request
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert
+
+ Args:
+ client (Optional[google.cloud.bigquery.client.Client]):
+ The client to use. If not passed, falls back to the ``client``
+ associated with the job object or``NoneType``.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the RPC.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+
+ Raises:
+ ValueError: If the job has already begun.
+ """
+
+ try:
+ super(QueryJob, self)._begin(client=client, retry=retry, timeout=timeout)
+ except exceptions.GoogleAPICallError as exc:
+ exc.message = _EXCEPTION_FOOTER_TEMPLATE.format(
+ message=exc.message, location=self.location, job_id=self.job_id
+ )
+ exc.debug_message = self._format_for_exception(exc.message, self.query)
+ exc.query_job = self
+ raise
+
+ def _reload_query_results(
+ self,
+ retry: "retries.Retry" = DEFAULT_RETRY,
+ timeout: Optional[float] = None,
+ page_size: int = 0,
+ ):
+ """Refresh the cached query results unless already cached and complete.
+
+ Args:
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the call that retrieves query results.
+ timeout (Optional[float]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``.
+ page_size (int):
+ Maximum number of rows in a single response. See maxResults in
+ the jobs.getQueryResults REST API.
+ """
+ # Optimization: avoid a call to jobs.getQueryResults if it's already
+ # been fetched, e.g. from jobs.query first page of results.
+ if self._query_results and self._query_results.complete:
+ return
+
+ # Since the API to getQueryResults can hang up to the timeout value
+ # (default of 10 seconds), set the timeout parameter to ensure that
+ # the timeout from the futures API is respected. See:
+ # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4135
+ timeout_ms = None
+
+ # Python_API_core, as part of a major rewrite of the deadline, timeout,
+ # retry process sets the timeout value as a Python object().
+ # Our system does not natively handle that and instead expects
+ # either None or a numeric value. If passed a Python object, convert to
+ # None.
+ if type(self._done_timeout) is object: # pragma: NO COVER
+ self._done_timeout = None
+
+ if self._done_timeout is not None: # pragma: NO COVER
+ # Subtract a buffer for context switching, network latency, etc.
+ api_timeout = self._done_timeout - _TIMEOUT_BUFFER_SECS
+ api_timeout = max(min(api_timeout, 10), 0)
+ self._done_timeout -= api_timeout
+ self._done_timeout = max(0, self._done_timeout)
+ timeout_ms = int(api_timeout * 1000)
+
+ # If an explicit timeout is not given, fall back to the transport timeout
+ # stored in _blocking_poll() in the process of polling for job completion.
+ if timeout is not None:
+ transport_timeout = timeout
+ else:
+ transport_timeout = self._transport_timeout
+
+ # Handle PollingJob._DEFAULT_VALUE.
+ if not isinstance(transport_timeout, (float, int)):
+ transport_timeout = None
+
+ self._query_results = self._client._get_query_results(
+ self.job_id,
+ retry,
+ project=self.project,
+ timeout_ms=timeout_ms,
+ location=self.location,
+ timeout=transport_timeout,
+ page_size=page_size,
+ )
+
+ def result( # type: ignore # (incompatible with supertype)
+ self,
+ page_size: Optional[int] = None,
+ max_results: Optional[int] = None,
+ retry: Optional[retries.Retry] = DEFAULT_RETRY,
+ timeout: Optional[Union[float, object]] = POLLING_DEFAULT_VALUE,
+ start_index: Optional[int] = None,
+ job_retry: Optional[retries.Retry] = DEFAULT_JOB_RETRY,
+ ) -> Union["RowIterator", _EmptyRowIterator]:
+ """Start the job and wait for it to complete and get the result.
+
+ Args:
+ page_size (Optional[int]):
+ The maximum number of rows in each page of results from this
+ request. Non-positive values are ignored.
+ max_results (Optional[int]):
+ The maximum total number of rows from this request.
+ retry (Optional[google.api_core.retry.Retry]):
+ How to retry the call that retrieves rows. This only
+ applies to making RPC calls. It isn't used to retry
+ failed jobs. This has a reasonable default that
+ should only be overridden with care. If the job state
+ is ``DONE``, retrying is aborted early even if the
+ results are not available, as this will not change
+ anymore.
+ timeout (Optional[Union[float, \
+ google.api_core.future.polling.PollingFuture._DEFAULT_VALUE, \
+ ]]):
+ The number of seconds to wait for the underlying HTTP transport
+ before using ``retry``. If ``None``, wait indefinitely
+ unless an error is returned. If unset, only the
+ underlying API calls have their default timeouts, but we still
+ wait indefinitely for the job to finish.
+ start_index (Optional[int]):
+ The zero-based index of the starting row to read.
+ job_retry (Optional[google.api_core.retry.Retry]):
+ How to retry failed jobs. The default retries
+ rate-limit-exceeded errors. Passing ``None`` disables
+ job retry.
+
+ Not all jobs can be retried. If ``job_id`` was
+ provided to the query that created this job, then the
+ job returned by the query will not be retryable, and
+ an exception will be raised if non-``None``
+ non-default ``job_retry`` is also provided.
+
+ Returns:
+ google.cloud.bigquery.table.RowIterator:
+ Iterator of row data
+ :class:`~google.cloud.bigquery.table.Row`-s. During each
+ page, the iterator will have the ``total_rows`` attribute
+ set, which counts the total number of rows **in the result
+ set** (this is distinct from the total number of rows in the
+ current page: ``iterator.page.num_items``).
+
+ If the query is a special query that produces no results, e.g.
+ a DDL query, an ``_EmptyRowIterator`` instance is returned.
+
+ Raises:
+ google.cloud.exceptions.GoogleAPICallError:
+ If the job failed and retries aren't successful.
+ concurrent.futures.TimeoutError:
+ If the job did not complete in the given timeout.
+ TypeError:
+ If Non-``None`` and non-default ``job_retry`` is
+ provided and the job is not retryable.
+ """
+ # Note: Since waiting for a query job to finish is more complex than
+ # refreshing the job state in a loop, we avoid calling the superclass
+ # in this method.
+
+ if self.dry_run:
+ return _EmptyRowIterator(
+ project=self.project,
+ location=self.location,
+ # Intentionally omit job_id and query_id since this doesn't
+ # actually correspond to a finished query job.
+ )
+
+ # Setting max_results should be equivalent to setting page_size with
+ # regards to allowing the user to tune how many results to download
+ # while we wait for the query to finish. See internal issue:
+ # 344008814. But if start_index is set, user is trying to access a
+ # specific page, so we don't need to set page_size. See issue #1950.
+ if page_size is None and max_results is not None and start_index is None:
+ page_size = max_results
+
+ # When timeout has default sentinel value ``object()``, do not pass
+ # anything to invoke default timeouts in subsequent calls.
+ done_kwargs: Dict[str, Union[_helpers.TimeoutType, object]] = {}
+ reload_query_results_kwargs: Dict[str, Union[_helpers.TimeoutType, object]] = {}
+ list_rows_kwargs: Dict[str, Union[_helpers.TimeoutType, object]] = {}
+ if type(timeout) is not object:
+ done_kwargs["timeout"] = timeout
+ list_rows_kwargs["timeout"] = timeout
+ reload_query_results_kwargs["timeout"] = timeout
+
+ if page_size is not None:
+ reload_query_results_kwargs["page_size"] = page_size
+
+ try:
+ retry_do_query = getattr(self, "_retry_do_query", None)
+ if retry_do_query is not None:
+ if job_retry is DEFAULT_JOB_RETRY:
+ job_retry = self._job_retry # type: ignore
+ else:
+ if job_retry is not None and job_retry is not DEFAULT_JOB_RETRY:
+ raise TypeError(
+ "`job_retry` was provided, but this job is"
+ " not retryable, because a custom `job_id` was"
+ " provided to the query that created this job."
+ )
+
+ restart_query_job = False
+
+ def is_job_done():
+ nonlocal restart_query_job
+
+ if restart_query_job:
+ restart_query_job = False
+
+ # The original job has failed. Create a new one.
+ #
+ # Note that we won't get here if retry_do_query is
+ # None, because we won't use a retry.
+ job = retry_do_query()
+
+ # Become the new job:
+ self.__dict__.clear()
+ self.__dict__.update(job.__dict__)
+
+ # It's possible the job fails again and we'll have to
+ # retry that too.
+ self._retry_do_query = retry_do_query
+ self._job_retry = job_retry
+
+ # If the job hasn't been created, create it now. Related:
+ # https://github.com/googleapis/python-bigquery/issues/1940
+ if self.state is None:
+ self._begin(retry=retry, **done_kwargs)
+
+ # Refresh the job status with jobs.get because some of the
+ # exceptions thrown by jobs.getQueryResults like timeout and
+ # rateLimitExceeded errors are ambiguous. We want to know if
+ # the query job failed and not just the call to
+ # jobs.getQueryResults.
+ if self.done(retry=retry, **done_kwargs):
+ # If it's already failed, we might as well stop.
+ job_failed_exception = self.exception()
+ if job_failed_exception is not None:
+ # Only try to restart the query job if the job failed for
+ # a retriable reason. For example, don't restart the query
+ # if the call to reload the job metadata within self.done()
+ # timed out.
+ #
+ # The `restart_query_job` must only be called after a
+ # successful call to the `jobs.get` REST API and we
+ # determine that the job has failed.
+ #
+ # The `jobs.get` REST API
+ # (https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get)
+ # is called via `self.done()` which calls
+ # `self.reload()`.
+ #
+ # To determine if the job failed, the `self.exception()`
+ # is set from `self.reload()` via
+ # `self._set_properties()`, which translates the
+ # `Job.status.errorResult` field
+ # (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatus.FIELDS.error_result)
+ # into an exception that can be processed by the
+ # `job_retry` predicate.
+ restart_query_job = True
+ raise job_failed_exception
+ else:
+ # Make sure that the _query_results are cached so we
+ # can return a complete RowIterator.
+ #
+ # Note: As an optimization, _reload_query_results
+ # doesn't make any API calls if the query results are
+ # already cached and have jobComplete=True in the
+ # response from the REST API. This ensures we aren't
+ # making any extra API calls if the previous loop
+ # iteration fetched the finished job.
+ self._reload_query_results(
+ retry=retry, **reload_query_results_kwargs
+ )
+ return True
+
+ # Call jobs.getQueryResults with max results set to 0 just to
+ # wait for the query to finish. Unlike most methods,
+ # jobs.getQueryResults hangs as long as it can to ensure we
+ # know when the query has finished as soon as possible.
+ self._reload_query_results(retry=retry, **reload_query_results_kwargs)
+
+ # Even if the query is finished now according to
+ # jobs.getQueryResults, we'll want to reload the job status if
+ # it's not already DONE.
+ return False
+
+ if retry_do_query is not None and job_retry is not None:
+ is_job_done = job_retry(is_job_done)
+
+ # timeout can be a number of seconds, `None`, or a
+ # `google.api_core.future.polling.PollingFuture._DEFAULT_VALUE`
+ # sentinel object indicating a default timeout if we choose to add
+ # one some day. This value can come from our PollingFuture
+ # superclass and was introduced in
+ # https://github.com/googleapis/python-api-core/pull/462.
+ if isinstance(timeout, (float, int)):
+ remaining_timeout = timeout
+ else:
+ # Note: we may need to handle _DEFAULT_VALUE as a separate
+ # case someday, but even then the best we can do for queries
+ # is 72+ hours for hyperparameter tuning jobs:
+ # https://cloud.google.com/bigquery/quotas#query_jobs
+ #
+ # The timeout for a multi-statement query is 24+ hours. See:
+ # https://cloud.google.com/bigquery/quotas#multi_statement_query_limits
+ remaining_timeout = None
+
+ if remaining_timeout is None:
+ # Since is_job_done() calls jobs.getQueryResults, which is a
+ # long-running API, don't delay the next request at all.
+ while not is_job_done():
+ pass
+ else:
+ # Use a monotonic clock since we don't actually care about
+ # daylight savings or similar, just the elapsed time.
+ previous_time = time.monotonic()
+
+ while not is_job_done():
+ current_time = time.monotonic()
+ elapsed_time = current_time - previous_time
+ remaining_timeout = remaining_timeout - elapsed_time
+ previous_time = current_time
+
+ if remaining_timeout < 0:
+ raise concurrent.futures.TimeoutError()
+
+ except exceptions.GoogleAPICallError as exc:
+ exc.message = _EXCEPTION_FOOTER_TEMPLATE.format(
+ message=exc.message, location=self.location, job_id=self.job_id
+ )
+ exc.debug_message = self._format_for_exception(exc.message, self.query) # type: ignore
+ exc.query_job = self # type: ignore
+ raise
+ except requests.exceptions.Timeout as exc:
+ raise concurrent.futures.TimeoutError from exc
+
+ # If the query job is complete but there are no query results, this was
+ # special job, such as a DDL query. Return an empty result set to
+ # indicate success and avoid calling tabledata.list on a table which
+ # can't be read (such as a view table).
+ if self._query_results.total_rows is None:
+ return _EmptyRowIterator(
+ location=self.location,
+ project=self.project,
+ job_id=self.job_id,
+ query_id=self.query_id,
+ num_dml_affected_rows=self._query_results.num_dml_affected_rows,
+ )
+
+ # We know that there's at least 1 row, so only treat the response from
+ # jobs.getQueryResults / jobs.query as the first page of the
+ # RowIterator response if there are any rows in it. This prevents us
+ # from stopping the iteration early in the cases where we set
+ # maxResults=0. In that case, we're missing rows and there's no next
+ # page token.
+ first_page_response = self._query_results._properties
+ if "rows" not in first_page_response:
+ first_page_response = None
+
+ rows = self._client._list_rows_from_query_results(
+ self.job_id,
+ self.location,
+ self.project,
+ self._query_results.schema,
+ total_rows=self._query_results.total_rows,
+ destination=self.destination,
+ page_size=page_size,
+ max_results=max_results,
+ start_index=start_index,
+ retry=retry,
+ query_id=self.query_id,
+ first_page_response=first_page_response,
+ num_dml_affected_rows=self._query_results.num_dml_affected_rows,
+ **list_rows_kwargs,
+ )
+ rows._preserve_order = _contains_order_by(self.query)
+ return rows
+
+ # If changing the signature of this method, make sure to apply the same
+ # changes to table.RowIterator.to_arrow(), except for the max_results parameter
+ # that should only exist here in the QueryJob method.
+ def to_arrow(
+ self,
+ progress_bar_type: Optional[str] = None,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ create_bqstorage_client: bool = True,
+ max_results: Optional[int] = None,
+ ) -> "pyarrow.Table":
+ """[Beta] Create a class:`pyarrow.Table` by loading all pages of a
+ table or query.
+
+ Args:
+ progress_bar_type (Optional[str]):
+ If set, use the `tqdm `_ library to
+ display a progress bar while the data downloads. Install the
+ ``tqdm`` package to use this feature.
+
+ Possible values of ``progress_bar_type`` include:
+
+ ``None``
+ No progress bar.
+ ``'tqdm'``
+ Use the :func:`tqdm.tqdm` function to print a progress bar
+ to :data:`sys.stdout`.
+ ``'tqdm_notebook'``
+ Use the :func:`tqdm.notebook.tqdm` function to display a
+ progress bar as a Jupyter notebook widget.
+ ``'tqdm_gui'``
+ Use the :func:`tqdm.tqdm_gui` function to display a
+ progress bar as a graphical dialog box.
+ bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
+ A BigQuery Storage API client. If supplied, use the faster
+ BigQuery Storage API to fetch rows from BigQuery. This API
+ is a billable API.
+
+ This method requires ``google-cloud-bigquery-storage`` library.
+
+ Reading from a specific partition or snapshot is not
+ currently supported by this method.
+ create_bqstorage_client (Optional[bool]):
+ If ``True`` (default), create a BigQuery Storage API client
+ using the default API settings. The BigQuery Storage API
+ is a faster way to fetch rows from BigQuery. See the
+ ``bqstorage_client`` parameter for more information.
+
+ This argument does nothing if ``bqstorage_client`` is supplied.
+
+ .. versionadded:: 1.24.0
+
+ max_results (Optional[int]):
+ Maximum number of rows to include in the result. No limit by default.
+
+ .. versionadded:: 2.21.0
+
+ Returns:
+ pyarrow.Table
+ A :class:`pyarrow.Table` populated with row data and column
+ headers from the query results. The column headers are derived
+ from the destination table's schema.
+
+ Raises:
+ ValueError:
+ If the :mod:`pyarrow` library cannot be imported.
+
+ .. versionadded:: 1.17.0
+ """
+ query_result = wait_for_query(self, progress_bar_type, max_results=max_results)
+ return query_result.to_arrow(
+ progress_bar_type=progress_bar_type,
+ bqstorage_client=bqstorage_client,
+ create_bqstorage_client=create_bqstorage_client,
+ )
+
+ # If changing the signature of this method, make sure to apply the same
+ # changes to table.RowIterator.to_dataframe(), except for the max_results parameter
+ # that should only exist here in the QueryJob method.
+ def to_dataframe(
+ self,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ dtypes: Optional[Dict[str, Any]] = None,
+ progress_bar_type: Optional[str] = None,
+ create_bqstorage_client: bool = True,
+ max_results: Optional[int] = None,
+ geography_as_object: bool = False,
+ bool_dtype: Union[Any, None] = DefaultPandasDTypes.BOOL_DTYPE,
+ int_dtype: Union[Any, None] = DefaultPandasDTypes.INT_DTYPE,
+ float_dtype: Union[Any, None] = None,
+ string_dtype: Union[Any, None] = None,
+ date_dtype: Union[Any, None] = DefaultPandasDTypes.DATE_DTYPE,
+ datetime_dtype: Union[Any, None] = None,
+ time_dtype: Union[Any, None] = DefaultPandasDTypes.TIME_DTYPE,
+ timestamp_dtype: Union[Any, None] = None,
+ range_date_dtype: Union[Any, None] = DefaultPandasDTypes.RANGE_DATE_DTYPE,
+ range_datetime_dtype: Union[
+ Any, None
+ ] = DefaultPandasDTypes.RANGE_DATETIME_DTYPE,
+ range_timestamp_dtype: Union[
+ Any, None
+ ] = DefaultPandasDTypes.RANGE_TIMESTAMP_DTYPE,
+ ) -> "pandas.DataFrame":
+ """Return a pandas DataFrame from a QueryJob
+
+ Args:
+ bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
+ A BigQuery Storage API client. If supplied, use the faster
+ BigQuery Storage API to fetch rows from BigQuery. This
+ API is a billable API.
+
+ This method requires the ``fastavro`` and
+ ``google-cloud-bigquery-storage`` libraries.
+
+ Reading from a specific partition or snapshot is not
+ currently supported by this method.
+
+ dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
+ A dictionary of column names pandas ``dtype``s. The provided
+ ``dtype`` is used when constructing the series for the column
+ specified. Otherwise, the default pandas behavior is used.
+
+ progress_bar_type (Optional[str]):
+ If set, use the `tqdm `_ library to
+ display a progress bar while the data downloads. Install the
+ ``tqdm`` package to use this feature.
+
+ See
+ :func:`~google.cloud.bigquery.table.RowIterator.to_dataframe`
+ for details.
+
+ .. versionadded:: 1.11.0
+ create_bqstorage_client (Optional[bool]):
+ If ``True`` (default), create a BigQuery Storage API client
+ using the default API settings. The BigQuery Storage API
+ is a faster way to fetch rows from BigQuery. See the
+ ``bqstorage_client`` parameter for more information.
+
+ This argument does nothing if ``bqstorage_client`` is supplied.
+
+ .. versionadded:: 1.24.0
+
+ max_results (Optional[int]):
+ Maximum number of rows to include in the result. No limit by default.
+
+ .. versionadded:: 2.21.0
+
+ geography_as_object (Optional[bool]):
+ If ``True``, convert GEOGRAPHY data to :mod:`shapely`
+ geometry objects. If ``False`` (default), don't cast
+ geography data to :mod:`shapely` geometry objects.
+
+ .. versionadded:: 2.24.0
+
+ bool_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g. ``pandas.BooleanDtype()``)
+ to convert BigQuery Boolean type, instead of relying on the default
+ ``pandas.BooleanDtype()``. If you explicitly set the value to ``None``,
+ then the data type will be ``numpy.dtype("bool")``. BigQuery Boolean
+ type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#boolean_type
+
+ .. versionadded:: 3.8.0
+
+ int_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g. ``pandas.Int64Dtype()``)
+ to convert BigQuery Integer types, instead of relying on the default
+ ``pandas.Int64Dtype()``. If you explicitly set the value to ``None``,
+ then the data type will be ``numpy.dtype("int64")``. A list of BigQuery
+ Integer types can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#integer_types
+
+ .. versionadded:: 3.8.0
+
+ float_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g. ``pandas.Float32Dtype()``)
+ to convert BigQuery Float type, instead of relying on the default
+ ``numpy.dtype("float64")``. If you explicitly set the value to ``None``,
+ then the data type will be ``numpy.dtype("float64")``. BigQuery Float
+ type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#floating_point_types
+
+ .. versionadded:: 3.8.0
+
+ string_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g. ``pandas.StringDtype()``) to
+ convert BigQuery String type, instead of relying on the default
+ ``numpy.dtype("object")``. If you explicitly set the value to ``None``,
+ then the data type will be ``numpy.dtype("object")``. BigQuery String
+ type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#string_type
+
+ .. versionadded:: 3.8.0
+
+ date_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g.
+ ``pandas.ArrowDtype(pyarrow.date32())``) to convert BigQuery Date
+ type, instead of relying on the default ``db_dtypes.DateDtype()``.
+ If you explicitly set the value to ``None``, then the data type will be
+ ``numpy.dtype("datetime64[ns]")`` or ``object`` if out of bound. BigQuery
+ Date type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#date_type
+
+ .. versionadded:: 3.10.0
+
+ datetime_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g.
+ ``pandas.ArrowDtype(pyarrow.timestamp("us"))``) to convert BigQuery Datetime
+ type, instead of relying on the default ``numpy.dtype("datetime64[ns]``.
+ If you explicitly set the value to ``None``, then the data type will be
+ ``numpy.dtype("datetime64[ns]")`` or ``object`` if out of bound. BigQuery
+ Datetime type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#datetime_type
+
+ .. versionadded:: 3.10.0
+
+ time_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g.
+ ``pandas.ArrowDtype(pyarrow.time64("us"))``) to convert BigQuery Time
+ type, instead of relying on the default ``db_dtypes.TimeDtype()``.
+ If you explicitly set the value to ``None``, then the data type will be
+ ``numpy.dtype("object")``. BigQuery Time type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#time_type
+
+ .. versionadded:: 3.10.0
+
+ timestamp_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g.
+ ``pandas.ArrowDtype(pyarrow.timestamp("us", tz="UTC"))``) to convert BigQuery Timestamp
+ type, instead of relying on the default ``numpy.dtype("datetime64[ns, UTC]")``.
+ If you explicitly set the value to ``None``, then the data type will be
+ ``numpy.dtype("datetime64[ns, UTC]")`` or ``object`` if out of bound. BigQuery
+ Datetime type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp_type
+
+ .. versionadded:: 3.10.0
+
+ range_date_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype, such as:
+
+ .. code-block:: python
+
+ pandas.ArrowDtype(pyarrow.struct(
+ [("start", pyarrow.date32()), ("end", pyarrow.date32())]
+ ))
+
+ to convert BigQuery RANGE type, instead of relying on
+ the default ``object``. If you explicitly set the value to
+ ``None``, the data type will be ``object``. BigQuery Range type
+ can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#range_type
+
+ .. versionadded:: 3.21.0
+
+ range_datetime_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype, such as:
+
+ .. code-block:: python
+
+ pandas.ArrowDtype(pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us")),
+ ("end", pyarrow.timestamp("us")),
+ ]
+ ))
+
+ to convert BigQuery RANGE type, instead of relying on
+ the default ``object``. If you explicitly set the value to
+ ``None``, the data type will be ``object``. BigQuery Range type
+ can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#range_type
+
+ .. versionadded:: 3.21.0
+
+ range_timestamp_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype, such as:
+
+ .. code-block:: python
+
+ pandas.ArrowDtype(pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us", tz="UTC")),
+ ("end", pyarrow.timestamp("us", tz="UTC")),
+ ]
+ ))
+
+ to convert BigQuery RANGE type, instead of relying
+ on the default ``object``. If you explicitly set the value to
+ ``None``, the data type will be ``object``. BigQuery Range type
+ can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#range_type
+
+ .. versionadded:: 3.21.0
+
+ Returns:
+ pandas.DataFrame:
+ A :class:`~pandas.DataFrame` populated with row data
+ and column headers from the query results. The column
+ headers are derived from the destination table's
+ schema.
+
+ Raises:
+ ValueError:
+ If the :mod:`pandas` library cannot be imported, or
+ the :mod:`google.cloud.bigquery_storage_v1` module is
+ required but cannot be imported. Also if
+ `geography_as_object` is `True`, but the
+ :mod:`shapely` library cannot be imported.
+ """
+ query_result = wait_for_query(self, progress_bar_type, max_results=max_results)
+ return query_result.to_dataframe(
+ bqstorage_client=bqstorage_client,
+ dtypes=dtypes,
+ progress_bar_type=progress_bar_type,
+ create_bqstorage_client=create_bqstorage_client,
+ geography_as_object=geography_as_object,
+ bool_dtype=bool_dtype,
+ int_dtype=int_dtype,
+ float_dtype=float_dtype,
+ string_dtype=string_dtype,
+ date_dtype=date_dtype,
+ datetime_dtype=datetime_dtype,
+ time_dtype=time_dtype,
+ timestamp_dtype=timestamp_dtype,
+ range_date_dtype=range_date_dtype,
+ range_datetime_dtype=range_datetime_dtype,
+ range_timestamp_dtype=range_timestamp_dtype,
+ )
+
+ # If changing the signature of this method, make sure to apply the same
+ # changes to table.RowIterator.to_dataframe(), except for the max_results parameter
+ # that should only exist here in the QueryJob method.
+ def to_geodataframe(
+ self,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ dtypes: Optional[Dict[str, Any]] = None,
+ progress_bar_type: Optional[str] = None,
+ create_bqstorage_client: bool = True,
+ max_results: Optional[int] = None,
+ geography_column: Optional[str] = None,
+ ) -> "geopandas.GeoDataFrame":
+ """Return a GeoPandas GeoDataFrame from a QueryJob
+
+ Args:
+ bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
+ A BigQuery Storage API client. If supplied, use the faster
+ BigQuery Storage API to fetch rows from BigQuery. This
+ API is a billable API.
+
+ This method requires the ``fastavro`` and
+ ``google-cloud-bigquery-storage`` libraries.
+
+ Reading from a specific partition or snapshot is not
+ currently supported by this method.
+
+ dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
+ A dictionary of column names pandas ``dtype``s. The provided
+ ``dtype`` is used when constructing the series for the column
+ specified. Otherwise, the default pandas behavior is used.
+
+ progress_bar_type (Optional[str]):
+ If set, use the `tqdm `_ library to
+ display a progress bar while the data downloads. Install the
+ ``tqdm`` package to use this feature.
+
+ See
+ :func:`~google.cloud.bigquery.table.RowIterator.to_dataframe`
+ for details.
+
+ .. versionadded:: 1.11.0
+ create_bqstorage_client (Optional[bool]):
+ If ``True`` (default), create a BigQuery Storage API client
+ using the default API settings. The BigQuery Storage API
+ is a faster way to fetch rows from BigQuery. See the
+ ``bqstorage_client`` parameter for more information.
+
+ This argument does nothing if ``bqstorage_client`` is supplied.
+
+ .. versionadded:: 1.24.0
+
+ max_results (Optional[int]):
+ Maximum number of rows to include in the result. No limit by default.
+
+ .. versionadded:: 2.21.0
+
+ geography_column (Optional[str]):
+ If there are more than one GEOGRAPHY column,
+ identifies which one to use to construct a GeoPandas
+ GeoDataFrame. This option can be ommitted if there's
+ only one GEOGRAPHY column.
+
+ Returns:
+ geopandas.GeoDataFrame:
+ A :class:`geopandas.GeoDataFrame` populated with row
+ data and column headers from the query results. The
+ column headers are derived from the destination
+ table's schema.
+
+ Raises:
+ ValueError:
+ If the :mod:`geopandas` library cannot be imported, or the
+ :mod:`google.cloud.bigquery_storage_v1` module is
+ required but cannot be imported.
+
+ .. versionadded:: 2.24.0
+ """
+ query_result = wait_for_query(self, progress_bar_type, max_results=max_results)
+ return query_result.to_geodataframe(
+ bqstorage_client=bqstorage_client,
+ dtypes=dtypes,
+ progress_bar_type=progress_bar_type,
+ create_bqstorage_client=create_bqstorage_client,
+ geography_column=geography_column,
+ )
+
+ def __iter__(self):
+ return iter(self.result())
+
+
+class QueryPlanEntryStep(object):
+ """Map a single step in a query plan entry.
+
+ Args:
+ kind (str): step type.
+ substeps (List): names of substeps.
+ """
+
+ def __init__(self, kind, substeps):
+ self.kind = kind
+ self.substeps = list(substeps)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "QueryPlanEntryStep":
+ """Factory: construct instance from the JSON repr.
+
+ Args:
+ resource (Dict): JSON representation of the entry.
+
+ Returns:
+ google.cloud.bigquery.job.QueryPlanEntryStep:
+ New instance built from the resource.
+ """
+ return cls(kind=resource.get("kind"), substeps=resource.get("substeps", ()))
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self.kind == other.kind and self.substeps == other.substeps
+
+
+class QueryPlanEntry(object):
+ """QueryPlanEntry represents a single stage of a query execution plan.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#ExplainQueryStage
+ for the underlying API representation within query statistics.
+ """
+
+ def __init__(self):
+ self._properties = {}
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "QueryPlanEntry":
+ """Factory: construct instance from the JSON repr.
+
+ Args:
+ resource(Dict[str: object]):
+ ExplainQueryStage representation returned from API.
+
+ Returns:
+ google.cloud.bigquery.job.QueryPlanEntry:
+ Query plan entry parsed from ``resource``.
+ """
+ entry = cls()
+ entry._properties = resource
+ return entry
+
+ @property
+ def name(self):
+ """Optional[str]: Human-readable name of the stage."""
+ return self._properties.get("name")
+
+ @property
+ def entry_id(self):
+ """Optional[str]: Unique ID for the stage within the plan."""
+ return self._properties.get("id")
+
+ @property
+ def start(self):
+ """Optional[Datetime]: Datetime when the stage started."""
+ if self._properties.get("startMs") is None:
+ return None
+ return _helpers._datetime_from_microseconds(
+ int(self._properties.get("startMs")) * 1000.0
+ )
+
+ @property
+ def end(self):
+ """Optional[Datetime]: Datetime when the stage ended."""
+ if self._properties.get("endMs") is None:
+ return None
+ return _helpers._datetime_from_microseconds(
+ int(self._properties.get("endMs")) * 1000.0
+ )
+
+ @property
+ def input_stages(self):
+ """List(int): Entry IDs for stages that were inputs for this stage."""
+ if self._properties.get("inputStages") is None:
+ return []
+ return [
+ _helpers._int_or_none(entry)
+ for entry in self._properties.get("inputStages")
+ ]
+
+ @property
+ def parallel_inputs(self):
+ """Optional[int]: Number of parallel input segments within
+ the stage.
+ """
+ return _helpers._int_or_none(self._properties.get("parallelInputs"))
+
+ @property
+ def completed_parallel_inputs(self):
+ """Optional[int]: Number of parallel input segments completed."""
+ return _helpers._int_or_none(self._properties.get("completedParallelInputs"))
+
+ @property
+ def wait_ms_avg(self):
+ """Optional[int]: Milliseconds the average worker spent waiting to
+ be scheduled.
+ """
+ return _helpers._int_or_none(self._properties.get("waitMsAvg"))
+
+ @property
+ def wait_ms_max(self):
+ """Optional[int]: Milliseconds the slowest worker spent waiting to
+ be scheduled.
+ """
+ return _helpers._int_or_none(self._properties.get("waitMsMax"))
+
+ @property
+ def wait_ratio_avg(self):
+ """Optional[float]: Ratio of time the average worker spent waiting
+ to be scheduled, relative to the longest time spent by any worker in
+ any stage of the overall plan.
+ """
+ return self._properties.get("waitRatioAvg")
+
+ @property
+ def wait_ratio_max(self):
+ """Optional[float]: Ratio of time the slowest worker spent waiting
+ to be scheduled, relative to the longest time spent by any worker in
+ any stage of the overall plan.
+ """
+ return self._properties.get("waitRatioMax")
+
+ @property
+ def read_ms_avg(self):
+ """Optional[int]: Milliseconds the average worker spent reading
+ input.
+ """
+ return _helpers._int_or_none(self._properties.get("readMsAvg"))
+
+ @property
+ def read_ms_max(self):
+ """Optional[int]: Milliseconds the slowest worker spent reading
+ input.
+ """
+ return _helpers._int_or_none(self._properties.get("readMsMax"))
+
+ @property
+ def read_ratio_avg(self):
+ """Optional[float]: Ratio of time the average worker spent reading
+ input, relative to the longest time spent by any worker in any stage
+ of the overall plan.
+ """
+ return self._properties.get("readRatioAvg")
+
+ @property
+ def read_ratio_max(self):
+ """Optional[float]: Ratio of time the slowest worker spent reading
+ to be scheduled, relative to the longest time spent by any worker in
+ any stage of the overall plan.
+ """
+ return self._properties.get("readRatioMax")
+
+ @property
+ def compute_ms_avg(self):
+ """Optional[int]: Milliseconds the average worker spent on CPU-bound
+ processing.
+ """
+ return _helpers._int_or_none(self._properties.get("computeMsAvg"))
+
+ @property
+ def compute_ms_max(self):
+ """Optional[int]: Milliseconds the slowest worker spent on CPU-bound
+ processing.
+ """
+ return _helpers._int_or_none(self._properties.get("computeMsMax"))
+
+ @property
+ def compute_ratio_avg(self):
+ """Optional[float]: Ratio of time the average worker spent on
+ CPU-bound processing, relative to the longest time spent by any
+ worker in any stage of the overall plan.
+ """
+ return self._properties.get("computeRatioAvg")
+
+ @property
+ def compute_ratio_max(self):
+ """Optional[float]: Ratio of time the slowest worker spent on
+ CPU-bound processing, relative to the longest time spent by any
+ worker in any stage of the overall plan.
+ """
+ return self._properties.get("computeRatioMax")
+
+ @property
+ def write_ms_avg(self):
+ """Optional[int]: Milliseconds the average worker spent writing
+ output data.
+ """
+ return _helpers._int_or_none(self._properties.get("writeMsAvg"))
+
+ @property
+ def write_ms_max(self):
+ """Optional[int]: Milliseconds the slowest worker spent writing
+ output data.
+ """
+ return _helpers._int_or_none(self._properties.get("writeMsMax"))
+
+ @property
+ def write_ratio_avg(self):
+ """Optional[float]: Ratio of time the average worker spent writing
+ output data, relative to the longest time spent by any worker in any
+ stage of the overall plan.
+ """
+ return self._properties.get("writeRatioAvg")
+
+ @property
+ def write_ratio_max(self):
+ """Optional[float]: Ratio of time the slowest worker spent writing
+ output data, relative to the longest time spent by any worker in any
+ stage of the overall plan.
+ """
+ return self._properties.get("writeRatioMax")
+
+ @property
+ def records_read(self):
+ """Optional[int]: Number of records read by this stage."""
+ return _helpers._int_or_none(self._properties.get("recordsRead"))
+
+ @property
+ def records_written(self):
+ """Optional[int]: Number of records written by this stage."""
+ return _helpers._int_or_none(self._properties.get("recordsWritten"))
+
+ @property
+ def status(self):
+ """Optional[str]: status of this stage."""
+ return self._properties.get("status")
+
+ @property
+ def shuffle_output_bytes(self):
+ """Optional[int]: Number of bytes written by this stage to
+ intermediate shuffle.
+ """
+ return _helpers._int_or_none(self._properties.get("shuffleOutputBytes"))
+
+ @property
+ def shuffle_output_bytes_spilled(self):
+ """Optional[int]: Number of bytes written by this stage to
+ intermediate shuffle and spilled to disk.
+ """
+ return _helpers._int_or_none(self._properties.get("shuffleOutputBytesSpilled"))
+
+ @property
+ def steps(self):
+ """List(QueryPlanEntryStep): List of step operations performed by
+ each worker in the stage.
+ """
+ return [
+ QueryPlanEntryStep.from_api_repr(step)
+ for step in self._properties.get("steps", [])
+ ]
+
+ @property
+ def slot_ms(self):
+ """Optional[int]: Slot-milliseconds used by the stage."""
+ return _helpers._int_or_none(self._properties.get("slotMs"))
+
+
+class TimelineEntry(object):
+ """TimelineEntry represents progress of a query job at a particular
+ point in time.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#querytimelinesample
+ for the underlying API representation within query statistics.
+ """
+
+ def __init__(self):
+ self._properties = {}
+
+ @classmethod
+ def from_api_repr(cls, resource):
+ """Factory: construct instance from the JSON repr.
+
+ Args:
+ resource(Dict[str: object]):
+ QueryTimelineSample representation returned from API.
+
+ Returns:
+ google.cloud.bigquery.TimelineEntry:
+ Timeline sample parsed from ``resource``.
+ """
+ entry = cls()
+ entry._properties = resource
+ return entry
+
+ @property
+ def elapsed_ms(self):
+ """Optional[int]: Milliseconds elapsed since start of query
+ execution."""
+ return _helpers._int_or_none(self._properties.get("elapsedMs"))
+
+ @property
+ def active_units(self):
+ """Optional[int]: Current number of input units being processed
+ by workers, reported as largest value since the last sample."""
+ return _helpers._int_or_none(self._properties.get("activeUnits"))
+
+ @property
+ def pending_units(self):
+ """Optional[int]: Current number of input units remaining for
+ query stages active at this sample time."""
+ return _helpers._int_or_none(self._properties.get("pendingUnits"))
+
+ @property
+ def completed_units(self):
+ """Optional[int]: Current number of input units completed by
+ this query."""
+ return _helpers._int_or_none(self._properties.get("completedUnits"))
+
+ @property
+ def slot_millis(self):
+ """Optional[int]: Cumulative slot-milliseconds consumed by
+ this query."""
+ return _helpers._int_or_none(self._properties.get("totalSlotMs"))
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/__init__.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d228a35bb1348e38a26ef5489ac099f66c1f6992
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud.bigquery.magics.magics import context
+
+
+# For backwards compatibility we need to make the context available in the path
+# google.cloud.bigquery.magics.context
+__all__ = ("context",)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/__init__.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9471446c516be47ea1593b01b76306c48f62d9b7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/__init__.py
@@ -0,0 +1,34 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud.bigquery.magics.line_arg_parser.exceptions import ParseError
+from google.cloud.bigquery.magics.line_arg_parser.exceptions import (
+ DuplicateQueryParamsError,
+ QueryParamsParseError,
+)
+from google.cloud.bigquery.magics.line_arg_parser.lexer import Lexer
+from google.cloud.bigquery.magics.line_arg_parser.lexer import TokenType
+from google.cloud.bigquery.magics.line_arg_parser.parser import Parser
+from google.cloud.bigquery.magics.line_arg_parser.visitors import QueryParamsExtractor
+
+
+__all__ = (
+ "DuplicateQueryParamsError",
+ "Lexer",
+ "Parser",
+ "ParseError",
+ "QueryParamsExtractor",
+ "QueryParamsParseError",
+ "TokenType",
+)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/exceptions.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b2081186cab1f14069fbe32afbdee7a6bb21219
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/exceptions.py
@@ -0,0 +1,25 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class ParseError(Exception):
+ pass
+
+
+class QueryParamsParseError(ParseError):
+ """Raised when --params option is syntactically incorrect."""
+
+
+class DuplicateQueryParamsError(ParseError):
+ pass
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/lexer.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/lexer.py
new file mode 100644
index 0000000000000000000000000000000000000000..71b287d01e83e43b97884538999e5b497d23622b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/lexer.py
@@ -0,0 +1,200 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import namedtuple
+from collections import OrderedDict
+import itertools
+import re
+
+import enum
+
+
+Token = namedtuple("Token", ("type_", "lexeme", "pos"))
+StateTransition = namedtuple("StateTransition", ("new_state", "total_offset"))
+
+# Pattern matching is done with regexes, and the order in which the token patterns are
+# defined is important.
+#
+# Suppose we had the following token definitions:
+# * INT - a token matching integers,
+# * FLOAT - a token matching floating point numbers,
+# * DOT - a token matching a single literal dot character, i.e. "."
+#
+# The FLOAT token would have to be defined first, since we would want the input "1.23"
+# to be tokenized as a single FLOAT token, and *not* three tokens (INT, DOT, INT).
+#
+# Sometimes, however, different tokens match too similar patterns, and it is not
+# possible to define them in order that would avoid any ambiguity. One such case are
+# the OPT_VAL and PY_NUMBER tokens, as both can match an integer literal, say "42".
+#
+# In order to avoid the dilemmas, the lexer implements a concept of STATES. States are
+# used to split token definitions into subgroups, and in each lexer state only a single
+# subgroup is used for tokenizing the input. Lexer states can therefore be though of as
+# token namespaces.
+#
+# For example, while parsing the value of the "--params" option, we do not want to
+# "recognize" it as a single OPT_VAL token, but instead want to parse it as a Python
+# dictionary and verify its syntactial correctness. On the other hand, while parsing
+# the value of an option other than "--params", we do not really care about its
+# structure, and thus do not want to use any of the "Python tokens" for pattern matching.
+#
+# Token definition order is important, thus an OrderedDict is used. In addition, PEP 468
+# guarantees us that the order of kwargs is preserved in Python 3.6+.
+token_types = OrderedDict(
+ state_parse_pos_args=OrderedDict(
+ GOTO_PARSE_NON_PARAMS_OPTIONS=r"(?P(?=--))", # double dash - starting the options list
+ DEST_VAR=r"(?P[^\d\W]\w*)", # essentially a Python ID
+ ),
+ state_parse_non_params_options=OrderedDict(
+ GOTO_PARSE_PARAMS_OPTION=r"(?P(?=--params(?:\s|=|--|$)))", # the --params option
+ OPTION_SPEC=r"(?P--\w+)",
+ OPTION_EQ=r"(?P=)",
+ OPT_VAL=r"(?P\S+?(?=\s|--|$))",
+ ),
+ state_parse_params_option=OrderedDict(
+ PY_STRING=r"(?P(?:{})|(?:{}))".format( # single and double quoted strings
+ r"'(?:[^'\\]|\.)*'", r'"(?:[^"\\]|\.)*"'
+ ),
+ PARAMS_OPT_SPEC=r"(?P--params(?=\s|=|--|$))",
+ PARAMS_OPT_EQ=r"(?P=)",
+ GOTO_PARSE_NON_PARAMS_OPTIONS=r"(?P(?=--\w+))", # found another option spec
+ PY_BOOL=r"(?PTrue|False)",
+ DOLLAR_PY_ID=r"(?P\$[^\d\W]\w*)",
+ PY_NUMBER=r"(?P-?[1-9]\d*(?:\.\d+)?(:?[e|E][+-]?\d+)?)",
+ SQUOTE=r"(?P')",
+ DQUOTE=r'(?P")',
+ COLON=r"(?P:)",
+ COMMA=r"(?P,)",
+ LCURL=r"(?P\{)",
+ RCURL=r"(?P})",
+ LSQUARE=r"(?P\[)",
+ RSQUARE=r"(?P])",
+ LPAREN=r"(?P\()",
+ RPAREN=r"(?P\))",
+ ),
+ common=OrderedDict(
+ WS=r"(?P\s+)",
+ EOL=r"(?P$)",
+ UNKNOWN=r"(?P\S+)", # anything not a whitespace or matched by something else
+ ),
+)
+
+
+class AutoStrEnum(str, enum.Enum):
+ """Base enum class for for name=value str enums."""
+
+ def _generate_next_value_(name, start, count, last_values):
+ return name
+
+
+TokenType = AutoStrEnum( # type: ignore # pytype: disable=wrong-arg-types
+ "TokenType",
+ [
+ (name, enum.auto())
+ for name in itertools.chain.from_iterable(token_types.values())
+ if not name.startswith("GOTO_")
+ ],
+)
+
+
+class LexerState(AutoStrEnum):
+ PARSE_POS_ARGS = enum.auto() # parsing positional arguments
+ PARSE_NON_PARAMS_OPTIONS = enum.auto() # parsing options other than "--params"
+ PARSE_PARAMS_OPTION = enum.auto() # parsing the "--params" option
+ STATE_END = enum.auto()
+
+
+class Lexer(object):
+ """Lexical analyzer for tokenizing the cell magic input line."""
+
+ _GRAND_PATTERNS = {
+ LexerState.PARSE_POS_ARGS: re.compile(
+ "|".join(
+ itertools.chain(
+ token_types["state_parse_pos_args"].values(),
+ token_types["common"].values(),
+ )
+ )
+ ),
+ LexerState.PARSE_NON_PARAMS_OPTIONS: re.compile(
+ "|".join(
+ itertools.chain(
+ token_types["state_parse_non_params_options"].values(),
+ token_types["common"].values(),
+ )
+ )
+ ),
+ LexerState.PARSE_PARAMS_OPTION: re.compile(
+ "|".join(
+ itertools.chain(
+ token_types["state_parse_params_option"].values(),
+ token_types["common"].values(),
+ )
+ )
+ ),
+ }
+
+ def __init__(self, input_text):
+ self._text = input_text
+
+ def __iter__(self):
+ # Since re.scanner does not seem to support manipulating inner scanner states,
+ # we need to implement lexer state transitions manually using special
+ # non-capturing lookahead token patterns to signal when a state transition
+ # should be made.
+ # Since we don't have "nested" states, we don't really need a stack and
+ # this simple mechanism is sufficient.
+ state = LexerState.PARSE_POS_ARGS
+ offset = 0 # the number of characters processed so far
+
+ while state != LexerState.STATE_END:
+ token_stream = self._find_state_tokens(state, offset)
+
+ for maybe_token in token_stream: # pragma: NO COVER
+ if isinstance(maybe_token, StateTransition):
+ state = maybe_token.new_state
+ offset = maybe_token.total_offset
+ break
+
+ if maybe_token.type_ != TokenType.WS:
+ yield maybe_token
+
+ if maybe_token.type_ == TokenType.EOL:
+ state = LexerState.STATE_END
+ break
+
+ def _find_state_tokens(self, state, current_offset):
+ """Scan the input for current state's tokens starting at ``current_offset``.
+
+ Args:
+ state (LexerState): The current lexer state.
+ current_offset (int): The offset in the input text, i.e. the number
+ of characters already scanned so far.
+
+ Yields:
+ The next ``Token`` or ``StateTransition`` instance.
+ """
+ pattern = self._GRAND_PATTERNS[state]
+ scanner = pattern.finditer(self._text, current_offset)
+
+ for match in scanner: # pragma: NO COVER
+ token_type = match.lastgroup
+
+ if token_type.startswith("GOTO_"):
+ yield StateTransition(
+ new_state=getattr(LexerState, token_type[5:]), # w/o "GOTO_" prefix
+ total_offset=match.start(),
+ )
+
+ yield Token(token_type, match.group(), match.start())
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/parser.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9da20cd78620dfca9d085333ffc574308ad25bb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/parser.py
@@ -0,0 +1,484 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud.bigquery.magics.line_arg_parser import DuplicateQueryParamsError
+from google.cloud.bigquery.magics.line_arg_parser import ParseError
+from google.cloud.bigquery.magics.line_arg_parser import QueryParamsParseError
+from google.cloud.bigquery.magics.line_arg_parser import TokenType
+
+
+class ParseNode(object):
+ """A base class for nodes in the input parsed to an abstract syntax tree."""
+
+
+class InputLine(ParseNode):
+ def __init__(self, destination_var, option_list):
+ self.destination_var = destination_var
+ self.option_list = option_list
+
+
+class DestinationVar(ParseNode):
+ def __init__(self, token):
+ # token type is DEST_VAR
+ self.token = token
+ self.name = token.lexeme if token is not None else None
+
+
+class CmdOptionList(ParseNode):
+ def __init__(self, option_nodes):
+ self.options = [node for node in option_nodes] # shallow copy
+
+
+class CmdOption(ParseNode):
+ def __init__(self, name, value):
+ self.name = name # string
+ self.value = value # CmdOptionValue node
+
+
+class ParamsOption(CmdOption):
+ def __init__(self, value):
+ super(ParamsOption, self).__init__("params", value)
+
+
+class CmdOptionValue(ParseNode):
+ def __init__(self, token):
+ # token type is OPT_VAL
+ self.token = token
+ self.value = token.lexeme
+
+
+class PyVarExpansion(ParseNode):
+ def __init__(self, token):
+ self.token = token
+ self.raw_value = token.lexeme
+
+
+class PyDict(ParseNode):
+ def __init__(self, dict_items):
+ self.items = [item for item in dict_items] # shallow copy
+
+
+class PyDictItem(ParseNode):
+ def __init__(self, key, value):
+ self.key = key
+ self.value = value
+
+
+class PyDictKey(ParseNode):
+ def __init__(self, token):
+ self.token = token
+ self.key_value = token.lexeme
+
+
+class PyScalarValue(ParseNode):
+ def __init__(self, token, raw_value):
+ self.token = token
+ self.raw_value = raw_value
+
+
+class PyTuple(ParseNode):
+ def __init__(self, tuple_items):
+ self.items = [item for item in tuple_items] # shallow copy
+
+
+class PyList(ParseNode):
+ def __init__(self, list_items):
+ self.items = [item for item in list_items] # shallow copy
+
+
+class Parser(object):
+ """Parser for the tokenized cell magic input line.
+
+ The parser recognizes a simplified subset of Python grammar, specifically
+ a dictionary representation in typical use cases when the "--params" option
+ is used with the %%bigquery cell magic.
+
+ The grammar (terminal symbols are CAPITALIZED):
+
+ input_line : destination_var option_list
+ destination_var : DEST_VAR | EMPTY
+ option_list : (OPTION_SPEC [OPTION_EQ] option_value)*
+ (params_option | EMPTY)
+ (OPTION_SPEC [OPTION_EQ] option_value)*
+
+ option_value : OPT_VAL | EMPTY
+
+ # DOLLAR_PY_ID can occur if a variable passed to --params does not exist
+ # and is thus not expanded to a dict.
+ params_option : PARAMS_OPT_SPEC [PARAMS_OPT_EQ] \
+ (DOLLAR_PY_ID | PY_STRING | py_dict)
+
+ py_dict : LCURL dict_items RCURL
+ dict_items : dict_item | (dict_item COMMA dict_items)
+ dict_item : (dict_key COLON py_value) | EMPTY
+
+ # dict items are actually @parameter names in the cell body (i.e. the query),
+ # thus restricting them to strings.
+ dict_key : PY_STRING
+
+ py_value : PY_BOOL
+ | PY_NUMBER
+ | PY_STRING
+ | py_tuple
+ | py_list
+ | py_dict
+
+ py_tuple : LPAREN collection_items RPAREN
+ py_list : LSQUARE collection_items RSQUARE
+ collection_items : collection_item | (collection_item COMMA collection_items)
+ collection_item : py_value | EMPTY
+
+ Args:
+ lexer (line_arg_parser.lexer.Lexer):
+ An iterable producing a tokenized cell magic argument line.
+ """
+
+ def __init__(self, lexer):
+ self._lexer = lexer
+ self._tokens_iter = iter(self._lexer)
+ self.get_next_token()
+
+ def get_next_token(self):
+ """Obtain the next token from the token stream and store it as current."""
+ token = next(self._tokens_iter)
+ self._current_token = token
+
+ def consume(self, expected_type, exc_type=ParseError):
+ """Move to the next token in token stream if it matches the expected type.
+
+ Args:
+ expected_type (lexer.TokenType): The expected token type to be consumed.
+ exc_type (Optional[ParseError]): The type of the exception to raise. Should be
+ the ``ParseError`` class or one of its subclasses. Defaults to
+ ``ParseError``.
+
+ Raises:
+ ParseError: If the current token does not match the expected type.
+ """
+ if self._current_token.type_ == expected_type:
+ if expected_type != TokenType.EOL:
+ self.get_next_token()
+ else:
+ if self._current_token.type_ == TokenType.EOL:
+ msg = "Unexpected end of input, expected {}.".format(expected_type)
+ else:
+ msg = "Expected token type {}, but found {} at position {}.".format(
+ expected_type, self._current_token.lexeme, self._current_token.pos
+ )
+ self.error(message=msg, exc_type=exc_type)
+
+ def error(self, message="Syntax error.", exc_type=ParseError):
+ """Raise an error with the given message.
+
+ Args:
+ expected_type (lexer.TokenType): The expected token type to be consumed.
+ exc_type (Optional[ParseError]): The type of the exception to raise. Should be
+ the ``ParseError`` class or one of its subclasses. Defaults to
+ ``ParseError``.
+
+ Raises:
+ ParseError: If the current token does not match the expected type.
+ """
+ raise exc_type(message)
+
+ def input_line(self):
+ """The top level method for parsing the cell magic arguments line.
+
+ Implements the following grammar production rule:
+
+ input_line : destination_var option_list
+ """
+ dest_var = self.destination_var()
+ options = self.option_list()
+
+ token = self._current_token
+
+ if token.type_ != TokenType.EOL:
+ msg = "Unexpected input at position {}: {}".format(token.pos, token.lexeme)
+ self.error(msg)
+
+ return InputLine(dest_var, options)
+
+ def destination_var(self):
+ """Implementation of the ``destination_var`` grammar production rule.
+
+ Production:
+
+ destination_var : DEST_VAR | EMPTY
+ """
+ token = self._current_token
+
+ if token.type_ == TokenType.DEST_VAR:
+ self.consume(TokenType.DEST_VAR)
+ result = DestinationVar(token)
+ elif token.type_ == TokenType.UNKNOWN:
+ msg = "Unknown input at position {}: {}".format(token.pos, token.lexeme)
+ self.error(msg)
+ else:
+ result = DestinationVar(None)
+
+ return result
+
+ def option_list(self):
+ """Implementation of the ``option_list`` grammar production rule.
+
+ Production:
+
+ option_list : (OPTION_SPEC [OPTION_EQ] option_value)*
+ (params_option | EMPTY)
+ (OPTION_SPEC [OPTION_EQ] option_value)*
+ """
+ all_options = []
+
+ def parse_nonparams_options():
+ while self._current_token.type_ == TokenType.OPTION_SPEC:
+ token = self._current_token
+ self.consume(TokenType.OPTION_SPEC)
+
+ opt_name = token.lexeme[2:] # cut off the "--" prefix
+
+ # skip the optional "=" character
+ if self._current_token.type_ == TokenType.OPTION_EQ:
+ self.consume(TokenType.OPTION_EQ)
+
+ opt_value = self.option_value()
+ option = CmdOption(opt_name, opt_value)
+ all_options.append(option)
+
+ parse_nonparams_options()
+
+ token = self._current_token
+
+ if token.type_ == TokenType.PARAMS_OPT_SPEC:
+ option = self.params_option()
+ all_options.append(option)
+
+ parse_nonparams_options()
+
+ if self._current_token.type_ == TokenType.PARAMS_OPT_SPEC:
+ self.error(
+ message="Duplicate --params option", exc_type=DuplicateQueryParamsError
+ )
+
+ return CmdOptionList(all_options)
+
+ def option_value(self):
+ """Implementation of the ``option_value`` grammar production rule.
+
+ Production:
+
+ option_value : OPT_VAL | EMPTY
+ """
+ token = self._current_token
+
+ if token.type_ == TokenType.OPT_VAL:
+ self.consume(TokenType.OPT_VAL)
+ result = CmdOptionValue(token)
+ elif token.type_ == TokenType.UNKNOWN:
+ msg = "Unknown input at position {}: {}".format(token.pos, token.lexeme)
+ self.error(msg)
+ else:
+ result = None
+
+ return result
+
+ def params_option(self):
+ """Implementation of the ``params_option`` grammar production rule.
+
+ Production:
+
+ params_option : PARAMS_OPT_SPEC [PARAMS_OPT_EQ] \
+ (DOLLAR_PY_ID | PY_STRING | py_dict)
+ """
+ self.consume(TokenType.PARAMS_OPT_SPEC)
+
+ # skip the optional "=" character
+ if self._current_token.type_ == TokenType.PARAMS_OPT_EQ:
+ self.consume(TokenType.PARAMS_OPT_EQ)
+
+ if self._current_token.type_ == TokenType.DOLLAR_PY_ID:
+ token = self._current_token
+ self.consume(TokenType.DOLLAR_PY_ID)
+ opt_value = PyVarExpansion(token)
+ elif self._current_token.type_ == TokenType.PY_STRING:
+ token = self._current_token
+ self.consume(TokenType.PY_STRING, exc_type=QueryParamsParseError)
+ opt_value = PyScalarValue(token, token.lexeme)
+ else:
+ opt_value = self.py_dict()
+
+ result = ParamsOption(opt_value)
+
+ return result
+
+ def py_dict(self):
+ """Implementation of the ``py_dict`` grammar production rule.
+
+ Production:
+
+ py_dict : LCURL dict_items RCURL
+ """
+ self.consume(TokenType.LCURL, exc_type=QueryParamsParseError)
+ dict_items = self.dict_items()
+ self.consume(TokenType.RCURL, exc_type=QueryParamsParseError)
+
+ return PyDict(dict_items)
+
+ def dict_items(self):
+ """Implementation of the ``dict_items`` grammar production rule.
+
+ Production:
+
+ dict_items : dict_item | (dict_item COMMA dict_items)
+ """
+ result = []
+
+ item = self.dict_item()
+ if item is not None:
+ result.append(item)
+
+ while self._current_token.type_ == TokenType.COMMA:
+ self.consume(TokenType.COMMA, exc_type=QueryParamsParseError)
+ item = self.dict_item()
+ if item is not None:
+ result.append(item)
+
+ return result
+
+ def dict_item(self):
+ """Implementation of the ``dict_item`` grammar production rule.
+
+ Production:
+
+ dict_item : (dict_key COLON py_value) | EMPTY
+ """
+ token = self._current_token
+
+ if token.type_ == TokenType.PY_STRING:
+ key = self.dict_key()
+ self.consume(TokenType.COLON, exc_type=QueryParamsParseError)
+ value = self.py_value()
+ result = PyDictItem(key, value)
+ elif token.type_ == TokenType.UNKNOWN:
+ msg = "Unknown input at position {}: {}".format(token.pos, token.lexeme)
+ self.error(msg, exc_type=QueryParamsParseError)
+ else:
+ result = None
+
+ return result
+
+ def dict_key(self):
+ """Implementation of the ``dict_key`` grammar production rule.
+
+ Production:
+
+ dict_key : PY_STRING
+ """
+ token = self._current_token
+ self.consume(TokenType.PY_STRING, exc_type=QueryParamsParseError)
+ return PyDictKey(token)
+
+ def py_value(self):
+ """Implementation of the ``py_value`` grammar production rule.
+
+ Production:
+
+ py_value : PY_BOOL | PY_NUMBER | PY_STRING | py_tuple | py_list | py_dict
+ """
+ token = self._current_token
+
+ if token.type_ == TokenType.PY_BOOL:
+ self.consume(TokenType.PY_BOOL, exc_type=QueryParamsParseError)
+ return PyScalarValue(token, token.lexeme)
+ elif token.type_ == TokenType.PY_NUMBER:
+ self.consume(TokenType.PY_NUMBER, exc_type=QueryParamsParseError)
+ return PyScalarValue(token, token.lexeme)
+ elif token.type_ == TokenType.PY_STRING:
+ self.consume(TokenType.PY_STRING, exc_type=QueryParamsParseError)
+ return PyScalarValue(token, token.lexeme)
+ elif token.type_ == TokenType.LPAREN:
+ tuple_node = self.py_tuple()
+ return tuple_node
+ elif token.type_ == TokenType.LSQUARE:
+ list_node = self.py_list()
+ return list_node
+ elif token.type_ == TokenType.LCURL:
+ dict_node = self.py_dict()
+ return dict_node
+ else:
+ msg = "Unexpected token type {} at position {}.".format(
+ token.type_, token.pos
+ )
+ self.error(msg, exc_type=QueryParamsParseError)
+
+ def py_tuple(self):
+ """Implementation of the ``py_tuple`` grammar production rule.
+
+ Production:
+
+ py_tuple : LPAREN collection_items RPAREN
+ """
+ self.consume(TokenType.LPAREN, exc_type=QueryParamsParseError)
+ items = self.collection_items()
+ self.consume(TokenType.RPAREN, exc_type=QueryParamsParseError)
+
+ return PyTuple(items)
+
+ def py_list(self):
+ """Implementation of the ``py_list`` grammar production rule.
+
+ Production:
+
+ py_list : LSQUARE collection_items RSQUARE
+ """
+ self.consume(TokenType.LSQUARE, exc_type=QueryParamsParseError)
+ items = self.collection_items()
+ self.consume(TokenType.RSQUARE, exc_type=QueryParamsParseError)
+
+ return PyList(items)
+
+ def collection_items(self):
+ """Implementation of the ``collection_items`` grammar production rule.
+
+ Production:
+
+ collection_items : collection_item | (collection_item COMMA collection_items)
+ """
+ result = []
+
+ item = self.collection_item()
+ if item is not None:
+ result.append(item)
+
+ while self._current_token.type_ == TokenType.COMMA:
+ self.consume(TokenType.COMMA, exc_type=QueryParamsParseError)
+ item = self.collection_item()
+ if item is not None:
+ result.append(item)
+
+ return result
+
+ def collection_item(self):
+ """Implementation of the ``collection_item`` grammar production rule.
+
+ Production:
+
+ collection_item : py_value | EMPTY
+ """
+ if self._current_token.type_ not in {TokenType.RPAREN, TokenType.RSQUARE}:
+ result = self.py_value()
+ else:
+ result = None # end of list/tuple items
+
+ return result
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/visitors.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/visitors.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbe236c06d91a6f9590104c4957381324f87b915
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/line_arg_parser/visitors.py
@@ -0,0 +1,159 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains classes that traverse AST and convert it to something else.
+
+If the parser successfully accepts a valid input (the bigquery cell magic arguments),
+the result is an Abstract Syntax Tree (AST) that represents the input as a tree
+with notes containing various useful metadata.
+
+Node visitors can process such tree and convert it to something else that can
+be used for further processing, for example:
+
+ * An optimized version of the tree with redundancy removed/simplified (not used here).
+ * The same tree, but with semantic errors checked, because an otherwise syntactically
+ valid input might still contain errors (not used here, semantic errors are detected
+ elsewhere).
+ * A form that can be directly handed to the code that operates on the input. The
+ ``QueryParamsExtractor`` class, for instance, splits the input arguments into
+ the "--params <...>" part and everything else.
+ The "everything else" part can be then parsed by the default Jupyter argument parser,
+ while the --params option is processed separately by the Python evaluator.
+
+More info on the visitor design pattern:
+https://en.wikipedia.org/wiki/Visitor_pattern
+
+"""
+
+from __future__ import print_function
+
+
+class NodeVisitor(object):
+ """Base visitor class implementing the dispatch machinery."""
+
+ def visit(self, node):
+ method_name = "visit_{}".format(type(node).__name__)
+ visitor_method = getattr(self, method_name, self.method_missing)
+ return visitor_method(node)
+
+ def method_missing(self, node):
+ raise Exception("No visit_{} method".format(type(node).__name__))
+
+
+class QueryParamsExtractor(NodeVisitor):
+ """A visitor that extracts the "--params <...>" part from input line arguments."""
+
+ def visit_InputLine(self, node):
+ params_dict_parts = []
+ other_parts = []
+
+ dest_var_parts = self.visit(node.destination_var)
+ params, other_options = self.visit(node.option_list)
+
+ if dest_var_parts:
+ other_parts.extend(dest_var_parts)
+
+ if dest_var_parts and other_options:
+ other_parts.append(" ")
+ other_parts.extend(other_options)
+
+ params_dict_parts.extend(params)
+
+ return "".join(params_dict_parts), "".join(other_parts)
+
+ def visit_DestinationVar(self, node):
+ return [node.name] if node.name is not None else []
+
+ def visit_CmdOptionList(self, node):
+ params_opt_parts = []
+ other_parts = []
+
+ for i, opt in enumerate(node.options):
+ option_parts = self.visit(opt)
+ list_to_extend = params_opt_parts if opt.name == "params" else other_parts
+
+ if list_to_extend:
+ list_to_extend.append(" ")
+ list_to_extend.extend(option_parts)
+
+ return params_opt_parts, other_parts
+
+ def visit_CmdOption(self, node):
+ result = ["--{}".format(node.name)]
+
+ if node.value is not None:
+ result.append(" ")
+ value_parts = self.visit(node.value)
+ result.extend(value_parts)
+
+ return result
+
+ def visit_CmdOptionValue(self, node):
+ return [node.value]
+
+ def visit_ParamsOption(self, node):
+ value_parts = self.visit(node.value)
+ return value_parts
+
+ def visit_PyVarExpansion(self, node):
+ return [node.raw_value]
+
+ def visit_PyDict(self, node):
+ result = ["{"]
+
+ for i, item in enumerate(node.items):
+ if i > 0:
+ result.append(", ")
+ item_parts = self.visit(item)
+ result.extend(item_parts)
+
+ result.append("}")
+ return result
+
+ def visit_PyDictItem(self, node):
+ result = self.visit(node.key) # key parts
+ result.append(": ")
+ value_parts = self.visit(node.value)
+ result.extend(value_parts)
+ return result
+
+ def visit_PyDictKey(self, node):
+ return [node.key_value]
+
+ def visit_PyScalarValue(self, node):
+ return [node.raw_value]
+
+ def visit_PyTuple(self, node):
+ result = ["("]
+
+ for i, item in enumerate(node.items):
+ if i > 0:
+ result.append(", ")
+ item_parts = self.visit(item)
+ result.extend(item_parts)
+
+ result.append(")")
+ return result
+
+ def visit_PyList(self, node):
+ result = ["["]
+
+ for i, item in enumerate(node.items):
+ if i > 0:
+ result.append(", ")
+ item_parts = self.visit(item)
+ result.extend(item_parts)
+
+ result.append("]")
+ return result
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/magics.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/magics.py
new file mode 100644
index 0000000000000000000000000000000000000000..b153d959a0bd467ece6d5dd2e9e9470a8b6400bd
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/magics/magics.py
@@ -0,0 +1,777 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""IPython Magics
+
+Install ``bigquery-magics`` and call ``%load_ext bigquery_magics`` to use the
+``%%bigquery`` cell magic.
+
+See the `BigQuery Magics reference documentation
+ `_.
+"""
+
+from __future__ import print_function
+
+import re
+import ast
+import copy
+import functools
+import sys
+import time
+import warnings
+from concurrent import futures
+
+try:
+ import IPython # type: ignore
+ from IPython import display # type: ignore
+ from IPython.core import magic_arguments # type: ignore
+except ImportError:
+ raise ImportError("This module can only be loaded in IPython.")
+
+from google.api_core import client_info
+from google.api_core import client_options
+from google.api_core.exceptions import NotFound
+import google.auth # type: ignore
+from google.cloud import bigquery
+import google.cloud.bigquery.dataset
+from google.cloud.bigquery import _versions_helpers
+from google.cloud.bigquery import exceptions
+from google.cloud.bigquery.dbapi import _helpers
+from google.cloud.bigquery.magics import line_arg_parser as lap
+
+try:
+ import bigquery_magics # type: ignore
+except ImportError:
+ bigquery_magics = None
+
+
+IPYTHON_USER_AGENT = "ipython-{}".format(IPython.__version__)
+
+
+class Context(object):
+ """Storage for objects to be used throughout an IPython notebook session.
+
+ A Context object is initialized when the ``magics`` module is imported,
+ and can be found at ``google.cloud.bigquery.magics.context``.
+ """
+
+ def __init__(self):
+ self._credentials = None
+ self._project = None
+ self._connection = None
+ self._default_query_job_config = bigquery.QueryJobConfig()
+ self._bigquery_client_options = client_options.ClientOptions()
+ self._bqstorage_client_options = client_options.ClientOptions()
+ self._progress_bar_type = "tqdm_notebook"
+
+ @property
+ def credentials(self):
+ """google.auth.credentials.Credentials: Credentials to use for queries
+ performed through IPython magics.
+
+ Note:
+ These credentials do not need to be explicitly defined if you are
+ using Application Default Credentials. If you are not using
+ Application Default Credentials, manually construct a
+ :class:`google.auth.credentials.Credentials` object and set it as
+ the context credentials as demonstrated in the example below. See
+ `auth docs`_ for more information on obtaining credentials.
+
+ Example:
+ Manually setting the context credentials:
+
+ >>> from google.cloud.bigquery import magics
+ >>> from google.oauth2 import service_account
+ >>> credentials = (service_account
+ ... .Credentials.from_service_account_file(
+ ... '/path/to/key.json'))
+ >>> magics.context.credentials = credentials
+
+
+ .. _auth docs: http://google-auth.readthedocs.io
+ /en/latest/user-guide.html#obtaining-credentials
+ """
+ if self._credentials is None:
+ self._credentials, _ = google.auth.default()
+ return self._credentials
+
+ @credentials.setter
+ def credentials(self, value):
+ self._credentials = value
+
+ @property
+ def project(self):
+ """str: Default project to use for queries performed through IPython
+ magics.
+
+ Note:
+ The project does not need to be explicitly defined if you have an
+ environment default project set. If you do not have a default
+ project set in your environment, manually assign the project as
+ demonstrated in the example below.
+
+ Example:
+ Manually setting the context project:
+
+ >>> from google.cloud.bigquery import magics
+ >>> magics.context.project = 'my-project'
+ """
+ if self._project is None:
+ _, self._project = google.auth.default()
+ return self._project
+
+ @project.setter
+ def project(self, value):
+ self._project = value
+
+ @property
+ def bigquery_client_options(self):
+ """google.api_core.client_options.ClientOptions: client options to be
+ used through IPython magics.
+
+ Note::
+ The client options do not need to be explicitly defined if no
+ special network connections are required. Normally you would be
+ using the https://bigquery.googleapis.com/ end point.
+
+ Example:
+ Manually setting the endpoint:
+
+ >>> from google.cloud.bigquery import magics
+ >>> client_options = {}
+ >>> client_options['api_endpoint'] = "https://some.special.url"
+ >>> magics.context.bigquery_client_options = client_options
+ """
+ return self._bigquery_client_options
+
+ @bigquery_client_options.setter
+ def bigquery_client_options(self, value):
+ self._bigquery_client_options = value
+
+ @property
+ def bqstorage_client_options(self):
+ """google.api_core.client_options.ClientOptions: client options to be
+ used through IPython magics for the storage client.
+
+ Note::
+ The client options do not need to be explicitly defined if no
+ special network connections are required. Normally you would be
+ using the https://bigquerystorage.googleapis.com/ end point.
+
+ Example:
+ Manually setting the endpoint:
+
+ >>> from google.cloud.bigquery import magics
+ >>> client_options = {}
+ >>> client_options['api_endpoint'] = "https://some.special.url"
+ >>> magics.context.bqstorage_client_options = client_options
+ """
+ return self._bqstorage_client_options
+
+ @bqstorage_client_options.setter
+ def bqstorage_client_options(self, value):
+ self._bqstorage_client_options = value
+
+ @property
+ def default_query_job_config(self):
+ """google.cloud.bigquery.job.QueryJobConfig: Default job
+ configuration for queries.
+
+ The context's :class:`~google.cloud.bigquery.job.QueryJobConfig` is
+ used for queries. Some properties can be overridden with arguments to
+ the magics.
+
+ Example:
+ Manually setting the default value for ``maximum_bytes_billed``
+ to 100 MB:
+
+ >>> from google.cloud.bigquery import magics
+ >>> magics.context.default_query_job_config.maximum_bytes_billed = 100000000
+ """
+ return self._default_query_job_config
+
+ @default_query_job_config.setter
+ def default_query_job_config(self, value):
+ self._default_query_job_config = value
+
+ @property
+ def progress_bar_type(self):
+ """str: Default progress bar type to use to display progress bar while
+ executing queries through IPython magics.
+
+ Note::
+ Install the ``tqdm`` package to use this feature.
+
+ Example:
+ Manually setting the progress_bar_type:
+
+ >>> from google.cloud.bigquery import magics
+ >>> magics.context.progress_bar_type = "tqdm_notebook"
+ """
+ return self._progress_bar_type
+
+ @progress_bar_type.setter
+ def progress_bar_type(self, value):
+ self._progress_bar_type = value
+
+
+# If bigquery_magics is available, we load that extension rather than this one.
+# Ensure google.cloud.bigquery.magics.context setters are on the correct magics
+# implementation in case the user has installed the package but hasn't updated
+# their code.
+if bigquery_magics is not None:
+ context = bigquery_magics.context
+else:
+ context = Context()
+
+
+def _handle_error(error, destination_var=None):
+ """Process a query execution error.
+
+ Args:
+ error (Exception):
+ An exception that occurred during the query execution.
+ destination_var (Optional[str]):
+ The name of the IPython session variable to store the query job.
+ """
+ if destination_var:
+ query_job = getattr(error, "query_job", None)
+
+ if query_job is not None:
+ IPython.get_ipython().push({destination_var: query_job})
+ else:
+ # this is the case when previewing table rows by providing just
+ # table ID to cell magic
+ print(
+ "Could not save output to variable '{}'.".format(destination_var),
+ file=sys.stderr,
+ )
+
+ print("\nERROR:\n", str(error), file=sys.stderr)
+
+
+def _run_query(client, query, job_config=None):
+ """Runs a query while printing status updates
+
+ Args:
+ client (google.cloud.bigquery.client.Client):
+ Client to bundle configuration needed for API requests.
+ query (str):
+ SQL query to be executed. Defaults to the standard SQL dialect.
+ Use the ``job_config`` parameter to change dialects.
+ job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
+ Extra configuration options for the job.
+
+ Returns:
+ google.cloud.bigquery.job.QueryJob: the query job created
+
+ Example:
+ >>> client = bigquery.Client()
+ >>> _run_query(client, "SELECT 17")
+ Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
+ Query executing: 1.66s
+ Query complete after 2.07s
+ 'bf633912-af2c-4780-b568-5d868058632b'
+ """
+ start_time = time.perf_counter()
+ query_job = client.query(query, job_config=job_config)
+
+ if job_config and job_config.dry_run:
+ return query_job
+
+ print(f"Executing query with job ID: {query_job.job_id}")
+
+ while True:
+ print(
+ f"\rQuery executing: {time.perf_counter() - start_time:.2f}s".format(),
+ end="",
+ )
+ try:
+ query_job.result(timeout=0.5)
+ break
+ except futures.TimeoutError:
+ continue
+ print(f"\nJob ID {query_job.job_id} successfully executed")
+ return query_job
+
+
+def _create_dataset_if_necessary(client, dataset_id):
+ """Create a dataset in the current project if it doesn't exist.
+
+ Args:
+ client (google.cloud.bigquery.client.Client):
+ Client to bundle configuration needed for API requests.
+ dataset_id (str):
+ Dataset id.
+ """
+ dataset_reference = bigquery.dataset.DatasetReference(client.project, dataset_id)
+ try:
+ dataset = client.get_dataset(dataset_reference)
+ return
+ except NotFound:
+ pass
+ dataset = bigquery.Dataset(dataset_reference)
+ dataset.location = client.location
+ print(f"Creating dataset: {dataset_id}")
+ dataset = client.create_dataset(dataset)
+
+
+@magic_arguments.magic_arguments()
+@magic_arguments.argument(
+ "destination_var",
+ nargs="?",
+ help=("If provided, save the output to this variable instead of displaying it."),
+)
+@magic_arguments.argument(
+ "--destination_table",
+ type=str,
+ default=None,
+ help=(
+ "If provided, save the output of the query to a new BigQuery table. "
+ "Variable should be in a format .. "
+ "If table does not exists, it will be created. "
+ "If table already exists, its data will be overwritten."
+ ),
+)
+@magic_arguments.argument(
+ "--project",
+ type=str,
+ default=None,
+ help=("Project to use for executing this query. Defaults to the context project."),
+)
+@magic_arguments.argument(
+ "--max_results",
+ default=None,
+ help=(
+ "Maximum number of rows in dataframe returned from executing the query."
+ "Defaults to returning all rows."
+ ),
+)
+@magic_arguments.argument(
+ "--maximum_bytes_billed",
+ default=None,
+ help=(
+ "maximum_bytes_billed to use for executing this query. Defaults to "
+ "the context default_query_job_config.maximum_bytes_billed."
+ ),
+)
+@magic_arguments.argument(
+ "--dry_run",
+ action="store_true",
+ default=False,
+ help=(
+ "Sets query to be a dry run to estimate costs. "
+ "Defaults to executing the query instead of dry run if this argument is not used."
+ ),
+)
+@magic_arguments.argument(
+ "--use_legacy_sql",
+ action="store_true",
+ default=False,
+ help=(
+ "Sets query to use Legacy SQL instead of Standard SQL. Defaults to "
+ "Standard SQL if this argument is not used."
+ ),
+)
+@magic_arguments.argument(
+ "--bigquery_api_endpoint",
+ type=str,
+ default=None,
+ help=(
+ "The desired API endpoint, e.g., bigquery.googlepis.com. Defaults to this "
+ "option's value in the context bigquery_client_options."
+ ),
+)
+@magic_arguments.argument(
+ "--bqstorage_api_endpoint",
+ type=str,
+ default=None,
+ help=(
+ "The desired API endpoint, e.g., bigquerystorage.googlepis.com. Defaults to "
+ "this option's value in the context bqstorage_client_options."
+ ),
+)
+@magic_arguments.argument(
+ "--no_query_cache",
+ action="store_true",
+ default=False,
+ help=("Do not use cached query results."),
+)
+@magic_arguments.argument(
+ "--use_bqstorage_api",
+ action="store_true",
+ default=None,
+ help=(
+ "[Deprecated] The BigQuery Storage API is already used by default to "
+ "download large query results, and this option has no effect. "
+ "If you want to switch to the classic REST API instead, use the "
+ "--use_rest_api option."
+ ),
+)
+@magic_arguments.argument(
+ "--use_rest_api",
+ action="store_true",
+ default=False,
+ help=(
+ "Use the classic REST API instead of the BigQuery Storage API to "
+ "download query results."
+ ),
+)
+@magic_arguments.argument(
+ "--verbose",
+ action="store_true",
+ default=False,
+ help=(
+ "If set, print verbose output, including the query job ID and the "
+ "amount of time for the query to finish. By default, this "
+ "information will be displayed as the query runs, but will be "
+ "cleared after the query is finished."
+ ),
+)
+@magic_arguments.argument(
+ "--params",
+ nargs="+",
+ default=None,
+ help=(
+ "Parameters to format the query string. If present, the --params "
+ "flag should be followed by a string representation of a dictionary "
+ "in the format {'param_name': 'param_value'} (ex. {\"num\": 17}), "
+ "or a reference to a dictionary in the same format. The dictionary "
+ "reference can be made by including a '$' before the variable "
+ "name (ex. $my_dict_var)."
+ ),
+)
+@magic_arguments.argument(
+ "--progress_bar_type",
+ type=str,
+ default=None,
+ help=(
+ "Sets progress bar type to display a progress bar while executing the query."
+ "Defaults to use tqdm_notebook. Install the ``tqdm`` package to use this feature."
+ ),
+)
+@magic_arguments.argument(
+ "--location",
+ type=str,
+ default=None,
+ help=(
+ "Set the location to execute query."
+ "Defaults to location set in query setting in console."
+ ),
+)
+def _cell_magic(line, query):
+ """Underlying function for bigquery cell magic
+
+ Note:
+ This function contains the underlying logic for the 'bigquery' cell
+ magic. This function is not meant to be called directly.
+
+ Args:
+ line (str): "%%bigquery" followed by arguments as required
+ query (str): SQL query to run
+
+ Returns:
+ pandas.DataFrame: the query results.
+ """
+ # The built-in parser does not recognize Python structures such as dicts, thus
+ # we extract the "--params" option and inteprpret it separately.
+ try:
+ params_option_value, rest_of_args = _split_args_line(line)
+ except lap.exceptions.QueryParamsParseError as exc:
+ rebranded_error = SyntaxError(
+ "--params is not a correctly formatted JSON string or a JSON "
+ "serializable dictionary"
+ )
+ raise rebranded_error from exc
+ except lap.exceptions.DuplicateQueryParamsError as exc:
+ rebranded_error = ValueError("Duplicate --params option.")
+ raise rebranded_error from exc
+ except lap.exceptions.ParseError as exc:
+ rebranded_error = ValueError(
+ "Unrecognized input, are option values correct? "
+ "Error details: {}".format(exc.args[0])
+ )
+ raise rebranded_error from exc
+
+ args = magic_arguments.parse_argstring(_cell_magic, rest_of_args)
+
+ if args.use_bqstorage_api is not None:
+ warnings.warn(
+ "Deprecated option --use_bqstorage_api, the BigQuery "
+ "Storage API is already used by default.",
+ category=DeprecationWarning,
+ )
+ use_bqstorage_api = not args.use_rest_api
+ location = args.location
+
+ params = []
+ if params_option_value:
+ # A non-existing params variable is not expanded and ends up in the input
+ # in its raw form, e.g. "$query_params".
+ if params_option_value.startswith("$"):
+ msg = 'Parameter expansion failed, undefined variable "{}".'.format(
+ params_option_value[1:]
+ )
+ raise NameError(msg)
+
+ params = _helpers.to_query_parameters(ast.literal_eval(params_option_value), {})
+
+ project = args.project or context.project
+
+ bigquery_client_options = copy.deepcopy(context.bigquery_client_options)
+ if args.bigquery_api_endpoint:
+ if isinstance(bigquery_client_options, dict):
+ bigquery_client_options["api_endpoint"] = args.bigquery_api_endpoint
+ else:
+ bigquery_client_options.api_endpoint = args.bigquery_api_endpoint
+
+ client = bigquery.Client(
+ project=project,
+ credentials=context.credentials,
+ default_query_job_config=context.default_query_job_config,
+ client_info=client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT),
+ client_options=bigquery_client_options,
+ location=location,
+ )
+ if context._connection:
+ client._connection = context._connection
+
+ bqstorage_client_options = copy.deepcopy(context.bqstorage_client_options)
+ if args.bqstorage_api_endpoint:
+ if isinstance(bqstorage_client_options, dict):
+ bqstorage_client_options["api_endpoint"] = args.bqstorage_api_endpoint
+ else:
+ bqstorage_client_options.api_endpoint = args.bqstorage_api_endpoint
+
+ bqstorage_client = _make_bqstorage_client(
+ client,
+ use_bqstorage_api,
+ bqstorage_client_options,
+ )
+
+ close_transports = functools.partial(_close_transports, client, bqstorage_client)
+
+ try:
+ if args.max_results:
+ max_results = int(args.max_results)
+ else:
+ max_results = None
+
+ query = query.strip()
+
+ if not query:
+ error = ValueError("Query is missing.")
+ _handle_error(error, args.destination_var)
+ return
+
+ # Check if query is given as a reference to a variable.
+ if query.startswith("$"):
+ query_var_name = query[1:]
+
+ if not query_var_name:
+ missing_msg = 'Missing query variable name, empty "$" is not allowed.'
+ raise NameError(missing_msg)
+
+ if query_var_name.isidentifier():
+ ip = IPython.get_ipython()
+ query = ip.user_ns.get(query_var_name, ip) # ip serves as a sentinel
+
+ if query is ip:
+ raise NameError(
+ f"Unknown query, variable {query_var_name} does not exist."
+ )
+ else:
+ if not isinstance(query, (str, bytes)):
+ raise TypeError(
+ f"Query variable {query_var_name} must be a string "
+ "or a bytes-like value."
+ )
+
+ # Any query that does not contain whitespace (aside from leading and trailing whitespace)
+ # is assumed to be a table id
+ if not re.search(r"\s", query):
+ try:
+ rows = client.list_rows(query, max_results=max_results)
+ except Exception as ex:
+ _handle_error(ex, args.destination_var)
+ return
+
+ result = rows.to_dataframe(
+ bqstorage_client=bqstorage_client,
+ create_bqstorage_client=False,
+ )
+ if args.destination_var:
+ IPython.get_ipython().push({args.destination_var: result})
+ return
+ else:
+ return result
+
+ job_config = bigquery.job.QueryJobConfig()
+ job_config.query_parameters = params
+ job_config.use_legacy_sql = args.use_legacy_sql
+ job_config.dry_run = args.dry_run
+
+ # Don't override context job config unless --no_query_cache is explicitly set.
+ if args.no_query_cache:
+ job_config.use_query_cache = False
+
+ if args.destination_table:
+ split = args.destination_table.split(".")
+ if len(split) != 2:
+ raise ValueError(
+ "--destination_table should be in a . format."
+ )
+ dataset_id, table_id = split
+ job_config.allow_large_results = True
+ dataset_ref = bigquery.dataset.DatasetReference(client.project, dataset_id)
+ destination_table_ref = dataset_ref.table(table_id)
+ job_config.destination = destination_table_ref
+ job_config.create_disposition = "CREATE_IF_NEEDED"
+ job_config.write_disposition = "WRITE_TRUNCATE"
+ _create_dataset_if_necessary(client, dataset_id)
+
+ if args.maximum_bytes_billed == "None":
+ job_config.maximum_bytes_billed = 0
+ elif args.maximum_bytes_billed is not None:
+ value = int(args.maximum_bytes_billed)
+ job_config.maximum_bytes_billed = value
+
+ try:
+ query_job = _run_query(client, query, job_config=job_config)
+ except Exception as ex:
+ _handle_error(ex, args.destination_var)
+ return
+
+ if not args.verbose:
+ display.clear_output()
+
+ if args.dry_run and args.destination_var:
+ IPython.get_ipython().push({args.destination_var: query_job})
+ return
+ elif args.dry_run:
+ print(
+ "Query validated. This query will process {} bytes.".format(
+ query_job.total_bytes_processed
+ )
+ )
+ return query_job
+
+ progress_bar = context.progress_bar_type or args.progress_bar_type
+
+ if max_results:
+ result = query_job.result(max_results=max_results).to_dataframe(
+ bqstorage_client=None,
+ create_bqstorage_client=False,
+ progress_bar_type=progress_bar,
+ )
+ else:
+ result = query_job.to_dataframe(
+ bqstorage_client=bqstorage_client,
+ create_bqstorage_client=False,
+ progress_bar_type=progress_bar,
+ )
+
+ if args.destination_var:
+ IPython.get_ipython().push({args.destination_var: result})
+ else:
+ return result
+ finally:
+ close_transports()
+
+
+def _split_args_line(line):
+ """Split out the --params option value from the input line arguments.
+
+ Args:
+ line (str): The line arguments passed to the cell magic.
+
+ Returns:
+ Tuple[str, str]
+ """
+ lexer = lap.Lexer(line)
+ scanner = lap.Parser(lexer)
+ tree = scanner.input_line()
+
+ extractor = lap.QueryParamsExtractor()
+ params_option_value, rest_of_args = extractor.visit(tree)
+
+ return params_option_value, rest_of_args
+
+
+def _make_bqstorage_client(client, use_bqstorage_api, client_options):
+ """Creates a BigQuery Storage client.
+
+ Args:
+ client (:class:`~google.cloud.bigquery.client.Client`): BigQuery client.
+ use_bqstorage_api (bool): whether BigQuery Storage API is used or not.
+ client_options (:class:`google.api_core.client_options.ClientOptions`):
+ Custom options used with a new BigQuery Storage client instance
+ if one is created.
+
+ Raises:
+ ImportError: if google-cloud-bigquery-storage is not installed, or
+ grpcio package is not installed.
+
+
+ Returns:
+ None: if ``use_bqstorage_api == False``, or google-cloud-bigquery-storage
+ is outdated.
+ BigQuery Storage Client:
+ """
+ if not use_bqstorage_api:
+ return None
+
+ try:
+ _versions_helpers.BQ_STORAGE_VERSIONS.try_import(raise_if_error=True)
+ except exceptions.BigQueryStorageNotFoundError as err:
+ customized_error = ImportError(
+ "The default BigQuery Storage API client cannot be used, install "
+ "the missing google-cloud-bigquery-storage and pyarrow packages "
+ "to use it. Alternatively, use the classic REST API by specifying "
+ "the --use_rest_api magic option."
+ )
+ raise customized_error from err
+ except exceptions.LegacyBigQueryStorageError:
+ pass
+
+ try:
+ from google.api_core.gapic_v1 import client_info as gapic_client_info
+ except ImportError as err:
+ customized_error = ImportError(
+ "Install the grpcio package to use the BigQuery Storage API."
+ )
+ raise customized_error from err
+
+ return client._ensure_bqstorage_client(
+ client_options=client_options,
+ client_info=gapic_client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT),
+ )
+
+
+def _close_transports(client, bqstorage_client):
+ """Close the given clients' underlying transport channels.
+
+ Closing the transport is needed to release system resources, namely open
+ sockets.
+
+ Args:
+ client (:class:`~google.cloud.bigquery.client.Client`):
+ bqstorage_client
+ (Optional[:class:`~google.cloud.bigquery_storage.BigQueryReadClient`]):
+ A client for the BigQuery Storage API.
+
+ """
+ client.close()
+ if bqstorage_client is not None:
+ bqstorage_client._transport.grpc_channel.close()
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/model.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..45a88ab221e01353a34be3cac802c2a86a2b47b1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/model.py
@@ -0,0 +1,517 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define resources for the BigQuery ML Models API."""
+
+from __future__ import annotations # type: ignore
+
+import copy
+import datetime
+import typing
+from typing import Any, Dict, Optional, Sequence, Union
+
+import google.cloud._helpers # type: ignore
+from google.cloud.bigquery import _helpers
+from google.cloud.bigquery import standard_sql
+from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
+
+
+class Model:
+ """Model represents a machine learning model resource.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/models
+
+ Args:
+ model_ref:
+ A pointer to a model. If ``model_ref`` is a string, it must
+ included a project ID, dataset ID, and model ID, each separated
+ by ``.``.
+ """
+
+ _PROPERTY_TO_API_FIELD = {
+ "expires": "expirationTime",
+ "friendly_name": "friendlyName",
+ # Even though it's not necessary for field mapping to map when the
+ # property name equals the resource name, we add these here so that we
+ # have an exhaustive list of all mutable properties.
+ "labels": "labels",
+ "description": "description",
+ "encryption_configuration": "encryptionConfiguration",
+ }
+
+ def __init__(self, model_ref: Union["ModelReference", str, None]):
+ # Use _properties on read-write properties to match the REST API
+ # semantics. The BigQuery API makes a distinction between an unset
+ # value, a null value, and a default value (0 or ""), but the protocol
+ # buffer classes do not.
+ self._properties = {}
+
+ if isinstance(model_ref, str):
+ model_ref = ModelReference.from_string(model_ref)
+
+ if model_ref:
+ self._properties["modelReference"] = model_ref.to_api_repr()
+
+ @property
+ def reference(self) -> Optional["ModelReference"]:
+ """A model reference pointing to this model.
+
+ Read-only.
+ """
+ resource = self._properties.get("modelReference")
+ if resource is None:
+ return None
+ else:
+ return ModelReference.from_api_repr(resource)
+
+ @property
+ def project(self) -> Optional[str]:
+ """Project bound to the model."""
+ ref = self.reference
+ return ref.project if ref is not None else None
+
+ @property
+ def dataset_id(self) -> Optional[str]:
+ """ID of dataset containing the model."""
+ ref = self.reference
+ return ref.dataset_id if ref is not None else None
+
+ @property
+ def model_id(self) -> Optional[str]:
+ """The model ID."""
+ ref = self.reference
+ return ref.model_id if ref is not None else None
+
+ @property
+ def path(self) -> Optional[str]:
+ """URL path for the model's APIs."""
+ ref = self.reference
+ return ref.path if ref is not None else None
+
+ @property
+ def location(self) -> Optional[str]:
+ """The geographic location where the model resides.
+
+ This value is inherited from the dataset.
+
+ Read-only.
+ """
+ return typing.cast(Optional[str], self._properties.get("location"))
+
+ @property
+ def etag(self) -> Optional[str]:
+ """ETag for the model resource (:data:`None` until set from the server).
+
+ Read-only.
+ """
+ return typing.cast(Optional[str], self._properties.get("etag"))
+
+ @property
+ def created(self) -> Optional[datetime.datetime]:
+ """Datetime at which the model was created (:data:`None` until set from the server).
+
+ Read-only.
+ """
+ value = typing.cast(Optional[float], self._properties.get("creationTime"))
+ if value is None:
+ return None
+ else:
+ # value will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(value)
+ )
+
+ @property
+ def modified(self) -> Optional[datetime.datetime]:
+ """Datetime at which the model was last modified (:data:`None` until set from the server).
+
+ Read-only.
+ """
+ value = typing.cast(Optional[float], self._properties.get("lastModifiedTime"))
+ if value is None:
+ return None
+ else:
+ # value will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(value)
+ )
+
+ @property
+ def model_type(self) -> str:
+ """Type of the model resource.
+
+ Read-only.
+ """
+ return typing.cast(
+ str, self._properties.get("modelType", "MODEL_TYPE_UNSPECIFIED")
+ )
+
+ @property
+ def training_runs(self) -> Sequence[Dict[str, Any]]:
+ """Information for all training runs in increasing order of start time.
+
+ Dictionaries are in REST API format. See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/models#trainingrun
+
+ Read-only.
+ """
+ return typing.cast(
+ Sequence[Dict[str, Any]], self._properties.get("trainingRuns", [])
+ )
+
+ @property
+ def feature_columns(self) -> Sequence[standard_sql.StandardSqlField]:
+ """Input feature columns that were used to train this model.
+
+ Read-only.
+ """
+ resource: Sequence[Dict[str, Any]] = typing.cast(
+ Sequence[Dict[str, Any]], self._properties.get("featureColumns", [])
+ )
+ return [
+ standard_sql.StandardSqlField.from_api_repr(column) for column in resource
+ ]
+
+ @property
+ def transform_columns(self) -> Sequence[TransformColumn]:
+ """The input feature columns that were used to train this model.
+ The output transform columns used to train this model.
+
+ See REST API:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/models#transformcolumn
+
+ Read-only.
+ """
+ resources: Sequence[Dict[str, Any]] = typing.cast(
+ Sequence[Dict[str, Any]], self._properties.get("transformColumns", [])
+ )
+ return [TransformColumn(resource) for resource in resources]
+
+ @property
+ def label_columns(self) -> Sequence[standard_sql.StandardSqlField]:
+ """Label columns that were used to train this model.
+
+ The output of the model will have a ``predicted_`` prefix to these columns.
+
+ Read-only.
+ """
+ resource: Sequence[Dict[str, Any]] = typing.cast(
+ Sequence[Dict[str, Any]], self._properties.get("labelColumns", [])
+ )
+ return [
+ standard_sql.StandardSqlField.from_api_repr(column) for column in resource
+ ]
+
+ @property
+ def best_trial_id(self) -> Optional[int]:
+ """The best trial_id across all training runs.
+
+ .. deprecated::
+ This property is deprecated!
+
+ Read-only.
+ """
+ value = typing.cast(Optional[int], self._properties.get("bestTrialId"))
+ if value is not None:
+ value = int(value)
+ return value
+
+ @property
+ def expires(self) -> Optional[datetime.datetime]:
+ """The datetime when this model expires.
+
+ If not present, the model will persist indefinitely. Expired models will be
+ deleted and their storage reclaimed.
+ """
+ value = typing.cast(Optional[float], self._properties.get("expirationTime"))
+ if value is None:
+ return None
+ else:
+ # value will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(value)
+ )
+
+ @expires.setter
+ def expires(self, value: Optional[datetime.datetime]):
+ if value is None:
+ value_to_store: Optional[str] = None
+ else:
+ value_to_store = str(google.cloud._helpers._millis_from_datetime(value))
+ # TODO: Consider using typing.TypedDict when only Python 3.8+ is supported.
+ self._properties["expirationTime"] = value_to_store # type: ignore
+
+ @property
+ def description(self) -> Optional[str]:
+ """Description of the model (defaults to :data:`None`)."""
+ return typing.cast(Optional[str], self._properties.get("description"))
+
+ @description.setter
+ def description(self, value: Optional[str]):
+ # TODO: Consider using typing.TypedDict when only Python 3.8+ is supported.
+ self._properties["description"] = value # type: ignore
+
+ @property
+ def friendly_name(self) -> Optional[str]:
+ """Title of the table (defaults to :data:`None`)."""
+ return typing.cast(Optional[str], self._properties.get("friendlyName"))
+
+ @friendly_name.setter
+ def friendly_name(self, value: Optional[str]):
+ # TODO: Consider using typing.TypedDict when only Python 3.8+ is supported.
+ self._properties["friendlyName"] = value # type: ignore
+
+ @property
+ def labels(self) -> Dict[str, str]:
+ """Labels for the table.
+
+ This method always returns a dict. To change a model's labels, modify the dict,
+ then call ``Client.update_model``. To delete a label, set its value to
+ :data:`None` before updating.
+ """
+ return self._properties.setdefault("labels", {})
+
+ @labels.setter
+ def labels(self, value: Optional[Dict[str, str]]):
+ if value is None:
+ value = {}
+ self._properties["labels"] = value
+
+ @property
+ def encryption_configuration(self) -> Optional[EncryptionConfiguration]:
+ """Custom encryption configuration for the model.
+
+ Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
+ if using default encryption.
+
+ See `protecting data with Cloud KMS keys
+ `_
+ in the BigQuery documentation.
+ """
+ prop = self._properties.get("encryptionConfiguration")
+ if prop:
+ prop = EncryptionConfiguration.from_api_repr(prop)
+ return typing.cast(Optional[EncryptionConfiguration], prop)
+
+ @encryption_configuration.setter
+ def encryption_configuration(self, value: Optional[EncryptionConfiguration]):
+ api_repr = value.to_api_repr() if value else value
+ self._properties["encryptionConfiguration"] = api_repr
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, Any]) -> "Model":
+ """Factory: construct a model resource given its API representation
+
+ Args:
+ resource:
+ Model resource representation from the API
+
+ Returns:
+ Model parsed from ``resource``.
+ """
+ this = cls(None)
+ resource = copy.deepcopy(resource)
+ this._properties = resource
+ return this
+
+ def _build_resource(self, filter_fields):
+ """Generate a resource for ``update``."""
+ return _helpers._build_resource_from_properties(self, filter_fields)
+
+ def __repr__(self):
+ return f"Model(reference={self.reference!r})"
+
+ def to_api_repr(self) -> Dict[str, Any]:
+ """Construct the API resource representation of this model.
+
+ Returns:
+ Model reference represented as an API resource
+ """
+ return copy.deepcopy(self._properties)
+
+
+class ModelReference:
+ """ModelReferences are pointers to models.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/models#modelreference
+ """
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def project(self):
+ """str: Project bound to the model"""
+ return self._properties.get("projectId")
+
+ @property
+ def dataset_id(self):
+ """str: ID of dataset containing the model."""
+ return self._properties.get("datasetId")
+
+ @property
+ def model_id(self):
+ """str: The model ID."""
+ return self._properties.get("modelId")
+
+ @property
+ def path(self) -> str:
+ """URL path for the model's APIs."""
+ return f"/projects/{self.project}/datasets/{self.dataset_id}/models/{self.model_id}"
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, Any]) -> "ModelReference":
+ """Factory: construct a model reference given its API representation.
+
+ Args:
+ resource:
+ Model reference representation returned from the API
+
+ Returns:
+ Model reference parsed from ``resource``.
+ """
+ ref = cls()
+ ref._properties = resource
+ return ref
+
+ @classmethod
+ def from_string(
+ cls, model_id: str, default_project: Optional[str] = None
+ ) -> "ModelReference":
+ """Construct a model reference from model ID string.
+
+ Args:
+ model_id:
+ A model ID in standard SQL format. If ``default_project``
+ is not specified, this must included a project ID, dataset
+ ID, and model ID, each separated by ``.``.
+ default_project:
+ The project ID to use when ``model_id`` does not include
+ a project ID.
+
+ Returns:
+ Model reference parsed from ``model_id``.
+
+ Raises:
+ ValueError:
+ If ``model_id`` is not a fully-qualified table ID in
+ standard SQL format.
+ """
+ proj, dset, model = _helpers._parse_3_part_id(
+ model_id, default_project=default_project, property_name="model_id"
+ )
+ return cls.from_api_repr(
+ {"projectId": proj, "datasetId": dset, "modelId": model}
+ )
+
+ def to_api_repr(self) -> Dict[str, Any]:
+ """Construct the API resource representation of this model reference.
+
+ Returns:
+ Model reference represented as an API resource.
+ """
+ return copy.deepcopy(self._properties)
+
+ def _key(self):
+ """Unique key for this model.
+
+ This is used for hashing a ModelReference.
+ """
+ return self.project, self.dataset_id, self.model_id
+
+ def __eq__(self, other):
+ if not isinstance(other, ModelReference):
+ return NotImplemented
+ return self._properties == other._properties
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def __repr__(self):
+ return "ModelReference(project_id='{}', dataset_id='{}', model_id='{}')".format(
+ self.project, self.dataset_id, self.model_id
+ )
+
+
+class TransformColumn:
+ """TransformColumn represents a transform column feature.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/models#transformcolumn
+
+ Args:
+ resource:
+ A dictionary representing a transform column feature.
+ """
+
+ def __init__(self, resource: Dict[str, Any]):
+ self._properties = resource
+
+ @property
+ def name(self) -> Optional[str]:
+ """Name of the column."""
+ return self._properties.get("name")
+
+ @property
+ def type_(self) -> Optional[standard_sql.StandardSqlDataType]:
+ """Data type of the column after the transform.
+
+ Returns:
+ Optional[google.cloud.bigquery.standard_sql.StandardSqlDataType]:
+ Data type of the column.
+ """
+ type_json = self._properties.get("type")
+ if type_json is None:
+ return None
+ return standard_sql.StandardSqlDataType.from_api_repr(type_json)
+
+ @property
+ def transform_sql(self) -> Optional[str]:
+ """The SQL expression used in the column transform."""
+ return self._properties.get("transformSql")
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, Any]) -> "TransformColumn":
+ """Constructs a transform column feature given its API representation
+
+ Args:
+ resource:
+ Transform column feature representation from the API
+
+ Returns:
+ Transform column feature parsed from ``resource``.
+ """
+ this = cls({})
+ resource = copy.deepcopy(resource)
+ this._properties = resource
+ return this
+
+
+def _model_arg_to_model_ref(value, default_project=None):
+ """Helper to convert a string or Model to ModelReference.
+
+ This function keeps ModelReference and other kinds of objects unchanged.
+ """
+ if isinstance(value, str):
+ return ModelReference.from_string(value, default_project=default_project)
+ if isinstance(value, Model):
+ return value.reference
+ return value
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/opentelemetry_tracing.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/opentelemetry_tracing.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5f6bf9912edbf566a9806f48019f5c2100d5604
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/opentelemetry_tracing.py
@@ -0,0 +1,164 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from contextlib import contextmanager
+from google.api_core.exceptions import GoogleAPICallError # type: ignore
+
+logger = logging.getLogger(__name__)
+try:
+ from opentelemetry import trace # type: ignore
+ from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore
+ from opentelemetry.trace.status import Status # type: ignore
+
+ HAS_OPENTELEMETRY = True
+ _warned_telemetry = True
+
+except ImportError:
+ HAS_OPENTELEMETRY = False
+ _warned_telemetry = False
+
+_default_attributes = {
+ "db.system": "BigQuery"
+} # static, default values assigned to all spans
+
+
+@contextmanager
+def create_span(name, attributes=None, client=None, job_ref=None):
+ """Creates a ContextManager for a Span to be exported to the configured exporter.
+ If no configuration exists yields None.
+
+ Args:
+ name (str): Name that will be set for the span being created
+ attributes (Optional[dict]):
+ Additional attributes that pertain to
+ the specific API call (i.e. not a default attribute)
+ client (Optional[google.cloud.bigquery.client.Client]):
+ Pass in a Client object to extract any attributes that may be
+ relevant to it and add them to the created spans.
+ job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
+ Pass in a _AsyncJob object to extract any attributes that may be
+ relevant to it and add them to the created spans.
+
+ Yields:
+ opentelemetry.trace.Span: Yields the newly created Span.
+
+ Raises:
+ google.api_core.exceptions.GoogleAPICallError:
+ Raised if a span could not be yielded or issue with call to
+ OpenTelemetry.
+ """
+ global _warned_telemetry
+ final_attributes = _get_final_span_attributes(attributes, client, job_ref)
+ if not HAS_OPENTELEMETRY:
+ if not _warned_telemetry:
+ logger.debug(
+ "This service is instrumented using OpenTelemetry. "
+ "OpenTelemetry or one of its components could not be imported; "
+ "please add compatible versions of opentelemetry-api and "
+ "opentelemetry-instrumentation packages in order to get BigQuery "
+ "Tracing data."
+ )
+ _warned_telemetry = True
+
+ yield None
+ return
+ tracer = trace.get_tracer(__name__)
+
+ # yield new span value
+ with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:
+ try:
+ yield span
+ except GoogleAPICallError as error:
+ if error.code is not None:
+ span.set_status(Status(http_status_to_status_code(error.code)))
+ raise
+
+
+def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
+ """Compiles attributes from: client, job_ref, user-provided attributes.
+
+ Attributes from all of these sources are merged together. Note the
+ attributes are added sequentially based on perceived order of precedence:
+ i.e. attributes added last may overwrite attributes added earlier.
+
+ Args:
+ attributes (Optional[dict]):
+ Additional attributes that pertain to
+ the specific API call (i.e. not a default attribute)
+
+ client (Optional[google.cloud.bigquery.client.Client]):
+ Pass in a Client object to extract any attributes that may be
+ relevant to it and add them to the final_attributes
+
+ job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
+ Pass in a _AsyncJob object to extract any attributes that may be
+ relevant to it and add them to the final_attributes.
+
+ Returns: dict
+ """
+
+ collected_attributes = _default_attributes.copy()
+
+ if client:
+ collected_attributes.update(_set_client_attributes(client))
+ if job_ref:
+ collected_attributes.update(_set_job_attributes(job_ref))
+ if attributes:
+ collected_attributes.update(attributes)
+
+ final_attributes = {k: v for k, v in collected_attributes.items() if v is not None}
+ return final_attributes
+
+
+def _set_client_attributes(client):
+ return {"db.name": client.project, "location": client.location}
+
+
+def _set_job_attributes(job_ref):
+ job_attributes = {
+ "db.name": job_ref.project,
+ "job_id": job_ref.job_id,
+ "state": job_ref.state,
+ }
+
+ job_attributes["hasErrors"] = job_ref.error_result is not None
+
+ if job_ref.created is not None:
+ job_attributes["timeCreated"] = job_ref.created.isoformat()
+
+ if job_ref.started is not None:
+ job_attributes["timeStarted"] = job_ref.started.isoformat()
+
+ if job_ref.ended is not None:
+ job_attributes["timeEnded"] = job_ref.ended.isoformat()
+
+ if job_ref.location is not None:
+ job_attributes["location"] = job_ref.location
+
+ if job_ref.parent_job_id is not None:
+ job_attributes["parent_job_id"] = job_ref.parent_job_id
+
+ if job_ref.num_child_jobs is not None:
+ job_attributes["num_child_jobs"] = job_ref.num_child_jobs
+
+ total_bytes_billed = getattr(job_ref, "total_bytes_billed", None)
+ if total_bytes_billed is not None:
+ job_attributes["total_bytes_billed"] = total_bytes_billed
+
+ total_bytes_processed = getattr(job_ref, "total_bytes_processed", None)
+ if total_bytes_processed is not None:
+ job_attributes["total_bytes_processed"] = total_bytes_processed
+
+ return job_attributes
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/py.typed b/testbed/googleapis__python-bigquery/google/cloud/bigquery/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e73777993c3406624fca319884f8163f640fbab3
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-bigquery package uses inline types.
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/query.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/query.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1090a7dcc8b2500e8f2ddf525646d0b98eb05ff
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/query.py
@@ -0,0 +1,1327 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""BigQuery query processing."""
+
+from collections import OrderedDict
+import copy
+import datetime
+import decimal
+from typing import Any, Optional, Dict, Union
+
+from google.cloud.bigquery.table import _parse_schema_resource
+from google.cloud.bigquery._helpers import _rows_from_json
+from google.cloud.bigquery._helpers import _QUERY_PARAMS_FROM_JSON
+from google.cloud.bigquery._helpers import _SCALAR_VALUE_TO_JSON_PARAM
+from google.cloud.bigquery._helpers import _SUPPORTED_RANGE_ELEMENTS
+
+
+_SCALAR_VALUE_TYPE = Optional[
+ Union[str, int, float, decimal.Decimal, bool, datetime.datetime, datetime.date]
+]
+
+
+class ConnectionProperty:
+ """A connection-level property to customize query behavior.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/ConnectionProperty
+
+ Args:
+ key:
+ The key of the property to set, for example, ``'time_zone'`` or
+ ``'session_id'``.
+ value: The value of the property to set.
+ """
+
+ def __init__(self, key: str = "", value: str = ""):
+ self._properties = {
+ "key": key,
+ "value": value,
+ }
+
+ @property
+ def key(self) -> str:
+ """Name of the property.
+
+ For example:
+
+ * ``time_zone``
+ * ``session_id``
+ """
+ return self._properties["key"]
+
+ @property
+ def value(self) -> str:
+ """Value of the property."""
+ return self._properties["value"]
+
+ @classmethod
+ def from_api_repr(cls, resource) -> "ConnectionProperty":
+ """Construct :class:`~google.cloud.bigquery.query.ConnectionProperty`
+ from JSON resource.
+
+ Args:
+ resource: JSON representation.
+
+ Returns:
+ A connection property.
+ """
+ value = cls()
+ value._properties = resource
+ return value
+
+ def to_api_repr(self) -> Dict[str, Any]:
+ """Construct JSON API representation for the connection property.
+
+ Returns:
+ JSON mapping
+ """
+ return self._properties
+
+
+class UDFResource(object):
+ """Describe a single user-defined function (UDF) resource.
+
+ Args:
+ udf_type (str): The type of the resource ('inlineCode' or 'resourceUri')
+
+ value (str): The inline code or resource URI.
+
+ See:
+ https://cloud.google.com/bigquery/user-defined-functions#api
+ """
+
+ def __init__(self, udf_type, value):
+ self.udf_type = udf_type
+ self.value = value
+
+ def __eq__(self, other):
+ if not isinstance(other, UDFResource):
+ return NotImplemented
+ return self.udf_type == other.udf_type and self.value == other.value
+
+ def __ne__(self, other):
+ return not self == other
+
+
+class _AbstractQueryParameterType:
+ """Base class for representing query parameter types.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/QueryParameter#queryparametertype
+ """
+
+ @classmethod
+ def from_api_repr(cls, resource):
+ """Factory: construct parameter type from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ google.cloud.bigquery.query.QueryParameterType: Instance
+ """
+ raise NotImplementedError
+
+ def to_api_repr(self):
+ """Construct JSON API representation for the parameter type.
+
+ Returns:
+ Dict: JSON mapping
+ """
+ raise NotImplementedError
+
+
+class ScalarQueryParameterType(_AbstractQueryParameterType):
+ """Type representation for scalar query parameters.
+
+ Args:
+ type_ (str):
+ One of 'STRING', 'INT64', 'FLOAT64', 'NUMERIC', 'BOOL', 'TIMESTAMP',
+ 'DATETIME', or 'DATE'.
+ name (Optional[str]):
+ The name of the query parameter. Primarily used if the type is
+ one of the subfields in ``StructQueryParameterType`` instance.
+ description (Optional[str]):
+ The query parameter description. Primarily used if the type is
+ one of the subfields in ``StructQueryParameterType`` instance.
+ """
+
+ def __init__(self, type_, *, name=None, description=None):
+ self._type = type_
+ self.name = name
+ self.description = description
+
+ @classmethod
+ def from_api_repr(cls, resource):
+ """Factory: construct parameter type from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ google.cloud.bigquery.query.ScalarQueryParameterType: Instance
+ """
+ type_ = resource["type"]
+ return cls(type_)
+
+ def to_api_repr(self):
+ """Construct JSON API representation for the parameter type.
+
+ Returns:
+ Dict: JSON mapping
+ """
+ # Name and description are only used if the type is a field inside a struct
+ # type, but it's StructQueryParameterType's responsibilty to use these two
+ # attributes in the API representation when needed. Here we omit them.
+ return {"type": self._type}
+
+ def with_name(self, new_name: Union[str, None]):
+ """Return a copy of the instance with ``name`` set to ``new_name``.
+
+ Args:
+ name (Union[str, None]):
+ The new name of the query parameter type. If ``None``, the existing
+ name is cleared.
+
+ Returns:
+ google.cloud.bigquery.query.ScalarQueryParameterType:
+ A new instance with updated name.
+ """
+ return type(self)(self._type, name=new_name, description=self.description)
+
+ def __repr__(self):
+ name = f", name={self.name!r}" if self.name is not None else ""
+ description = (
+ f", description={self.description!r}"
+ if self.description is not None
+ else ""
+ )
+ return f"{self.__class__.__name__}({self._type!r}{name}{description})"
+
+
+class ArrayQueryParameterType(_AbstractQueryParameterType):
+ """Type representation for array query parameters.
+
+ Args:
+ array_type (Union[ScalarQueryParameterType, StructQueryParameterType]):
+ The type of array elements.
+ name (Optional[str]):
+ The name of the query parameter. Primarily used if the type is
+ one of the subfields in ``StructQueryParameterType`` instance.
+ description (Optional[str]):
+ The query parameter description. Primarily used if the type is
+ one of the subfields in ``StructQueryParameterType`` instance.
+ """
+
+ def __init__(self, array_type, *, name=None, description=None):
+ self._array_type = array_type
+ self.name = name
+ self.description = description
+
+ @classmethod
+ def from_api_repr(cls, resource):
+ """Factory: construct parameter type from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ google.cloud.bigquery.query.ArrayQueryParameterType: Instance
+ """
+ array_item_type = resource["arrayType"]["type"]
+
+ if array_item_type in {"STRUCT", "RECORD"}:
+ klass = StructQueryParameterType
+ else:
+ klass = ScalarQueryParameterType
+
+ item_type_instance = klass.from_api_repr(resource["arrayType"])
+ return cls(item_type_instance)
+
+ def to_api_repr(self):
+ """Construct JSON API representation for the parameter type.
+
+ Returns:
+ Dict: JSON mapping
+ """
+ # Name and description are only used if the type is a field inside a struct
+ # type, but it's StructQueryParameterType's responsibilty to use these two
+ # attributes in the API representation when needed. Here we omit them.
+ return {
+ "type": "ARRAY",
+ "arrayType": self._array_type.to_api_repr(),
+ }
+
+ def __repr__(self):
+ name = f", name={self.name!r}" if self.name is not None else ""
+ description = (
+ f", description={self.description!r}"
+ if self.description is not None
+ else ""
+ )
+ return f"{self.__class__.__name__}({self._array_type!r}{name}{description})"
+
+
+class StructQueryParameterType(_AbstractQueryParameterType):
+ """Type representation for struct query parameters.
+
+ Args:
+ fields (Iterable[Union[ \
+ ArrayQueryParameterType, ScalarQueryParameterType, StructQueryParameterType \
+ ]]):
+ An non-empty iterable describing the struct's field types.
+ name (Optional[str]):
+ The name of the query parameter. Primarily used if the type is
+ one of the subfields in ``StructQueryParameterType`` instance.
+ description (Optional[str]):
+ The query parameter description. Primarily used if the type is
+ one of the subfields in ``StructQueryParameterType`` instance.
+ """
+
+ def __init__(self, *fields, name=None, description=None):
+ if not fields:
+ raise ValueError("Struct type must have at least one field defined.")
+
+ self._fields = fields # fields is a tuple (immutable), no shallow copy needed
+ self.name = name
+ self.description = description
+
+ @property
+ def fields(self):
+ return self._fields # no copy needed, self._fields is an immutable sequence
+
+ @classmethod
+ def from_api_repr(cls, resource):
+ """Factory: construct parameter type from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ google.cloud.bigquery.query.StructQueryParameterType: Instance
+ """
+ fields = []
+
+ for struct_field in resource["structTypes"]:
+ type_repr = struct_field["type"]
+ if type_repr["type"] in {"STRUCT", "RECORD"}:
+ klass = StructQueryParameterType
+ elif type_repr["type"] == "ARRAY":
+ klass = ArrayQueryParameterType
+ else:
+ klass = ScalarQueryParameterType
+
+ type_instance = klass.from_api_repr(type_repr)
+ type_instance.name = struct_field.get("name")
+ type_instance.description = struct_field.get("description")
+ fields.append(type_instance)
+
+ return cls(*fields)
+
+ def to_api_repr(self):
+ """Construct JSON API representation for the parameter type.
+
+ Returns:
+ Dict: JSON mapping
+ """
+ fields = []
+
+ for field in self._fields:
+ item = {"type": field.to_api_repr()}
+ if field.name is not None:
+ item["name"] = field.name
+ if field.description is not None:
+ item["description"] = field.description
+
+ fields.append(item)
+
+ return {
+ "type": "STRUCT",
+ "structTypes": fields,
+ }
+
+ def __repr__(self):
+ name = f", name={self.name!r}" if self.name is not None else ""
+ description = (
+ f", description={self.description!r}"
+ if self.description is not None
+ else ""
+ )
+ items = ", ".join(repr(field) for field in self._fields)
+ return f"{self.__class__.__name__}({items}{name}{description})"
+
+
+class RangeQueryParameterType(_AbstractQueryParameterType):
+ """Type representation for range query parameters.
+
+ Args:
+ type_ (Union[ScalarQueryParameterType, str]):
+ Type of range element, must be one of 'TIMESTAMP', 'DATETIME', or
+ 'DATE'.
+ name (Optional[str]):
+ The name of the query parameter. Primarily used if the type is
+ one of the subfields in ``StructQueryParameterType`` instance.
+ description (Optional[str]):
+ The query parameter description. Primarily used if the type is
+ one of the subfields in ``StructQueryParameterType`` instance.
+ """
+
+ @classmethod
+ def _parse_range_element_type(self, type_):
+ """Helper method that parses the input range element type, which may
+ be a string, or a ScalarQueryParameterType object.
+
+ Returns:
+ google.cloud.bigquery.query.ScalarQueryParameterType: Instance
+ """
+ if isinstance(type_, str):
+ if type_ not in _SUPPORTED_RANGE_ELEMENTS:
+ raise ValueError(
+ "If given as a string, range element type must be one of "
+ "'TIMESTAMP', 'DATE', or 'DATETIME'."
+ )
+ return ScalarQueryParameterType(type_)
+ elif isinstance(type_, ScalarQueryParameterType):
+ if type_._type not in _SUPPORTED_RANGE_ELEMENTS:
+ raise ValueError(
+ "If given as a ScalarQueryParameter object, range element "
+ "type must be one of 'TIMESTAMP', 'DATE', or 'DATETIME' "
+ "type."
+ )
+ return type_
+ else:
+ raise ValueError(
+ "range_type must be a string or ScalarQueryParameter object, "
+ "of 'TIMESTAMP', 'DATE', or 'DATETIME' type."
+ )
+
+ def __init__(self, type_, *, name=None, description=None):
+ self.type_ = self._parse_range_element_type(type_)
+ self.name = name
+ self.description = description
+
+ @classmethod
+ def from_api_repr(cls, resource):
+ """Factory: construct parameter type from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ google.cloud.bigquery.query.RangeQueryParameterType: Instance
+ """
+ type_ = resource["rangeElementType"]["type"]
+ name = resource.get("name")
+ description = resource.get("description")
+
+ return cls(type_, name=name, description=description)
+
+ def to_api_repr(self):
+ """Construct JSON API representation for the parameter type.
+
+ Returns:
+ Dict: JSON mapping
+ """
+ # Name and description are only used if the type is a field inside a struct
+ # type, but it's StructQueryParameterType's responsibilty to use these two
+ # attributes in the API representation when needed. Here we omit them.
+ return {
+ "type": "RANGE",
+ "rangeElementType": self.type_.to_api_repr(),
+ }
+
+ def with_name(self, new_name: Union[str, None]):
+ """Return a copy of the instance with ``name`` set to ``new_name``.
+
+ Args:
+ name (Union[str, None]):
+ The new name of the range query parameter type. If ``None``,
+ the existing name is cleared.
+
+ Returns:
+ google.cloud.bigquery.query.RangeQueryParameterType:
+ A new instance with updated name.
+ """
+ return type(self)(self.type_, name=new_name, description=self.description)
+
+ def __repr__(self):
+ name = f", name={self.name!r}" if self.name is not None else ""
+ description = (
+ f", description={self.description!r}"
+ if self.description is not None
+ else ""
+ )
+ return f"{self.__class__.__name__}({self.type_!r}{name}{description})"
+
+ def _key(self):
+ """A tuple key that uniquely describes this field.
+
+ Used to compute this instance's hashcode and evaluate equality.
+
+ Returns:
+ Tuple: The contents of this
+ :class:`~google.cloud.bigquery.query.RangeQueryParameterType`.
+ """
+ type_ = self.type_.to_api_repr()
+ return (self.name, type_, self.description)
+
+ def __eq__(self, other):
+ if not isinstance(other, RangeQueryParameterType):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+
+class _AbstractQueryParameter(object):
+ """Base class for named / positional query parameters."""
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "_AbstractQueryParameter":
+ """Factory: construct parameter from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ A new instance of _AbstractQueryParameter subclass.
+ """
+ raise NotImplementedError
+
+ def to_api_repr(self) -> dict:
+ """Construct JSON API representation for the parameter.
+
+ Returns:
+ Dict: JSON representation for the parameter.
+ """
+ raise NotImplementedError
+
+
+class ScalarQueryParameter(_AbstractQueryParameter):
+ """Named / positional query parameters for scalar values.
+
+ Args:
+ name:
+ Parameter name, used via ``@foo`` syntax. If None, the
+ parameter can only be addressed via position (``?``).
+
+ type_:
+ Name of parameter type. See
+ :class:`google.cloud.bigquery.enums.SqlTypeNames` and
+ :class:`google.cloud.bigquery.query.SqlParameterScalarTypes` for
+ supported types.
+
+ value:
+ The scalar parameter value.
+ """
+
+ def __init__(
+ self,
+ name: Optional[str],
+ type_: Optional[Union[str, ScalarQueryParameterType]],
+ value: _SCALAR_VALUE_TYPE,
+ ):
+ self.name = name
+ if isinstance(type_, ScalarQueryParameterType):
+ self.type_ = type_._type
+ else:
+ self.type_ = type_
+ self.value = value
+
+ @classmethod
+ def positional(
+ cls, type_: Union[str, ScalarQueryParameterType], value: _SCALAR_VALUE_TYPE
+ ) -> "ScalarQueryParameter":
+ """Factory for positional paramater.
+
+ Args:
+ type_:
+ Name of parameter type. One of 'STRING', 'INT64',
+ 'FLOAT64', 'NUMERIC', 'BIGNUMERIC', 'BOOL', 'TIMESTAMP', 'DATETIME', or
+ 'DATE'.
+
+ value:
+ The scalar parameter value.
+
+ Returns:
+ google.cloud.bigquery.query.ScalarQueryParameter: Instance without name
+ """
+ return cls(None, type_, value)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "ScalarQueryParameter":
+ """Factory: construct parameter from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ google.cloud.bigquery.query.ScalarQueryParameter: Instance
+ """
+ name = resource.get("name")
+ type_ = resource["parameterType"]["type"]
+
+ # parameterValue might not be present if JSON resource originates
+ # from the back-end - the latter omits it for None values.
+ value = resource.get("parameterValue", {}).get("value")
+ if value is not None:
+ converted = _QUERY_PARAMS_FROM_JSON[type_](value, None)
+ else:
+ converted = None
+
+ return cls(name, type_, converted)
+
+ def to_api_repr(self) -> dict:
+ """Construct JSON API representation for the parameter.
+
+ Returns:
+ Dict: JSON mapping
+ """
+ value = self.value
+ converter = _SCALAR_VALUE_TO_JSON_PARAM.get(self.type_, lambda value: value)
+ value = converter(value) # type: ignore
+ resource: Dict[str, Any] = {
+ "parameterType": {"type": self.type_},
+ "parameterValue": {"value": value},
+ }
+ if self.name is not None:
+ resource["name"] = self.name
+ return resource
+
+ def _key(self):
+ """A tuple key that uniquely describes this field.
+
+ Used to compute this instance's hashcode and evaluate equality.
+
+ Returns:
+ Tuple: The contents of this :class:`~google.cloud.bigquery.query.ScalarQueryParameter`.
+ """
+ return (self.name, self.type_.upper(), self.value)
+
+ def __eq__(self, other):
+ if not isinstance(other, ScalarQueryParameter):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return "ScalarQueryParameter{}".format(self._key())
+
+
+class ArrayQueryParameter(_AbstractQueryParameter):
+ """Named / positional query parameters for array values.
+
+ Args:
+ name (Optional[str]):
+ Parameter name, used via ``@foo`` syntax. If None, the
+ parameter can only be addressed via position (``?``).
+
+ array_type (Union[str, ScalarQueryParameterType, StructQueryParameterType]):
+ The type of array elements. If given as a string, it must be one of
+ `'STRING'`, `'INT64'`, `'FLOAT64'`, `'NUMERIC'`, `'BIGNUMERIC'`, `'BOOL'`,
+ `'TIMESTAMP'`, `'DATE'`, or `'STRUCT'`/`'RECORD'`.
+ If the type is ``'STRUCT'``/``'RECORD'`` and ``values`` is empty,
+ the exact item type cannot be deduced, thus a ``StructQueryParameterType``
+ instance needs to be passed in.
+
+ values (List[appropriate type]): The parameter array values.
+ """
+
+ def __init__(self, name, array_type, values) -> None:
+ self.name = name
+ self.values = values
+
+ if isinstance(array_type, str):
+ if not values and array_type in {"RECORD", "STRUCT"}:
+ raise ValueError(
+ "Missing detailed struct item type info for an empty array, "
+ "please provide a StructQueryParameterType instance."
+ )
+ self.array_type = array_type
+
+ @classmethod
+ def positional(cls, array_type: str, values: list) -> "ArrayQueryParameter":
+ """Factory for positional parameters.
+
+ Args:
+ array_type (Union[str, ScalarQueryParameterType, StructQueryParameterType]):
+ The type of array elements. If given as a string, it must be one of
+ `'STRING'`, `'INT64'`, `'FLOAT64'`, `'NUMERIC'`, `'BIGNUMERIC'`,
+ `'BOOL'`, `'TIMESTAMP'`, `'DATE'`, or `'STRUCT'`/`'RECORD'`.
+ If the type is ``'STRUCT'``/``'RECORD'`` and ``values`` is empty,
+ the exact item type cannot be deduced, thus a ``StructQueryParameterType``
+ instance needs to be passed in.
+
+ values (List[appropriate type]): The parameter array values.
+
+ Returns:
+ google.cloud.bigquery.query.ArrayQueryParameter: Instance without name
+ """
+ return cls(None, array_type, values)
+
+ @classmethod
+ def _from_api_repr_struct(cls, resource):
+ name = resource.get("name")
+ converted = []
+ # We need to flatten the array to use the StructQueryParameter
+ # parse code.
+ resource_template = {
+ # The arrayType includes all the types of the fields of the STRUCT
+ "parameterType": resource["parameterType"]["arrayType"]
+ }
+ for array_value in resource["parameterValue"]["arrayValues"]:
+ struct_resource = copy.deepcopy(resource_template)
+ struct_resource["parameterValue"] = array_value
+ struct_value = StructQueryParameter.from_api_repr(struct_resource)
+ converted.append(struct_value)
+ return cls(name, "STRUCT", converted)
+
+ @classmethod
+ def _from_api_repr_scalar(cls, resource):
+ name = resource.get("name")
+ array_type = resource["parameterType"]["arrayType"]["type"]
+ parameter_value = resource.get("parameterValue", {})
+ array_values = parameter_value.get("arrayValues", ())
+ values = [value["value"] for value in array_values]
+ converted = [
+ _QUERY_PARAMS_FROM_JSON[array_type](value, None) for value in values
+ ]
+ return cls(name, array_type, converted)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "ArrayQueryParameter":
+ """Factory: construct parameter from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ google.cloud.bigquery.query.ArrayQueryParameter: Instance
+ """
+ array_type = resource["parameterType"]["arrayType"]["type"]
+ if array_type == "STRUCT":
+ return cls._from_api_repr_struct(resource)
+ return cls._from_api_repr_scalar(resource)
+
+ def to_api_repr(self) -> dict:
+ """Construct JSON API representation for the parameter.
+
+ Returns:
+ Dict: JSON mapping
+ """
+ values = self.values
+
+ if self.array_type in {"RECORD", "STRUCT"} or isinstance(
+ self.array_type, StructQueryParameterType
+ ):
+ reprs = [value.to_api_repr() for value in values]
+ a_values = [repr_["parameterValue"] for repr_ in reprs]
+
+ if reprs:
+ a_type = reprs[0]["parameterType"]
+ else:
+ # This assertion always evaluates to True because the
+ # constructor disallows STRUCT/RECORD type defined as a
+ # string with empty values.
+ assert isinstance(self.array_type, StructQueryParameterType)
+ a_type = self.array_type.to_api_repr()
+ else:
+ # Scalar array item type.
+ if isinstance(self.array_type, str):
+ a_type = {"type": self.array_type}
+ else:
+ a_type = self.array_type.to_api_repr()
+
+ converter = _SCALAR_VALUE_TO_JSON_PARAM.get(
+ a_type["type"], lambda value: value
+ )
+ values = [converter(value) for value in values] # type: ignore
+ a_values = [{"value": value} for value in values]
+
+ resource = {
+ "parameterType": {"type": "ARRAY", "arrayType": a_type},
+ "parameterValue": {"arrayValues": a_values},
+ }
+ if self.name is not None:
+ resource["name"] = self.name
+
+ return resource
+
+ def _key(self):
+ """A tuple key that uniquely describes this field.
+
+ Used to compute this instance's hashcode and evaluate equality.
+
+ Returns:
+ Tuple: The contents of this :class:`~google.cloud.bigquery.query.ArrayQueryParameter`.
+ """
+ if isinstance(self.array_type, str):
+ item_type = self.array_type
+ elif isinstance(self.array_type, ScalarQueryParameterType):
+ item_type = self.array_type._type
+ else:
+ item_type = "STRUCT"
+
+ return (self.name, item_type.upper(), self.values)
+
+ def __eq__(self, other):
+ if not isinstance(other, ArrayQueryParameter):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return "ArrayQueryParameter{}".format(self._key())
+
+
+class StructQueryParameter(_AbstractQueryParameter):
+ """Name / positional query parameters for struct values.
+
+ Args:
+ name (Optional[str]):
+ Parameter name, used via ``@foo`` syntax. If None, the
+ parameter can only be addressed via position (``?``).
+
+ sub_params (Union[Tuple[
+ google.cloud.bigquery.query.ScalarQueryParameter,
+ google.cloud.bigquery.query.ArrayQueryParameter,
+ google.cloud.bigquery.query.StructQueryParameter
+ ]]): The sub-parameters for the struct
+ """
+
+ def __init__(self, name, *sub_params) -> None:
+ self.name = name
+ self.struct_types: Dict[str, Any] = OrderedDict()
+ self.struct_values: Dict[str, Any] = {}
+
+ types = self.struct_types
+ values = self.struct_values
+ for sub in sub_params:
+ if isinstance(sub, self.__class__):
+ types[sub.name] = "STRUCT"
+ values[sub.name] = sub
+ elif isinstance(sub, ArrayQueryParameter):
+ types[sub.name] = "ARRAY"
+ values[sub.name] = sub
+ else:
+ types[sub.name] = sub.type_
+ values[sub.name] = sub.value
+
+ @classmethod
+ def positional(cls, *sub_params):
+ """Factory for positional parameters.
+
+ Args:
+ sub_params (Union[Tuple[
+ google.cloud.bigquery.query.ScalarQueryParameter,
+ google.cloud.bigquery.query.ArrayQueryParameter,
+ google.cloud.bigquery.query.StructQueryParameter
+ ]]): The sub-parameters for the struct
+
+ Returns:
+ google.cloud.bigquery.query.StructQueryParameter: Instance without name
+ """
+ return cls(None, *sub_params)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "StructQueryParameter":
+ """Factory: construct parameter from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ google.cloud.bigquery.query.StructQueryParameter: Instance
+ """
+ name = resource.get("name")
+ instance = cls(name)
+ type_resources = {}
+ types = instance.struct_types
+ for item in resource["parameterType"]["structTypes"]:
+ types[item["name"]] = item["type"]["type"]
+ type_resources[item["name"]] = item["type"]
+ struct_values = resource["parameterValue"]["structValues"]
+ for key, value in struct_values.items():
+ type_ = types[key]
+ converted: Optional[Union[ArrayQueryParameter, StructQueryParameter]] = None
+ if type_ == "STRUCT":
+ struct_resource = {
+ "name": key,
+ "parameterType": type_resources[key],
+ "parameterValue": value,
+ }
+ converted = StructQueryParameter.from_api_repr(struct_resource)
+ elif type_ == "ARRAY":
+ struct_resource = {
+ "name": key,
+ "parameterType": type_resources[key],
+ "parameterValue": value,
+ }
+ converted = ArrayQueryParameter.from_api_repr(struct_resource)
+ else:
+ value = value["value"]
+ converted = _QUERY_PARAMS_FROM_JSON[type_](value, None)
+ instance.struct_values[key] = converted
+ return instance
+
+ def to_api_repr(self) -> dict:
+ """Construct JSON API representation for the parameter.
+
+ Returns:
+ Dict: JSON mapping
+ """
+ s_types = {}
+ values = {}
+ for name, value in self.struct_values.items():
+ type_ = self.struct_types[name]
+ if type_ in ("STRUCT", "ARRAY"):
+ repr_ = value.to_api_repr()
+ s_types[name] = {"name": name, "type": repr_["parameterType"]}
+ values[name] = repr_["parameterValue"]
+ else:
+ s_types[name] = {"name": name, "type": {"type": type_}}
+ converter = _SCALAR_VALUE_TO_JSON_PARAM.get(type_, lambda value: value)
+ values[name] = {"value": converter(value)}
+
+ resource = {
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [s_types[key] for key in self.struct_types],
+ },
+ "parameterValue": {"structValues": values},
+ }
+ if self.name is not None:
+ resource["name"] = self.name
+ return resource
+
+ def _key(self):
+ """A tuple key that uniquely describes this field.
+
+ Used to compute this instance's hashcode and evaluate equality.
+
+ Returns:
+ Tuple: The contents of this :class:`~google.cloud.bigquery.ArrayQueryParameter`.
+ """
+ return (self.name, self.struct_types, self.struct_values)
+
+ def __eq__(self, other):
+ if not isinstance(other, StructQueryParameter):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return "StructQueryParameter{}".format(self._key())
+
+
+class RangeQueryParameter(_AbstractQueryParameter):
+ """Named / positional query parameters for range values.
+
+ Args:
+ range_element_type (Union[str, RangeQueryParameterType]):
+ The type of range elements. It must be one of 'TIMESTAMP',
+ 'DATE', or 'DATETIME'.
+
+ start (Optional[Union[ScalarQueryParameter, str]]):
+ The start of the range value. Must be the same type as
+ range_element_type. If not provided, it's interpreted as UNBOUNDED.
+
+ end (Optional[Union[ScalarQueryParameter, str]]):
+ The end of the range value. Must be the same type as
+ range_element_type. If not provided, it's interpreted as UNBOUNDED.
+
+ name (Optional[str]):
+ Parameter name, used via ``@foo`` syntax. If None, the
+ parameter can only be addressed via position (``?``).
+ """
+
+ @classmethod
+ def _parse_range_element_type(self, range_element_type):
+ if isinstance(range_element_type, str):
+ if range_element_type not in _SUPPORTED_RANGE_ELEMENTS:
+ raise ValueError(
+ "If given as a string, range_element_type must be one of "
+ f"'TIMESTAMP', 'DATE', or 'DATETIME'. Got {range_element_type}."
+ )
+ return RangeQueryParameterType(range_element_type)
+ elif isinstance(range_element_type, RangeQueryParameterType):
+ if range_element_type.type_._type not in _SUPPORTED_RANGE_ELEMENTS:
+ raise ValueError(
+ "If given as a RangeQueryParameterType object, "
+ "range_element_type must be one of 'TIMESTAMP', 'DATE', "
+ "or 'DATETIME' type."
+ )
+ return range_element_type
+ else:
+ raise ValueError(
+ "range_element_type must be a string or "
+ "RangeQueryParameterType object, of 'TIMESTAMP', 'DATE', "
+ "or 'DATETIME' type. Got "
+ f"{type(range_element_type)}:{range_element_type}"
+ )
+
+ @classmethod
+ def _serialize_range_element_value(self, value, type_):
+ if value is None or isinstance(value, str):
+ return value
+ else:
+ converter = _SCALAR_VALUE_TO_JSON_PARAM.get(type_)
+ if converter is not None:
+ return converter(value) # type: ignore
+ else:
+ raise ValueError(
+ f"Cannot convert range element value from type {type_}, "
+ "must be one of the strings 'TIMESTAMP', 'DATE' "
+ "'DATETIME' or a RangeQueryParameterType object."
+ )
+
+ def __init__(
+ self,
+ range_element_type,
+ start=None,
+ end=None,
+ name=None,
+ ):
+ self.name = name
+ self.range_element_type = self._parse_range_element_type(range_element_type)
+ print(self.range_element_type.type_._type)
+ self.start = start
+ self.end = end
+
+ @classmethod
+ def positional(
+ cls, range_element_type, start=None, end=None
+ ) -> "RangeQueryParameter":
+ """Factory for positional parameters.
+
+ Args:
+ range_element_type (Union[str, RangeQueryParameterType]):
+ The type of range elements. It must be one of `'TIMESTAMP'`,
+ `'DATE'`, or `'DATETIME'`.
+
+ start (Optional[Union[ScalarQueryParameter, str]]):
+ The start of the range value. Must be the same type as
+ range_element_type. If not provided, it's interpreted as
+ UNBOUNDED.
+
+ end (Optional[Union[ScalarQueryParameter, str]]):
+ The end of the range value. Must be the same type as
+ range_element_type. If not provided, it's interpreted as
+ UNBOUNDED.
+
+ Returns:
+ google.cloud.bigquery.query.RangeQueryParameter: Instance without
+ name.
+ """
+ return cls(range_element_type, start, end)
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "RangeQueryParameter":
+ """Factory: construct parameter from JSON resource.
+
+ Args:
+ resource (Dict): JSON mapping of parameter
+
+ Returns:
+ google.cloud.bigquery.query.RangeQueryParameter: Instance
+ """
+ name = resource.get("name")
+ range_element_type = (
+ resource.get("parameterType", {}).get("rangeElementType", {}).get("type")
+ )
+ range_value = resource.get("parameterValue", {}).get("rangeValue", {})
+ start = range_value.get("start", {}).get("value")
+ end = range_value.get("end", {}).get("value")
+
+ return cls(range_element_type, start=start, end=end, name=name)
+
+ def to_api_repr(self) -> dict:
+ """Construct JSON API representation for the parameter.
+
+ Returns:
+ Dict: JSON mapping
+ """
+ range_element_type = self.range_element_type.to_api_repr()
+ type_ = self.range_element_type.type_._type
+ start = self._serialize_range_element_value(self.start, type_)
+ end = self._serialize_range_element_value(self.end, type_)
+ resource = {
+ "parameterType": range_element_type,
+ "parameterValue": {
+ "rangeValue": {
+ "start": {"value": start},
+ "end": {"value": end},
+ },
+ },
+ }
+
+ # distinguish between name not provided vs. name being empty string
+ if self.name is not None:
+ resource["name"] = self.name
+
+ return resource
+
+ def _key(self):
+ """A tuple key that uniquely describes this field.
+
+ Used to compute this instance's hashcode and evaluate equality.
+
+ Returns:
+ Tuple: The contents of this
+ :class:`~google.cloud.bigquery.query.RangeQueryParameter`.
+ """
+
+ range_element_type = self.range_element_type.to_api_repr()
+ return (self.name, range_element_type, self.start, self.end)
+
+ def __eq__(self, other):
+ if not isinstance(other, RangeQueryParameter):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return "RangeQueryParameter{}".format(self._key())
+
+
+class SqlParameterScalarTypes:
+ """Supported scalar SQL query parameter types as type objects."""
+
+ BOOL = ScalarQueryParameterType("BOOL")
+ BOOLEAN = ScalarQueryParameterType("BOOL")
+ BIGDECIMAL = ScalarQueryParameterType("BIGNUMERIC")
+ BIGNUMERIC = ScalarQueryParameterType("BIGNUMERIC")
+ BYTES = ScalarQueryParameterType("BYTES")
+ DATE = ScalarQueryParameterType("DATE")
+ DATETIME = ScalarQueryParameterType("DATETIME")
+ DECIMAL = ScalarQueryParameterType("NUMERIC")
+ FLOAT = ScalarQueryParameterType("FLOAT64")
+ FLOAT64 = ScalarQueryParameterType("FLOAT64")
+ GEOGRAPHY = ScalarQueryParameterType("GEOGRAPHY")
+ INT64 = ScalarQueryParameterType("INT64")
+ INTEGER = ScalarQueryParameterType("INT64")
+ NUMERIC = ScalarQueryParameterType("NUMERIC")
+ STRING = ScalarQueryParameterType("STRING")
+ TIME = ScalarQueryParameterType("TIME")
+ TIMESTAMP = ScalarQueryParameterType("TIMESTAMP")
+
+
+class _QueryResults(object):
+ """Results of a query.
+
+ See:
+ https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs/getQueryResults
+ """
+
+ def __init__(self, properties):
+ self._properties = {}
+ self._set_properties(properties)
+
+ @classmethod
+ def from_api_repr(cls, api_response):
+ return cls(api_response)
+
+ @property
+ def project(self):
+ """Project bound to the query job.
+
+ Returns:
+ str: The project that the query job is associated with.
+ """
+ return self._properties.get("jobReference", {}).get("projectId")
+
+ @property
+ def cache_hit(self):
+ """Query results served from cache.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.cache_hit
+
+ Returns:
+ Optional[bool]:
+ True if the query results were served from cache (None
+ until set by the server).
+ """
+ return self._properties.get("cacheHit")
+
+ @property
+ def complete(self):
+ """Server completed query.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.job_complete
+
+ Returns:
+ Optional[bool]:
+ True if the query completed on the server (None
+ until set by the server).
+ """
+ return self._properties.get("jobComplete")
+
+ @property
+ def errors(self):
+ """Errors generated by the query.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.errors
+
+ Returns:
+ Optional[List[Mapping]]:
+ Mappings describing errors generated on the server (None
+ until set by the server).
+ """
+ return self._properties.get("errors")
+
+ @property
+ def job_id(self):
+ """Job ID of the query job these results are from.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.job_reference
+
+ Returns:
+ str: Job ID of the query job.
+ """
+ return self._properties.get("jobReference", {}).get("jobId")
+
+ @property
+ def location(self):
+ """Location of the query job these results are from.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.job_reference
+
+ Returns:
+ str: Job ID of the query job.
+ """
+ return self._properties.get("jobReference", {}).get("location")
+
+ @property
+ def query_id(self) -> Optional[str]:
+ """[Preview] ID of a completed query.
+
+ This ID is auto-generated and not guaranteed to be populated.
+ """
+ return self._properties.get("queryId")
+
+ @property
+ def page_token(self):
+ """Token for fetching next bach of results.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.page_token
+
+ Returns:
+ Optional[str]: Token generated on the server (None until set by the server).
+ """
+ return self._properties.get("pageToken")
+
+ @property
+ def total_rows(self):
+ """Total number of rows returned by the query.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.total_rows
+
+ Returns:
+ Optional[int]: Count generated on the server (None until set by the server).
+ """
+ total_rows = self._properties.get("totalRows")
+ if total_rows is not None:
+ return int(total_rows)
+
+ @property
+ def total_bytes_processed(self):
+ """Total number of bytes processed by the query.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.total_bytes_processed
+
+ Returns:
+ Optional[int]: Count generated on the server (None until set by the server).
+ """
+ total_bytes_processed = self._properties.get("totalBytesProcessed")
+ if total_bytes_processed is not None:
+ return int(total_bytes_processed)
+
+ @property
+ def num_dml_affected_rows(self):
+ """Total number of rows affected by a DML query.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.num_dml_affected_rows
+
+ Returns:
+ Optional[int]: Count generated on the server (None until set by the server).
+ """
+ num_dml_affected_rows = self._properties.get("numDmlAffectedRows")
+ if num_dml_affected_rows is not None:
+ return int(num_dml_affected_rows)
+
+ @property
+ def rows(self):
+ """Query results.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.rows
+
+ Returns:
+ Optional[List[google.cloud.bigquery.table.Row]]:
+ Rows containing the results of the query.
+ """
+ return _rows_from_json(self._properties.get("rows", ()), self.schema)
+
+ @property
+ def schema(self):
+ """Schema for query results.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.schema
+
+ Returns:
+ Optional[List[SchemaField]]:
+ Fields describing the schema (None until set by the server).
+ """
+ return _parse_schema_resource(self._properties.get("schema", {}))
+
+ def _set_properties(self, api_response):
+ """Update properties from resource in body of ``api_response``
+
+ Args:
+ api_response (Dict): Response returned from an API call
+ """
+ self._properties.clear()
+ self._properties.update(copy.deepcopy(api_response))
+
+
+def _query_param_from_api_repr(resource):
+ """Helper: Construct concrete query parameter from JSON resource."""
+ qp_type = resource["parameterType"]
+ if "arrayType" in qp_type:
+ klass = ArrayQueryParameter
+ elif "structTypes" in qp_type:
+ klass = StructQueryParameter
+ else:
+ klass = ScalarQueryParameter
+ return klass.from_api_repr(resource)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/retry.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..10958980dc7a196f504e4303dc9add5f59ece5d5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/retry.py
@@ -0,0 +1,153 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.api_core import exceptions
+from google.api_core import retry
+import google.api_core.future.polling
+from google.auth import exceptions as auth_exceptions # type: ignore
+import requests.exceptions
+
+
+_RETRYABLE_REASONS = frozenset(
+ ["rateLimitExceeded", "backendError", "internalError", "badGateway"]
+)
+
+_UNSTRUCTURED_RETRYABLE_TYPES = (
+ ConnectionError,
+ exceptions.TooManyRequests,
+ exceptions.InternalServerError,
+ exceptions.BadGateway,
+ exceptions.ServiceUnavailable,
+ requests.exceptions.ChunkedEncodingError,
+ requests.exceptions.ConnectionError,
+ requests.exceptions.Timeout,
+ auth_exceptions.TransportError,
+)
+
+_DEFAULT_RETRY_DEADLINE = 10.0 * 60.0 # 10 minutes
+
+# Ambiguous errors (e.g. internalError, backendError, rateLimitExceeded) retry
+# until the full `_DEFAULT_RETRY_DEADLINE`. This is because the
+# `jobs.getQueryResults` REST API translates a job failure into an HTTP error.
+#
+# TODO(https://github.com/googleapis/python-bigquery/issues/1903): Investigate
+# if we can fail early for ambiguous errors in `QueryJob.result()`'s call to
+# the `jobs.getQueryResult` API.
+#
+# We need `_DEFAULT_JOB_DEADLINE` to be some multiple of
+# `_DEFAULT_RETRY_DEADLINE` to allow for a few retries after the retry
+# timeout is reached.
+#
+# Note: This multiple should actually be a multiple of
+# (2 * _DEFAULT_RETRY_DEADLINE). After an ambiguous exception, the first
+# call from `job_retry()` refreshes the job state without actually restarting
+# the query. The second `job_retry()` actually restarts the query. For a more
+# detailed explanation, see the comments where we set `restart_query_job = True`
+# in `QueryJob.result()`'s inner `is_job_done()` function.
+_DEFAULT_JOB_DEADLINE = 2.0 * (2.0 * _DEFAULT_RETRY_DEADLINE)
+
+
+def _should_retry(exc):
+ """Predicate for determining when to retry.
+
+ We retry if and only if the 'reason' is 'backendError'
+ or 'rateLimitExceeded'.
+ """
+ if not hasattr(exc, "errors") or len(exc.errors) == 0:
+ # Check for unstructured error returns, e.g. from GFE
+ return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
+
+ reason = exc.errors[0]["reason"]
+ return reason in _RETRYABLE_REASONS
+
+
+DEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=_DEFAULT_RETRY_DEADLINE)
+"""The default retry object.
+
+Any method with a ``retry`` parameter will be retried automatically,
+with reasonable defaults. To disable retry, pass ``retry=None``.
+To modify the default retry behavior, call a ``with_XXX`` method
+on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,
+pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.
+"""
+
+# Note: Take care when updating DEFAULT_TIMEOUT to anything but None. We
+# briefly had a default timeout, but even setting it at more than twice the
+# theoretical server-side default timeout of 2 minutes was not enough for
+# complex queries. See:
+# https://github.com/googleapis/python-bigquery/issues/970#issuecomment-921934647
+DEFAULT_TIMEOUT = None
+"""The default API timeout.
+
+This is the time to wait per request. To adjust the total wait time, set a
+deadline on the retry object.
+"""
+
+job_retry_reasons = (
+ "rateLimitExceeded",
+ "backendError",
+ "internalError",
+ "jobRateLimitExceeded",
+)
+
+
+def _job_should_retry(exc):
+ # Sometimes we have ambiguous errors, such as 'backendError' which could
+ # be due to an API problem or a job problem. For these, make sure we retry
+ # our is_job_done() function.
+ #
+ # Note: This won't restart the job unless we know for sure it's because of
+ # the job status and set restart_query_job = True in that loop. This means
+ # that we might end up calling this predicate twice for the same job
+ # but from different paths: (1) from jobs.getQueryResults RetryError and
+ # (2) from translating the job error from the body of a jobs.get response.
+ #
+ # Note: If we start retrying job types other than queries where we don't
+ # call the problematic getQueryResults API to check the status, we need
+ # to provide a different predicate, as there shouldn't be ambiguous
+ # errors in those cases.
+ if isinstance(exc, exceptions.RetryError):
+ exc = exc.cause
+
+ # Per https://github.com/googleapis/python-bigquery/issues/1929, sometimes
+ # retriable errors make their way here. Because of the separate
+ # `restart_query_job` logic to make sure we aren't restarting non-failed
+ # jobs, it should be safe to continue and not totally fail our attempt at
+ # waiting for the query to complete.
+ if _should_retry(exc):
+ return True
+
+ if not hasattr(exc, "errors") or len(exc.errors) == 0:
+ return False
+
+ reason = exc.errors[0]["reason"]
+ return reason in job_retry_reasons
+
+
+DEFAULT_JOB_RETRY = retry.Retry(
+ predicate=_job_should_retry, deadline=_DEFAULT_JOB_DEADLINE
+)
+"""
+The default job retry object.
+"""
+
+DEFAULT_GET_JOB_TIMEOUT = 128
+"""
+Default timeout for Client.get_job().
+"""
+
+POLLING_DEFAULT_VALUE = google.api_core.future.polling.PollingFuture._DEFAULT_VALUE
+"""
+Default value defined in google.api_core.future.polling.PollingFuture.
+"""
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/routine/__init__.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/routine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e576b0d49c0f79eab18fdddb7534b4621b7fc50e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/routine/__init__.py
@@ -0,0 +1,33 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User-Defined Routines."""
+
+
+from google.cloud.bigquery.enums import DeterminismLevel
+from google.cloud.bigquery.routine.routine import Routine
+from google.cloud.bigquery.routine.routine import RoutineArgument
+from google.cloud.bigquery.routine.routine import RoutineReference
+from google.cloud.bigquery.routine.routine import RoutineType
+from google.cloud.bigquery.routine.routine import RemoteFunctionOptions
+
+
+__all__ = (
+ "DeterminismLevel",
+ "Routine",
+ "RoutineArgument",
+ "RoutineReference",
+ "RoutineType",
+ "RemoteFunctionOptions",
+)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/routine/routine.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/routine/routine.py
new file mode 100644
index 0000000000000000000000000000000000000000..83cb6362d95077c38b2f82549638386350eb0a24
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/routine/routine.py
@@ -0,0 +1,738 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define resources for the BigQuery Routines API."""
+
+from typing import Any, Dict, Optional
+
+import google.cloud._helpers # type: ignore
+from google.cloud.bigquery import _helpers
+from google.cloud.bigquery.standard_sql import StandardSqlDataType
+from google.cloud.bigquery.standard_sql import StandardSqlTableType
+
+
+class RoutineType:
+ """The fine-grained type of the routine.
+
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#routinetype
+
+ .. versionadded:: 2.22.0
+ """
+
+ ROUTINE_TYPE_UNSPECIFIED = "ROUTINE_TYPE_UNSPECIFIED"
+ SCALAR_FUNCTION = "SCALAR_FUNCTION"
+ PROCEDURE = "PROCEDURE"
+ TABLE_VALUED_FUNCTION = "TABLE_VALUED_FUNCTION"
+
+
+class Routine(object):
+ """Resource representing a user-defined routine.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines
+
+ Args:
+ routine_ref (Union[str, google.cloud.bigquery.routine.RoutineReference]):
+ A pointer to a routine. If ``routine_ref`` is a string, it must
+ included a project ID, dataset ID, and routine ID, each separated
+ by ``.``.
+ ``**kwargs`` (Dict):
+ Initial property values.
+ """
+
+ _PROPERTY_TO_API_FIELD = {
+ "arguments": "arguments",
+ "body": "definitionBody",
+ "created": "creationTime",
+ "etag": "etag",
+ "imported_libraries": "importedLibraries",
+ "language": "language",
+ "modified": "lastModifiedTime",
+ "reference": "routineReference",
+ "return_type": "returnType",
+ "return_table_type": "returnTableType",
+ "type_": "routineType",
+ "description": "description",
+ "determinism_level": "determinismLevel",
+ "remote_function_options": "remoteFunctionOptions",
+ "data_governance_type": "dataGovernanceType",
+ }
+
+ def __init__(self, routine_ref, **kwargs) -> None:
+ if isinstance(routine_ref, str):
+ routine_ref = RoutineReference.from_string(routine_ref)
+
+ self._properties = {"routineReference": routine_ref.to_api_repr()}
+ for property_name in kwargs:
+ setattr(self, property_name, kwargs[property_name])
+
+ @property
+ def reference(self):
+ """google.cloud.bigquery.routine.RoutineReference: Reference
+ describing the ID of this routine.
+ """
+ return RoutineReference.from_api_repr(
+ self._properties[self._PROPERTY_TO_API_FIELD["reference"]]
+ )
+
+ @property
+ def path(self):
+ """str: URL path for the routine's APIs."""
+ return self.reference.path
+
+ @property
+ def project(self):
+ """str: ID of the project containing the routine."""
+ return self.reference.project
+
+ @property
+ def dataset_id(self):
+ """str: ID of dataset containing the routine."""
+ return self.reference.dataset_id
+
+ @property
+ def routine_id(self):
+ """str: The routine ID."""
+ return self.reference.routine_id
+
+ @property
+ def etag(self):
+ """str: ETag for the resource (:data:`None` until set from the
+ server).
+
+ Read-only.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["etag"])
+
+ @property
+ def type_(self):
+ """str: The fine-grained type of the routine.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#RoutineType
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["type_"])
+
+ @type_.setter
+ def type_(self, value):
+ self._properties[self._PROPERTY_TO_API_FIELD["type_"]] = value
+
+ @property
+ def created(self):
+ """Optional[datetime.datetime]: Datetime at which the routine was
+ created (:data:`None` until set from the server).
+
+ Read-only.
+ """
+ value = self._properties.get(self._PROPERTY_TO_API_FIELD["created"])
+ if value is not None and value != 0:
+ # value will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(value)
+ )
+
+ @property
+ def modified(self):
+ """Optional[datetime.datetime]: Datetime at which the routine was
+ last modified (:data:`None` until set from the server).
+
+ Read-only.
+ """
+ value = self._properties.get(self._PROPERTY_TO_API_FIELD["modified"])
+ if value is not None and value != 0:
+ # value will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(value)
+ )
+
+ @property
+ def language(self):
+ """Optional[str]: The language of the routine.
+
+ Defaults to ``SQL``.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["language"])
+
+ @language.setter
+ def language(self, value):
+ self._properties[self._PROPERTY_TO_API_FIELD["language"]] = value
+
+ @property
+ def arguments(self):
+ """List[google.cloud.bigquery.routine.RoutineArgument]: Input/output
+ argument of a function or a stored procedure.
+
+ In-place modification is not supported. To set, replace the entire
+ property value with the modified list of
+ :class:`~google.cloud.bigquery.routine.RoutineArgument` objects.
+ """
+ resources = self._properties.get(self._PROPERTY_TO_API_FIELD["arguments"], [])
+ return [RoutineArgument.from_api_repr(resource) for resource in resources]
+
+ @arguments.setter
+ def arguments(self, value):
+ if not value:
+ resource = []
+ else:
+ resource = [argument.to_api_repr() for argument in value]
+ self._properties[self._PROPERTY_TO_API_FIELD["arguments"]] = resource
+
+ @property
+ def return_type(self):
+ """google.cloud.bigquery.StandardSqlDataType: Return type of
+ the routine.
+
+ If absent, the return type is inferred from
+ :attr:`~google.cloud.bigquery.routine.Routine.body` at query time in
+ each query that references this routine. If present, then the
+ evaluated result will be cast to the specified returned type at query
+ time.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#Routine.FIELDS.return_type
+ """
+ resource = self._properties.get(self._PROPERTY_TO_API_FIELD["return_type"])
+ if not resource:
+ return resource
+
+ return StandardSqlDataType.from_api_repr(resource)
+
+ @return_type.setter
+ def return_type(self, value: StandardSqlDataType):
+ resource = None if not value else value.to_api_repr()
+ self._properties[self._PROPERTY_TO_API_FIELD["return_type"]] = resource
+
+ @property
+ def return_table_type(self) -> Optional[StandardSqlTableType]:
+ """The return type of a Table Valued Function (TVF) routine.
+
+ .. versionadded:: 2.22.0
+ """
+ resource = self._properties.get(
+ self._PROPERTY_TO_API_FIELD["return_table_type"]
+ )
+ if not resource:
+ return resource
+
+ return StandardSqlTableType.from_api_repr(resource)
+
+ @return_table_type.setter
+ def return_table_type(self, value: Optional[StandardSqlTableType]):
+ if not value:
+ resource = None
+ else:
+ resource = value.to_api_repr()
+
+ self._properties[self._PROPERTY_TO_API_FIELD["return_table_type"]] = resource
+
+ @property
+ def imported_libraries(self):
+ """List[str]: The path of the imported JavaScript libraries.
+
+ The :attr:`~google.cloud.bigquery.routine.Routine.language` must
+ equal ``JAVACRIPT``.
+
+ Examples:
+ Set the ``imported_libraries`` to a list of Google Cloud Storage
+ URIs.
+
+ .. code-block:: python
+
+ routine = bigquery.Routine("proj.dataset.routine_id")
+ routine.imported_libraries = [
+ "gs://cloud-samples-data/bigquery/udfs/max-value.js",
+ ]
+ """
+ return self._properties.get(
+ self._PROPERTY_TO_API_FIELD["imported_libraries"], []
+ )
+
+ @imported_libraries.setter
+ def imported_libraries(self, value):
+ if not value:
+ resource = []
+ else:
+ resource = value
+ self._properties[self._PROPERTY_TO_API_FIELD["imported_libraries"]] = resource
+
+ @property
+ def body(self):
+ """str: The body of the routine."""
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["body"])
+
+ @body.setter
+ def body(self, value):
+ self._properties[self._PROPERTY_TO_API_FIELD["body"]] = value
+
+ @property
+ def description(self):
+ """Optional[str]: Description of the routine (defaults to
+ :data:`None`).
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["description"])
+
+ @description.setter
+ def description(self, value):
+ self._properties[self._PROPERTY_TO_API_FIELD["description"]] = value
+
+ @property
+ def determinism_level(self):
+ """Optional[str]: (experimental) The determinism level of the JavaScript UDF
+ if defined.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["determinism_level"])
+
+ @determinism_level.setter
+ def determinism_level(self, value):
+ self._properties[self._PROPERTY_TO_API_FIELD["determinism_level"]] = value
+
+ @property
+ def remote_function_options(self):
+ """Optional[google.cloud.bigquery.routine.RemoteFunctionOptions]:
+ Configures remote function options for a routine.
+
+ Raises:
+ ValueError:
+ If the value is not
+ :class:`~google.cloud.bigquery.routine.RemoteFunctionOptions` or
+ :data:`None`.
+ """
+ prop = self._properties.get(
+ self._PROPERTY_TO_API_FIELD["remote_function_options"]
+ )
+ if prop is not None:
+ return RemoteFunctionOptions.from_api_repr(prop)
+
+ @remote_function_options.setter
+ def remote_function_options(self, value):
+ api_repr = value
+ if isinstance(value, RemoteFunctionOptions):
+ api_repr = value.to_api_repr()
+ elif value is not None:
+ raise ValueError(
+ "value must be google.cloud.bigquery.routine.RemoteFunctionOptions "
+ "or None"
+ )
+ self._properties[
+ self._PROPERTY_TO_API_FIELD["remote_function_options"]
+ ] = api_repr
+
+ @property
+ def data_governance_type(self):
+ """Optional[str]: If set to ``DATA_MASKING``, the function is validated
+ and made available as a masking function.
+
+ Raises:
+ ValueError:
+ If the value is not :data:`string` or :data:`None`.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["data_governance_type"])
+
+ @data_governance_type.setter
+ def data_governance_type(self, value):
+ if value is not None and not isinstance(value, str):
+ raise ValueError(
+ "invalid data_governance_type, must be a string or `None`."
+ )
+ self._properties[self._PROPERTY_TO_API_FIELD["data_governance_type"]] = value
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "Routine":
+ """Factory: construct a routine given its API representation.
+
+ Args:
+ resource (Dict[str, object]):
+ Resource, as returned from the API.
+
+ Returns:
+ google.cloud.bigquery.routine.Routine:
+ Python object, as parsed from ``resource``.
+ """
+ ref = cls(RoutineReference.from_api_repr(resource["routineReference"]))
+ ref._properties = resource
+ return ref
+
+ def to_api_repr(self) -> dict:
+ """Construct the API resource representation of this routine.
+
+ Returns:
+ Dict[str, object]: Routine represented as an API resource.
+ """
+ return self._properties
+
+ def _build_resource(self, filter_fields):
+ """Generate a resource for ``update``."""
+ return _helpers._build_resource_from_properties(self, filter_fields)
+
+ def __repr__(self):
+ return "Routine('{}.{}.{}')".format(
+ self.project, self.dataset_id, self.routine_id
+ )
+
+
+class RoutineArgument(object):
+ """Input/output argument of a function or a stored procedure.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#argument
+
+ Args:
+ ``**kwargs`` (Dict):
+ Initial property values.
+ """
+
+ _PROPERTY_TO_API_FIELD = {
+ "data_type": "dataType",
+ "kind": "argumentKind",
+ # Even though it's not necessary for field mapping to map when the
+ # property name equals the resource name, we add these here so that we
+ # have an exhaustive list of all properties.
+ "name": "name",
+ "mode": "mode",
+ }
+
+ def __init__(self, **kwargs) -> None:
+ self._properties: Dict[str, Any] = {}
+ for property_name in kwargs:
+ setattr(self, property_name, kwargs[property_name])
+
+ @property
+ def name(self):
+ """Optional[str]: Name of this argument.
+
+ Can be absent for function return argument.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["name"])
+
+ @name.setter
+ def name(self, value):
+ self._properties[self._PROPERTY_TO_API_FIELD["name"]] = value
+
+ @property
+ def kind(self):
+ """Optional[str]: The kind of argument, for example ``FIXED_TYPE`` or
+ ``ANY_TYPE``.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#Argument.FIELDS.argument_kind
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["kind"])
+
+ @kind.setter
+ def kind(self, value):
+ self._properties[self._PROPERTY_TO_API_FIELD["kind"]] = value
+
+ @property
+ def mode(self):
+ """Optional[str]: The input/output mode of the argument."""
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["mode"])
+
+ @mode.setter
+ def mode(self, value):
+ self._properties[self._PROPERTY_TO_API_FIELD["mode"]] = value
+
+ @property
+ def data_type(self):
+ """Optional[google.cloud.bigquery.StandardSqlDataType]: Type
+ of a variable, e.g., a function argument.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#Argument.FIELDS.data_type
+ """
+ resource = self._properties.get(self._PROPERTY_TO_API_FIELD["data_type"])
+ if not resource:
+ return resource
+
+ return StandardSqlDataType.from_api_repr(resource)
+
+ @data_type.setter
+ def data_type(self, value):
+ if value:
+ resource = value.to_api_repr()
+ else:
+ resource = None
+ self._properties[self._PROPERTY_TO_API_FIELD["data_type"]] = resource
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "RoutineArgument":
+ """Factory: construct a routine argument given its API representation.
+
+ Args:
+ resource (Dict[str, object]): Resource, as returned from the API.
+
+ Returns:
+ google.cloud.bigquery.routine.RoutineArgument:
+ Python object, as parsed from ``resource``.
+ """
+ ref = cls()
+ ref._properties = resource
+ return ref
+
+ def to_api_repr(self) -> dict:
+ """Construct the API resource representation of this routine argument.
+
+ Returns:
+ Dict[str, object]: Routine argument represented as an API resource.
+ """
+ return self._properties
+
+ def __eq__(self, other):
+ if not isinstance(other, RoutineArgument):
+ return NotImplemented
+ return self._properties == other._properties
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ all_properties = [
+ "{}={}".format(property_name, repr(getattr(self, property_name)))
+ for property_name in sorted(self._PROPERTY_TO_API_FIELD)
+ ]
+ return "RoutineArgument({})".format(", ".join(all_properties))
+
+
+class RoutineReference(object):
+ """A pointer to a routine.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#routinereference
+ """
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def project(self):
+ """str: ID of the project containing the routine."""
+ return self._properties["projectId"] # pytype: disable=key-error
+
+ @property
+ def dataset_id(self):
+ """str: ID of dataset containing the routine."""
+ return self._properties["datasetId"] # pytype: disable=key-error
+
+ @property
+ def routine_id(self):
+ """str: The routine ID."""
+ return self._properties["routineId"] # pytype: disable=key-error
+
+ @property
+ def path(self):
+ """str: URL path for the routine's APIs."""
+ return "/projects/%s/datasets/%s/routines/%s" % (
+ self.project,
+ self.dataset_id,
+ self.routine_id,
+ )
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "RoutineReference":
+ """Factory: construct a routine reference given its API representation.
+
+ Args:
+ resource (Dict[str, object]):
+ Routine reference representation returned from the API.
+
+ Returns:
+ google.cloud.bigquery.routine.RoutineReference:
+ Routine reference parsed from ``resource``.
+ """
+ ref = cls()
+ ref._properties = resource
+ return ref
+
+ @classmethod
+ def from_string(
+ cls, routine_id: str, default_project: Optional[str] = None
+ ) -> "RoutineReference":
+ """Factory: construct a routine reference from routine ID string.
+
+ Args:
+ routine_id (str):
+ A routine ID in standard SQL format. If ``default_project``
+ is not specified, this must included a project ID, dataset
+ ID, and routine ID, each separated by ``.``.
+ default_project (Optional[str]):
+ The project ID to use when ``routine_id`` does not
+ include a project ID.
+
+ Returns:
+ google.cloud.bigquery.routine.RoutineReference:
+ Routine reference parsed from ``routine_id``.
+
+ Raises:
+ ValueError:
+ If ``routine_id`` is not a fully-qualified routine ID in
+ standard SQL format.
+ """
+ proj, dset, routine = _helpers._parse_3_part_id(
+ routine_id, default_project=default_project, property_name="routine_id"
+ )
+ return cls.from_api_repr(
+ {"projectId": proj, "datasetId": dset, "routineId": routine}
+ )
+
+ def to_api_repr(self) -> dict:
+ """Construct the API resource representation of this routine reference.
+
+ Returns:
+ Dict[str, object]: Routine reference represented as an API resource.
+ """
+ return self._properties
+
+ def __eq__(self, other):
+ """Two RoutineReferences are equal if they point to the same routine."""
+ if not isinstance(other, RoutineReference):
+ return NotImplemented
+ return str(self) == str(other)
+
+ def __hash__(self):
+ return hash(str(self))
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return "RoutineReference.from_string('{}')".format(str(self))
+
+ def __str__(self):
+ """String representation of the reference.
+
+ This is a fully-qualified ID, including the project ID and dataset ID.
+ """
+ return "{}.{}.{}".format(self.project, self.dataset_id, self.routine_id)
+
+
+class RemoteFunctionOptions(object):
+ """Configuration options for controlling remote BigQuery functions."""
+
+ _PROPERTY_TO_API_FIELD = {
+ "endpoint": "endpoint",
+ "connection": "connection",
+ "max_batching_rows": "maxBatchingRows",
+ "user_defined_context": "userDefinedContext",
+ }
+
+ def __init__(
+ self,
+ endpoint=None,
+ connection=None,
+ max_batching_rows=None,
+ user_defined_context=None,
+ _properties=None,
+ ) -> None:
+ if _properties is None:
+ _properties = {}
+ self._properties = _properties
+
+ if endpoint is not None:
+ self.endpoint = endpoint
+ if connection is not None:
+ self.connection = connection
+ if max_batching_rows is not None:
+ self.max_batching_rows = max_batching_rows
+ if user_defined_context is not None:
+ self.user_defined_context = user_defined_context
+
+ @property
+ def connection(self):
+ """string: Fully qualified name of the user-provided connection object which holds the authentication information to send requests to the remote service.
+
+ Format is "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
+ """
+ return _helpers._str_or_none(self._properties.get("connection"))
+
+ @connection.setter
+ def connection(self, value):
+ self._properties["connection"] = _helpers._str_or_none(value)
+
+ @property
+ def endpoint(self):
+ """string: Endpoint of the user-provided remote service
+
+ Example: "https://us-east1-my_gcf_project.cloudfunctions.net/remote_add"
+ """
+ return _helpers._str_or_none(self._properties.get("endpoint"))
+
+ @endpoint.setter
+ def endpoint(self, value):
+ self._properties["endpoint"] = _helpers._str_or_none(value)
+
+ @property
+ def max_batching_rows(self):
+ """int64: Max number of rows in each batch sent to the remote service.
+
+ If absent or if 0, BigQuery dynamically decides the number of rows in a batch.
+ """
+ return _helpers._int_or_none(self._properties.get("maxBatchingRows"))
+
+ @max_batching_rows.setter
+ def max_batching_rows(self, value):
+ self._properties["maxBatchingRows"] = _helpers._str_or_none(value)
+
+ @property
+ def user_defined_context(self):
+ """Dict[str, str]: User-defined context as a set of key/value pairs,
+ which will be sent as function invocation context together with
+ batched arguments in the requests to the remote service. The total
+ number of bytes of keys and values must be less than 8KB.
+ """
+ return self._properties.get("userDefinedContext")
+
+ @user_defined_context.setter
+ def user_defined_context(self, value):
+ if not isinstance(value, dict):
+ raise ValueError("value must be dictionary")
+ self._properties["userDefinedContext"] = value
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "RemoteFunctionOptions":
+ """Factory: construct remote function options given its API representation.
+
+ Args:
+ resource (Dict[str, object]): Resource, as returned from the API.
+
+ Returns:
+ google.cloud.bigquery.routine.RemoteFunctionOptions:
+ Python object, as parsed from ``resource``.
+ """
+ ref = cls()
+ ref._properties = resource
+ return ref
+
+ def to_api_repr(self) -> dict:
+ """Construct the API resource representation of this RemoteFunctionOptions.
+
+ Returns:
+ Dict[str, object]: Remote function options represented as an API resource.
+ """
+ return self._properties
+
+ def __eq__(self, other):
+ if not isinstance(other, RemoteFunctionOptions):
+ return NotImplemented
+ return self._properties == other._properties
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ all_properties = [
+ "{}={}".format(property_name, repr(getattr(self, property_name)))
+ for property_name in sorted(self._PROPERTY_TO_API_FIELD)
+ ]
+ return "RemoteFunctionOptions({})".format(", ".join(all_properties))
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/schema.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5b03cbef400b9b2d7814c538fbcce759ca8db9b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/schema.py
@@ -0,0 +1,590 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Schemas for BigQuery tables / queries."""
+
+import collections
+import enum
+from typing import Any, Dict, Iterable, Optional, Union, cast
+
+from google.cloud.bigquery import standard_sql
+from google.cloud.bigquery.enums import StandardSqlTypeNames
+
+
+_STRUCT_TYPES = ("RECORD", "STRUCT")
+
+# SQL types reference:
+# https://cloud.google.com/bigquery/data-types#legacy_sql_data_types
+# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
+LEGACY_TO_STANDARD_TYPES = {
+ "STRING": StandardSqlTypeNames.STRING,
+ "BYTES": StandardSqlTypeNames.BYTES,
+ "INTEGER": StandardSqlTypeNames.INT64,
+ "INT64": StandardSqlTypeNames.INT64,
+ "FLOAT": StandardSqlTypeNames.FLOAT64,
+ "FLOAT64": StandardSqlTypeNames.FLOAT64,
+ "NUMERIC": StandardSqlTypeNames.NUMERIC,
+ "BIGNUMERIC": StandardSqlTypeNames.BIGNUMERIC,
+ "BOOLEAN": StandardSqlTypeNames.BOOL,
+ "BOOL": StandardSqlTypeNames.BOOL,
+ "GEOGRAPHY": StandardSqlTypeNames.GEOGRAPHY,
+ "RECORD": StandardSqlTypeNames.STRUCT,
+ "STRUCT": StandardSqlTypeNames.STRUCT,
+ "TIMESTAMP": StandardSqlTypeNames.TIMESTAMP,
+ "DATE": StandardSqlTypeNames.DATE,
+ "TIME": StandardSqlTypeNames.TIME,
+ "DATETIME": StandardSqlTypeNames.DATETIME,
+ # no direct conversion from ARRAY, the latter is represented by mode="REPEATED"
+}
+"""String names of the legacy SQL types to integer codes of Standard SQL standard_sql."""
+
+
+class _DefaultSentinel(enum.Enum):
+ """Object used as 'sentinel' indicating default value should be used.
+
+ Uses enum so that pytype/mypy knows that this is the only possible value.
+ https://stackoverflow.com/a/60605919/101923
+
+ Literal[_DEFAULT_VALUE] is an alternative, but only added in Python 3.8.
+ https://docs.python.org/3/library/typing.html#typing.Literal
+ """
+
+ DEFAULT_VALUE = object()
+
+
+_DEFAULT_VALUE = _DefaultSentinel.DEFAULT_VALUE
+
+
+class FieldElementType(object):
+ """Represents the type of a field element.
+
+ Args:
+ element_type (str): The type of a field element.
+ """
+
+ def __init__(self, element_type: str):
+ self._properties = {}
+ self._properties["type"] = element_type.upper()
+
+ @property
+ def element_type(self):
+ return self._properties.get("type")
+
+ @classmethod
+ def from_api_repr(cls, api_repr: Optional[dict]) -> Optional["FieldElementType"]:
+ """Factory: construct a FieldElementType given its API representation.
+
+ Args:
+ api_repr (Dict[str, str]): field element type as returned from
+ the API.
+
+ Returns:
+ google.cloud.bigquery.FieldElementType:
+ Python object, as parsed from ``api_repr``.
+ """
+ if not api_repr:
+ return None
+ return cls(api_repr["type"].upper())
+
+ def to_api_repr(self) -> dict:
+ """Construct the API resource representation of this field element type.
+
+ Returns:
+ Dict[str, str]: Field element type represented as an API resource.
+ """
+ return self._properties
+
+
+class SchemaField(object):
+ """Describe a single field within a table schema.
+
+ Args:
+ name: The name of the field.
+
+ field_type:
+ The type of the field. See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableFieldSchema.FIELDS.type
+
+ mode:
+ Defaults to ``'NULLABLE'``. The mode of the field. See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableFieldSchema.FIELDS.mode
+
+ description: Description for the field.
+
+ fields: Subfields (requires ``field_type`` of 'RECORD').
+
+ policy_tags: The policy tag list for the field.
+
+ precision:
+ Precison (number of digits) of fields with NUMERIC or BIGNUMERIC type.
+
+ scale:
+ Scale (digits after decimal) of fields with NUMERIC or BIGNUMERIC type.
+
+ max_length: Maximum length of fields with STRING or BYTES type.
+
+ default_value_expression: str, Optional
+ Used to specify the default value of a field using a SQL expression. It can only be set for
+ top level fields (columns).
+
+ You can use a struct or array expression to specify default value for the entire struct or
+ array. The valid SQL expressions are:
+
+ - Literals for all data types, including STRUCT and ARRAY.
+
+ - The following functions:
+
+ `CURRENT_TIMESTAMP`
+ `CURRENT_TIME`
+ `CURRENT_DATE`
+ `CURRENT_DATETIME`
+ `GENERATE_UUID`
+ `RAND`
+ `SESSION_USER`
+ `ST_GEOPOINT`
+
+ - Struct or array composed with the above allowed functions, for example:
+
+ "[CURRENT_DATE(), DATE '2020-01-01'"]
+
+ range_element_type: FieldElementType, str, Optional
+ The subtype of the RANGE, if the type of this field is RANGE. If
+ the type is RANGE, this field is required. Possible values for the
+ field element type of a RANGE include `DATE`, `DATETIME` and
+ `TIMESTAMP`.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ field_type: str,
+ mode: str = "NULLABLE",
+ default_value_expression: Optional[str] = None,
+ description: Union[str, _DefaultSentinel] = _DEFAULT_VALUE,
+ fields: Iterable["SchemaField"] = (),
+ policy_tags: Union["PolicyTagList", None, _DefaultSentinel] = _DEFAULT_VALUE,
+ precision: Union[int, _DefaultSentinel] = _DEFAULT_VALUE,
+ scale: Union[int, _DefaultSentinel] = _DEFAULT_VALUE,
+ max_length: Union[int, _DefaultSentinel] = _DEFAULT_VALUE,
+ range_element_type: Union[FieldElementType, str, None] = None,
+ ):
+ self._properties: Dict[str, Any] = {
+ "name": name,
+ "type": field_type,
+ }
+ if mode is not None:
+ self._properties["mode"] = mode.upper()
+ if description is not _DEFAULT_VALUE:
+ self._properties["description"] = description
+ if default_value_expression is not None:
+ self._properties["defaultValueExpression"] = default_value_expression
+ if precision is not _DEFAULT_VALUE:
+ self._properties["precision"] = precision
+ if scale is not _DEFAULT_VALUE:
+ self._properties["scale"] = scale
+ if max_length is not _DEFAULT_VALUE:
+ self._properties["maxLength"] = max_length
+ if policy_tags is not _DEFAULT_VALUE:
+ self._properties["policyTags"] = (
+ policy_tags.to_api_repr() if policy_tags is not None else None
+ )
+ if isinstance(range_element_type, str):
+ self._properties["rangeElementType"] = {"type": range_element_type}
+ if isinstance(range_element_type, FieldElementType):
+ self._properties["rangeElementType"] = range_element_type.to_api_repr()
+
+ self._fields = tuple(fields)
+
+ @staticmethod
+ def __get_int(api_repr, name):
+ v = api_repr.get(name, _DEFAULT_VALUE)
+ if v is not _DEFAULT_VALUE:
+ v = int(v)
+ return v
+
+ @classmethod
+ def from_api_repr(cls, api_repr: dict) -> "SchemaField":
+ """Return a ``SchemaField`` object deserialized from a dictionary.
+
+ Args:
+ api_repr (Mapping[str, str]): The serialized representation
+ of the SchemaField, such as what is output by
+ :meth:`to_api_repr`.
+
+ Returns:
+ google.cloud.bigquery.schema.SchemaField: The ``SchemaField`` object.
+ """
+ field_type = api_repr["type"].upper()
+
+ # Handle optional properties with default values
+ mode = api_repr.get("mode", "NULLABLE")
+ description = api_repr.get("description", _DEFAULT_VALUE)
+ fields = api_repr.get("fields", ())
+ policy_tags = api_repr.get("policyTags", _DEFAULT_VALUE)
+
+ default_value_expression = api_repr.get("defaultValueExpression", None)
+
+ if policy_tags is not None and policy_tags is not _DEFAULT_VALUE:
+ policy_tags = PolicyTagList.from_api_repr(policy_tags)
+
+ if api_repr.get("rangeElementType"):
+ range_element_type = cast(dict, api_repr.get("rangeElementType"))
+ element_type = range_element_type.get("type")
+ else:
+ element_type = None
+
+ return cls(
+ field_type=field_type,
+ fields=[cls.from_api_repr(f) for f in fields],
+ mode=mode.upper(),
+ default_value_expression=default_value_expression,
+ description=description,
+ name=api_repr["name"],
+ policy_tags=policy_tags,
+ precision=cls.__get_int(api_repr, "precision"),
+ scale=cls.__get_int(api_repr, "scale"),
+ max_length=cls.__get_int(api_repr, "maxLength"),
+ range_element_type=element_type,
+ )
+
+ @property
+ def name(self):
+ """str: The name of the field."""
+ return self._properties["name"]
+
+ @property
+ def field_type(self):
+ """str: The type of the field.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableFieldSchema.FIELDS.type
+ """
+ return self._properties["type"]
+
+ @property
+ def mode(self):
+ """Optional[str]: The mode of the field.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableFieldSchema.FIELDS.mode
+ """
+ return self._properties.get("mode")
+
+ @property
+ def is_nullable(self):
+ """bool: whether 'mode' is 'nullable'."""
+ return self.mode == "NULLABLE"
+
+ @property
+ def default_value_expression(self):
+ """Optional[str] default value of a field, using an SQL expression"""
+ return self._properties.get("defaultValueExpression")
+
+ @property
+ def description(self):
+ """Optional[str]: description for the field."""
+ return self._properties.get("description")
+
+ @property
+ def precision(self):
+ """Optional[int]: Precision (number of digits) for the NUMERIC field."""
+ return self._properties.get("precision")
+
+ @property
+ def scale(self):
+ """Optional[int]: Scale (digits after decimal) for the NUMERIC field."""
+ return self._properties.get("scale")
+
+ @property
+ def max_length(self):
+ """Optional[int]: Maximum length for the STRING or BYTES field."""
+ return self._properties.get("maxLength")
+
+ @property
+ def range_element_type(self):
+ """Optional[FieldElementType]: The subtype of the RANGE, if the
+ type of this field is RANGE.
+
+ Must be set when ``type`` is `"RANGE"`. Must be one of `"DATE"`,
+ `"DATETIME"` or `"TIMESTAMP"`.
+ """
+ if self._properties.get("rangeElementType"):
+ ret = self._properties.get("rangeElementType")
+ return FieldElementType.from_api_repr(ret)
+
+ @property
+ def fields(self):
+ """Optional[tuple]: Subfields contained in this field.
+
+ Must be empty unset if ``field_type`` is not 'RECORD'.
+ """
+ return self._fields
+
+ @property
+ def policy_tags(self):
+ """Optional[google.cloud.bigquery.schema.PolicyTagList]: Policy tag list
+ definition for this field.
+ """
+ resource = self._properties.get("policyTags")
+ return PolicyTagList.from_api_repr(resource) if resource is not None else None
+
+ def to_api_repr(self) -> dict:
+ """Return a dictionary representing this schema field.
+
+ Returns:
+ Dict: A dictionary representing the SchemaField in a serialized form.
+ """
+ answer = self._properties.copy()
+
+ # If this is a RECORD type, then sub-fields are also included,
+ # add this to the serialized representation.
+ if self.field_type.upper() in _STRUCT_TYPES:
+ answer["fields"] = [f.to_api_repr() for f in self.fields]
+
+ # Done; return the serialized dictionary.
+ return answer
+
+ def _key(self):
+ """A tuple key that uniquely describes this field.
+
+ Used to compute this instance's hashcode and evaluate equality.
+
+ Returns:
+ Tuple: The contents of this :class:`~google.cloud.bigquery.schema.SchemaField`.
+ """
+ field_type = self.field_type.upper() if self.field_type is not None else None
+
+ # Type can temporarily be set to None if the code needs a SchemaField instance,
+ # but has not determined the exact type of the field yet.
+ if field_type is not None:
+ if field_type == "STRING" or field_type == "BYTES":
+ if self.max_length is not None:
+ field_type = f"{field_type}({self.max_length})"
+ elif field_type.endswith("NUMERIC"):
+ if self.precision is not None:
+ if self.scale is not None:
+ field_type = f"{field_type}({self.precision}, {self.scale})"
+ else:
+ field_type = f"{field_type}({self.precision})"
+
+ policy_tags = (
+ None if self.policy_tags is None else tuple(sorted(self.policy_tags.names))
+ )
+
+ return (
+ self.name,
+ field_type,
+ # Mode is always str, if not given it defaults to a str value
+ self.mode.upper(), # pytype: disable=attribute-error
+ self.default_value_expression,
+ self.description,
+ self._fields,
+ policy_tags,
+ )
+
+ def to_standard_sql(self) -> standard_sql.StandardSqlField:
+ """Return the field as the standard SQL field representation object."""
+ sql_type = standard_sql.StandardSqlDataType()
+
+ if self.mode == "REPEATED":
+ sql_type.type_kind = StandardSqlTypeNames.ARRAY
+ else:
+ sql_type.type_kind = LEGACY_TO_STANDARD_TYPES.get(
+ self.field_type,
+ StandardSqlTypeNames.TYPE_KIND_UNSPECIFIED,
+ )
+
+ if sql_type.type_kind == StandardSqlTypeNames.ARRAY: # noqa: E721
+ array_element_type = LEGACY_TO_STANDARD_TYPES.get(
+ self.field_type,
+ StandardSqlTypeNames.TYPE_KIND_UNSPECIFIED,
+ )
+ sql_type.array_element_type = standard_sql.StandardSqlDataType(
+ type_kind=array_element_type
+ )
+
+ # ARRAY cannot directly contain other arrays, only scalar types and STRUCTs
+ # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#array-type
+ if array_element_type == StandardSqlTypeNames.STRUCT: # noqa: E721
+ sql_type.array_element_type.struct_type = (
+ standard_sql.StandardSqlStructType(
+ fields=(field.to_standard_sql() for field in self.fields)
+ )
+ )
+ elif sql_type.type_kind == StandardSqlTypeNames.STRUCT: # noqa: E721
+ sql_type.struct_type = standard_sql.StandardSqlStructType(
+ fields=(field.to_standard_sql() for field in self.fields)
+ )
+
+ return standard_sql.StandardSqlField(name=self.name, type=sql_type)
+
+ def __eq__(self, other):
+ if not isinstance(other, SchemaField):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def __repr__(self):
+ key = self._key()
+ policy_tags = key[-1]
+ policy_tags_inst = None if policy_tags is None else PolicyTagList(policy_tags)
+ adjusted_key = key[:-1] + (policy_tags_inst,)
+ return f"{self.__class__.__name__}{adjusted_key}"
+
+
+def _parse_schema_resource(info):
+ """Parse a resource fragment into a schema field.
+
+ Args:
+ info: (Mapping[str, Dict]): should contain a "fields" key to be parsed
+
+ Returns:
+ Optional[Sequence[google.cloud.bigquery.schema.SchemaField`]:
+ A list of parsed fields, or ``None`` if no "fields" key found.
+ """
+ return [SchemaField.from_api_repr(f) for f in info.get("fields", ())]
+
+
+def _build_schema_resource(fields):
+ """Generate a resource fragment for a schema.
+
+ Args:
+ fields (Sequence[google.cloud.bigquery.schema.SchemaField): schema to be dumped.
+
+ Returns:
+ Sequence[Dict]: Mappings describing the schema of the supplied fields.
+ """
+ return [field.to_api_repr() for field in fields]
+
+
+def _to_schema_fields(schema):
+ """Coerce `schema` to a list of schema field instances.
+
+ Args:
+ schema(Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]):
+ Table schema to convert. If some items are passed as mappings,
+ their content must be compatible with
+ :meth:`~google.cloud.bigquery.schema.SchemaField.from_api_repr`.
+
+ Returns:
+ Sequence[:class:`~google.cloud.bigquery.schema.SchemaField`]
+
+ Raises:
+ Exception: If ``schema`` is not a sequence, or if any item in the
+ sequence is not a :class:`~google.cloud.bigquery.schema.SchemaField`
+ instance or a compatible mapping representation of the field.
+ """
+ for field in schema:
+ if not isinstance(field, (SchemaField, collections.abc.Mapping)):
+ raise ValueError(
+ "Schema items must either be fields or compatible "
+ "mapping representations."
+ )
+
+ return [
+ field if isinstance(field, SchemaField) else SchemaField.from_api_repr(field)
+ for field in schema
+ ]
+
+
+class PolicyTagList(object):
+ """Define Policy Tags for a column.
+
+ Args:
+ names (
+ Optional[Tuple[str]]): list of policy tags to associate with
+ the column. Policy tag identifiers are of the form
+ `projects/*/locations/*/taxonomies/*/policyTags/*`.
+ """
+
+ def __init__(self, names: Iterable[str] = ()):
+ self._properties = {}
+ self._properties["names"] = tuple(names)
+
+ @property
+ def names(self):
+ """Tuple[str]: Policy tags associated with this definition."""
+ return self._properties.get("names", ())
+
+ def _key(self):
+ """A tuple key that uniquely describes this PolicyTagList.
+
+ Used to compute this instance's hashcode and evaluate equality.
+
+ Returns:
+ Tuple: The contents of this :class:`~google.cloud.bigquery.schema.PolicyTagList`.
+ """
+ return tuple(sorted(self._properties.get("names", ())))
+
+ def __eq__(self, other):
+ if not isinstance(other, PolicyTagList):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}(names={self._key()})"
+
+ @classmethod
+ def from_api_repr(cls, api_repr: dict) -> "PolicyTagList":
+ """Return a :class:`PolicyTagList` object deserialized from a dict.
+
+ This method creates a new ``PolicyTagList`` instance that points to
+ the ``api_repr`` parameter as its internal properties dict. This means
+ that when a ``PolicyTagList`` instance is stored as a property of
+ another object, any changes made at the higher level will also appear
+ here.
+
+ Args:
+ api_repr (Mapping[str, str]):
+ The serialized representation of the PolicyTagList, such as
+ what is output by :meth:`to_api_repr`.
+
+ Returns:
+ Optional[google.cloud.bigquery.schema.PolicyTagList]:
+ The ``PolicyTagList`` object or None.
+ """
+ if api_repr is None:
+ return None
+ names = api_repr.get("names", ())
+ return cls(names=names)
+
+ def to_api_repr(self) -> dict:
+ """Return a dictionary representing this object.
+
+ This method returns the properties dict of the ``PolicyTagList``
+ instance rather than making a copy. This means that when a
+ ``PolicyTagList`` instance is stored as a property of another
+ object, any changes made at the higher level will also appear here.
+
+ Returns:
+ dict:
+ A dictionary representing the PolicyTagList object in
+ serialized form.
+ """
+ answer = {"names": list(self.names)}
+ return answer
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/standard_sql.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/standard_sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..68332eb807cc9fab685d66ad910fe21f4189c5ec
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/standard_sql.py
@@ -0,0 +1,389 @@
+# Copyright 2021 Google LLC
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# https://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import typing
+from typing import Any, Dict, Iterable, List, Optional
+
+from google.cloud.bigquery.enums import StandardSqlTypeNames
+
+
+class StandardSqlDataType:
+ """The type of a variable, e.g., a function argument.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/StandardSqlDataType
+
+ Examples:
+
+ .. code-block:: text
+
+ INT64: {type_kind="INT64"}
+ ARRAY: {type_kind="ARRAY", array_element_type="STRING"}
+ STRUCT: {
+ type_kind="STRUCT",
+ struct_type={
+ fields=[
+ {name="x", type={type_kind="STRING"}},
+ {
+ name="y",
+ type={type_kind="ARRAY", array_element_type="DATE"}
+ }
+ ]
+ }
+ }
+ RANGE: {type_kind="RANGE", range_element_type="DATETIME"}
+
+ Args:
+ type_kind:
+ The top level type of this field. Can be any standard SQL data type,
+ e.g. INT64, DATE, ARRAY.
+ array_element_type:
+ The type of the array's elements, if type_kind is ARRAY.
+ struct_type:
+ The fields of this struct, in order, if type_kind is STRUCT.
+ range_element_type:
+ The type of the range's elements, if type_kind is RANGE.
+ """
+
+ def __init__(
+ self,
+ type_kind: Optional[
+ StandardSqlTypeNames
+ ] = StandardSqlTypeNames.TYPE_KIND_UNSPECIFIED,
+ array_element_type: Optional["StandardSqlDataType"] = None,
+ struct_type: Optional["StandardSqlStructType"] = None,
+ range_element_type: Optional["StandardSqlDataType"] = None,
+ ):
+ self._properties: Dict[str, Any] = {}
+
+ self.type_kind = type_kind
+ self.array_element_type = array_element_type
+ self.struct_type = struct_type
+ self.range_element_type = range_element_type
+
+ @property
+ def type_kind(self) -> Optional[StandardSqlTypeNames]:
+ """The top level type of this field.
+
+ Can be any standard SQL data type, e.g. INT64, DATE, ARRAY.
+ """
+ kind = self._properties["typeKind"]
+ return StandardSqlTypeNames[kind] # pytype: disable=missing-parameter
+
+ @type_kind.setter
+ def type_kind(self, value: Optional[StandardSqlTypeNames]):
+ if not value:
+ kind = StandardSqlTypeNames.TYPE_KIND_UNSPECIFIED.value
+ else:
+ kind = value.value
+ self._properties["typeKind"] = kind
+
+ @property
+ def array_element_type(self) -> Optional["StandardSqlDataType"]:
+ """The type of the array's elements, if type_kind is ARRAY."""
+ element_type = self._properties.get("arrayElementType")
+
+ if element_type is None:
+ return None
+
+ result = StandardSqlDataType()
+ result._properties = element_type # We do not use a copy on purpose.
+ return result
+
+ @array_element_type.setter
+ def array_element_type(self, value: Optional["StandardSqlDataType"]):
+ element_type = None if value is None else value.to_api_repr()
+
+ if element_type is None:
+ self._properties.pop("arrayElementType", None)
+ else:
+ self._properties["arrayElementType"] = element_type
+
+ @property
+ def struct_type(self) -> Optional["StandardSqlStructType"]:
+ """The fields of this struct, in order, if type_kind is STRUCT."""
+ struct_info = self._properties.get("structType")
+
+ if struct_info is None:
+ return None
+
+ result = StandardSqlStructType()
+ result._properties = struct_info # We do not use a copy on purpose.
+ return result
+
+ @struct_type.setter
+ def struct_type(self, value: Optional["StandardSqlStructType"]):
+ struct_type = None if value is None else value.to_api_repr()
+
+ if struct_type is None:
+ self._properties.pop("structType", None)
+ else:
+ self._properties["structType"] = struct_type
+
+ @property
+ def range_element_type(self) -> Optional["StandardSqlDataType"]:
+ """The type of the range's elements, if type_kind = "RANGE". Must be
+ one of DATETIME, DATE, or TIMESTAMP."""
+ range_element_info = self._properties.get("rangeElementType")
+
+ if range_element_info is None:
+ return None
+
+ result = StandardSqlDataType()
+ result._properties = range_element_info # We do not use a copy on purpose.
+ return result
+
+ @range_element_type.setter
+ def range_element_type(self, value: Optional["StandardSqlDataType"]):
+ range_element_type = None if value is None else value.to_api_repr()
+
+ if range_element_type is None:
+ self._properties.pop("rangeElementType", None)
+ else:
+ self._properties["rangeElementType"] = range_element_type
+
+ def to_api_repr(self) -> Dict[str, Any]:
+ """Construct the API resource representation of this SQL data type."""
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, Any]):
+ """Construct an SQL data type instance given its API representation."""
+ type_kind = resource.get("typeKind")
+ if type_kind not in StandardSqlTypeNames.__members__:
+ type_kind = StandardSqlTypeNames.TYPE_KIND_UNSPECIFIED
+ else:
+ # Convert string to an enum member.
+ type_kind = StandardSqlTypeNames[ # pytype: disable=missing-parameter
+ typing.cast(str, type_kind)
+ ]
+
+ array_element_type = None
+ if type_kind == StandardSqlTypeNames.ARRAY:
+ element_type = resource.get("arrayElementType")
+ if element_type:
+ array_element_type = cls.from_api_repr(element_type)
+
+ struct_type = None
+ if type_kind == StandardSqlTypeNames.STRUCT:
+ struct_info = resource.get("structType")
+ if struct_info:
+ struct_type = StandardSqlStructType.from_api_repr(struct_info)
+
+ range_element_type = None
+ if type_kind == StandardSqlTypeNames.RANGE:
+ range_element_info = resource.get("rangeElementType")
+ if range_element_info:
+ range_element_type = cls.from_api_repr(range_element_info)
+
+ return cls(type_kind, array_element_type, struct_type, range_element_type)
+
+ def __eq__(self, other):
+ if not isinstance(other, StandardSqlDataType):
+ return NotImplemented
+ else:
+ return (
+ self.type_kind == other.type_kind
+ and self.array_element_type == other.array_element_type
+ and self.struct_type == other.struct_type
+ and self.range_element_type == other.range_element_type
+ )
+
+ def __str__(self):
+ result = f"{self.__class__.__name__}(type_kind={self.type_kind!r}, ...)"
+ return result
+
+
+class StandardSqlField:
+ """A field or a column.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/StandardSqlField
+
+ Args:
+ name:
+ The name of this field. Can be absent for struct fields.
+ type:
+ The type of this parameter. Absent if not explicitly specified.
+
+ For example, CREATE FUNCTION statement can omit the return type; in this
+ case the output parameter does not have this "type" field).
+ """
+
+ def __init__(
+ self, name: Optional[str] = None, type: Optional[StandardSqlDataType] = None
+ ):
+ type_repr = None if type is None else type.to_api_repr()
+ self._properties = {"name": name, "type": type_repr}
+
+ @property
+ def name(self) -> Optional[str]:
+ """The name of this field. Can be absent for struct fields."""
+ return typing.cast(Optional[str], self._properties["name"])
+
+ @name.setter
+ def name(self, value: Optional[str]):
+ self._properties["name"] = value
+
+ @property
+ def type(self) -> Optional[StandardSqlDataType]:
+ """The type of this parameter. Absent if not explicitly specified.
+
+ For example, CREATE FUNCTION statement can omit the return type; in this
+ case the output parameter does not have this "type" field).
+ """
+ type_info = self._properties["type"]
+
+ if type_info is None:
+ return None
+
+ result = StandardSqlDataType()
+ # We do not use a properties copy on purpose.
+ result._properties = typing.cast(Dict[str, Any], type_info)
+
+ return result
+
+ @type.setter
+ def type(self, value: Optional[StandardSqlDataType]):
+ value_repr = None if value is None else value.to_api_repr()
+ self._properties["type"] = value_repr
+
+ def to_api_repr(self) -> Dict[str, Any]:
+ """Construct the API resource representation of this SQL field."""
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, Any]):
+ """Construct an SQL field instance given its API representation."""
+ result = cls(
+ name=resource.get("name"),
+ type=StandardSqlDataType.from_api_repr(resource.get("type", {})),
+ )
+ return result
+
+ def __eq__(self, other):
+ if not isinstance(other, StandardSqlField):
+ return NotImplemented
+ else:
+ return self.name == other.name and self.type == other.type
+
+
+class StandardSqlStructType:
+ """Type of a struct field.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/StandardSqlDataType#StandardSqlStructType
+
+ Args:
+ fields: The fields in this struct.
+ """
+
+ def __init__(self, fields: Optional[Iterable[StandardSqlField]] = None):
+ if fields is None:
+ fields = []
+ self._properties = {"fields": [field.to_api_repr() for field in fields]}
+
+ @property
+ def fields(self) -> List[StandardSqlField]:
+ """The fields in this struct."""
+ result = []
+
+ for field_resource in self._properties.get("fields", []):
+ field = StandardSqlField()
+ field._properties = field_resource # We do not use a copy on purpose.
+ result.append(field)
+
+ return result
+
+ @fields.setter
+ def fields(self, value: Iterable[StandardSqlField]):
+ self._properties["fields"] = [field.to_api_repr() for field in value]
+
+ def to_api_repr(self) -> Dict[str, Any]:
+ """Construct the API resource representation of this SQL struct type."""
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, Any]) -> "StandardSqlStructType":
+ """Construct an SQL struct type instance given its API representation."""
+ fields = (
+ StandardSqlField.from_api_repr(field_resource)
+ for field_resource in resource.get("fields", [])
+ )
+ return cls(fields=fields)
+
+ def __eq__(self, other):
+ if not isinstance(other, StandardSqlStructType):
+ return NotImplemented
+ else:
+ return self.fields == other.fields
+
+
+class StandardSqlTableType:
+ """A table type.
+
+ See:
+ https://cloud.google.com/workflows/docs/reference/googleapis/bigquery/v2/Overview#StandardSqlTableType
+
+ Args:
+ columns: The columns in this table type.
+ """
+
+ def __init__(self, columns: Iterable[StandardSqlField]):
+ self._properties = {"columns": [col.to_api_repr() for col in columns]}
+
+ @property
+ def columns(self) -> List[StandardSqlField]:
+ """The columns in this table type."""
+ result = []
+
+ for column_resource in self._properties.get("columns", []):
+ column = StandardSqlField()
+ column._properties = column_resource # We do not use a copy on purpose.
+ result.append(column)
+
+ return result
+
+ @columns.setter
+ def columns(self, value: Iterable[StandardSqlField]):
+ self._properties["columns"] = [col.to_api_repr() for col in value]
+
+ def to_api_repr(self) -> Dict[str, Any]:
+ """Construct the API resource representation of this SQL table type."""
+ return copy.deepcopy(self._properties)
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, Any]) -> "StandardSqlTableType":
+ """Construct an SQL table type instance given its API representation."""
+ columns = []
+
+ for column_resource in resource.get("columns", []):
+ type_ = column_resource.get("type")
+ if type_ is None:
+ type_ = {}
+
+ column = StandardSqlField(
+ name=column_resource.get("name"),
+ type=StandardSqlDataType.from_api_repr(type_),
+ )
+ columns.append(column)
+
+ return cls(columns=columns)
+
+ def __eq__(self, other):
+ if not isinstance(other, StandardSqlTableType):
+ return NotImplemented
+ else:
+ return self.columns == other.columns
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/table.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/table.py
new file mode 100644
index 0000000000000000000000000000000000000000..faf827be4d3ac6a77209663dd67bf46a78388985
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/table.py
@@ -0,0 +1,3310 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Tables."""
+
+from __future__ import absolute_import
+
+import copy
+import datetime
+import functools
+import operator
+import typing
+from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple, Union
+import warnings
+
+try:
+ import pandas # type: ignore
+except ImportError:
+ pandas = None
+
+try:
+ import pyarrow # type: ignore
+except ImportError:
+ pyarrow = None
+
+try:
+ import db_dtypes # type: ignore
+except ImportError:
+ db_dtypes = None
+
+try:
+ import geopandas # type: ignore
+except ImportError:
+ geopandas = None
+else:
+ _COORDINATE_REFERENCE_SYSTEM = "EPSG:4326"
+
+try:
+ import shapely # type: ignore
+ from shapely import wkt # type: ignore
+except ImportError:
+ shapely = None
+else:
+ _read_wkt = wkt.loads
+
+import google.api_core.exceptions
+from google.api_core.page_iterator import HTTPIterator
+
+import google.cloud._helpers # type: ignore
+from google.cloud.bigquery import _helpers
+from google.cloud.bigquery import _pandas_helpers
+from google.cloud.bigquery import _versions_helpers
+from google.cloud.bigquery import exceptions as bq_exceptions
+from google.cloud.bigquery._tqdm_helpers import get_progress_bar
+from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
+from google.cloud.bigquery.enums import DefaultPandasDTypes
+from google.cloud.bigquery.external_config import ExternalConfig
+from google.cloud.bigquery.schema import _build_schema_resource
+from google.cloud.bigquery.schema import _parse_schema_resource
+from google.cloud.bigquery.schema import _to_schema_fields
+
+if typing.TYPE_CHECKING: # pragma: NO COVER
+ # Unconditionally import optional dependencies again to tell pytype that
+ # they are not None, avoiding false "no attribute" errors.
+ import pandas
+ import pyarrow
+ import geopandas # type: ignore
+ from google.cloud import bigquery_storage # type: ignore
+ from google.cloud.bigquery.dataset import DatasetReference
+
+
+_NO_GEOPANDAS_ERROR = (
+ "The geopandas library is not installed, please install "
+ "geopandas to use the to_geodataframe() function."
+)
+_NO_PYARROW_ERROR = (
+ "The pyarrow library is not installed, please install "
+ "pyarrow to use the to_arrow() function."
+)
+_NO_SHAPELY_ERROR = (
+ "The shapely library is not installed, please install "
+ "shapely to use the geography_as_object option."
+)
+
+_TABLE_HAS_NO_SCHEMA = 'Table has no schema: call "client.get_table()"'
+
+_NO_SUPPORTED_DTYPE = (
+ "The dtype cannot to be converted to a pandas ExtensionArray "
+ "because the necessary `__from_arrow__` attribute is missing."
+)
+
+_RANGE_PYARROW_WARNING = (
+ "Unable to represent RANGE schema as struct using pandas ArrowDtype. Using "
+ "`object` instead. To use ArrowDtype, use pandas >= 1.5 and "
+ "pyarrow >= 10.0.1."
+)
+
+# How many of the total rows need to be downloaded already for us to skip
+# calling the BQ Storage API?
+#
+# In microbenchmarks on 2024-05-21, I (tswast@) measure that at about 2 MB of
+# remaining results, it's faster to use the BQ Storage Read API to download
+# the results than use jobs.getQueryResults. Since we don't have a good way to
+# know the remaining bytes, we estimate by remaining number of rows.
+#
+# Except when rows themselves are larger, I observe that the a single page of
+# results will be around 10 MB. Therefore, the proportion of rows already
+# downloaded should be 10 (first page) / 12 (all results) or less for it to be
+# worth it to make a call to jobs.getQueryResults.
+ALMOST_COMPLETELY_CACHED_RATIO = 0.833333
+
+
+def _reference_getter(table):
+ """A :class:`~google.cloud.bigquery.table.TableReference` pointing to
+ this table.
+
+ Returns:
+ google.cloud.bigquery.table.TableReference: pointer to this table.
+ """
+ from google.cloud.bigquery import dataset
+
+ dataset_ref = dataset.DatasetReference(table.project, table.dataset_id)
+ return TableReference(dataset_ref, table.table_id)
+
+
+def _view_use_legacy_sql_getter(table):
+ """bool: Specifies whether to execute the view with Legacy or Standard SQL.
+
+ This boolean specifies whether to execute the view with Legacy SQL
+ (:data:`True`) or Standard SQL (:data:`False`). The client side default is
+ :data:`False`. The server-side default is :data:`True`. If this table is
+ not a view, :data:`None` is returned.
+
+ Raises:
+ ValueError: For invalid value types.
+ """
+ view = table._properties.get("view")
+ if view is not None:
+ # The server-side default for useLegacySql is True.
+ return view.get("useLegacySql", True)
+ # In some cases, such as in a table list no view object is present, but the
+ # resource still represents a view. Use the type as a fallback.
+ if table.table_type == "VIEW":
+ # The server-side default for useLegacySql is True.
+ return True
+
+
+class _TableBase:
+ """Base class for Table-related classes with common functionality."""
+
+ _PROPERTY_TO_API_FIELD: Dict[str, Union[str, List[str]]] = {
+ "dataset_id": ["tableReference", "datasetId"],
+ "project": ["tableReference", "projectId"],
+ "table_id": ["tableReference", "tableId"],
+ }
+
+ def __init__(self):
+ self._properties = {}
+
+ @property
+ def project(self) -> str:
+ """Project bound to the table."""
+ return _helpers._get_sub_prop(
+ self._properties, self._PROPERTY_TO_API_FIELD["project"]
+ )
+
+ @property
+ def dataset_id(self) -> str:
+ """ID of dataset containing the table."""
+ return _helpers._get_sub_prop(
+ self._properties, self._PROPERTY_TO_API_FIELD["dataset_id"]
+ )
+
+ @property
+ def table_id(self) -> str:
+ """The table ID."""
+ return _helpers._get_sub_prop(
+ self._properties, self._PROPERTY_TO_API_FIELD["table_id"]
+ )
+
+ @property
+ def path(self) -> str:
+ """URL path for the table's APIs."""
+ return (
+ f"/projects/{self.project}/datasets/{self.dataset_id}"
+ f"/tables/{self.table_id}"
+ )
+
+ def __eq__(self, other):
+ if isinstance(other, _TableBase):
+ return (
+ self.project == other.project
+ and self.dataset_id == other.dataset_id
+ and self.table_id == other.table_id
+ )
+ else:
+ return NotImplemented
+
+ def __hash__(self):
+ return hash((self.project, self.dataset_id, self.table_id))
+
+
+class TableReference(_TableBase):
+ """TableReferences are pointers to tables.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#tablereference
+
+ Args:
+ dataset_ref: A pointer to the dataset
+ table_id: The ID of the table
+ """
+
+ _PROPERTY_TO_API_FIELD = {
+ "dataset_id": "datasetId",
+ "project": "projectId",
+ "table_id": "tableId",
+ }
+
+ def __init__(self, dataset_ref: "DatasetReference", table_id: str):
+ self._properties = {}
+
+ _helpers._set_sub_prop(
+ self._properties,
+ self._PROPERTY_TO_API_FIELD["project"],
+ dataset_ref.project,
+ )
+ _helpers._set_sub_prop(
+ self._properties,
+ self._PROPERTY_TO_API_FIELD["dataset_id"],
+ dataset_ref.dataset_id,
+ )
+ _helpers._set_sub_prop(
+ self._properties,
+ self._PROPERTY_TO_API_FIELD["table_id"],
+ table_id,
+ )
+
+ @classmethod
+ def from_string(
+ cls, table_id: str, default_project: Optional[str] = None
+ ) -> "TableReference":
+ """Construct a table reference from table ID string.
+
+ Args:
+ table_id (str):
+ A table ID in standard SQL format. If ``default_project``
+ is not specified, this must included a project ID, dataset
+ ID, and table ID, each separated by ``.``.
+ default_project (Optional[str]):
+ The project ID to use when ``table_id`` does not
+ include a project ID.
+
+ Returns:
+ TableReference: Table reference parsed from ``table_id``.
+
+ Examples:
+ >>> TableReference.from_string('my-project.mydataset.mytable')
+ TableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable')
+
+ Raises:
+ ValueError:
+ If ``table_id`` is not a fully-qualified table ID in
+ standard SQL format.
+ """
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ (
+ output_project_id,
+ output_dataset_id,
+ output_table_id,
+ ) = _helpers._parse_3_part_id(
+ table_id, default_project=default_project, property_name="table_id"
+ )
+
+ return cls(
+ DatasetReference(output_project_id, output_dataset_id), output_table_id
+ )
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "TableReference":
+ """Factory: construct a table reference given its API representation
+
+ Args:
+ resource (Dict[str, object]):
+ Table reference representation returned from the API
+
+ Returns:
+ google.cloud.bigquery.table.TableReference:
+ Table reference parsed from ``resource``.
+ """
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ project = resource["projectId"]
+ dataset_id = resource["datasetId"]
+ table_id = resource["tableId"]
+
+ return cls(DatasetReference(project, dataset_id), table_id)
+
+ def to_api_repr(self) -> dict:
+ """Construct the API resource representation of this table reference.
+
+ Returns:
+ Dict[str, object]: Table reference represented as an API resource
+ """
+ return copy.deepcopy(self._properties)
+
+ def to_bqstorage(self) -> str:
+ """Construct a BigQuery Storage API representation of this table.
+
+ Install the ``google-cloud-bigquery-storage`` package to use this
+ feature.
+
+ If the ``table_id`` contains a partition identifier (e.g.
+ ``my_table$201812``) or a snapshot identifier (e.g.
+ ``mytable@1234567890``), it is ignored. Use
+ :class:`google.cloud.bigquery_storage.types.ReadSession.TableReadOptions`
+ to filter rows by partition. Use
+ :class:`google.cloud.bigquery_storage.types.ReadSession.TableModifiers`
+ to select a specific snapshot to read from.
+
+ Returns:
+ str: A reference to this table in the BigQuery Storage API.
+ """
+
+ table_id, _, _ = self.table_id.partition("@")
+ table_id, _, _ = table_id.partition("$")
+
+ table_ref = (
+ f"projects/{self.project}/datasets/{self.dataset_id}/tables/{table_id}"
+ )
+ return table_ref
+
+ def __str__(self):
+ return f"{self.project}.{self.dataset_id}.{self.table_id}"
+
+ def __repr__(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ dataset_ref = DatasetReference(self.project, self.dataset_id)
+ return f"TableReference({dataset_ref!r}, '{self.table_id}')"
+
+
+class Table(_TableBase):
+ """Tables represent a set of rows whose values correspond to a schema.
+
+ See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource-table
+
+ Args:
+ table_ref (Union[google.cloud.bigquery.table.TableReference, str]):
+ A pointer to a table. If ``table_ref`` is a string, it must
+ included a project ID, dataset ID, and table ID, each separated
+ by ``.``.
+ schema (Optional[Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]]):
+ The table's schema. If any item is a mapping, its content must be
+ compatible with
+ :meth:`~google.cloud.bigquery.schema.SchemaField.from_api_repr`.
+ """
+
+ _PROPERTY_TO_API_FIELD = {
+ **_TableBase._PROPERTY_TO_API_FIELD,
+ "clustering_fields": "clustering",
+ "created": "creationTime",
+ "description": "description",
+ "encryption_configuration": "encryptionConfiguration",
+ "etag": "etag",
+ "expires": "expirationTime",
+ "external_data_configuration": "externalDataConfiguration",
+ "friendly_name": "friendlyName",
+ "full_table_id": "id",
+ "labels": "labels",
+ "location": "location",
+ "modified": "lastModifiedTime",
+ "mview_enable_refresh": "materializedView",
+ "mview_last_refresh_time": ["materializedView", "lastRefreshTime"],
+ "mview_query": "materializedView",
+ "mview_refresh_interval": "materializedView",
+ "num_bytes": "numBytes",
+ "num_rows": "numRows",
+ "partition_expiration": "timePartitioning",
+ "partitioning_type": "timePartitioning",
+ "range_partitioning": "rangePartitioning",
+ "time_partitioning": "timePartitioning",
+ "schema": "schema",
+ "snapshot_definition": "snapshotDefinition",
+ "clone_definition": "cloneDefinition",
+ "streaming_buffer": "streamingBuffer",
+ "self_link": "selfLink",
+ "type": "type",
+ "view_use_legacy_sql": "view",
+ "view_query": "view",
+ "require_partition_filter": "requirePartitionFilter",
+ "table_constraints": "tableConstraints",
+ }
+
+ def __init__(self, table_ref, schema=None) -> None:
+ table_ref = _table_arg_to_table_ref(table_ref)
+ self._properties = {"tableReference": table_ref.to_api_repr(), "labels": {}}
+ # Let the @property do validation.
+ if schema is not None:
+ self.schema = schema
+
+ reference = property(_reference_getter)
+
+ @property
+ def require_partition_filter(self):
+ """bool: If set to true, queries over the partitioned table require a
+ partition filter that can be used for partition elimination to be
+ specified.
+ """
+ return self._properties.get(
+ self._PROPERTY_TO_API_FIELD["require_partition_filter"]
+ )
+
+ @require_partition_filter.setter
+ def require_partition_filter(self, value):
+ self._properties[
+ self._PROPERTY_TO_API_FIELD["require_partition_filter"]
+ ] = value
+
+ @property
+ def schema(self):
+ """Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]:
+ Table's schema.
+
+ Raises:
+ Exception:
+ If ``schema`` is not a sequence, or if any item in the sequence
+ is not a :class:`~google.cloud.bigquery.schema.SchemaField`
+ instance or a compatible mapping representation of the field.
+ """
+ prop = self._properties.get(self._PROPERTY_TO_API_FIELD["schema"])
+ if not prop:
+ return []
+ else:
+ return _parse_schema_resource(prop)
+
+ @schema.setter
+ def schema(self, value):
+ api_field = self._PROPERTY_TO_API_FIELD["schema"]
+
+ if value is None:
+ self._properties[api_field] = None
+ else:
+ value = _to_schema_fields(value)
+ self._properties[api_field] = {"fields": _build_schema_resource(value)}
+
+ @property
+ def labels(self):
+ """Dict[str, str]: Labels for the table.
+
+ This method always returns a dict. To change a table's labels,
+ modify the dict, then call ``Client.update_table``. To delete a
+ label, set its value to :data:`None` before updating.
+
+ Raises:
+ ValueError: If ``value`` type is invalid.
+ """
+ return self._properties.setdefault(self._PROPERTY_TO_API_FIELD["labels"], {})
+
+ @labels.setter
+ def labels(self, value):
+ if not isinstance(value, dict):
+ raise ValueError("Pass a dict")
+ self._properties[self._PROPERTY_TO_API_FIELD["labels"]] = value
+
+ @property
+ def encryption_configuration(self):
+ """google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
+ encryption configuration for the table.
+
+ Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
+ if using default encryption.
+
+ See `protecting data with Cloud KMS keys
+ `_
+ in the BigQuery documentation.
+ """
+ prop = self._properties.get(
+ self._PROPERTY_TO_API_FIELD["encryption_configuration"]
+ )
+ if prop is not None:
+ prop = EncryptionConfiguration.from_api_repr(prop)
+ return prop
+
+ @encryption_configuration.setter
+ def encryption_configuration(self, value):
+ api_repr = value
+ if value is not None:
+ api_repr = value.to_api_repr()
+ self._properties[
+ self._PROPERTY_TO_API_FIELD["encryption_configuration"]
+ ] = api_repr
+
+ @property
+ def created(self):
+ """Union[datetime.datetime, None]: Datetime at which the table was
+ created (:data:`None` until set from the server).
+ """
+ creation_time = self._properties.get(self._PROPERTY_TO_API_FIELD["created"])
+ if creation_time is not None:
+ # creation_time will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(creation_time)
+ )
+
+ @property
+ def etag(self):
+ """Union[str, None]: ETag for the table resource (:data:`None` until
+ set from the server).
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["etag"])
+
+ @property
+ def modified(self):
+ """Union[datetime.datetime, None]: Datetime at which the table was last
+ modified (:data:`None` until set from the server).
+ """
+ modified_time = self._properties.get(self._PROPERTY_TO_API_FIELD["modified"])
+ if modified_time is not None:
+ # modified_time will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(modified_time)
+ )
+
+ @property
+ def num_bytes(self):
+ """Union[int, None]: The size of the table in bytes (:data:`None` until
+ set from the server).
+ """
+ return _helpers._int_or_none(
+ self._properties.get(self._PROPERTY_TO_API_FIELD["num_bytes"])
+ )
+
+ @property
+ def num_rows(self):
+ """Union[int, None]: The number of rows in the table (:data:`None`
+ until set from the server).
+ """
+ return _helpers._int_or_none(
+ self._properties.get(self._PROPERTY_TO_API_FIELD["num_rows"])
+ )
+
+ @property
+ def self_link(self):
+ """Union[str, None]: URL for the table resource (:data:`None` until set
+ from the server).
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["self_link"])
+
+ @property
+ def full_table_id(self):
+ """Union[str, None]: ID for the table (:data:`None` until set from the
+ server).
+
+ In the format ``project-id:dataset_id.table_id``.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["full_table_id"])
+
+ @property
+ def table_type(self):
+ """Union[str, None]: The type of the table (:data:`None` until set from
+ the server).
+
+ Possible values are ``'TABLE'``, ``'VIEW'``, ``'MATERIALIZED_VIEW'`` or
+ ``'EXTERNAL'``.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["type"])
+
+ @property
+ def range_partitioning(self):
+ """Optional[google.cloud.bigquery.table.RangePartitioning]:
+ Configures range-based partitioning for a table.
+
+ .. note::
+ **Beta**. The integer range partitioning feature is in a
+ pre-release state and might change or have limited support.
+
+ Only specify at most one of
+ :attr:`~google.cloud.bigquery.table.Table.time_partitioning` or
+ :attr:`~google.cloud.bigquery.table.Table.range_partitioning`.
+
+ Raises:
+ ValueError:
+ If the value is not
+ :class:`~google.cloud.bigquery.table.RangePartitioning` or
+ :data:`None`.
+ """
+ resource = self._properties.get(
+ self._PROPERTY_TO_API_FIELD["range_partitioning"]
+ )
+ if resource is not None:
+ return RangePartitioning(_properties=resource)
+
+ @range_partitioning.setter
+ def range_partitioning(self, value):
+ resource = value
+ if isinstance(value, RangePartitioning):
+ resource = value._properties
+ elif value is not None:
+ raise ValueError(
+ "Expected value to be RangePartitioning or None, got {}.".format(value)
+ )
+ self._properties[self._PROPERTY_TO_API_FIELD["range_partitioning"]] = resource
+
+ @property
+ def time_partitioning(self):
+ """Optional[google.cloud.bigquery.table.TimePartitioning]: Configures time-based
+ partitioning for a table.
+
+ Only specify at most one of
+ :attr:`~google.cloud.bigquery.table.Table.time_partitioning` or
+ :attr:`~google.cloud.bigquery.table.Table.range_partitioning`.
+
+ Raises:
+ ValueError:
+ If the value is not
+ :class:`~google.cloud.bigquery.table.TimePartitioning` or
+ :data:`None`.
+ """
+ prop = self._properties.get(self._PROPERTY_TO_API_FIELD["time_partitioning"])
+ if prop is not None:
+ return TimePartitioning.from_api_repr(prop)
+
+ @time_partitioning.setter
+ def time_partitioning(self, value):
+ api_repr = value
+ if isinstance(value, TimePartitioning):
+ api_repr = value.to_api_repr()
+ elif value is not None:
+ raise ValueError(
+ "value must be google.cloud.bigquery.table.TimePartitioning " "or None"
+ )
+ self._properties[self._PROPERTY_TO_API_FIELD["time_partitioning"]] = api_repr
+
+ @property
+ def partitioning_type(self):
+ """Union[str, None]: Time partitioning of the table if it is
+ partitioned (Defaults to :data:`None`).
+
+ """
+ warnings.warn(
+ "This method will be deprecated in future versions. Please use "
+ "Table.time_partitioning.type_ instead.",
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+ if self.time_partitioning is not None:
+ return self.time_partitioning.type_
+
+ @partitioning_type.setter
+ def partitioning_type(self, value):
+ warnings.warn(
+ "This method will be deprecated in future versions. Please use "
+ "Table.time_partitioning.type_ instead.",
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+ api_field = self._PROPERTY_TO_API_FIELD["partitioning_type"]
+ if self.time_partitioning is None:
+ self._properties[api_field] = {}
+ self._properties[api_field]["type"] = value
+
+ @property
+ def partition_expiration(self):
+ """Union[int, None]: Expiration time in milliseconds for a partition.
+
+ If :attr:`partition_expiration` is set and :attr:`type_` is
+ not set, :attr:`type_` will default to
+ :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
+ """
+ warnings.warn(
+ "This method will be deprecated in future versions. Please use "
+ "Table.time_partitioning.expiration_ms instead.",
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+ if self.time_partitioning is not None:
+ return self.time_partitioning.expiration_ms
+
+ @partition_expiration.setter
+ def partition_expiration(self, value):
+ warnings.warn(
+ "This method will be deprecated in future versions. Please use "
+ "Table.time_partitioning.expiration_ms instead.",
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+ api_field = self._PROPERTY_TO_API_FIELD["partition_expiration"]
+
+ if self.time_partitioning is None:
+ self._properties[api_field] = {"type": TimePartitioningType.DAY}
+
+ if value is None:
+ self._properties[api_field]["expirationMs"] = None
+ else:
+ self._properties[api_field]["expirationMs"] = str(value)
+
+ @property
+ def clustering_fields(self):
+ """Union[List[str], None]: Fields defining clustering for the table
+
+ (Defaults to :data:`None`).
+
+ Clustering fields are immutable after table creation.
+
+ .. note::
+
+ BigQuery supports clustering for both partitioned and
+ non-partitioned tables.
+ """
+ prop = self._properties.get(self._PROPERTY_TO_API_FIELD["clustering_fields"])
+ if prop is not None:
+ return list(prop.get("fields", ()))
+
+ @clustering_fields.setter
+ def clustering_fields(self, value):
+ """Union[List[str], None]: Fields defining clustering for the table
+
+ (Defaults to :data:`None`).
+ """
+ api_field = self._PROPERTY_TO_API_FIELD["clustering_fields"]
+
+ if value is not None:
+ prop = self._properties.setdefault(api_field, {})
+ prop["fields"] = value
+ else:
+ # In order to allow unsetting clustering fields completely, we explicitly
+ # set this property to None (as oposed to merely removing the key).
+ self._properties[api_field] = None
+
+ @property
+ def description(self):
+ """Union[str, None]: Description of the table (defaults to
+ :data:`None`).
+
+ Raises:
+ ValueError: For invalid value types.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["description"])
+
+ @description.setter
+ def description(self, value):
+ if not isinstance(value, str) and value is not None:
+ raise ValueError("Pass a string, or None")
+ self._properties[self._PROPERTY_TO_API_FIELD["description"]] = value
+
+ @property
+ def expires(self):
+ """Union[datetime.datetime, None]: Datetime at which the table will be
+ deleted.
+
+ Raises:
+ ValueError: For invalid value types.
+ """
+ expiration_time = self._properties.get(self._PROPERTY_TO_API_FIELD["expires"])
+ if expiration_time is not None:
+ # expiration_time will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(expiration_time)
+ )
+
+ @expires.setter
+ def expires(self, value):
+ if not isinstance(value, datetime.datetime) and value is not None:
+ raise ValueError("Pass a datetime, or None")
+ value_ms = google.cloud._helpers._millis_from_datetime(value)
+ self._properties[
+ self._PROPERTY_TO_API_FIELD["expires"]
+ ] = _helpers._str_or_none(value_ms)
+
+ @property
+ def friendly_name(self):
+ """Union[str, None]: Title of the table (defaults to :data:`None`).
+
+ Raises:
+ ValueError: For invalid value types.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["friendly_name"])
+
+ @friendly_name.setter
+ def friendly_name(self, value):
+ if not isinstance(value, str) and value is not None:
+ raise ValueError("Pass a string, or None")
+ self._properties[self._PROPERTY_TO_API_FIELD["friendly_name"]] = value
+
+ @property
+ def location(self):
+ """Union[str, None]: Location in which the table is hosted
+
+ Defaults to :data:`None`.
+ """
+ return self._properties.get(self._PROPERTY_TO_API_FIELD["location"])
+
+ @property
+ def view_query(self):
+ """Union[str, None]: SQL query defining the table as a view (defaults
+ to :data:`None`).
+
+ By default, the query is treated as Standard SQL. To use Legacy
+ SQL, set :attr:`view_use_legacy_sql` to :data:`True`.
+
+ Raises:
+ ValueError: For invalid value types.
+ """
+ api_field = self._PROPERTY_TO_API_FIELD["view_query"]
+ return _helpers._get_sub_prop(self._properties, [api_field, "query"])
+
+ @view_query.setter
+ def view_query(self, value):
+ if not isinstance(value, str):
+ raise ValueError("Pass a string")
+
+ api_field = self._PROPERTY_TO_API_FIELD["view_query"]
+ _helpers._set_sub_prop(self._properties, [api_field, "query"], value)
+ view = self._properties[api_field]
+ # The service defaults useLegacySql to True, but this
+ # client uses Standard SQL by default.
+ if view.get("useLegacySql") is None:
+ view["useLegacySql"] = False
+
+ @view_query.deleter
+ def view_query(self):
+ """Delete SQL query defining the table as a view."""
+ self._properties.pop(self._PROPERTY_TO_API_FIELD["view_query"], None)
+
+ view_use_legacy_sql = property(_view_use_legacy_sql_getter)
+
+ @view_use_legacy_sql.setter # type: ignore # (redefinition from above)
+ def view_use_legacy_sql(self, value):
+ if not isinstance(value, bool):
+ raise ValueError("Pass a boolean")
+
+ api_field = self._PROPERTY_TO_API_FIELD["view_query"]
+ if self._properties.get(api_field) is None:
+ self._properties[api_field] = {}
+ self._properties[api_field]["useLegacySql"] = value
+
+ @property
+ def mview_query(self):
+ """Optional[str]: SQL query defining the table as a materialized
+ view (defaults to :data:`None`).
+ """
+ api_field = self._PROPERTY_TO_API_FIELD["mview_query"]
+ return _helpers._get_sub_prop(self._properties, [api_field, "query"])
+
+ @mview_query.setter
+ def mview_query(self, value):
+ api_field = self._PROPERTY_TO_API_FIELD["mview_query"]
+ _helpers._set_sub_prop(self._properties, [api_field, "query"], str(value))
+
+ @mview_query.deleter
+ def mview_query(self):
+ """Delete SQL query defining the table as a materialized view."""
+ self._properties.pop(self._PROPERTY_TO_API_FIELD["mview_query"], None)
+
+ @property
+ def mview_last_refresh_time(self):
+ """Optional[datetime.datetime]: Datetime at which the materialized view was last
+ refreshed (:data:`None` until set from the server).
+ """
+ refresh_time = _helpers._get_sub_prop(
+ self._properties, self._PROPERTY_TO_API_FIELD["mview_last_refresh_time"]
+ )
+ if refresh_time is not None:
+ # refresh_time will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000 * int(refresh_time)
+ )
+
+ @property
+ def mview_enable_refresh(self):
+ """Optional[bool]: Enable automatic refresh of the materialized view
+ when the base table is updated. The default value is :data:`True`.
+ """
+ api_field = self._PROPERTY_TO_API_FIELD["mview_enable_refresh"]
+ return _helpers._get_sub_prop(self._properties, [api_field, "enableRefresh"])
+
+ @mview_enable_refresh.setter
+ def mview_enable_refresh(self, value):
+ api_field = self._PROPERTY_TO_API_FIELD["mview_enable_refresh"]
+ return _helpers._set_sub_prop(
+ self._properties, [api_field, "enableRefresh"], value
+ )
+
+ @property
+ def mview_refresh_interval(self):
+ """Optional[datetime.timedelta]: The maximum frequency at which this
+ materialized view will be refreshed. The default value is 1800000
+ milliseconds (30 minutes).
+ """
+ api_field = self._PROPERTY_TO_API_FIELD["mview_refresh_interval"]
+ refresh_interval = _helpers._get_sub_prop(
+ self._properties, [api_field, "refreshIntervalMs"]
+ )
+ if refresh_interval is not None:
+ return datetime.timedelta(milliseconds=int(refresh_interval))
+
+ @mview_refresh_interval.setter
+ def mview_refresh_interval(self, value):
+ if value is None:
+ refresh_interval_ms = None
+ else:
+ refresh_interval_ms = str(value // datetime.timedelta(milliseconds=1))
+
+ api_field = self._PROPERTY_TO_API_FIELD["mview_refresh_interval"]
+ _helpers._set_sub_prop(
+ self._properties,
+ [api_field, "refreshIntervalMs"],
+ refresh_interval_ms,
+ )
+
+ @property
+ def streaming_buffer(self):
+ """google.cloud.bigquery.StreamingBuffer: Information about a table's
+ streaming buffer.
+ """
+ sb = self._properties.get(self._PROPERTY_TO_API_FIELD["streaming_buffer"])
+ if sb is not None:
+ return StreamingBuffer(sb)
+
+ @property
+ def external_data_configuration(self):
+ """Union[google.cloud.bigquery.ExternalConfig, None]: Configuration for
+ an external data source (defaults to :data:`None`).
+
+ Raises:
+ ValueError: For invalid value types.
+ """
+ prop = self._properties.get(
+ self._PROPERTY_TO_API_FIELD["external_data_configuration"]
+ )
+ if prop is not None:
+ prop = ExternalConfig.from_api_repr(prop)
+ return prop
+
+ @external_data_configuration.setter
+ def external_data_configuration(self, value):
+ if not (value is None or isinstance(value, ExternalConfig)):
+ raise ValueError("Pass an ExternalConfig or None")
+ api_repr = value
+ if value is not None:
+ api_repr = value.to_api_repr()
+ self._properties[
+ self._PROPERTY_TO_API_FIELD["external_data_configuration"]
+ ] = api_repr
+
+ @property
+ def snapshot_definition(self) -> Optional["SnapshotDefinition"]:
+ """Information about the snapshot. This value is set via snapshot creation.
+
+ See: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.snapshot_definition
+ """
+ snapshot_info = self._properties.get(
+ self._PROPERTY_TO_API_FIELD["snapshot_definition"]
+ )
+ if snapshot_info is not None:
+ snapshot_info = SnapshotDefinition(snapshot_info)
+ return snapshot_info
+
+ @property
+ def clone_definition(self) -> Optional["CloneDefinition"]:
+ """Information about the clone. This value is set via clone creation.
+
+ See: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.clone_definition
+ """
+ clone_info = self._properties.get(
+ self._PROPERTY_TO_API_FIELD["clone_definition"]
+ )
+ if clone_info is not None:
+ clone_info = CloneDefinition(clone_info)
+ return clone_info
+
+ @property
+ def table_constraints(self) -> Optional["TableConstraints"]:
+ """Tables Primary Key and Foreign Key information."""
+ table_constraints = self._properties.get(
+ self._PROPERTY_TO_API_FIELD["table_constraints"]
+ )
+ if table_constraints is not None:
+ table_constraints = TableConstraints.from_api_repr(table_constraints)
+ return table_constraints
+
+ @classmethod
+ def from_string(cls, full_table_id: str) -> "Table":
+ """Construct a table from fully-qualified table ID.
+
+ Args:
+ full_table_id (str):
+ A fully-qualified table ID in standard SQL format. Must
+ included a project ID, dataset ID, and table ID, each
+ separated by ``.``.
+
+ Returns:
+ Table: Table parsed from ``full_table_id``.
+
+ Examples:
+ >>> Table.from_string('my-project.mydataset.mytable')
+ Table(TableRef...(D...('my-project', 'mydataset'), 'mytable'))
+
+ Raises:
+ ValueError:
+ If ``full_table_id`` is not a fully-qualified table ID in
+ standard SQL format.
+ """
+ return cls(TableReference.from_string(full_table_id))
+
+ @classmethod
+ def from_api_repr(cls, resource: dict) -> "Table":
+ """Factory: construct a table given its API representation
+
+ Args:
+ resource (Dict[str, object]):
+ Table resource representation from the API
+
+ Returns:
+ google.cloud.bigquery.table.Table: Table parsed from ``resource``.
+
+ Raises:
+ KeyError:
+ If the ``resource`` lacks the key ``'tableReference'``, or if
+ the ``dict`` stored within the key ``'tableReference'`` lacks
+ the keys ``'tableId'``, ``'projectId'``, or ``'datasetId'``.
+ """
+ from google.cloud.bigquery import dataset
+
+ if (
+ "tableReference" not in resource
+ or "tableId" not in resource["tableReference"]
+ ):
+ raise KeyError(
+ "Resource lacks required identity information:"
+ '["tableReference"]["tableId"]'
+ )
+ project_id = _helpers._get_sub_prop(
+ resource, cls._PROPERTY_TO_API_FIELD["project"]
+ )
+ table_id = _helpers._get_sub_prop(
+ resource, cls._PROPERTY_TO_API_FIELD["table_id"]
+ )
+ dataset_id = _helpers._get_sub_prop(
+ resource, cls._PROPERTY_TO_API_FIELD["dataset_id"]
+ )
+ dataset_ref = dataset.DatasetReference(project_id, dataset_id)
+
+ table = cls(dataset_ref.table(table_id))
+ table._properties = resource
+
+ return table
+
+ def to_api_repr(self) -> dict:
+ """Constructs the API resource of this table
+
+ Returns:
+ Dict[str, object]: Table represented as an API resource
+ """
+ return copy.deepcopy(self._properties)
+
+ def to_bqstorage(self) -> str:
+ """Construct a BigQuery Storage API representation of this table.
+
+ Returns:
+ str: A reference to this table in the BigQuery Storage API.
+ """
+ return self.reference.to_bqstorage()
+
+ def _build_resource(self, filter_fields):
+ """Generate a resource for ``update``."""
+ return _helpers._build_resource_from_properties(self, filter_fields)
+
+ def __repr__(self):
+ return "Table({})".format(repr(self.reference))
+
+ def __str__(self):
+ return f"{self.project}.{self.dataset_id}.{self.table_id}"
+
+
+class TableListItem(_TableBase):
+ """A read-only table resource from a list operation.
+
+ For performance reasons, the BigQuery API only includes some of the table
+ properties when listing tables. Notably,
+ :attr:`~google.cloud.bigquery.table.Table.schema` and
+ :attr:`~google.cloud.bigquery.table.Table.num_rows` are missing.
+
+ For a full list of the properties that the BigQuery API returns, see the
+ `REST documentation for tables.list
+ `_.
+
+
+ Args:
+ resource (Dict[str, object]):
+ A table-like resource object from a table list response. A
+ ``tableReference`` property is required.
+
+ Raises:
+ ValueError:
+ If ``tableReference`` or one of its required members is missing
+ from ``resource``.
+ """
+
+ def __init__(self, resource):
+ if "tableReference" not in resource:
+ raise ValueError("resource must contain a tableReference value")
+ if "projectId" not in resource["tableReference"]:
+ raise ValueError(
+ "resource['tableReference'] must contain a projectId value"
+ )
+ if "datasetId" not in resource["tableReference"]:
+ raise ValueError(
+ "resource['tableReference'] must contain a datasetId value"
+ )
+ if "tableId" not in resource["tableReference"]:
+ raise ValueError("resource['tableReference'] must contain a tableId value")
+
+ self._properties = resource
+
+ @property
+ def created(self):
+ """Union[datetime.datetime, None]: Datetime at which the table was
+ created (:data:`None` until set from the server).
+ """
+ creation_time = self._properties.get("creationTime")
+ if creation_time is not None:
+ # creation_time will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(creation_time)
+ )
+
+ @property
+ def expires(self):
+ """Union[datetime.datetime, None]: Datetime at which the table will be
+ deleted.
+ """
+ expiration_time = self._properties.get("expirationTime")
+ if expiration_time is not None:
+ # expiration_time will be in milliseconds.
+ return google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * float(expiration_time)
+ )
+
+ reference = property(_reference_getter)
+
+ @property
+ def labels(self):
+ """Dict[str, str]: Labels for the table.
+
+ This method always returns a dict. To change a table's labels,
+ modify the dict, then call ``Client.update_table``. To delete a
+ label, set its value to :data:`None` before updating.
+ """
+ return self._properties.setdefault("labels", {})
+
+ @property
+ def full_table_id(self):
+ """Union[str, None]: ID for the table (:data:`None` until set from the
+ server).
+
+ In the format ``project_id:dataset_id.table_id``.
+ """
+ return self._properties.get("id")
+
+ @property
+ def table_type(self):
+ """Union[str, None]: The type of the table (:data:`None` until set from
+ the server).
+
+ Possible values are ``'TABLE'``, ``'VIEW'``, or ``'EXTERNAL'``.
+ """
+ return self._properties.get("type")
+
+ @property
+ def time_partitioning(self):
+ """google.cloud.bigquery.table.TimePartitioning: Configures time-based
+ partitioning for a table.
+ """
+ prop = self._properties.get("timePartitioning")
+ if prop is not None:
+ return TimePartitioning.from_api_repr(prop)
+
+ @property
+ def partitioning_type(self):
+ """Union[str, None]: Time partitioning of the table if it is
+ partitioned (Defaults to :data:`None`).
+ """
+ warnings.warn(
+ "This method will be deprecated in future versions. Please use "
+ "TableListItem.time_partitioning.type_ instead.",
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+ if self.time_partitioning is not None:
+ return self.time_partitioning.type_
+
+ @property
+ def partition_expiration(self):
+ """Union[int, None]: Expiration time in milliseconds for a partition.
+
+ If this property is set and :attr:`type_` is not set, :attr:`type_`
+ will default to :attr:`TimePartitioningType.DAY`.
+ """
+ warnings.warn(
+ "This method will be deprecated in future versions. Please use "
+ "TableListItem.time_partitioning.expiration_ms instead.",
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+ if self.time_partitioning is not None:
+ return self.time_partitioning.expiration_ms
+
+ @property
+ def friendly_name(self):
+ """Union[str, None]: Title of the table (defaults to :data:`None`)."""
+ return self._properties.get("friendlyName")
+
+ view_use_legacy_sql = property(_view_use_legacy_sql_getter)
+
+ @property
+ def clustering_fields(self):
+ """Union[List[str], None]: Fields defining clustering for the table
+
+ (Defaults to :data:`None`).
+
+ Clustering fields are immutable after table creation.
+
+ .. note::
+
+ BigQuery supports clustering for both partitioned and
+ non-partitioned tables.
+ """
+ prop = self._properties.get("clustering")
+ if prop is not None:
+ return list(prop.get("fields", ()))
+
+ @classmethod
+ def from_string(cls, full_table_id: str) -> "TableListItem":
+ """Construct a table from fully-qualified table ID.
+
+ Args:
+ full_table_id (str):
+ A fully-qualified table ID in standard SQL format. Must
+ included a project ID, dataset ID, and table ID, each
+ separated by ``.``.
+
+ Returns:
+ Table: Table parsed from ``full_table_id``.
+
+ Examples:
+ >>> Table.from_string('my-project.mydataset.mytable')
+ Table(TableRef...(D...('my-project', 'mydataset'), 'mytable'))
+
+ Raises:
+ ValueError:
+ If ``full_table_id`` is not a fully-qualified table ID in
+ standard SQL format.
+ """
+ return cls(
+ {"tableReference": TableReference.from_string(full_table_id).to_api_repr()}
+ )
+
+ def to_bqstorage(self) -> str:
+ """Construct a BigQuery Storage API representation of this table.
+
+ Returns:
+ str: A reference to this table in the BigQuery Storage API.
+ """
+ return self.reference.to_bqstorage()
+
+ def to_api_repr(self) -> dict:
+ """Constructs the API resource of this table
+
+ Returns:
+ Dict[str, object]: Table represented as an API resource
+ """
+ return copy.deepcopy(self._properties)
+
+
+def _row_from_mapping(mapping, schema):
+ """Convert a mapping to a row tuple using the schema.
+
+ Args:
+ mapping (Dict[str, object])
+ Mapping of row data: must contain keys for all required fields in
+ the schema. Keys which do not correspond to a field in the schema
+ are ignored.
+ schema (List[google.cloud.bigquery.schema.SchemaField]):
+ The schema of the table destination for the rows
+
+ Returns:
+ Tuple[object]:
+ Tuple whose elements are ordered according to the schema.
+
+ Raises:
+ ValueError: If schema is empty.
+ """
+ if len(schema) == 0:
+ raise ValueError(_TABLE_HAS_NO_SCHEMA)
+
+ row = []
+ for field in schema:
+ if field.mode == "REQUIRED":
+ row.append(mapping[field.name])
+ elif field.mode == "REPEATED":
+ row.append(mapping.get(field.name, ()))
+ elif field.mode == "NULLABLE":
+ row.append(mapping.get(field.name))
+ else:
+ raise ValueError("Unknown field mode: {}".format(field.mode))
+ return tuple(row)
+
+
+class StreamingBuffer(object):
+ """Information about a table's streaming buffer.
+
+ See https://cloud.google.com/bigquery/streaming-data-into-bigquery.
+
+ Args:
+ resource (Dict[str, object]):
+ streaming buffer representation returned from the API
+ """
+
+ def __init__(self, resource):
+ self.estimated_bytes = None
+ if "estimatedBytes" in resource:
+ self.estimated_bytes = int(resource["estimatedBytes"])
+ self.estimated_rows = None
+ if "estimatedRows" in resource:
+ self.estimated_rows = int(resource["estimatedRows"])
+ self.oldest_entry_time = None
+ if "oldestEntryTime" in resource:
+ self.oldest_entry_time = google.cloud._helpers._datetime_from_microseconds(
+ 1000.0 * int(resource["oldestEntryTime"])
+ )
+
+
+class SnapshotDefinition:
+ """Information about base table and snapshot time of the snapshot.
+
+ See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#snapshotdefinition
+
+ Args:
+ resource: Snapshot definition representation returned from the API.
+ """
+
+ def __init__(self, resource: Dict[str, Any]):
+ self.base_table_reference = None
+ if "baseTableReference" in resource:
+ self.base_table_reference = TableReference.from_api_repr(
+ resource["baseTableReference"]
+ )
+
+ self.snapshot_time = None
+ if "snapshotTime" in resource:
+ self.snapshot_time = google.cloud._helpers._rfc3339_to_datetime(
+ resource["snapshotTime"]
+ )
+
+
+class CloneDefinition:
+ """Information about base table and clone time of the clone.
+
+ See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clonedefinition
+
+ Args:
+ resource: Clone definition representation returned from the API.
+ """
+
+ def __init__(self, resource: Dict[str, Any]):
+ self.base_table_reference = None
+ if "baseTableReference" in resource:
+ self.base_table_reference = TableReference.from_api_repr(
+ resource["baseTableReference"]
+ )
+
+ self.clone_time = None
+ if "cloneTime" in resource:
+ self.clone_time = google.cloud._helpers._rfc3339_to_datetime(
+ resource["cloneTime"]
+ )
+
+
+class Row(object):
+ """A BigQuery row.
+
+ Values can be accessed by position (index), by key like a dict,
+ or as properties.
+
+ Args:
+ values (Sequence[object]): The row values
+ field_to_index (Dict[str, int]):
+ A mapping from schema field names to indexes
+ """
+
+ # Choose unusual field names to try to avoid conflict with schema fields.
+ __slots__ = ("_xxx_values", "_xxx_field_to_index")
+
+ def __init__(self, values, field_to_index) -> None:
+ self._xxx_values = values
+ self._xxx_field_to_index = field_to_index
+
+ def values(self):
+ """Return the values included in this row.
+
+ Returns:
+ Sequence[object]: A sequence of length ``len(row)``.
+ """
+ return copy.deepcopy(self._xxx_values)
+
+ def keys(self) -> Iterable[str]:
+ """Return the keys for using a row as a dict.
+
+ Returns:
+ Iterable[str]: The keys corresponding to the columns of a row
+
+ Examples:
+
+ >>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).keys())
+ ['x', 'y']
+ """
+ return self._xxx_field_to_index.keys()
+
+ def items(self) -> Iterable[Tuple[str, Any]]:
+ """Return items as ``(key, value)`` pairs.
+
+ Returns:
+ Iterable[Tuple[str, object]]:
+ The ``(key, value)`` pairs representing this row.
+
+ Examples:
+
+ >>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).items())
+ [('x', 'a'), ('y', 'b')]
+ """
+ for key, index in self._xxx_field_to_index.items():
+ yield (key, copy.deepcopy(self._xxx_values[index]))
+
+ def get(self, key: str, default: Any = None) -> Any:
+ """Return a value for key, with a default value if it does not exist.
+
+ Args:
+ key (str): The key of the column to access
+ default (object):
+ The default value to use if the key does not exist. (Defaults
+ to :data:`None`.)
+
+ Returns:
+ object:
+ The value associated with the provided key, or a default value.
+
+ Examples:
+ When the key exists, the value associated with it is returned.
+
+ >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x')
+ 'a'
+
+ The default value is :data:`None` when the key does not exist.
+
+ >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z')
+ None
+
+ The default value can be overridden with the ``default`` parameter.
+
+ >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '')
+ ''
+
+ >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '')
+ ''
+ """
+ index = self._xxx_field_to_index.get(key)
+ if index is None:
+ return default
+ return self._xxx_values[index]
+
+ def __getattr__(self, name):
+ value = self._xxx_field_to_index.get(name)
+ if value is None:
+ raise AttributeError("no row field {!r}".format(name))
+ return self._xxx_values[value]
+
+ def __len__(self):
+ return len(self._xxx_values)
+
+ def __getitem__(self, key):
+ if isinstance(key, str):
+ value = self._xxx_field_to_index.get(key)
+ if value is None:
+ raise KeyError("no row field {!r}".format(key))
+ key = value
+ return self._xxx_values[key]
+
+ def __eq__(self, other):
+ if not isinstance(other, Row):
+ return NotImplemented
+ return (
+ self._xxx_values == other._xxx_values
+ and self._xxx_field_to_index == other._xxx_field_to_index
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ # sort field dict by value, for determinism
+ items = sorted(self._xxx_field_to_index.items(), key=operator.itemgetter(1))
+ f2i = "{" + ", ".join("%r: %d" % item for item in items) + "}"
+ return "Row({}, {})".format(self._xxx_values, f2i)
+
+
+class _NoopProgressBarQueue(object):
+ """A fake Queue class that does nothing.
+
+ This is used when there is no progress bar to send updates to.
+ """
+
+ def put_nowait(self, item):
+ """Don't actually do anything with the item."""
+
+
+class RowIterator(HTTPIterator):
+ """A class for iterating through HTTP/JSON API row list responses.
+
+ Args:
+ client (Optional[google.cloud.bigquery.Client]):
+ The API client instance. This should always be non-`None`, except for
+ subclasses that do not use it, namely the ``_EmptyRowIterator``.
+ api_request (Callable[google.cloud._http.JSONConnection.api_request]):
+ The function to use to make API requests.
+ path (str): The method path to query for the list of items.
+ schema (Sequence[Union[ \
+ :class:`~google.cloud.bigquery.schema.SchemaField`, \
+ Mapping[str, Any] \
+ ]]):
+ The table's schema. If any item is a mapping, its content must be
+ compatible with
+ :meth:`~google.cloud.bigquery.schema.SchemaField.from_api_repr`.
+ page_token (str): A token identifying a page in a result set to start
+ fetching results from.
+ max_results (Optional[int]): The maximum number of results to fetch.
+ page_size (Optional[int]): The maximum number of rows in each page
+ of results from this request. Non-positive values are ignored.
+ Defaults to a sensible value set by the API.
+ extra_params (Optional[Dict[str, object]]):
+ Extra query string parameters for the API call.
+ table (Optional[Union[ \
+ google.cloud.bigquery.table.Table, \
+ google.cloud.bigquery.table.TableReference, \
+ ]]):
+ The table which these rows belong to, or a reference to it. Used to
+ call the BigQuery Storage API to fetch rows.
+ selected_fields (Optional[Sequence[google.cloud.bigquery.schema.SchemaField]]):
+ A subset of columns to select from this table.
+ total_rows (Optional[int]):
+ Total number of rows in the table.
+ first_page_response (Optional[dict]):
+ API response for the first page of results. These are returned when
+ the first page is requested.
+ """
+
+ def __init__(
+ self,
+ client,
+ api_request,
+ path,
+ schema,
+ page_token=None,
+ max_results=None,
+ page_size=None,
+ extra_params=None,
+ table=None,
+ selected_fields=None,
+ total_rows=None,
+ first_page_response=None,
+ location: Optional[str] = None,
+ job_id: Optional[str] = None,
+ query_id: Optional[str] = None,
+ project: Optional[str] = None,
+ num_dml_affected_rows: Optional[int] = None,
+ ):
+ super(RowIterator, self).__init__(
+ client,
+ api_request,
+ path,
+ item_to_value=_item_to_row,
+ items_key="rows",
+ page_token=page_token,
+ max_results=max_results,
+ extra_params=extra_params,
+ page_start=_rows_page_start,
+ next_token="pageToken",
+ )
+ schema = _to_schema_fields(schema)
+ self._field_to_index = _helpers._field_to_index_mapping(schema)
+ self._page_size = page_size
+ self._preserve_order = False
+ self._schema = schema
+ self._selected_fields = selected_fields
+ self._table = table
+ self._total_rows = total_rows
+ self._first_page_response = first_page_response
+ self._location = location
+ self._job_id = job_id
+ self._query_id = query_id
+ self._project = project
+ self._num_dml_affected_rows = num_dml_affected_rows
+
+ @property
+ def _billing_project(self) -> Optional[str]:
+ """GCP Project ID where BQ API will bill to (if applicable)."""
+ client = self.client
+ return client.project if client is not None else None
+
+ @property
+ def job_id(self) -> Optional[str]:
+ """ID of the query job (if applicable).
+
+ To get the job metadata, call
+ ``job = client.get_job(rows.job_id, location=rows.location)``.
+ """
+ return self._job_id
+
+ @property
+ def location(self) -> Optional[str]:
+ """Location where the query executed (if applicable).
+
+ See: https://cloud.google.com/bigquery/docs/locations
+ """
+ return self._location
+
+ @property
+ def num_dml_affected_rows(self) -> Optional[int]:
+ """If this RowIterator is the result of a DML query, the number of
+ rows that were affected.
+
+ See:
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#body.QueryResponse.FIELDS.num_dml_affected_rows
+ """
+ return self._num_dml_affected_rows
+
+ @property
+ def project(self) -> Optional[str]:
+ """GCP Project ID where these rows are read from."""
+ return self._project
+
+ @property
+ def query_id(self) -> Optional[str]:
+ """[Preview] ID of a completed query.
+
+ This ID is auto-generated and not guaranteed to be populated.
+ """
+ return self._query_id
+
+ def _is_almost_completely_cached(self):
+ """Check if all results are completely cached.
+
+ This is useful to know, because we can avoid alternative download
+ mechanisms.
+ """
+ if (
+ not hasattr(self, "_first_page_response")
+ or self._first_page_response is None
+ ):
+ return False
+
+ total_cached_rows = len(self._first_page_response.get(self._items_key, []))
+ if self.max_results is not None and total_cached_rows >= self.max_results:
+ return True
+
+ if (
+ self.next_page_token is None
+ and self._first_page_response.get(self._next_token) is None
+ ):
+ return True
+
+ if self._total_rows is not None:
+ almost_completely = self._total_rows * ALMOST_COMPLETELY_CACHED_RATIO
+ if total_cached_rows >= almost_completely:
+ return True
+
+ return False
+
+ def _should_use_bqstorage(self, bqstorage_client, create_bqstorage_client):
+ """Returns True if the BigQuery Storage API can be used.
+
+ Returns:
+ bool
+ True if the BigQuery Storage client can be used or created.
+ """
+ using_bqstorage_api = bqstorage_client or create_bqstorage_client
+ if not using_bqstorage_api:
+ return False
+
+ if self._table is None:
+ return False
+
+ # The developer has already started paging through results if
+ # next_page_token is set.
+ if hasattr(self, "next_page_token") and self.next_page_token is not None:
+ return False
+
+ if self._is_almost_completely_cached():
+ return False
+
+ if self.max_results is not None:
+ return False
+
+ try:
+ _versions_helpers.BQ_STORAGE_VERSIONS.try_import(raise_if_error=True)
+ except bq_exceptions.BigQueryStorageNotFoundError:
+ warnings.warn(
+ "BigQuery Storage module not found, fetch data with the REST "
+ "endpoint instead."
+ )
+ return False
+ except bq_exceptions.LegacyBigQueryStorageError as exc:
+ warnings.warn(str(exc))
+ return False
+
+ return True
+
+ def _get_next_page_response(self):
+ """Requests the next page from the path provided.
+
+ Returns:
+ Dict[str, object]:
+ The parsed JSON response of the next page's contents.
+ """
+ if self._first_page_response:
+ rows = self._first_page_response.get(self._items_key, [])[
+ : self.max_results
+ ]
+ response = {
+ self._items_key: rows,
+ }
+ if self._next_token in self._first_page_response:
+ response[self._next_token] = self._first_page_response[self._next_token]
+
+ self._first_page_response = None
+ return response
+
+ params = self._get_query_params()
+ if self._page_size is not None:
+ if self.page_number and "startIndex" in params:
+ del params["startIndex"]
+
+ return self.api_request(
+ method=self._HTTP_METHOD, path=self.path, query_params=params
+ )
+
+ @property
+ def schema(self):
+ """List[google.cloud.bigquery.schema.SchemaField]: The subset of
+ columns to be read from the table."""
+ return list(self._schema)
+
+ @property
+ def total_rows(self):
+ """int: The total number of rows in the table or query results."""
+ return self._total_rows
+
+ def _maybe_warn_max_results(
+ self,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"],
+ ):
+ """Issue a warning if BQ Storage client is not ``None`` with ``max_results`` set.
+
+ This helper method should be used directly in the relevant top-level public
+ methods, so that the warning is issued for the correct line in user code.
+
+ Args:
+ bqstorage_client:
+ The BigQuery Storage client intended to use for downloading result rows.
+ """
+ if bqstorage_client is not None and self.max_results is not None:
+ warnings.warn(
+ "Cannot use bqstorage_client if max_results is set, "
+ "reverting to fetching data with the REST endpoint.",
+ stacklevel=3,
+ )
+
+ def _to_page_iterable(
+ self, bqstorage_download, tabledata_list_download, bqstorage_client=None
+ ):
+ if not self._should_use_bqstorage(bqstorage_client, False):
+ bqstorage_client = None
+
+ result_pages = (
+ bqstorage_download()
+ if bqstorage_client is not None
+ else tabledata_list_download()
+ )
+ yield from result_pages
+
+ def to_arrow_iterable(
+ self,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ max_queue_size: int = _pandas_helpers._MAX_QUEUE_SIZE_DEFAULT, # type: ignore
+ ) -> Iterator["pyarrow.RecordBatch"]:
+ """[Beta] Create an iterable of class:`pyarrow.RecordBatch`, to process the table as a stream.
+
+ Args:
+ bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
+ A BigQuery Storage API client. If supplied, use the faster
+ BigQuery Storage API to fetch rows from BigQuery.
+
+ This method requires the ``pyarrow`` and
+ ``google-cloud-bigquery-storage`` libraries.
+
+ This method only exposes a subset of the capabilities of the
+ BigQuery Storage API. For full access to all features
+ (projections, filters, snapshots) use the Storage API directly.
+
+ max_queue_size (Optional[int]):
+ The maximum number of result pages to hold in the internal queue when
+ streaming query results over the BigQuery Storage API. Ignored if
+ Storage API is not used.
+
+ By default, the max queue size is set to the number of BQ Storage streams
+ created by the server. If ``max_queue_size`` is :data:`None`, the queue
+ size is infinite.
+
+ Returns:
+ pyarrow.RecordBatch:
+ A generator of :class:`~pyarrow.RecordBatch`.
+
+ .. versionadded:: 2.31.0
+ """
+ self._maybe_warn_max_results(bqstorage_client)
+
+ bqstorage_download = functools.partial(
+ _pandas_helpers.download_arrow_bqstorage,
+ self._billing_project,
+ self._table,
+ bqstorage_client,
+ preserve_order=self._preserve_order,
+ selected_fields=self._selected_fields,
+ max_queue_size=max_queue_size,
+ )
+ tabledata_list_download = functools.partial(
+ _pandas_helpers.download_arrow_row_iterator, iter(self.pages), self.schema
+ )
+ return self._to_page_iterable(
+ bqstorage_download,
+ tabledata_list_download,
+ bqstorage_client=bqstorage_client,
+ )
+
+ # If changing the signature of this method, make sure to apply the same
+ # changes to job.QueryJob.to_arrow()
+ def to_arrow(
+ self,
+ progress_bar_type: Optional[str] = None,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ create_bqstorage_client: bool = True,
+ ) -> "pyarrow.Table":
+ """[Beta] Create a class:`pyarrow.Table` by loading all pages of a
+ table or query.
+
+ Args:
+ progress_bar_type (Optional[str]):
+ If set, use the `tqdm `_ library to
+ display a progress bar while the data downloads. Install the
+ ``tqdm`` package to use this feature.
+
+ Possible values of ``progress_bar_type`` include:
+
+ ``None``
+ No progress bar.
+ ``'tqdm'``
+ Use the :func:`tqdm.tqdm` function to print a progress bar
+ to :data:`sys.stdout`.
+ ``'tqdm_notebook'``
+ Use the :func:`tqdm.notebook.tqdm` function to display a
+ progress bar as a Jupyter notebook widget.
+ ``'tqdm_gui'``
+ Use the :func:`tqdm.tqdm_gui` function to display a
+ progress bar as a graphical dialog box.
+ bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
+ A BigQuery Storage API client. If supplied, use the faster BigQuery
+ Storage API to fetch rows from BigQuery. This API is a billable API.
+
+ This method requires ``google-cloud-bigquery-storage`` library.
+
+ This method only exposes a subset of the capabilities of the
+ BigQuery Storage API. For full access to all features
+ (projections, filters, snapshots) use the Storage API directly.
+ create_bqstorage_client (Optional[bool]):
+ If ``True`` (default), create a BigQuery Storage API client using
+ the default API settings. The BigQuery Storage API is a faster way
+ to fetch rows from BigQuery. See the ``bqstorage_client`` parameter
+ for more information.
+
+ This argument does nothing if ``bqstorage_client`` is supplied.
+
+ .. versionadded:: 1.24.0
+
+ Returns:
+ pyarrow.Table
+ A :class:`pyarrow.Table` populated with row data and column
+ headers from the query results. The column headers are derived
+ from the destination table's schema.
+
+ Raises:
+ ValueError: If the :mod:`pyarrow` library cannot be imported.
+
+
+ .. versionadded:: 1.17.0
+ """
+ if pyarrow is None:
+ raise ValueError(_NO_PYARROW_ERROR)
+
+ self._maybe_warn_max_results(bqstorage_client)
+
+ if not self._should_use_bqstorage(bqstorage_client, create_bqstorage_client):
+ create_bqstorage_client = False
+ bqstorage_client = None
+
+ owns_bqstorage_client = False
+ if not bqstorage_client and create_bqstorage_client:
+ bqstorage_client = self.client._ensure_bqstorage_client()
+ owns_bqstorage_client = bqstorage_client is not None
+
+ try:
+ progress_bar = get_progress_bar(
+ progress_bar_type, "Downloading", self.total_rows, "rows"
+ )
+
+ record_batches = []
+ for record_batch in self.to_arrow_iterable(
+ bqstorage_client=bqstorage_client
+ ):
+ record_batches.append(record_batch)
+
+ if progress_bar is not None:
+ # In some cases, the number of total rows is not populated
+ # until the first page of rows is fetched. Update the
+ # progress bar's total to keep an accurate count.
+ progress_bar.total = progress_bar.total or self.total_rows
+ progress_bar.update(record_batch.num_rows)
+
+ if progress_bar is not None:
+ # Indicate that the download has finished.
+ progress_bar.close()
+ finally:
+ if owns_bqstorage_client:
+ bqstorage_client._transport.grpc_channel.close() # type: ignore
+
+ if record_batches and bqstorage_client is not None:
+ return pyarrow.Table.from_batches(record_batches)
+ else:
+ # No records (not record_batches), use schema based on BigQuery schema
+ # **or**
+ # we used the REST API (bqstorage_client is None),
+ # which doesn't add arrow extension metadata, so we let
+ # `bq_to_arrow_schema` do it.
+ arrow_schema = _pandas_helpers.bq_to_arrow_schema(self._schema)
+ return pyarrow.Table.from_batches(record_batches, schema=arrow_schema)
+
+ def to_dataframe_iterable(
+ self,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ dtypes: Optional[Dict[str, Any]] = None,
+ max_queue_size: int = _pandas_helpers._MAX_QUEUE_SIZE_DEFAULT, # type: ignore
+ ) -> "pandas.DataFrame":
+ """Create an iterable of pandas DataFrames, to process the table as a stream.
+
+ Args:
+ bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
+ A BigQuery Storage API client. If supplied, use the faster
+ BigQuery Storage API to fetch rows from BigQuery.
+
+ This method requires ``google-cloud-bigquery-storage`` library.
+
+ This method only exposes a subset of the capabilities of the
+ BigQuery Storage API. For full access to all features
+ (projections, filters, snapshots) use the Storage API directly.
+
+ dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
+ A dictionary of column names pandas ``dtype``s. The provided
+ ``dtype`` is used when constructing the series for the column
+ specified. Otherwise, the default pandas behavior is used.
+
+ max_queue_size (Optional[int]):
+ The maximum number of result pages to hold in the internal queue when
+ streaming query results over the BigQuery Storage API. Ignored if
+ Storage API is not used.
+
+ By default, the max queue size is set to the number of BQ Storage streams
+ created by the server. If ``max_queue_size`` is :data:`None`, the queue
+ size is infinite.
+
+ .. versionadded:: 2.14.0
+
+ Returns:
+ pandas.DataFrame:
+ A generator of :class:`~pandas.DataFrame`.
+
+ Raises:
+ ValueError:
+ If the :mod:`pandas` library cannot be imported.
+ """
+ _pandas_helpers.verify_pandas_imports()
+
+ if dtypes is None:
+ dtypes = {}
+
+ self._maybe_warn_max_results(bqstorage_client)
+
+ column_names = [field.name for field in self._schema]
+ bqstorage_download = functools.partial(
+ _pandas_helpers.download_dataframe_bqstorage,
+ self._billing_project,
+ self._table,
+ bqstorage_client,
+ column_names,
+ dtypes,
+ preserve_order=self._preserve_order,
+ selected_fields=self._selected_fields,
+ max_queue_size=max_queue_size,
+ )
+ tabledata_list_download = functools.partial(
+ _pandas_helpers.download_dataframe_row_iterator,
+ iter(self.pages),
+ self.schema,
+ dtypes,
+ )
+ return self._to_page_iterable(
+ bqstorage_download,
+ tabledata_list_download,
+ bqstorage_client=bqstorage_client,
+ )
+
+ # If changing the signature of this method, make sure to apply the same
+ # changes to job.QueryJob.to_dataframe()
+ def to_dataframe(
+ self,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ dtypes: Optional[Dict[str, Any]] = None,
+ progress_bar_type: Optional[str] = None,
+ create_bqstorage_client: bool = True,
+ geography_as_object: bool = False,
+ bool_dtype: Union[Any, None] = DefaultPandasDTypes.BOOL_DTYPE,
+ int_dtype: Union[Any, None] = DefaultPandasDTypes.INT_DTYPE,
+ float_dtype: Union[Any, None] = None,
+ string_dtype: Union[Any, None] = None,
+ date_dtype: Union[Any, None] = DefaultPandasDTypes.DATE_DTYPE,
+ datetime_dtype: Union[Any, None] = None,
+ time_dtype: Union[Any, None] = DefaultPandasDTypes.TIME_DTYPE,
+ timestamp_dtype: Union[Any, None] = None,
+ range_date_dtype: Union[Any, None] = DefaultPandasDTypes.RANGE_DATE_DTYPE,
+ range_datetime_dtype: Union[
+ Any, None
+ ] = DefaultPandasDTypes.RANGE_DATETIME_DTYPE,
+ range_timestamp_dtype: Union[
+ Any, None
+ ] = DefaultPandasDTypes.RANGE_TIMESTAMP_DTYPE,
+ ) -> "pandas.DataFrame":
+ """Create a pandas DataFrame by loading all pages of a query.
+
+ Args:
+ bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
+ A BigQuery Storage API client. If supplied, use the faster
+ BigQuery Storage API to fetch rows from BigQuery.
+
+ This method requires ``google-cloud-bigquery-storage`` library.
+
+ This method only exposes a subset of the capabilities of the
+ BigQuery Storage API. For full access to all features
+ (projections, filters, snapshots) use the Storage API directly.
+
+ dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
+ A dictionary of column names pandas ``dtype``s. The provided
+ ``dtype`` is used when constructing the series for the column
+ specified. Otherwise, the default pandas behavior is used.
+ progress_bar_type (Optional[str]):
+ If set, use the `tqdm `_ library to
+ display a progress bar while the data downloads. Install the
+ ``tqdm`` package to use this feature.
+
+ Possible values of ``progress_bar_type`` include:
+
+ ``None``
+ No progress bar.
+ ``'tqdm'``
+ Use the :func:`tqdm.tqdm` function to print a progress bar
+ to :data:`sys.stdout`.
+ ``'tqdm_notebook'``
+ Use the :func:`tqdm.notebook.tqdm` function to display a
+ progress bar as a Jupyter notebook widget.
+ ``'tqdm_gui'``
+ Use the :func:`tqdm.tqdm_gui` function to display a
+ progress bar as a graphical dialog box.
+
+ .. versionadded:: 1.11.0
+
+ create_bqstorage_client (Optional[bool]):
+ If ``True`` (default), create a BigQuery Storage API client
+ using the default API settings. The BigQuery Storage API
+ is a faster way to fetch rows from BigQuery. See the
+ ``bqstorage_client`` parameter for more information.
+
+ This argument does nothing if ``bqstorage_client`` is supplied.
+
+ .. versionadded:: 1.24.0
+
+ geography_as_object (Optional[bool]):
+ If ``True``, convert GEOGRAPHY data to :mod:`shapely`
+ geometry objects. If ``False`` (default), don't cast
+ geography data to :mod:`shapely` geometry objects.
+
+ .. versionadded:: 2.24.0
+
+ bool_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g. ``pandas.BooleanDtype()``)
+ to convert BigQuery Boolean type, instead of relying on the default
+ ``pandas.BooleanDtype()``. If you explicitly set the value to ``None``,
+ then the data type will be ``numpy.dtype("bool")``. BigQuery Boolean
+ type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#boolean_type
+
+ .. versionadded:: 3.8.0
+
+ int_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g. ``pandas.Int64Dtype()``)
+ to convert BigQuery Integer types, instead of relying on the default
+ ``pandas.Int64Dtype()``. If you explicitly set the value to ``None``,
+ then the data type will be ``numpy.dtype("int64")``. A list of BigQuery
+ Integer types can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#integer_types
+
+ .. versionadded:: 3.8.0
+
+ float_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g. ``pandas.Float32Dtype()``)
+ to convert BigQuery Float type, instead of relying on the default
+ ``numpy.dtype("float64")``. If you explicitly set the value to ``None``,
+ then the data type will be ``numpy.dtype("float64")``. BigQuery Float
+ type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#floating_point_types
+
+ .. versionadded:: 3.8.0
+
+ string_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g. ``pandas.StringDtype()``) to
+ convert BigQuery String type, instead of relying on the default
+ ``numpy.dtype("object")``. If you explicitly set the value to ``None``,
+ then the data type will be ``numpy.dtype("object")``. BigQuery String
+ type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#string_type
+
+ .. versionadded:: 3.8.0
+
+ date_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g.
+ ``pandas.ArrowDtype(pyarrow.date32())``) to convert BigQuery Date
+ type, instead of relying on the default ``db_dtypes.DateDtype()``.
+ If you explicitly set the value to ``None``, then the data type will be
+ ``numpy.dtype("datetime64[ns]")`` or ``object`` if out of bound. BigQuery
+ Date type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#date_type
+
+ .. versionadded:: 3.10.0
+
+ datetime_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g.
+ ``pandas.ArrowDtype(pyarrow.timestamp("us"))``) to convert BigQuery Datetime
+ type, instead of relying on the default ``numpy.dtype("datetime64[ns]``.
+ If you explicitly set the value to ``None``, then the data type will be
+ ``numpy.dtype("datetime64[ns]")`` or ``object`` if out of bound. BigQuery
+ Datetime type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#datetime_type
+
+ .. versionadded:: 3.10.0
+
+ time_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g.
+ ``pandas.ArrowDtype(pyarrow.time64("us"))``) to convert BigQuery Time
+ type, instead of relying on the default ``db_dtypes.TimeDtype()``.
+ If you explicitly set the value to ``None``, then the data type will be
+ ``numpy.dtype("object")``. BigQuery Time type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#time_type
+
+ .. versionadded:: 3.10.0
+
+ timestamp_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype (e.g.
+ ``pandas.ArrowDtype(pyarrow.timestamp("us", tz="UTC"))``) to convert BigQuery Timestamp
+ type, instead of relying on the default ``numpy.dtype("datetime64[ns, UTC]")``.
+ If you explicitly set the value to ``None``, then the data type will be
+ ``numpy.dtype("datetime64[ns, UTC]")`` or ``object`` if out of bound. BigQuery
+ Datetime type can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp_type
+
+ .. versionadded:: 3.10.0
+
+ range_date_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype, such as:
+
+ .. code-block:: python
+
+ pandas.ArrowDtype(pyarrow.struct(
+ [("start", pyarrow.date32()), ("end", pyarrow.date32())]
+ ))
+
+ to convert BigQuery RANGE type, instead of relying on
+ the default ``object``. If you explicitly set the value to
+ ``None``, the data type will be ``object``. BigQuery Range type
+ can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#range_type
+
+ .. versionadded:: 3.21.0
+
+ range_datetime_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype, such as:
+
+ .. code-block:: python
+
+ pandas.ArrowDtype(pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us")),
+ ("end", pyarrow.timestamp("us")),
+ ]
+ ))
+
+ to convert BigQuery RANGE type, instead of relying on
+ the default ``object``. If you explicitly set the value to
+ ``None``, the data type will be ``object``. BigQuery Range type
+ can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#range_type
+
+ .. versionadded:: 3.21.0
+
+ range_timestamp_dtype (Optional[pandas.Series.dtype, None]):
+ If set, indicate a pandas ExtensionDtype, such as:
+
+ .. code-block:: python
+
+ pandas.ArrowDtype(pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us", tz="UTC")),
+ ("end", pyarrow.timestamp("us", tz="UTC")),
+ ]
+ ))
+
+ to convert BigQuery RANGE type, instead of relying
+ on the default ``object``. If you explicitly set the value to
+ ``None``, the data type will be ``object``. BigQuery Range type
+ can be found at:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#range_type
+
+ .. versionadded:: 3.21.0
+
+ Returns:
+ pandas.DataFrame:
+ A :class:`~pandas.DataFrame` populated with row data and column
+ headers from the query results. The column headers are derived
+ from the destination table's schema.
+
+ Raises:
+ ValueError:
+ If the :mod:`pandas` library cannot be imported, or
+ the :mod:`google.cloud.bigquery_storage_v1` module is
+ required but cannot be imported. Also if
+ `geography_as_object` is `True`, but the
+ :mod:`shapely` library cannot be imported. Also if
+ `bool_dtype`, `int_dtype` or other dtype parameters
+ is not supported dtype.
+
+ """
+ _pandas_helpers.verify_pandas_imports()
+
+ if geography_as_object and shapely is None:
+ raise ValueError(_NO_SHAPELY_ERROR)
+
+ if bool_dtype is DefaultPandasDTypes.BOOL_DTYPE:
+ bool_dtype = pandas.BooleanDtype()
+
+ if int_dtype is DefaultPandasDTypes.INT_DTYPE:
+ int_dtype = pandas.Int64Dtype()
+
+ if time_dtype is DefaultPandasDTypes.TIME_DTYPE:
+ time_dtype = db_dtypes.TimeDtype()
+
+ if range_date_dtype is DefaultPandasDTypes.RANGE_DATE_DTYPE:
+ if _versions_helpers.SUPPORTS_RANGE_PYARROW:
+ range_date_dtype = pandas.ArrowDtype(
+ pyarrow.struct(
+ [("start", pyarrow.date32()), ("end", pyarrow.date32())]
+ )
+ )
+ else:
+ warnings.warn(_RANGE_PYARROW_WARNING)
+ range_date_dtype = None
+
+ if range_datetime_dtype is DefaultPandasDTypes.RANGE_DATETIME_DTYPE:
+ if _versions_helpers.SUPPORTS_RANGE_PYARROW:
+ range_datetime_dtype = pandas.ArrowDtype(
+ pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us")),
+ ("end", pyarrow.timestamp("us")),
+ ]
+ )
+ )
+ else:
+ warnings.warn(_RANGE_PYARROW_WARNING)
+ range_datetime_dtype = None
+
+ if range_timestamp_dtype is DefaultPandasDTypes.RANGE_TIMESTAMP_DTYPE:
+ if _versions_helpers.SUPPORTS_RANGE_PYARROW:
+ range_timestamp_dtype = pandas.ArrowDtype(
+ pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us", tz="UTC")),
+ ("end", pyarrow.timestamp("us", tz="UTC")),
+ ]
+ )
+ )
+ else:
+ warnings.warn(_RANGE_PYARROW_WARNING)
+ range_timestamp_dtype = None
+
+ if bool_dtype is not None and not hasattr(bool_dtype, "__from_arrow__"):
+ raise ValueError("bool_dtype", _NO_SUPPORTED_DTYPE)
+
+ if int_dtype is not None and not hasattr(int_dtype, "__from_arrow__"):
+ raise ValueError("int_dtype", _NO_SUPPORTED_DTYPE)
+
+ if float_dtype is not None and not hasattr(float_dtype, "__from_arrow__"):
+ raise ValueError("float_dtype", _NO_SUPPORTED_DTYPE)
+
+ if string_dtype is not None and not hasattr(string_dtype, "__from_arrow__"):
+ raise ValueError("string_dtype", _NO_SUPPORTED_DTYPE)
+
+ if (
+ date_dtype is not None
+ and date_dtype is not DefaultPandasDTypes.DATE_DTYPE
+ and not hasattr(date_dtype, "__from_arrow__")
+ ):
+ raise ValueError("date_dtype", _NO_SUPPORTED_DTYPE)
+
+ if datetime_dtype is not None and not hasattr(datetime_dtype, "__from_arrow__"):
+ raise ValueError("datetime_dtype", _NO_SUPPORTED_DTYPE)
+
+ if time_dtype is not None and not hasattr(time_dtype, "__from_arrow__"):
+ raise ValueError("time_dtype", _NO_SUPPORTED_DTYPE)
+
+ if timestamp_dtype is not None and not hasattr(
+ timestamp_dtype, "__from_arrow__"
+ ):
+ raise ValueError("timestamp_dtype", _NO_SUPPORTED_DTYPE)
+
+ if dtypes is None:
+ dtypes = {}
+
+ self._maybe_warn_max_results(bqstorage_client)
+
+ if not self._should_use_bqstorage(bqstorage_client, create_bqstorage_client):
+ create_bqstorage_client = False
+ bqstorage_client = None
+
+ record_batch = self.to_arrow(
+ progress_bar_type=progress_bar_type,
+ bqstorage_client=bqstorage_client,
+ create_bqstorage_client=create_bqstorage_client,
+ )
+
+ # Default date dtype is `db_dtypes.DateDtype()` that could cause out of bounds error,
+ # when pyarrow converts date values to nanosecond precision. To avoid the error, we
+ # set the date_as_object parameter to True, if necessary.
+ date_as_object = False
+ if date_dtype is DefaultPandasDTypes.DATE_DTYPE:
+ date_dtype = db_dtypes.DateDtype()
+ date_as_object = not all(
+ self.__can_cast_timestamp_ns(col)
+ for col in record_batch
+ # Type can be date32 or date64 (plus units).
+ # See: https://arrow.apache.org/docs/python/api/datatypes.html
+ if pyarrow.types.is_date(col.type)
+ )
+
+ timestamp_as_object = False
+ if datetime_dtype is None and timestamp_dtype is None:
+ timestamp_as_object = not all(
+ self.__can_cast_timestamp_ns(col)
+ for col in record_batch
+ # Type can be datetime and timestamp (plus units and time zone).
+ # See: https://arrow.apache.org/docs/python/api/datatypes.html
+ if pyarrow.types.is_timestamp(col.type)
+ )
+
+ if len(record_batch) > 0:
+ df = record_batch.to_pandas(
+ date_as_object=date_as_object,
+ timestamp_as_object=timestamp_as_object,
+ integer_object_nulls=True,
+ types_mapper=_pandas_helpers.default_types_mapper(
+ date_as_object=date_as_object,
+ bool_dtype=bool_dtype,
+ int_dtype=int_dtype,
+ float_dtype=float_dtype,
+ string_dtype=string_dtype,
+ date_dtype=date_dtype,
+ datetime_dtype=datetime_dtype,
+ time_dtype=time_dtype,
+ timestamp_dtype=timestamp_dtype,
+ range_date_dtype=range_date_dtype,
+ range_datetime_dtype=range_datetime_dtype,
+ range_timestamp_dtype=range_timestamp_dtype,
+ ),
+ )
+ else:
+ # Avoid "ValueError: need at least one array to concatenate" on
+ # older versions of pandas when converting empty RecordBatch to
+ # DataFrame. See: https://github.com/pandas-dev/pandas/issues/41241
+ df = pandas.DataFrame([], columns=record_batch.schema.names)
+
+ for column in dtypes:
+ df[column] = pandas.Series(df[column], dtype=dtypes[column], copy=False)
+
+ if geography_as_object:
+ for field in self.schema:
+ if field.field_type.upper() == "GEOGRAPHY" and field.mode != "REPEATED":
+ df[field.name] = df[field.name].dropna().apply(_read_wkt)
+
+ return df
+
+ @staticmethod
+ def __can_cast_timestamp_ns(column):
+ try:
+ column.cast("timestamp[ns]")
+ except pyarrow.lib.ArrowInvalid:
+ return False
+ else:
+ return True
+
+ # If changing the signature of this method, make sure to apply the same
+ # changes to job.QueryJob.to_geodataframe()
+ def to_geodataframe(
+ self,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ dtypes: Optional[Dict[str, Any]] = None,
+ progress_bar_type: Optional[str] = None,
+ create_bqstorage_client: bool = True,
+ geography_column: Optional[str] = None,
+ ) -> "geopandas.GeoDataFrame":
+ """Create a GeoPandas GeoDataFrame by loading all pages of a query.
+
+ Args:
+ bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
+ A BigQuery Storage API client. If supplied, use the faster
+ BigQuery Storage API to fetch rows from BigQuery.
+
+ This method requires the ``pyarrow`` and
+ ``google-cloud-bigquery-storage`` libraries.
+
+ This method only exposes a subset of the capabilities of the
+ BigQuery Storage API. For full access to all features
+ (projections, filters, snapshots) use the Storage API directly.
+
+ dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
+ A dictionary of column names pandas ``dtype``s. The provided
+ ``dtype`` is used when constructing the series for the column
+ specified. Otherwise, the default pandas behavior is used.
+ progress_bar_type (Optional[str]):
+ If set, use the `tqdm `_ library to
+ display a progress bar while the data downloads. Install the
+ ``tqdm`` package to use this feature.
+
+ Possible values of ``progress_bar_type`` include:
+
+ ``None``
+ No progress bar.
+ ``'tqdm'``
+ Use the :func:`tqdm.tqdm` function to print a progress bar
+ to :data:`sys.stdout`.
+ ``'tqdm_notebook'``
+ Use the :func:`tqdm.notebook.tqdm` function to display a
+ progress bar as a Jupyter notebook widget.
+ ``'tqdm_gui'``
+ Use the :func:`tqdm.tqdm_gui` function to display a
+ progress bar as a graphical dialog box.
+
+ create_bqstorage_client (Optional[bool]):
+ If ``True`` (default), create a BigQuery Storage API client
+ using the default API settings. The BigQuery Storage API
+ is a faster way to fetch rows from BigQuery. See the
+ ``bqstorage_client`` parameter for more information.
+
+ This argument does nothing if ``bqstorage_client`` is supplied.
+
+ geography_column (Optional[str]):
+ If there are more than one GEOGRAPHY column,
+ identifies which one to use to construct a geopandas
+ GeoDataFrame. This option can be ommitted if there's
+ only one GEOGRAPHY column.
+
+ Returns:
+ geopandas.GeoDataFrame:
+ A :class:`geopandas.GeoDataFrame` populated with row
+ data and column headers from the query results. The
+ column headers are derived from the destination
+ table's schema.
+
+ Raises:
+ ValueError:
+ If the :mod:`geopandas` library cannot be imported, or the
+ :mod:`google.cloud.bigquery_storage_v1` module is
+ required but cannot be imported.
+
+ .. versionadded:: 2.24.0
+ """
+ if geopandas is None:
+ raise ValueError(_NO_GEOPANDAS_ERROR)
+
+ geography_columns = set(
+ field.name
+ for field in self.schema
+ if field.field_type.upper() == "GEOGRAPHY"
+ )
+ if not geography_columns:
+ raise TypeError(
+ "There must be at least one GEOGRAPHY column"
+ " to create a GeoDataFrame"
+ )
+
+ if geography_column:
+ if geography_column not in geography_columns:
+ raise ValueError(
+ f"The given geography column, {geography_column}, doesn't name"
+ f" a GEOGRAPHY column in the result."
+ )
+ elif len(geography_columns) == 1:
+ [geography_column] = geography_columns
+ else:
+ raise ValueError(
+ "There is more than one GEOGRAPHY column in the result. "
+ "The geography_column argument must be used to specify which "
+ "one to use to create a GeoDataFrame"
+ )
+
+ df = self.to_dataframe(
+ bqstorage_client,
+ dtypes,
+ progress_bar_type,
+ create_bqstorage_client,
+ geography_as_object=True,
+ )
+
+ return geopandas.GeoDataFrame(
+ df, crs=_COORDINATE_REFERENCE_SYSTEM, geometry=geography_column
+ )
+
+
+class _EmptyRowIterator(RowIterator):
+ """An empty row iterator.
+
+ This class prevents API requests when there are no rows to fetch or rows
+ are impossible to fetch, such as with query results for DDL CREATE VIEW
+ statements.
+ """
+
+ schema = ()
+ pages = ()
+ total_rows = 0
+
+ def __init__(
+ self, client=None, api_request=None, path=None, schema=(), *args, **kwargs
+ ):
+ super().__init__(
+ client=client,
+ api_request=api_request,
+ path=path,
+ schema=schema,
+ *args,
+ **kwargs,
+ )
+
+ def to_arrow(
+ self,
+ progress_bar_type=None,
+ bqstorage_client=None,
+ create_bqstorage_client=True,
+ ) -> "pyarrow.Table":
+ """[Beta] Create an empty class:`pyarrow.Table`.
+
+ Args:
+ progress_bar_type (str): Ignored. Added for compatibility with RowIterator.
+ bqstorage_client (Any): Ignored. Added for compatibility with RowIterator.
+ create_bqstorage_client (bool): Ignored. Added for compatibility with RowIterator.
+
+ Returns:
+ pyarrow.Table: An empty :class:`pyarrow.Table`.
+ """
+ if pyarrow is None:
+ raise ValueError(_NO_PYARROW_ERROR)
+ return pyarrow.Table.from_arrays(())
+
+ def to_dataframe(
+ self,
+ bqstorage_client=None,
+ dtypes=None,
+ progress_bar_type=None,
+ create_bqstorage_client=True,
+ geography_as_object=False,
+ bool_dtype=None,
+ int_dtype=None,
+ float_dtype=None,
+ string_dtype=None,
+ date_dtype=None,
+ datetime_dtype=None,
+ time_dtype=None,
+ timestamp_dtype=None,
+ range_date_dtype=None,
+ range_datetime_dtype=None,
+ range_timestamp_dtype=None,
+ ) -> "pandas.DataFrame":
+ """Create an empty dataframe.
+
+ Args:
+ bqstorage_client (Any): Ignored. Added for compatibility with RowIterator.
+ dtypes (Any): Ignored. Added for compatibility with RowIterator.
+ progress_bar_type (Any): Ignored. Added for compatibility with RowIterator.
+ create_bqstorage_client (bool): Ignored. Added for compatibility with RowIterator.
+ geography_as_object (bool): Ignored. Added for compatibility with RowIterator.
+ bool_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ int_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ float_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ string_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ date_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ datetime_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ time_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ timestamp_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ range_date_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ range_datetime_dtype (Any): Ignored. Added for compatibility with RowIterator.
+ range_timestamp_dtype (Any): Ignored. Added for compatibility with RowIterator.
+
+ Returns:
+ pandas.DataFrame: An empty :class:`~pandas.DataFrame`.
+ """
+ _pandas_helpers.verify_pandas_imports()
+ return pandas.DataFrame()
+
+ def to_geodataframe(
+ self,
+ bqstorage_client=None,
+ dtypes=None,
+ progress_bar_type=None,
+ create_bqstorage_client=True,
+ geography_column: Optional[str] = None,
+ ) -> "pandas.DataFrame":
+ """Create an empty dataframe.
+
+ Args:
+ bqstorage_client (Any): Ignored. Added for compatibility with RowIterator.
+ dtypes (Any): Ignored. Added for compatibility with RowIterator.
+ progress_bar_type (Any): Ignored. Added for compatibility with RowIterator.
+ create_bqstorage_client (bool): Ignored. Added for compatibility with RowIterator.
+ geography_column (str): Ignored. Added for compatibility with RowIterator.
+
+ Returns:
+ pandas.DataFrame: An empty :class:`~pandas.DataFrame`.
+ """
+ if geopandas is None:
+ raise ValueError(_NO_GEOPANDAS_ERROR)
+
+ # Since an empty GeoDataFrame has no geometry column, we do not CRS on it,
+ # because that's deprecated.
+ return geopandas.GeoDataFrame()
+
+ def to_dataframe_iterable(
+ self,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ dtypes: Optional[Dict[str, Any]] = None,
+ max_queue_size: Optional[int] = None,
+ ) -> Iterator["pandas.DataFrame"]:
+ """Create an iterable of pandas DataFrames, to process the table as a stream.
+
+ .. versionadded:: 2.21.0
+
+ Args:
+ bqstorage_client:
+ Ignored. Added for compatibility with RowIterator.
+
+ dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
+ Ignored. Added for compatibility with RowIterator.
+
+ max_queue_size:
+ Ignored. Added for compatibility with RowIterator.
+
+ Returns:
+ An iterator yielding a single empty :class:`~pandas.DataFrame`.
+
+ Raises:
+ ValueError:
+ If the :mod:`pandas` library cannot be imported.
+ """
+ _pandas_helpers.verify_pandas_imports()
+ return iter((pandas.DataFrame(),))
+
+ def to_arrow_iterable(
+ self,
+ bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
+ max_queue_size: Optional[int] = None,
+ ) -> Iterator["pyarrow.RecordBatch"]:
+ """Create an iterable of pandas DataFrames, to process the table as a stream.
+
+ .. versionadded:: 2.31.0
+
+ Args:
+ bqstorage_client:
+ Ignored. Added for compatibility with RowIterator.
+
+ max_queue_size:
+ Ignored. Added for compatibility with RowIterator.
+
+ Returns:
+ An iterator yielding a single empty :class:`~pyarrow.RecordBatch`.
+ """
+ return iter((pyarrow.record_batch([]),))
+
+ def __iter__(self):
+ return iter(())
+
+
+class PartitionRange(object):
+ """Definition of the ranges for range partitioning.
+
+ .. note::
+ **Beta**. The integer range partitioning feature is in a pre-release
+ state and might change or have limited support.
+
+ Args:
+ start (Optional[int]):
+ Sets the
+ :attr:`~google.cloud.bigquery.table.PartitionRange.start`
+ property.
+ end (Optional[int]):
+ Sets the
+ :attr:`~google.cloud.bigquery.table.PartitionRange.end`
+ property.
+ interval (Optional[int]):
+ Sets the
+ :attr:`~google.cloud.bigquery.table.PartitionRange.interval`
+ property.
+ _properties (Optional[dict]):
+ Private. Used to construct object from API resource.
+ """
+
+ def __init__(self, start=None, end=None, interval=None, _properties=None) -> None:
+ if _properties is None:
+ _properties = {}
+ self._properties = _properties
+
+ if start is not None:
+ self.start = start
+ if end is not None:
+ self.end = end
+ if interval is not None:
+ self.interval = interval
+
+ @property
+ def start(self):
+ """int: The start of range partitioning, inclusive."""
+ return _helpers._int_or_none(self._properties.get("start"))
+
+ @start.setter
+ def start(self, value):
+ self._properties["start"] = _helpers._str_or_none(value)
+
+ @property
+ def end(self):
+ """int: The end of range partitioning, exclusive."""
+ return _helpers._int_or_none(self._properties.get("end"))
+
+ @end.setter
+ def end(self, value):
+ self._properties["end"] = _helpers._str_or_none(value)
+
+ @property
+ def interval(self):
+ """int: The width of each interval."""
+ return _helpers._int_or_none(self._properties.get("interval"))
+
+ @interval.setter
+ def interval(self, value):
+ self._properties["interval"] = _helpers._str_or_none(value)
+
+ def _key(self):
+ return tuple(sorted(self._properties.items()))
+
+ def __eq__(self, other):
+ if not isinstance(other, PartitionRange):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ key_vals = ["{}={}".format(key, val) for key, val in self._key()]
+ return "PartitionRange({})".format(", ".join(key_vals))
+
+
+class RangePartitioning(object):
+ """Range-based partitioning configuration for a table.
+
+ .. note::
+ **Beta**. The integer range partitioning feature is in a pre-release
+ state and might change or have limited support.
+
+ Args:
+ range_ (Optional[google.cloud.bigquery.table.PartitionRange]):
+ Sets the
+ :attr:`google.cloud.bigquery.table.RangePartitioning.range_`
+ property.
+ field (Optional[str]):
+ Sets the
+ :attr:`google.cloud.bigquery.table.RangePartitioning.field`
+ property.
+ _properties (Optional[dict]):
+ Private. Used to construct object from API resource.
+ """
+
+ def __init__(self, range_=None, field=None, _properties=None) -> None:
+ if _properties is None:
+ _properties = {}
+ self._properties: Dict[str, Any] = _properties
+
+ if range_ is not None:
+ self.range_ = range_
+ if field is not None:
+ self.field = field
+
+ # Trailing underscore to prevent conflict with built-in range() function.
+ @property
+ def range_(self):
+ """google.cloud.bigquery.table.PartitionRange: Defines the
+ ranges for range partitioning.
+
+ Raises:
+ ValueError:
+ If the value is not a :class:`PartitionRange`.
+ """
+ range_properties = self._properties.setdefault("range", {})
+ return PartitionRange(_properties=range_properties)
+
+ @range_.setter
+ def range_(self, value):
+ if not isinstance(value, PartitionRange):
+ raise ValueError("Expected a PartitionRange, but got {}.".format(value))
+ self._properties["range"] = value._properties
+
+ @property
+ def field(self):
+ """str: The table is partitioned by this field.
+
+ The field must be a top-level ``NULLABLE`` / ``REQUIRED`` field. The
+ only supported type is ``INTEGER`` / ``INT64``.
+ """
+ return self._properties.get("field")
+
+ @field.setter
+ def field(self, value):
+ self._properties["field"] = value
+
+ def _key(self):
+ return (("field", self.field), ("range_", self.range_))
+
+ def __eq__(self, other):
+ if not isinstance(other, RangePartitioning):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ key_vals = ["{}={}".format(key, repr(val)) for key, val in self._key()]
+ return "RangePartitioning({})".format(", ".join(key_vals))
+
+
+class TimePartitioningType(object):
+ """Specifies the type of time partitioning to perform."""
+
+ DAY = "DAY"
+ """str: Generates one partition per day."""
+
+ HOUR = "HOUR"
+ """str: Generates one partition per hour."""
+
+ MONTH = "MONTH"
+ """str: Generates one partition per month."""
+
+ YEAR = "YEAR"
+ """str: Generates one partition per year."""
+
+
+class TimePartitioning(object):
+ """Configures time-based partitioning for a table.
+
+ Args:
+ type_ (Optional[google.cloud.bigquery.table.TimePartitioningType]):
+ Specifies the type of time partitioning to perform. Defaults to
+ :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
+
+ Supported values are:
+
+ * :attr:`~google.cloud.bigquery.table.TimePartitioningType.HOUR`
+ * :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`
+ * :attr:`~google.cloud.bigquery.table.TimePartitioningType.MONTH`
+ * :attr:`~google.cloud.bigquery.table.TimePartitioningType.YEAR`
+
+ field (Optional[str]):
+ If set, the table is partitioned by this field. If not set, the
+ table is partitioned by pseudo column ``_PARTITIONTIME``. The field
+ must be a top-level ``TIMESTAMP``, ``DATETIME``, or ``DATE``
+ field. Its mode must be ``NULLABLE`` or ``REQUIRED``.
+
+ See the `time-unit column-partitioned tables guide
+ `_
+ in the BigQuery documentation.
+ expiration_ms(Optional[int]):
+ Number of milliseconds for which to keep the storage for a
+ partition.
+ require_partition_filter (Optional[bool]):
+ DEPRECATED: Use
+ :attr:`~google.cloud.bigquery.table.Table.require_partition_filter`,
+ instead.
+ """
+
+ def __init__(
+ self, type_=None, field=None, expiration_ms=None, require_partition_filter=None
+ ) -> None:
+ self._properties: Dict[str, Any] = {}
+ if type_ is None:
+ self.type_ = TimePartitioningType.DAY
+ else:
+ self.type_ = type_
+ if field is not None:
+ self.field = field
+ if expiration_ms is not None:
+ self.expiration_ms = expiration_ms
+ if require_partition_filter is not None:
+ self.require_partition_filter = require_partition_filter
+
+ @property
+ def type_(self):
+ """google.cloud.bigquery.table.TimePartitioningType: The type of time
+ partitioning to use.
+ """
+ return self._properties.get("type")
+
+ @type_.setter
+ def type_(self, value):
+ self._properties["type"] = value
+
+ @property
+ def field(self):
+ """str: Field in the table to use for partitioning"""
+ return self._properties.get("field")
+
+ @field.setter
+ def field(self, value):
+ self._properties["field"] = value
+
+ @property
+ def expiration_ms(self):
+ """int: Number of milliseconds to keep the storage for a partition."""
+ return _helpers._int_or_none(self._properties.get("expirationMs"))
+
+ @expiration_ms.setter
+ def expiration_ms(self, value):
+ if value is not None:
+ # Allow explicitly setting the expiration to None.
+ value = str(value)
+ self._properties["expirationMs"] = value
+
+ @property
+ def require_partition_filter(self):
+ """bool: Specifies whether partition filters are required for queries
+
+ DEPRECATED: Use
+ :attr:`~google.cloud.bigquery.table.Table.require_partition_filter`,
+ instead.
+ """
+ warnings.warn(
+ (
+ "TimePartitioning.require_partition_filter will be removed in "
+ "future versions. Please use Table.require_partition_filter "
+ "instead."
+ ),
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+ return self._properties.get("requirePartitionFilter")
+
+ @require_partition_filter.setter
+ def require_partition_filter(self, value):
+ warnings.warn(
+ (
+ "TimePartitioning.require_partition_filter will be removed in "
+ "future versions. Please use Table.require_partition_filter "
+ "instead."
+ ),
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+ self._properties["requirePartitionFilter"] = value
+
+ @classmethod
+ def from_api_repr(cls, api_repr: dict) -> "TimePartitioning":
+ """Return a :class:`TimePartitioning` object deserialized from a dict.
+
+ This method creates a new ``TimePartitioning`` instance that points to
+ the ``api_repr`` parameter as its internal properties dict. This means
+ that when a ``TimePartitioning`` instance is stored as a property of
+ another object, any changes made at the higher level will also appear
+ here::
+
+ >>> time_partitioning = TimePartitioning()
+ >>> table.time_partitioning = time_partitioning
+ >>> table.time_partitioning.field = 'timecolumn'
+ >>> time_partitioning.field
+ 'timecolumn'
+
+ Args:
+ api_repr (Mapping[str, str]):
+ The serialized representation of the TimePartitioning, such as
+ what is output by :meth:`to_api_repr`.
+
+ Returns:
+ google.cloud.bigquery.table.TimePartitioning:
+ The ``TimePartitioning`` object.
+ """
+ instance = cls()
+ instance._properties = api_repr
+ return instance
+
+ def to_api_repr(self) -> dict:
+ """Return a dictionary representing this object.
+
+ This method returns the properties dict of the ``TimePartitioning``
+ instance rather than making a copy. This means that when a
+ ``TimePartitioning`` instance is stored as a property of another
+ object, any changes made at the higher level will also appear here.
+
+ Returns:
+ dict:
+ A dictionary representing the TimePartitioning object in
+ serialized form.
+ """
+ return self._properties
+
+ def _key(self):
+ # because we are only "renaming" top level keys shallow copy is sufficient here.
+ properties = self._properties.copy()
+ # calling repr for non built-in type objects.
+ properties["type_"] = repr(properties.pop("type"))
+ if "field" in properties:
+ # calling repr for non built-in type objects.
+ properties["field"] = repr(properties["field"])
+ if "requirePartitionFilter" in properties:
+ properties["require_partition_filter"] = properties.pop(
+ "requirePartitionFilter"
+ )
+ if "expirationMs" in properties:
+ properties["expiration_ms"] = properties.pop("expirationMs")
+ return tuple(sorted(properties.items()))
+
+ def __eq__(self, other):
+ if not isinstance(other, TimePartitioning):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def __repr__(self):
+ key_vals = ["{}={}".format(key, val) for key, val in self._key()]
+ return "TimePartitioning({})".format(",".join(key_vals))
+
+
+class PrimaryKey:
+ """Represents the primary key constraint on a table's columns.
+
+ Args:
+ columns: The columns that are composed of the primary key constraint.
+ """
+
+ def __init__(self, columns: List[str]):
+ self.columns = columns
+
+ def __eq__(self, other):
+ if not isinstance(other, PrimaryKey):
+ raise TypeError("The value provided is not a BigQuery PrimaryKey.")
+ return self.columns == other.columns
+
+
+class ColumnReference:
+ """The pair of the foreign key column and primary key column.
+
+ Args:
+ referencing_column: The column that composes the foreign key.
+ referenced_column: The column in the primary key that are referenced by the referencingColumn.
+ """
+
+ def __init__(self, referencing_column: str, referenced_column: str):
+ self.referencing_column = referencing_column
+ self.referenced_column = referenced_column
+
+ def __eq__(self, other):
+ if not isinstance(other, ColumnReference):
+ raise TypeError("The value provided is not a BigQuery ColumnReference.")
+ return (
+ self.referencing_column == other.referencing_column
+ and self.referenced_column == other.referenced_column
+ )
+
+
+class ForeignKey:
+ """Represents a foreign key constraint on a table's columns.
+
+ Args:
+ name: Set only if the foreign key constraint is named.
+ referenced_table: The table that holds the primary key and is referenced by this foreign key.
+ column_references: The columns that compose the foreign key.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ referenced_table: TableReference,
+ column_references: List[ColumnReference],
+ ):
+ self.name = name
+ self.referenced_table = referenced_table
+ self.column_references = column_references
+
+ def __eq__(self, other):
+ if not isinstance(other, ForeignKey):
+ raise TypeError("The value provided is not a BigQuery ForeignKey.")
+ return (
+ self.name == other.name
+ and self.referenced_table == other.referenced_table
+ and self.column_references == other.column_references
+ )
+
+ @classmethod
+ def from_api_repr(cls, api_repr: Dict[str, Any]) -> "ForeignKey":
+ """Create an instance from API representation."""
+ return cls(
+ name=api_repr["name"],
+ referenced_table=TableReference.from_api_repr(api_repr["referencedTable"]),
+ column_references=[
+ ColumnReference(
+ column_reference_resource["referencingColumn"],
+ column_reference_resource["referencedColumn"],
+ )
+ for column_reference_resource in api_repr["columnReferences"]
+ ],
+ )
+
+
+class TableConstraints:
+ """The TableConstraints defines the primary key and foreign key.
+
+ Args:
+ primary_key:
+ Represents a primary key constraint on a table's columns. Present only if the table
+ has a primary key. The primary key is not enforced.
+ foreign_keys:
+ Present only if the table has a foreign key. The foreign key is not enforced.
+
+ """
+
+ def __init__(
+ self,
+ primary_key: Optional[PrimaryKey],
+ foreign_keys: Optional[List[ForeignKey]],
+ ):
+ self.primary_key = primary_key
+ self.foreign_keys = foreign_keys
+
+ @classmethod
+ def from_api_repr(cls, resource: Dict[str, Any]) -> "TableConstraints":
+ """Create an instance from API representation."""
+ primary_key = None
+ if "primaryKey" in resource:
+ primary_key = PrimaryKey(resource["primaryKey"]["columns"])
+
+ foreign_keys = None
+ if "foreignKeys" in resource:
+ foreign_keys = [
+ ForeignKey.from_api_repr(foreign_key_resource)
+ for foreign_key_resource in resource["foreignKeys"]
+ ]
+ return cls(primary_key, foreign_keys)
+
+
+def _item_to_row(iterator, resource):
+ """Convert a JSON row to the native object.
+
+ .. note::
+
+ This assumes that the ``schema`` attribute has been
+ added to the iterator after being created, which
+ should be done by the caller.
+
+ Args:
+ iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
+ resource (Dict): An item to be converted to a row.
+
+ Returns:
+ google.cloud.bigquery.table.Row: The next row in the page.
+ """
+ return Row(
+ _helpers._row_tuple_from_json(resource, iterator.schema),
+ iterator._field_to_index,
+ )
+
+
+def _row_iterator_page_columns(schema, response):
+ """Make a generator of all the columns in a page from tabledata.list.
+
+ This enables creating a :class:`pandas.DataFrame` and other
+ column-oriented data structures such as :class:`pyarrow.RecordBatch`
+ """
+ columns = []
+ rows = response.get("rows", [])
+
+ def get_column_data(field_index, field):
+ for row in rows:
+ yield _helpers._field_from_json(row["f"][field_index]["v"], field)
+
+ for field_index, field in enumerate(schema):
+ columns.append(get_column_data(field_index, field))
+
+ return columns
+
+
+# pylint: disable=unused-argument
+def _rows_page_start(iterator, page, response):
+ """Grab total rows when :class:`~google.cloud.iterator.Page` starts.
+
+ Args:
+ iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
+ page (google.api_core.page_iterator.Page): The page that was just created.
+ response (Dict): The JSON API response for a page of rows in a table.
+ """
+ # Make a (lazy) copy of the page in column-oriented format for use in data
+ # science packages.
+ page._columns = _row_iterator_page_columns(iterator._schema, response)
+
+ total_rows = response.get("totalRows")
+ # Don't reset total_rows if it's not present in the next API response.
+ if total_rows is not None:
+ iterator._total_rows = int(total_rows)
+
+
+# pylint: enable=unused-argument
+
+
+def _table_arg_to_table_ref(value, default_project=None) -> TableReference:
+ """Helper to convert a string or Table to TableReference.
+
+ This function keeps TableReference and other kinds of objects unchanged.
+ """
+ if isinstance(value, str):
+ value = TableReference.from_string(value, default_project=default_project)
+ if isinstance(value, (Table, TableListItem)):
+ value = value.reference
+ return value
+
+
+def _table_arg_to_table(value, default_project=None) -> Table:
+ """Helper to convert a string or TableReference to a Table.
+
+ This function keeps Table and other kinds of objects unchanged.
+ """
+ if isinstance(value, str):
+ value = TableReference.from_string(value, default_project=default_project)
+ if isinstance(value, TableReference):
+ value = Table(value)
+ if isinstance(value, TableListItem):
+ newvalue = Table(value.reference)
+ newvalue._properties = value._properties
+ value = newvalue
+
+ return value
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery/version.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebc9112530b1e07bd95f9bd07decbd27e3eda6b9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery/version.py
@@ -0,0 +1,15 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "3.26.0"
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/__init__.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..55486a39a2de49984c975147a7d584bd61b49b3d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/__init__.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+
+from .types.encryption_config import EncryptionConfiguration
+from .types.model import DeleteModelRequest
+from .types.model import GetModelRequest
+from .types.model import ListModelsRequest
+from .types.model import ListModelsResponse
+from .types.model import Model
+from .types.model import PatchModelRequest
+from .types.model_reference import ModelReference
+from .types.standard_sql import StandardSqlDataType
+from .types.standard_sql import StandardSqlField
+from .types.standard_sql import StandardSqlStructType
+from .types.standard_sql import StandardSqlTableType
+from .types.table_reference import TableReference
+
+
+_LEGACY_MSG = (
+ "Legacy proto-based types from bigquery_v2 are not maintained anymore, "
+ "use types defined in google.cloud.bigquery instead."
+)
+
+warnings.warn(_LEGACY_MSG, category=DeprecationWarning)
+
+
+__all__ = (
+ "DeleteModelRequest",
+ "EncryptionConfiguration",
+ "GetModelRequest",
+ "ListModelsRequest",
+ "ListModelsResponse",
+ "Model",
+ "ModelReference",
+ "PatchModelRequest",
+ "StandardSqlDataType",
+ "StandardSqlField",
+ "StandardSqlStructType",
+ "StandardSqlTableType",
+ "TableReference",
+)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/__init__.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c36b30969b08b61066b6a7a3898735992cd717ad
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/__init__.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from .encryption_config import (
+ EncryptionConfiguration,
+)
+from .model import (
+ DeleteModelRequest,
+ GetModelRequest,
+ ListModelsRequest,
+ ListModelsResponse,
+ Model,
+ PatchModelRequest,
+)
+from .model_reference import (
+ ModelReference,
+)
+from .standard_sql import (
+ StandardSqlDataType,
+ StandardSqlField,
+ StandardSqlStructType,
+ StandardSqlTableType,
+)
+from .table_reference import (
+ TableReference,
+)
+
+__all__ = (
+ "EncryptionConfiguration",
+ "DeleteModelRequest",
+ "GetModelRequest",
+ "ListModelsRequest",
+ "ListModelsResponse",
+ "Model",
+ "PatchModelRequest",
+ "ModelReference",
+ "StandardSqlDataType",
+ "StandardSqlField",
+ "StandardSqlStructType",
+ "StandardSqlTableType",
+ "TableReference",
+)
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/encryption_config.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/encryption_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f57acb7c1f5b94b85b222e7dbf1bc8c3b71ac3a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/encryption_config.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+from google.protobuf import wrappers_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.bigquery.v2",
+ manifest={
+ "EncryptionConfiguration",
+ },
+)
+
+
+class EncryptionConfiguration(proto.Message):
+ r"""
+
+ Attributes:
+ kms_key_name (google.protobuf.wrappers_pb2.StringValue):
+ Optional. Describes the Cloud KMS encryption
+ key that will be used to protect destination
+ BigQuery table. The BigQuery Service Account
+ associated with your project requires access to
+ this encryption key.
+ """
+
+ kms_key_name = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=wrappers_pb2.StringValue,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/model.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..f32e15eb12b6ce6b08262f4c2cb50b5e366bb7ce
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/model.py
@@ -0,0 +1,1994 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+from google.cloud.bigquery_v2.types import encryption_config
+from google.cloud.bigquery_v2.types import model_reference as gcb_model_reference
+from google.cloud.bigquery_v2.types import standard_sql
+from google.cloud.bigquery_v2.types import table_reference
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.protobuf import wrappers_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.bigquery.v2",
+ manifest={
+ "Model",
+ "GetModelRequest",
+ "PatchModelRequest",
+ "DeleteModelRequest",
+ "ListModelsRequest",
+ "ListModelsResponse",
+ },
+)
+
+
+class Model(proto.Message):
+ r"""
+
+ Attributes:
+ etag (str):
+ Output only. A hash of this resource.
+ model_reference (google.cloud.bigquery_v2.types.ModelReference):
+ Required. Unique identifier for this model.
+ creation_time (int):
+ Output only. The time when this model was
+ created, in millisecs since the epoch.
+ last_modified_time (int):
+ Output only. The time when this model was
+ last modified, in millisecs since the epoch.
+ description (str):
+ Optional. A user-friendly description of this
+ model.
+ friendly_name (str):
+ Optional. A descriptive name for this model.
+ labels (Mapping[str, str]):
+ The labels associated with this model. You
+ can use these to organize and group your models.
+ Label keys and values can be no longer than 63
+ characters, can only contain lowercase letters,
+ numeric characters, underscores and dashes.
+ International characters are allowed. Label
+ values are optional. Label keys must start with
+ a letter and each label in the list must have a
+ different key.
+ expiration_time (int):
+ Optional. The time when this model expires,
+ in milliseconds since the epoch. If not present,
+ the model will persist indefinitely. Expired
+ models will be deleted and their storage
+ reclaimed. The defaultTableExpirationMs
+ property of the encapsulating dataset can be
+ used to set a default expirationTime on newly
+ created models.
+ location (str):
+ Output only. The geographic location where
+ the model resides. This value is inherited from
+ the dataset.
+ encryption_configuration (google.cloud.bigquery_v2.types.EncryptionConfiguration):
+ Custom encryption configuration (e.g., Cloud
+ KMS keys). This shows the encryption
+ configuration of the model data while stored in
+ BigQuery storage. This field can be used with
+ PatchModel to update encryption key for an
+ already encrypted model.
+ model_type (google.cloud.bigquery_v2.types.Model.ModelType):
+ Output only. Type of the model resource.
+ training_runs (Sequence[google.cloud.bigquery_v2.types.Model.TrainingRun]):
+ Output only. Information for all training runs in increasing
+ order of start_time.
+ feature_columns (Sequence[google.cloud.bigquery_v2.types.StandardSqlField]):
+ Output only. Input feature columns that were
+ used to train this model.
+ label_columns (Sequence[google.cloud.bigquery_v2.types.StandardSqlField]):
+ Output only. Label columns that were used to train this
+ model. The output of the model will have a `predicted_`
+ prefix to these columns.
+ best_trial_id (int):
+ The best trial_id across all training runs.
+ """
+
+ class ModelType(proto.Enum):
+ r"""Indicates the type of the Model."""
+ MODEL_TYPE_UNSPECIFIED = 0
+ LINEAR_REGRESSION = 1
+ LOGISTIC_REGRESSION = 2
+ KMEANS = 3
+ MATRIX_FACTORIZATION = 4
+ DNN_CLASSIFIER = 5
+ TENSORFLOW = 6
+ DNN_REGRESSOR = 7
+ BOOSTED_TREE_REGRESSOR = 9
+ BOOSTED_TREE_CLASSIFIER = 10
+ ARIMA = 11
+ AUTOML_REGRESSOR = 12
+ AUTOML_CLASSIFIER = 13
+ ARIMA_PLUS = 19
+
+ class LossType(proto.Enum):
+ r"""Loss metric to evaluate model training performance."""
+ LOSS_TYPE_UNSPECIFIED = 0
+ MEAN_SQUARED_LOSS = 1
+ MEAN_LOG_LOSS = 2
+
+ class DistanceType(proto.Enum):
+ r"""Distance metric used to compute the distance between two
+ points.
+ """
+ DISTANCE_TYPE_UNSPECIFIED = 0
+ EUCLIDEAN = 1
+ COSINE = 2
+
+ class DataSplitMethod(proto.Enum):
+ r"""Indicates the method to split input data into multiple
+ tables.
+ """
+ DATA_SPLIT_METHOD_UNSPECIFIED = 0
+ RANDOM = 1
+ CUSTOM = 2
+ SEQUENTIAL = 3
+ NO_SPLIT = 4
+ AUTO_SPLIT = 5
+
+ class DataFrequency(proto.Enum):
+ r"""Type of supported data frequency for time series forecasting
+ models.
+ """
+ DATA_FREQUENCY_UNSPECIFIED = 0
+ AUTO_FREQUENCY = 1
+ YEARLY = 2
+ QUARTERLY = 3
+ MONTHLY = 4
+ WEEKLY = 5
+ DAILY = 6
+ HOURLY = 7
+ PER_MINUTE = 8
+
+ class HolidayRegion(proto.Enum):
+ r"""Type of supported holiday regions for time series forecasting
+ models.
+ """
+ HOLIDAY_REGION_UNSPECIFIED = 0
+ GLOBAL = 1
+ NA = 2
+ JAPAC = 3
+ EMEA = 4
+ LAC = 5
+ AE = 6
+ AR = 7
+ AT = 8
+ AU = 9
+ BE = 10
+ BR = 11
+ CA = 12
+ CH = 13
+ CL = 14
+ CN = 15
+ CO = 16
+ CS = 17
+ CZ = 18
+ DE = 19
+ DK = 20
+ DZ = 21
+ EC = 22
+ EE = 23
+ EG = 24
+ ES = 25
+ FI = 26
+ FR = 27
+ GB = 28
+ GR = 29
+ HK = 30
+ HU = 31
+ ID = 32
+ IE = 33
+ IL = 34
+ IN = 35
+ IR = 36
+ IT = 37
+ JP = 38
+ KR = 39
+ LV = 40
+ MA = 41
+ MX = 42
+ MY = 43
+ NG = 44
+ NL = 45
+ NO = 46
+ NZ = 47
+ PE = 48
+ PH = 49
+ PK = 50
+ PL = 51
+ PT = 52
+ RO = 53
+ RS = 54
+ RU = 55
+ SA = 56
+ SE = 57
+ SG = 58
+ SI = 59
+ SK = 60
+ TH = 61
+ TR = 62
+ TW = 63
+ UA = 64
+ US = 65
+ VE = 66
+ VN = 67
+ ZA = 68
+
+ class LearnRateStrategy(proto.Enum):
+ r"""Indicates the learning rate optimization strategy to use."""
+ LEARN_RATE_STRATEGY_UNSPECIFIED = 0
+ LINE_SEARCH = 1
+ CONSTANT = 2
+
+ class OptimizationStrategy(proto.Enum):
+ r"""Indicates the optimization strategy used for training."""
+ OPTIMIZATION_STRATEGY_UNSPECIFIED = 0
+ BATCH_GRADIENT_DESCENT = 1
+ NORMAL_EQUATION = 2
+
+ class FeedbackType(proto.Enum):
+ r"""Indicates the training algorithm to use for matrix
+ factorization models.
+ """
+ FEEDBACK_TYPE_UNSPECIFIED = 0
+ IMPLICIT = 1
+ EXPLICIT = 2
+
+ class SeasonalPeriod(proto.Message):
+ r""" """
+
+ class SeasonalPeriodType(proto.Enum):
+ r""""""
+ SEASONAL_PERIOD_TYPE_UNSPECIFIED = 0
+ NO_SEASONALITY = 1
+ DAILY = 2
+ WEEKLY = 3
+ MONTHLY = 4
+ QUARTERLY = 5
+ YEARLY = 6
+
+ class KmeansEnums(proto.Message):
+ r""" """
+
+ class KmeansInitializationMethod(proto.Enum):
+ r"""Indicates the method used to initialize the centroids for
+ KMeans clustering algorithm.
+ """
+ KMEANS_INITIALIZATION_METHOD_UNSPECIFIED = 0
+ RANDOM = 1
+ CUSTOM = 2
+ KMEANS_PLUS_PLUS = 3
+
+ class RegressionMetrics(proto.Message):
+ r"""Evaluation metrics for regression and explicit feedback type
+ matrix factorization models.
+
+ Attributes:
+ mean_absolute_error (google.protobuf.wrappers_pb2.DoubleValue):
+ Mean absolute error.
+ mean_squared_error (google.protobuf.wrappers_pb2.DoubleValue):
+ Mean squared error.
+ mean_squared_log_error (google.protobuf.wrappers_pb2.DoubleValue):
+ Mean squared log error.
+ median_absolute_error (google.protobuf.wrappers_pb2.DoubleValue):
+ Median absolute error.
+ r_squared (google.protobuf.wrappers_pb2.DoubleValue):
+ R^2 score. This corresponds to r2_score in ML.EVALUATE.
+ """
+
+ mean_absolute_error = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=wrappers_pb2.DoubleValue,
+ )
+ mean_squared_error = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=wrappers_pb2.DoubleValue,
+ )
+ mean_squared_log_error = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=wrappers_pb2.DoubleValue,
+ )
+ median_absolute_error = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=wrappers_pb2.DoubleValue,
+ )
+ r_squared = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=wrappers_pb2.DoubleValue,
+ )
+
+ class AggregateClassificationMetrics(proto.Message):
+ r"""Aggregate metrics for classification/classifier models. For
+ multi-class models, the metrics are either macro-averaged or
+ micro-averaged. When macro-averaged, the metrics are calculated
+ for each label and then an unweighted average is taken of those
+ values. When micro-averaged, the metric is calculated globally
+ by counting the total number of correctly predicted rows.
+
+ Attributes:
+ precision (google.protobuf.wrappers_pb2.DoubleValue):
+ Precision is the fraction of actual positive
+ predictions that had positive actual labels. For
+ multiclass this is a macro-averaged metric
+ treating each class as a binary classifier.
+ recall (google.protobuf.wrappers_pb2.DoubleValue):
+ Recall is the fraction of actual positive
+ labels that were given a positive prediction.
+ For multiclass this is a macro-averaged metric.
+ accuracy (google.protobuf.wrappers_pb2.DoubleValue):
+ Accuracy is the fraction of predictions given
+ the correct label. For multiclass this is a
+ micro-averaged metric.
+ threshold (google.protobuf.wrappers_pb2.DoubleValue):
+ Threshold at which the metrics are computed.
+ For binary classification models this is the
+ positive class threshold. For multi-class
+ classfication models this is the confidence
+ threshold.
+ f1_score (google.protobuf.wrappers_pb2.DoubleValue):
+ The F1 score is an average of recall and
+ precision. For multiclass this is a
+ macro-averaged metric.
+ log_loss (google.protobuf.wrappers_pb2.DoubleValue):
+ Logarithmic Loss. For multiclass this is a
+ macro-averaged metric.
+ roc_auc (google.protobuf.wrappers_pb2.DoubleValue):
+ Area Under a ROC Curve. For multiclass this
+ is a macro-averaged metric.
+ """
+
+ precision = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=wrappers_pb2.DoubleValue,
+ )
+ recall = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=wrappers_pb2.DoubleValue,
+ )
+ accuracy = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=wrappers_pb2.DoubleValue,
+ )
+ threshold = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=wrappers_pb2.DoubleValue,
+ )
+ f1_score = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=wrappers_pb2.DoubleValue,
+ )
+ log_loss = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message=wrappers_pb2.DoubleValue,
+ )
+ roc_auc = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=wrappers_pb2.DoubleValue,
+ )
+
+ class BinaryClassificationMetrics(proto.Message):
+ r"""Evaluation metrics for binary classification/classifier
+ models.
+
+ Attributes:
+ aggregate_classification_metrics (google.cloud.bigquery_v2.types.Model.AggregateClassificationMetrics):
+ Aggregate classification metrics.
+ binary_confusion_matrix_list (Sequence[google.cloud.bigquery_v2.types.Model.BinaryClassificationMetrics.BinaryConfusionMatrix]):
+ Binary confusion matrix at multiple
+ thresholds.
+ positive_label (str):
+ Label representing the positive class.
+ negative_label (str):
+ Label representing the negative class.
+ """
+
+ class BinaryConfusionMatrix(proto.Message):
+ r"""Confusion matrix for binary classification models.
+
+ Attributes:
+ positive_class_threshold (google.protobuf.wrappers_pb2.DoubleValue):
+ Threshold value used when computing each of
+ the following metric.
+ true_positives (google.protobuf.wrappers_pb2.Int64Value):
+ Number of true samples predicted as true.
+ false_positives (google.protobuf.wrappers_pb2.Int64Value):
+ Number of false samples predicted as true.
+ true_negatives (google.protobuf.wrappers_pb2.Int64Value):
+ Number of true samples predicted as false.
+ false_negatives (google.protobuf.wrappers_pb2.Int64Value):
+ Number of false samples predicted as false.
+ precision (google.protobuf.wrappers_pb2.DoubleValue):
+ The fraction of actual positive predictions
+ that had positive actual labels.
+ recall (google.protobuf.wrappers_pb2.DoubleValue):
+ The fraction of actual positive labels that
+ were given a positive prediction.
+ f1_score (google.protobuf.wrappers_pb2.DoubleValue):
+ The equally weighted average of recall and
+ precision.
+ accuracy (google.protobuf.wrappers_pb2.DoubleValue):
+ The fraction of predictions given the correct
+ label.
+ """
+
+ positive_class_threshold = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=wrappers_pb2.DoubleValue,
+ )
+ true_positives = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=wrappers_pb2.Int64Value,
+ )
+ false_positives = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=wrappers_pb2.Int64Value,
+ )
+ true_negatives = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=wrappers_pb2.Int64Value,
+ )
+ false_negatives = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=wrappers_pb2.Int64Value,
+ )
+ precision = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message=wrappers_pb2.DoubleValue,
+ )
+ recall = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=wrappers_pb2.DoubleValue,
+ )
+ f1_score = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=wrappers_pb2.DoubleValue,
+ )
+ accuracy = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message=wrappers_pb2.DoubleValue,
+ )
+
+ aggregate_classification_metrics = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Model.AggregateClassificationMetrics",
+ )
+ binary_confusion_matrix_list = proto.RepeatedField(
+ proto.MESSAGE,
+ number=2,
+ message="Model.BinaryClassificationMetrics.BinaryConfusionMatrix",
+ )
+ positive_label = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ negative_label = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+
+ class MultiClassClassificationMetrics(proto.Message):
+ r"""Evaluation metrics for multi-class classification/classifier
+ models.
+
+ Attributes:
+ aggregate_classification_metrics (google.cloud.bigquery_v2.types.Model.AggregateClassificationMetrics):
+ Aggregate classification metrics.
+ confusion_matrix_list (Sequence[google.cloud.bigquery_v2.types.Model.MultiClassClassificationMetrics.ConfusionMatrix]):
+ Confusion matrix at different thresholds.
+ """
+
+ class ConfusionMatrix(proto.Message):
+ r"""Confusion matrix for multi-class classification models.
+
+ Attributes:
+ confidence_threshold (google.protobuf.wrappers_pb2.DoubleValue):
+ Confidence threshold used when computing the
+ entries of the confusion matrix.
+ rows (Sequence[google.cloud.bigquery_v2.types.Model.MultiClassClassificationMetrics.ConfusionMatrix.Row]):
+ One row per actual label.
+ """
+
+ class Entry(proto.Message):
+ r"""A single entry in the confusion matrix.
+
+ Attributes:
+ predicted_label (str):
+ The predicted label. For confidence_threshold > 0, we will
+ also add an entry indicating the number of items under the
+ confidence threshold.
+ item_count (google.protobuf.wrappers_pb2.Int64Value):
+ Number of items being predicted as this
+ label.
+ """
+
+ predicted_label = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ item_count = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=wrappers_pb2.Int64Value,
+ )
+
+ class Row(proto.Message):
+ r"""A single row in the confusion matrix.
+
+ Attributes:
+ actual_label (str):
+ The original label of this row.
+ entries (Sequence[google.cloud.bigquery_v2.types.Model.MultiClassClassificationMetrics.ConfusionMatrix.Entry]):
+ Info describing predicted label distribution.
+ """
+
+ actual_label = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ entries = proto.RepeatedField(
+ proto.MESSAGE,
+ number=2,
+ message="Model.MultiClassClassificationMetrics.ConfusionMatrix.Entry",
+ )
+
+ confidence_threshold = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=wrappers_pb2.DoubleValue,
+ )
+ rows = proto.RepeatedField(
+ proto.MESSAGE,
+ number=2,
+ message="Model.MultiClassClassificationMetrics.ConfusionMatrix.Row",
+ )
+
+ aggregate_classification_metrics = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Model.AggregateClassificationMetrics",
+ )
+ confusion_matrix_list = proto.RepeatedField(
+ proto.MESSAGE,
+ number=2,
+ message="Model.MultiClassClassificationMetrics.ConfusionMatrix",
+ )
+
+ class ClusteringMetrics(proto.Message):
+ r"""Evaluation metrics for clustering models.
+
+ Attributes:
+ davies_bouldin_index (google.protobuf.wrappers_pb2.DoubleValue):
+ Davies-Bouldin index.
+ mean_squared_distance (google.protobuf.wrappers_pb2.DoubleValue):
+ Mean of squared distances between each sample
+ to its cluster centroid.
+ clusters (Sequence[google.cloud.bigquery_v2.types.Model.ClusteringMetrics.Cluster]):
+ Information for all clusters.
+ """
+
+ class Cluster(proto.Message):
+ r"""Message containing the information about one cluster.
+
+ Attributes:
+ centroid_id (int):
+ Centroid id.
+ feature_values (Sequence[google.cloud.bigquery_v2.types.Model.ClusteringMetrics.Cluster.FeatureValue]):
+ Values of highly variant features for this
+ cluster.
+ count (google.protobuf.wrappers_pb2.Int64Value):
+ Count of training data rows that were
+ assigned to this cluster.
+ """
+
+ class FeatureValue(proto.Message):
+ r"""Representative value of a single feature within the cluster.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ feature_column (str):
+ The feature column name.
+ numerical_value (google.protobuf.wrappers_pb2.DoubleValue):
+ The numerical feature value. This is the
+ centroid value for this feature.
+
+ This field is a member of `oneof`_ ``value``.
+ categorical_value (google.cloud.bigquery_v2.types.Model.ClusteringMetrics.Cluster.FeatureValue.CategoricalValue):
+ The categorical feature value.
+
+ This field is a member of `oneof`_ ``value``.
+ """
+
+ class CategoricalValue(proto.Message):
+ r"""Representative value of a categorical feature.
+
+ Attributes:
+ category_counts (Sequence[google.cloud.bigquery_v2.types.Model.ClusteringMetrics.Cluster.FeatureValue.CategoricalValue.CategoryCount]):
+ Counts of all categories for the categorical feature. If
+ there are more than ten categories, we return top ten (by
+ count) and return one more CategoryCount with category
+ "*OTHER*" and count as aggregate counts of remaining
+ categories.
+ """
+
+ class CategoryCount(proto.Message):
+ r"""Represents the count of a single category within the cluster.
+
+ Attributes:
+ category (str):
+ The name of category.
+ count (google.protobuf.wrappers_pb2.Int64Value):
+ The count of training samples matching the
+ category within the cluster.
+ """
+
+ category = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ count = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=wrappers_pb2.Int64Value,
+ )
+
+ category_counts = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="Model.ClusteringMetrics.Cluster.FeatureValue.CategoricalValue.CategoryCount",
+ )
+
+ feature_column = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ numerical_value = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="value",
+ message=wrappers_pb2.DoubleValue,
+ )
+ categorical_value = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="value",
+ message="Model.ClusteringMetrics.Cluster.FeatureValue.CategoricalValue",
+ )
+
+ centroid_id = proto.Field(
+ proto.INT64,
+ number=1,
+ )
+ feature_values = proto.RepeatedField(
+ proto.MESSAGE,
+ number=2,
+ message="Model.ClusteringMetrics.Cluster.FeatureValue",
+ )
+ count = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=wrappers_pb2.Int64Value,
+ )
+
+ davies_bouldin_index = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=wrappers_pb2.DoubleValue,
+ )
+ mean_squared_distance = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=wrappers_pb2.DoubleValue,
+ )
+ clusters = proto.RepeatedField(
+ proto.MESSAGE,
+ number=3,
+ message="Model.ClusteringMetrics.Cluster",
+ )
+
+ class RankingMetrics(proto.Message):
+ r"""Evaluation metrics used by weighted-ALS models specified by
+ feedback_type=implicit.
+
+ Attributes:
+ mean_average_precision (google.protobuf.wrappers_pb2.DoubleValue):
+ Calculates a precision per user for all the
+ items by ranking them and then averages all the
+ precisions across all the users.
+ mean_squared_error (google.protobuf.wrappers_pb2.DoubleValue):
+ Similar to the mean squared error computed in
+ regression and explicit recommendation models
+ except instead of computing the rating directly,
+ the output from evaluate is computed against a
+ preference which is 1 or 0 depending on if the
+ rating exists or not.
+ normalized_discounted_cumulative_gain (google.protobuf.wrappers_pb2.DoubleValue):
+ A metric to determine the goodness of a
+ ranking calculated from the predicted confidence
+ by comparing it to an ideal rank measured by the
+ original ratings.
+ average_rank (google.protobuf.wrappers_pb2.DoubleValue):
+ Determines the goodness of a ranking by
+ computing the percentile rank from the predicted
+ confidence and dividing it by the original rank.
+ """
+
+ mean_average_precision = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=wrappers_pb2.DoubleValue,
+ )
+ mean_squared_error = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=wrappers_pb2.DoubleValue,
+ )
+ normalized_discounted_cumulative_gain = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=wrappers_pb2.DoubleValue,
+ )
+ average_rank = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=wrappers_pb2.DoubleValue,
+ )
+
+ class ArimaForecastingMetrics(proto.Message):
+ r"""Model evaluation metrics for ARIMA forecasting models.
+
+ Attributes:
+ non_seasonal_order (Sequence[google.cloud.bigquery_v2.types.Model.ArimaOrder]):
+ Non-seasonal order.
+ arima_fitting_metrics (Sequence[google.cloud.bigquery_v2.types.Model.ArimaFittingMetrics]):
+ Arima model fitting metrics.
+ seasonal_periods (Sequence[google.cloud.bigquery_v2.types.Model.SeasonalPeriod.SeasonalPeriodType]):
+ Seasonal periods. Repeated because multiple
+ periods are supported for one time series.
+ has_drift (Sequence[bool]):
+ Whether Arima model fitted with drift or not.
+ It is always false when d is not 1.
+ time_series_id (Sequence[str]):
+ Id to differentiate different time series for
+ the large-scale case.
+ arima_single_model_forecasting_metrics (Sequence[google.cloud.bigquery_v2.types.Model.ArimaForecastingMetrics.ArimaSingleModelForecastingMetrics]):
+ Repeated as there can be many metric sets
+ (one for each model) in auto-arima and the
+ large-scale case.
+ """
+
+ class ArimaSingleModelForecastingMetrics(proto.Message):
+ r"""Model evaluation metrics for a single ARIMA forecasting
+ model.
+
+ Attributes:
+ non_seasonal_order (google.cloud.bigquery_v2.types.Model.ArimaOrder):
+ Non-seasonal order.
+ arima_fitting_metrics (google.cloud.bigquery_v2.types.Model.ArimaFittingMetrics):
+ Arima fitting metrics.
+ has_drift (bool):
+ Is arima model fitted with drift or not. It
+ is always false when d is not 1.
+ time_series_id (str):
+ The time_series_id value for this time series. It will be
+ one of the unique values from the time_series_id_column
+ specified during ARIMA model training. Only present when
+ time_series_id_column training option was used.
+ time_series_ids (Sequence[str]):
+ The tuple of time_series_ids identifying this time series.
+ It will be one of the unique tuples of values present in the
+ time_series_id_columns specified during ARIMA model
+ training. Only present when time_series_id_columns training
+ option was used and the order of values here are same as the
+ order of time_series_id_columns.
+ seasonal_periods (Sequence[google.cloud.bigquery_v2.types.Model.SeasonalPeriod.SeasonalPeriodType]):
+ Seasonal periods. Repeated because multiple
+ periods are supported for one time series.
+ has_holiday_effect (google.protobuf.wrappers_pb2.BoolValue):
+ If true, holiday_effect is a part of time series
+ decomposition result.
+ has_spikes_and_dips (google.protobuf.wrappers_pb2.BoolValue):
+ If true, spikes_and_dips is a part of time series
+ decomposition result.
+ has_step_changes (google.protobuf.wrappers_pb2.BoolValue):
+ If true, step_changes is a part of time series decomposition
+ result.
+ """
+
+ non_seasonal_order = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Model.ArimaOrder",
+ )
+ arima_fitting_metrics = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message="Model.ArimaFittingMetrics",
+ )
+ has_drift = proto.Field(
+ proto.BOOL,
+ number=3,
+ )
+ time_series_id = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ time_series_ids = proto.RepeatedField(
+ proto.STRING,
+ number=9,
+ )
+ seasonal_periods = proto.RepeatedField(
+ proto.ENUM,
+ number=5,
+ enum="Model.SeasonalPeriod.SeasonalPeriodType",
+ )
+ has_holiday_effect = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message=wrappers_pb2.BoolValue,
+ )
+ has_spikes_and_dips = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=wrappers_pb2.BoolValue,
+ )
+ has_step_changes = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=wrappers_pb2.BoolValue,
+ )
+
+ non_seasonal_order = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="Model.ArimaOrder",
+ )
+ arima_fitting_metrics = proto.RepeatedField(
+ proto.MESSAGE,
+ number=2,
+ message="Model.ArimaFittingMetrics",
+ )
+ seasonal_periods = proto.RepeatedField(
+ proto.ENUM,
+ number=3,
+ enum="Model.SeasonalPeriod.SeasonalPeriodType",
+ )
+ has_drift = proto.RepeatedField(
+ proto.BOOL,
+ number=4,
+ )
+ time_series_id = proto.RepeatedField(
+ proto.STRING,
+ number=5,
+ )
+ arima_single_model_forecasting_metrics = proto.RepeatedField(
+ proto.MESSAGE,
+ number=6,
+ message="Model.ArimaForecastingMetrics.ArimaSingleModelForecastingMetrics",
+ )
+
+ class EvaluationMetrics(proto.Message):
+ r"""Evaluation metrics of a model. These are either computed on
+ all training data or just the eval data based on whether eval
+ data was used during training. These are not present for
+ imported models.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ regression_metrics (google.cloud.bigquery_v2.types.Model.RegressionMetrics):
+ Populated for regression models and explicit
+ feedback type matrix factorization models.
+
+ This field is a member of `oneof`_ ``metrics``.
+ binary_classification_metrics (google.cloud.bigquery_v2.types.Model.BinaryClassificationMetrics):
+ Populated for binary
+ classification/classifier models.
+
+ This field is a member of `oneof`_ ``metrics``.
+ multi_class_classification_metrics (google.cloud.bigquery_v2.types.Model.MultiClassClassificationMetrics):
+ Populated for multi-class
+ classification/classifier models.
+
+ This field is a member of `oneof`_ ``metrics``.
+ clustering_metrics (google.cloud.bigquery_v2.types.Model.ClusteringMetrics):
+ Populated for clustering models.
+
+ This field is a member of `oneof`_ ``metrics``.
+ ranking_metrics (google.cloud.bigquery_v2.types.Model.RankingMetrics):
+ Populated for implicit feedback type matrix
+ factorization models.
+
+ This field is a member of `oneof`_ ``metrics``.
+ arima_forecasting_metrics (google.cloud.bigquery_v2.types.Model.ArimaForecastingMetrics):
+ Populated for ARIMA models.
+
+ This field is a member of `oneof`_ ``metrics``.
+ """
+
+ regression_metrics = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="metrics",
+ message="Model.RegressionMetrics",
+ )
+ binary_classification_metrics = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="metrics",
+ message="Model.BinaryClassificationMetrics",
+ )
+ multi_class_classification_metrics = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="metrics",
+ message="Model.MultiClassClassificationMetrics",
+ )
+ clustering_metrics = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ oneof="metrics",
+ message="Model.ClusteringMetrics",
+ )
+ ranking_metrics = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ oneof="metrics",
+ message="Model.RankingMetrics",
+ )
+ arima_forecasting_metrics = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ oneof="metrics",
+ message="Model.ArimaForecastingMetrics",
+ )
+
+ class DataSplitResult(proto.Message):
+ r"""Data split result. This contains references to the training
+ and evaluation data tables that were used to train the model.
+
+ Attributes:
+ training_table (google.cloud.bigquery_v2.types.TableReference):
+ Table reference of the training data after
+ split.
+ evaluation_table (google.cloud.bigquery_v2.types.TableReference):
+ Table reference of the evaluation data after
+ split.
+ """
+
+ training_table = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=table_reference.TableReference,
+ )
+ evaluation_table = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=table_reference.TableReference,
+ )
+
+ class ArimaOrder(proto.Message):
+ r"""Arima order, can be used for both non-seasonal and seasonal
+ parts.
+
+ Attributes:
+ p (int):
+ Order of the autoregressive part.
+ d (int):
+ Order of the differencing part.
+ q (int):
+ Order of the moving-average part.
+ """
+
+ p = proto.Field(
+ proto.INT64,
+ number=1,
+ )
+ d = proto.Field(
+ proto.INT64,
+ number=2,
+ )
+ q = proto.Field(
+ proto.INT64,
+ number=3,
+ )
+
+ class ArimaFittingMetrics(proto.Message):
+ r"""ARIMA model fitting metrics.
+
+ Attributes:
+ log_likelihood (float):
+ Log-likelihood.
+ aic (float):
+ AIC.
+ variance (float):
+ Variance.
+ """
+
+ log_likelihood = proto.Field(
+ proto.DOUBLE,
+ number=1,
+ )
+ aic = proto.Field(
+ proto.DOUBLE,
+ number=2,
+ )
+ variance = proto.Field(
+ proto.DOUBLE,
+ number=3,
+ )
+
+ class GlobalExplanation(proto.Message):
+ r"""Global explanations containing the top most important
+ features after training.
+
+ Attributes:
+ explanations (Sequence[google.cloud.bigquery_v2.types.Model.GlobalExplanation.Explanation]):
+ A list of the top global explanations. Sorted
+ by absolute value of attribution in descending
+ order.
+ class_label (str):
+ Class label for this set of global
+ explanations. Will be empty/null for binary
+ logistic and linear regression models. Sorted
+ alphabetically in descending order.
+ """
+
+ class Explanation(proto.Message):
+ r"""Explanation for a single feature.
+
+ Attributes:
+ feature_name (str):
+ Full name of the feature. For non-numerical features, will
+ be formatted like ..
+ Overall size of feature name will always be truncated to
+ first 120 characters.
+ attribution (google.protobuf.wrappers_pb2.DoubleValue):
+ Attribution of feature.
+ """
+
+ feature_name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ attribution = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=wrappers_pb2.DoubleValue,
+ )
+
+ explanations = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="Model.GlobalExplanation.Explanation",
+ )
+ class_label = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+ class TrainingRun(proto.Message):
+ r"""Information about a single training query run for the model.
+
+ Attributes:
+ training_options (google.cloud.bigquery_v2.types.Model.TrainingRun.TrainingOptions):
+ Options that were used for this training run,
+ includes user specified and default options that
+ were used.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ The start time of this training run.
+ results (Sequence[google.cloud.bigquery_v2.types.Model.TrainingRun.IterationResult]):
+ Output of each iteration run, results.size() <=
+ max_iterations.
+ evaluation_metrics (google.cloud.bigquery_v2.types.Model.EvaluationMetrics):
+ The evaluation metrics over training/eval
+ data that were computed at the end of training.
+ data_split_result (google.cloud.bigquery_v2.types.Model.DataSplitResult):
+ Data split result of the training run. Only
+ set when the input data is actually split.
+ global_explanations (Sequence[google.cloud.bigquery_v2.types.Model.GlobalExplanation]):
+ Global explanations for important features of
+ the model. For multi-class models, there is one
+ entry for each label class. For other models,
+ there is only one entry in the list.
+ """
+
+ class TrainingOptions(proto.Message):
+ r"""Options used in model training.
+
+ Attributes:
+ max_iterations (int):
+ The maximum number of iterations in training.
+ Used only for iterative training algorithms.
+ loss_type (google.cloud.bigquery_v2.types.Model.LossType):
+ Type of loss function used during training
+ run.
+ learn_rate (float):
+ Learning rate in training. Used only for
+ iterative training algorithms.
+ l1_regularization (google.protobuf.wrappers_pb2.DoubleValue):
+ L1 regularization coefficient.
+ l2_regularization (google.protobuf.wrappers_pb2.DoubleValue):
+ L2 regularization coefficient.
+ min_relative_progress (google.protobuf.wrappers_pb2.DoubleValue):
+ When early_stop is true, stops training when accuracy
+ improvement is less than 'min_relative_progress'. Used only
+ for iterative training algorithms.
+ warm_start (google.protobuf.wrappers_pb2.BoolValue):
+ Whether to train a model from the last
+ checkpoint.
+ early_stop (google.protobuf.wrappers_pb2.BoolValue):
+ Whether to stop early when the loss doesn't improve
+ significantly any more (compared to min_relative_progress).
+ Used only for iterative training algorithms.
+ input_label_columns (Sequence[str]):
+ Name of input label columns in training data.
+ data_split_method (google.cloud.bigquery_v2.types.Model.DataSplitMethod):
+ The data split type for training and
+ evaluation, e.g. RANDOM.
+ data_split_eval_fraction (float):
+ The fraction of evaluation data over the
+ whole input data. The rest of data will be used
+ as training data. The format should be double.
+ Accurate to two decimal places.
+ Default value is 0.2.
+ data_split_column (str):
+ The column to split data with. This column won't be used as
+ a feature.
+
+ 1. When data_split_method is CUSTOM, the corresponding
+ column should be boolean. The rows with true value tag
+ are eval data, and the false are training data.
+ 2. When data_split_method is SEQ, the first
+ DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest)
+ in the corresponding column are used as training data,
+ and the rest are eval data. It respects the order in
+ Orderable data types:
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
+ learn_rate_strategy (google.cloud.bigquery_v2.types.Model.LearnRateStrategy):
+ The strategy to determine learn rate for the
+ current iteration.
+ initial_learn_rate (float):
+ Specifies the initial learning rate for the
+ line search learn rate strategy.
+ label_class_weights (Mapping[str, float]):
+ Weights associated with each label class, for
+ rebalancing the training data. Only applicable
+ for classification models.
+ user_column (str):
+ User column specified for matrix
+ factorization models.
+ item_column (str):
+ Item column specified for matrix
+ factorization models.
+ distance_type (google.cloud.bigquery_v2.types.Model.DistanceType):
+ Distance type for clustering models.
+ num_clusters (int):
+ Number of clusters for clustering models.
+ model_uri (str):
+ Google Cloud Storage URI from which the model
+ was imported. Only applicable for imported
+ models.
+ optimization_strategy (google.cloud.bigquery_v2.types.Model.OptimizationStrategy):
+ Optimization strategy for training linear
+ regression models.
+ hidden_units (Sequence[int]):
+ Hidden units for dnn models.
+ batch_size (int):
+ Batch size for dnn models.
+ dropout (google.protobuf.wrappers_pb2.DoubleValue):
+ Dropout probability for dnn models.
+ max_tree_depth (int):
+ Maximum depth of a tree for boosted tree
+ models.
+ subsample (float):
+ Subsample fraction of the training data to
+ grow tree to prevent overfitting for boosted
+ tree models.
+ min_split_loss (google.protobuf.wrappers_pb2.DoubleValue):
+ Minimum split loss for boosted tree models.
+ num_factors (int):
+ Num factors specified for matrix
+ factorization models.
+ feedback_type (google.cloud.bigquery_v2.types.Model.FeedbackType):
+ Feedback type that specifies which algorithm
+ to run for matrix factorization.
+ wals_alpha (google.protobuf.wrappers_pb2.DoubleValue):
+ Hyperparameter for matrix factoration when
+ implicit feedback type is specified.
+ kmeans_initialization_method (google.cloud.bigquery_v2.types.Model.KmeansEnums.KmeansInitializationMethod):
+ The method used to initialize the centroids
+ for kmeans algorithm.
+ kmeans_initialization_column (str):
+ The column used to provide the initial centroids for kmeans
+ algorithm when kmeans_initialization_method is CUSTOM.
+ time_series_timestamp_column (str):
+ Column to be designated as time series
+ timestamp for ARIMA model.
+ time_series_data_column (str):
+ Column to be designated as time series data
+ for ARIMA model.
+ auto_arima (bool):
+ Whether to enable auto ARIMA or not.
+ non_seasonal_order (google.cloud.bigquery_v2.types.Model.ArimaOrder):
+ A specification of the non-seasonal part of
+ the ARIMA model: the three components (p, d, q)
+ are the AR order, the degree of differencing,
+ and the MA order.
+ data_frequency (google.cloud.bigquery_v2.types.Model.DataFrequency):
+ The data frequency of a time series.
+ include_drift (bool):
+ Include drift when fitting an ARIMA model.
+ holiday_region (google.cloud.bigquery_v2.types.Model.HolidayRegion):
+ The geographical region based on which the
+ holidays are considered in time series modeling.
+ If a valid value is specified, then holiday
+ effects modeling is enabled.
+ time_series_id_column (str):
+ The time series id column that was used
+ during ARIMA model training.
+ time_series_id_columns (Sequence[str]):
+ The time series id columns that were used
+ during ARIMA model training.
+ horizon (int):
+ The number of periods ahead that need to be
+ forecasted.
+ preserve_input_structs (bool):
+ Whether to preserve the input structs in output feature
+ names. Suppose there is a struct A with field b. When false
+ (default), the output feature name is A_b. When true, the
+ output feature name is A.b.
+ auto_arima_max_order (int):
+ The max value of non-seasonal p and q.
+ decompose_time_series (google.protobuf.wrappers_pb2.BoolValue):
+ If true, perform decompose time series and
+ save the results.
+ clean_spikes_and_dips (google.protobuf.wrappers_pb2.BoolValue):
+ If true, clean spikes and dips in the input
+ time series.
+ adjust_step_changes (google.protobuf.wrappers_pb2.BoolValue):
+ If true, detect step changes and make data
+ adjustment in the input time series.
+ """
+
+ max_iterations = proto.Field(
+ proto.INT64,
+ number=1,
+ )
+ loss_type = proto.Field(
+ proto.ENUM,
+ number=2,
+ enum="Model.LossType",
+ )
+ learn_rate = proto.Field(
+ proto.DOUBLE,
+ number=3,
+ )
+ l1_regularization = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=wrappers_pb2.DoubleValue,
+ )
+ l2_regularization = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=wrappers_pb2.DoubleValue,
+ )
+ min_relative_progress = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message=wrappers_pb2.DoubleValue,
+ )
+ warm_start = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=wrappers_pb2.BoolValue,
+ )
+ early_stop = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=wrappers_pb2.BoolValue,
+ )
+ input_label_columns = proto.RepeatedField(
+ proto.STRING,
+ number=9,
+ )
+ data_split_method = proto.Field(
+ proto.ENUM,
+ number=10,
+ enum="Model.DataSplitMethod",
+ )
+ data_split_eval_fraction = proto.Field(
+ proto.DOUBLE,
+ number=11,
+ )
+ data_split_column = proto.Field(
+ proto.STRING,
+ number=12,
+ )
+ learn_rate_strategy = proto.Field(
+ proto.ENUM,
+ number=13,
+ enum="Model.LearnRateStrategy",
+ )
+ initial_learn_rate = proto.Field(
+ proto.DOUBLE,
+ number=16,
+ )
+ label_class_weights = proto.MapField(
+ proto.STRING,
+ proto.DOUBLE,
+ number=17,
+ )
+ user_column = proto.Field(
+ proto.STRING,
+ number=18,
+ )
+ item_column = proto.Field(
+ proto.STRING,
+ number=19,
+ )
+ distance_type = proto.Field(
+ proto.ENUM,
+ number=20,
+ enum="Model.DistanceType",
+ )
+ num_clusters = proto.Field(
+ proto.INT64,
+ number=21,
+ )
+ model_uri = proto.Field(
+ proto.STRING,
+ number=22,
+ )
+ optimization_strategy = proto.Field(
+ proto.ENUM,
+ number=23,
+ enum="Model.OptimizationStrategy",
+ )
+ hidden_units = proto.RepeatedField(
+ proto.INT64,
+ number=24,
+ )
+ batch_size = proto.Field(
+ proto.INT64,
+ number=25,
+ )
+ dropout = proto.Field(
+ proto.MESSAGE,
+ number=26,
+ message=wrappers_pb2.DoubleValue,
+ )
+ max_tree_depth = proto.Field(
+ proto.INT64,
+ number=27,
+ )
+ subsample = proto.Field(
+ proto.DOUBLE,
+ number=28,
+ )
+ min_split_loss = proto.Field(
+ proto.MESSAGE,
+ number=29,
+ message=wrappers_pb2.DoubleValue,
+ )
+ num_factors = proto.Field(
+ proto.INT64,
+ number=30,
+ )
+ feedback_type = proto.Field(
+ proto.ENUM,
+ number=31,
+ enum="Model.FeedbackType",
+ )
+ wals_alpha = proto.Field(
+ proto.MESSAGE,
+ number=32,
+ message=wrappers_pb2.DoubleValue,
+ )
+ kmeans_initialization_method = proto.Field(
+ proto.ENUM,
+ number=33,
+ enum="Model.KmeansEnums.KmeansInitializationMethod",
+ )
+ kmeans_initialization_column = proto.Field(
+ proto.STRING,
+ number=34,
+ )
+ time_series_timestamp_column = proto.Field(
+ proto.STRING,
+ number=35,
+ )
+ time_series_data_column = proto.Field(
+ proto.STRING,
+ number=36,
+ )
+ auto_arima = proto.Field(
+ proto.BOOL,
+ number=37,
+ )
+ non_seasonal_order = proto.Field(
+ proto.MESSAGE,
+ number=38,
+ message="Model.ArimaOrder",
+ )
+ data_frequency = proto.Field(
+ proto.ENUM,
+ number=39,
+ enum="Model.DataFrequency",
+ )
+ include_drift = proto.Field(
+ proto.BOOL,
+ number=41,
+ )
+ holiday_region = proto.Field(
+ proto.ENUM,
+ number=42,
+ enum="Model.HolidayRegion",
+ )
+ time_series_id_column = proto.Field(
+ proto.STRING,
+ number=43,
+ )
+ time_series_id_columns = proto.RepeatedField(
+ proto.STRING,
+ number=51,
+ )
+ horizon = proto.Field(
+ proto.INT64,
+ number=44,
+ )
+ preserve_input_structs = proto.Field(
+ proto.BOOL,
+ number=45,
+ )
+ auto_arima_max_order = proto.Field(
+ proto.INT64,
+ number=46,
+ )
+ decompose_time_series = proto.Field(
+ proto.MESSAGE,
+ number=50,
+ message=wrappers_pb2.BoolValue,
+ )
+ clean_spikes_and_dips = proto.Field(
+ proto.MESSAGE,
+ number=52,
+ message=wrappers_pb2.BoolValue,
+ )
+ adjust_step_changes = proto.Field(
+ proto.MESSAGE,
+ number=53,
+ message=wrappers_pb2.BoolValue,
+ )
+
+ class IterationResult(proto.Message):
+ r"""Information about a single iteration of the training run.
+
+ Attributes:
+ index (google.protobuf.wrappers_pb2.Int32Value):
+ Index of the iteration, 0 based.
+ duration_ms (google.protobuf.wrappers_pb2.Int64Value):
+ Time taken to run the iteration in
+ milliseconds.
+ training_loss (google.protobuf.wrappers_pb2.DoubleValue):
+ Loss computed on the training data at the end
+ of iteration.
+ eval_loss (google.protobuf.wrappers_pb2.DoubleValue):
+ Loss computed on the eval data at the end of
+ iteration.
+ learn_rate (float):
+ Learn rate used for this iteration.
+ cluster_infos (Sequence[google.cloud.bigquery_v2.types.Model.TrainingRun.IterationResult.ClusterInfo]):
+ Information about top clusters for clustering
+ models.
+ arima_result (google.cloud.bigquery_v2.types.Model.TrainingRun.IterationResult.ArimaResult):
+
+ """
+
+ class ClusterInfo(proto.Message):
+ r"""Information about a single cluster for clustering model.
+
+ Attributes:
+ centroid_id (int):
+ Centroid id.
+ cluster_radius (google.protobuf.wrappers_pb2.DoubleValue):
+ Cluster radius, the average distance from
+ centroid to each point assigned to the cluster.
+ cluster_size (google.protobuf.wrappers_pb2.Int64Value):
+ Cluster size, the total number of points
+ assigned to the cluster.
+ """
+
+ centroid_id = proto.Field(
+ proto.INT64,
+ number=1,
+ )
+ cluster_radius = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=wrappers_pb2.DoubleValue,
+ )
+ cluster_size = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=wrappers_pb2.Int64Value,
+ )
+
+ class ArimaResult(proto.Message):
+ r"""(Auto-)arima fitting result. Wrap everything in ArimaResult
+ for easier refactoring if we want to use model-specific
+ iteration results.
+
+ Attributes:
+ arima_model_info (Sequence[google.cloud.bigquery_v2.types.Model.TrainingRun.IterationResult.ArimaResult.ArimaModelInfo]):
+ This message is repeated because there are
+ multiple arima models fitted in auto-arima. For
+ non-auto-arima model, its size is one.
+ seasonal_periods (Sequence[google.cloud.bigquery_v2.types.Model.SeasonalPeriod.SeasonalPeriodType]):
+ Seasonal periods. Repeated because multiple
+ periods are supported for one time series.
+ """
+
+ class ArimaCoefficients(proto.Message):
+ r"""Arima coefficients.
+
+ Attributes:
+ auto_regressive_coefficients (Sequence[float]):
+ Auto-regressive coefficients, an array of
+ double.
+ moving_average_coefficients (Sequence[float]):
+ Moving-average coefficients, an array of
+ double.
+ intercept_coefficient (float):
+ Intercept coefficient, just a double not an
+ array.
+ """
+
+ auto_regressive_coefficients = proto.RepeatedField(
+ proto.DOUBLE,
+ number=1,
+ )
+ moving_average_coefficients = proto.RepeatedField(
+ proto.DOUBLE,
+ number=2,
+ )
+ intercept_coefficient = proto.Field(
+ proto.DOUBLE,
+ number=3,
+ )
+
+ class ArimaModelInfo(proto.Message):
+ r"""Arima model information.
+
+ Attributes:
+ non_seasonal_order (google.cloud.bigquery_v2.types.Model.ArimaOrder):
+ Non-seasonal order.
+ arima_coefficients (google.cloud.bigquery_v2.types.Model.TrainingRun.IterationResult.ArimaResult.ArimaCoefficients):
+ Arima coefficients.
+ arima_fitting_metrics (google.cloud.bigquery_v2.types.Model.ArimaFittingMetrics):
+ Arima fitting metrics.
+ has_drift (bool):
+ Whether Arima model fitted with drift or not.
+ It is always false when d is not 1.
+ time_series_id (str):
+ The time_series_id value for this time series. It will be
+ one of the unique values from the time_series_id_column
+ specified during ARIMA model training. Only present when
+ time_series_id_column training option was used.
+ time_series_ids (Sequence[str]):
+ The tuple of time_series_ids identifying this time series.
+ It will be one of the unique tuples of values present in the
+ time_series_id_columns specified during ARIMA model
+ training. Only present when time_series_id_columns training
+ option was used and the order of values here are same as the
+ order of time_series_id_columns.
+ seasonal_periods (Sequence[google.cloud.bigquery_v2.types.Model.SeasonalPeriod.SeasonalPeriodType]):
+ Seasonal periods. Repeated because multiple
+ periods are supported for one time series.
+ has_holiday_effect (google.protobuf.wrappers_pb2.BoolValue):
+ If true, holiday_effect is a part of time series
+ decomposition result.
+ has_spikes_and_dips (google.protobuf.wrappers_pb2.BoolValue):
+ If true, spikes_and_dips is a part of time series
+ decomposition result.
+ has_step_changes (google.protobuf.wrappers_pb2.BoolValue):
+ If true, step_changes is a part of time series decomposition
+ result.
+ """
+
+ non_seasonal_order = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Model.ArimaOrder",
+ )
+ arima_coefficients = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message="Model.TrainingRun.IterationResult.ArimaResult.ArimaCoefficients",
+ )
+ arima_fitting_metrics = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message="Model.ArimaFittingMetrics",
+ )
+ has_drift = proto.Field(
+ proto.BOOL,
+ number=4,
+ )
+ time_series_id = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+ time_series_ids = proto.RepeatedField(
+ proto.STRING,
+ number=10,
+ )
+ seasonal_periods = proto.RepeatedField(
+ proto.ENUM,
+ number=6,
+ enum="Model.SeasonalPeriod.SeasonalPeriodType",
+ )
+ has_holiday_effect = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=wrappers_pb2.BoolValue,
+ )
+ has_spikes_and_dips = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=wrappers_pb2.BoolValue,
+ )
+ has_step_changes = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message=wrappers_pb2.BoolValue,
+ )
+
+ arima_model_info = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="Model.TrainingRun.IterationResult.ArimaResult.ArimaModelInfo",
+ )
+ seasonal_periods = proto.RepeatedField(
+ proto.ENUM,
+ number=2,
+ enum="Model.SeasonalPeriod.SeasonalPeriodType",
+ )
+
+ index = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=wrappers_pb2.Int32Value,
+ )
+ duration_ms = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=wrappers_pb2.Int64Value,
+ )
+ training_loss = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=wrappers_pb2.DoubleValue,
+ )
+ eval_loss = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message=wrappers_pb2.DoubleValue,
+ )
+ learn_rate = proto.Field(
+ proto.DOUBLE,
+ number=7,
+ )
+ cluster_infos = proto.RepeatedField(
+ proto.MESSAGE,
+ number=8,
+ message="Model.TrainingRun.IterationResult.ClusterInfo",
+ )
+ arima_result = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message="Model.TrainingRun.IterationResult.ArimaResult",
+ )
+
+ training_options = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Model.TrainingRun.TrainingOptions",
+ )
+ start_time = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=timestamp_pb2.Timestamp,
+ )
+ results = proto.RepeatedField(
+ proto.MESSAGE,
+ number=6,
+ message="Model.TrainingRun.IterationResult",
+ )
+ evaluation_metrics = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message="Model.EvaluationMetrics",
+ )
+ data_split_result = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message="Model.DataSplitResult",
+ )
+ global_explanations = proto.RepeatedField(
+ proto.MESSAGE,
+ number=10,
+ message="Model.GlobalExplanation",
+ )
+
+ etag = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ model_reference = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=gcb_model_reference.ModelReference,
+ )
+ creation_time = proto.Field(
+ proto.INT64,
+ number=5,
+ )
+ last_modified_time = proto.Field(
+ proto.INT64,
+ number=6,
+ )
+ description = proto.Field(
+ proto.STRING,
+ number=12,
+ )
+ friendly_name = proto.Field(
+ proto.STRING,
+ number=14,
+ )
+ labels = proto.MapField(
+ proto.STRING,
+ proto.STRING,
+ number=15,
+ )
+ expiration_time = proto.Field(
+ proto.INT64,
+ number=16,
+ )
+ location = proto.Field(
+ proto.STRING,
+ number=13,
+ )
+ encryption_configuration = proto.Field(
+ proto.MESSAGE,
+ number=17,
+ message=encryption_config.EncryptionConfiguration,
+ )
+ model_type = proto.Field(
+ proto.ENUM,
+ number=7,
+ enum=ModelType,
+ )
+ training_runs = proto.RepeatedField(
+ proto.MESSAGE,
+ number=9,
+ message=TrainingRun,
+ )
+ feature_columns = proto.RepeatedField(
+ proto.MESSAGE,
+ number=10,
+ message=standard_sql.StandardSqlField,
+ )
+ label_columns = proto.RepeatedField(
+ proto.MESSAGE,
+ number=11,
+ message=standard_sql.StandardSqlField,
+ )
+ best_trial_id = proto.Field(
+ proto.INT64,
+ number=19,
+ )
+
+
+class GetModelRequest(proto.Message):
+ r"""
+
+ Attributes:
+ project_id (str):
+ Required. Project ID of the requested model.
+ dataset_id (str):
+ Required. Dataset ID of the requested model.
+ model_id (str):
+ Required. Model ID of the requested model.
+ """
+
+ project_id = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ dataset_id = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ model_id = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
+class PatchModelRequest(proto.Message):
+ r"""
+
+ Attributes:
+ project_id (str):
+ Required. Project ID of the model to patch.
+ dataset_id (str):
+ Required. Dataset ID of the model to patch.
+ model_id (str):
+ Required. Model ID of the model to patch.
+ model (google.cloud.bigquery_v2.types.Model):
+ Required. Patched model.
+ Follows RFC5789 patch semantics. Missing fields
+ are not updated. To clear a field, explicitly
+ set to default value.
+ """
+
+ project_id = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ dataset_id = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ model_id = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ model = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message="Model",
+ )
+
+
+class DeleteModelRequest(proto.Message):
+ r"""
+
+ Attributes:
+ project_id (str):
+ Required. Project ID of the model to delete.
+ dataset_id (str):
+ Required. Dataset ID of the model to delete.
+ model_id (str):
+ Required. Model ID of the model to delete.
+ """
+
+ project_id = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ dataset_id = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ model_id = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
+class ListModelsRequest(proto.Message):
+ r"""
+
+ Attributes:
+ project_id (str):
+ Required. Project ID of the models to list.
+ dataset_id (str):
+ Required. Dataset ID of the models to list.
+ max_results (google.protobuf.wrappers_pb2.UInt32Value):
+ The maximum number of results to return in a
+ single response page. Leverage the page tokens
+ to iterate through the entire collection.
+ page_token (str):
+ Page token, returned by a previous call to
+ request the next page of results
+ """
+
+ project_id = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ dataset_id = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ max_results = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=wrappers_pb2.UInt32Value,
+ )
+ page_token = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+
+
+class ListModelsResponse(proto.Message):
+ r"""
+
+ Attributes:
+ models (Sequence[google.cloud.bigquery_v2.types.Model]):
+ Models in the requested dataset. Only the following fields
+ are populated: model_reference, model_type, creation_time,
+ last_modified_time and labels.
+ next_page_token (str):
+ A token to request the next page of results.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ models = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="Model",
+ )
+ next_page_token = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/model_reference.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/model_reference.py
new file mode 100644
index 0000000000000000000000000000000000000000..cde139ebe5cdfa26445ffec8a1689d79ec19d52c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/model_reference.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.bigquery.v2",
+ manifest={
+ "ModelReference",
+ },
+)
+
+
+class ModelReference(proto.Message):
+ r"""Id path of a model.
+
+ Attributes:
+ project_id (str):
+ Required. The ID of the project containing
+ this model.
+ dataset_id (str):
+ Required. The ID of the dataset containing
+ this model.
+ model_id (str):
+ Required. The ID of the model. The ID must contain only
+ letters (a-z, A-Z), numbers (0-9), or underscores (_). The
+ maximum length is 1,024 characters.
+ """
+
+ project_id = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ dataset_id = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ model_id = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/standard_sql.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/standard_sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..3be5304fc153786cd12992cae41b9b54c35885cb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/standard_sql.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.bigquery.v2",
+ manifest={
+ "StandardSqlDataType",
+ "StandardSqlField",
+ "StandardSqlStructType",
+ "StandardSqlTableType",
+ },
+)
+
+
+class StandardSqlDataType(proto.Message):
+ r"""The type of a variable, e.g., a function argument. Examples: INT64:
+ {type_kind="INT64"} ARRAY: {type_kind="ARRAY",
+ array_element_type="STRING"} STRUCT:
+ {type_kind="STRUCT", struct_type={fields=[ {name="x",
+ type={type_kind="STRING"}}, {name="y", type={type_kind="ARRAY",
+ array_element_type="DATE"}} ]}}
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ type_kind (google.cloud.bigquery_v2.types.StandardSqlDataType.TypeKind):
+ Required. The top level type of this field.
+ Can be any standard SQL data type (e.g.,
+ "INT64", "DATE", "ARRAY").
+ array_element_type (google.cloud.bigquery_v2.types.StandardSqlDataType):
+ The type of the array's elements, if type_kind = "ARRAY".
+
+ This field is a member of `oneof`_ ``sub_type``.
+ struct_type (google.cloud.bigquery_v2.types.StandardSqlStructType):
+ The fields of this struct, in order, if type_kind =
+ "STRUCT".
+
+ This field is a member of `oneof`_ ``sub_type``.
+ """
+
+ class TypeKind(proto.Enum):
+ r""""""
+ TYPE_KIND_UNSPECIFIED = 0
+ INT64 = 2
+ BOOL = 5
+ FLOAT64 = 7
+ STRING = 8
+ BYTES = 9
+ TIMESTAMP = 19
+ DATE = 10
+ TIME = 20
+ DATETIME = 21
+ INTERVAL = 26
+ GEOGRAPHY = 22
+ NUMERIC = 23
+ BIGNUMERIC = 24
+ JSON = 25
+ ARRAY = 16
+ STRUCT = 17
+
+ type_kind = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum=TypeKind,
+ )
+ array_element_type = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="sub_type",
+ message="StandardSqlDataType",
+ )
+ struct_type = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="sub_type",
+ message="StandardSqlStructType",
+ )
+
+
+class StandardSqlField(proto.Message):
+ r"""A field or a column.
+
+ Attributes:
+ name (str):
+ Optional. The name of this field. Can be
+ absent for struct fields.
+ type (google.cloud.bigquery_v2.types.StandardSqlDataType):
+ Optional. The type of this parameter. Absent
+ if not explicitly specified (e.g., CREATE
+ FUNCTION statement can omit the return type; in
+ this case the output parameter does not have
+ this "type" field).
+ """
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ type = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message="StandardSqlDataType",
+ )
+
+
+class StandardSqlStructType(proto.Message):
+ r"""
+
+ Attributes:
+ fields (Sequence[google.cloud.bigquery_v2.types.StandardSqlField]):
+
+ """
+
+ fields = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="StandardSqlField",
+ )
+
+
+class StandardSqlTableType(proto.Message):
+ r"""A table type
+
+ Attributes:
+ columns (Sequence[google.cloud.bigquery_v2.types.StandardSqlField]):
+ The columns in this table type
+ """
+
+ columns = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="StandardSqlField",
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/table_reference.py b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/table_reference.py
new file mode 100644
index 0000000000000000000000000000000000000000..c02eb206f364b425422fb23d25c78bac35944d7a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/google/cloud/bigquery_v2/types/table_reference.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.bigquery.v2",
+ manifest={
+ "TableReference",
+ },
+)
+
+
+class TableReference(proto.Message):
+ r"""
+
+ Attributes:
+ project_id (str):
+ Required. The ID of the project containing
+ this table.
+ dataset_id (str):
+ Required. The ID of the dataset containing
+ this table.
+ table_id (str):
+ Required. The ID of the table. The ID must contain only
+ letters (a-z, A-Z), numbers (0-9), or underscores (_). The
+ maximum length is 1,024 characters. Certain operations allow
+ suffixing of the table ID with a partition decorator, such
+ as ``sample_table$20190123``.
+ project_id_alternative (Sequence[str]):
+ The alternative field that will be used when ESF is not able
+ to translate the received data to the project_id field.
+ dataset_id_alternative (Sequence[str]):
+ The alternative field that will be used when ESF is not able
+ to translate the received data to the project_id field.
+ table_id_alternative (Sequence[str]):
+ The alternative field that will be used when ESF is not able
+ to translate the received data to the project_id field.
+ """
+
+ project_id = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ dataset_id = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ table_id = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ project_id_alternative = proto.RepeatedField(
+ proto.STRING,
+ number=4,
+ )
+ dataset_id_alternative = proto.RepeatedField(
+ proto.STRING,
+ number=5,
+ )
+ table_id_alternative = proto.RepeatedField(
+ proto.STRING,
+ number=6,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/testbed/googleapis__python-bigquery/mypy.ini b/testbed/googleapis__python-bigquery/mypy.ini
new file mode 100644
index 0000000000000000000000000000000000000000..beaa679a8d2b6d01af360d57efdc027e0e35bbf8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.8
+namespace_packages = True
diff --git a/testbed/googleapis__python-bigquery/noxfile.py b/testbed/googleapis__python-bigquery/noxfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..2376309ff7c7525cb3da08c6946fc841567affa0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/noxfile.py
@@ -0,0 +1,546 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+from functools import wraps
+import pathlib
+import os
+import re
+import shutil
+import nox
+import time
+
+
+MYPY_VERSION = "mypy==1.6.1"
+PYTYPE_VERSION = "pytype==2021.4.9"
+BLACK_VERSION = "black==23.7.0"
+BLACK_PATHS = (
+ "benchmark",
+ "docs",
+ "google",
+ "samples",
+ "samples/tests",
+ "tests",
+ "noxfile.py",
+ "setup.py",
+)
+
+DEFAULT_PYTHON_VERSION = "3.8"
+SYSTEM_TEST_PYTHON_VERSIONS = ["3.8", "3.11", "3.12"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.12"]
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+
+def _calculate_duration(func):
+ """This decorator prints the execution time for the decorated function."""
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ start = time.monotonic()
+ result = func(*args, **kwargs)
+ end = time.monotonic()
+ total_seconds = round(end - start)
+ hours = total_seconds // 3600 # Integer division to get hours
+ remaining_seconds = total_seconds % 3600 # Modulo to find remaining seconds
+ minutes = remaining_seconds // 60
+ seconds = remaining_seconds % 60
+ human_time = f"{hours:}:{minutes:0>2}:{seconds:0>2}"
+ print(f"Session ran in {total_seconds} seconds ({human_time})")
+ return result
+
+ return wrapper
+
+
+# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
+nox.options.sessions = [
+ "unit_noextras",
+ "unit",
+ "system",
+ "snippets",
+ "cover",
+ "lint",
+ "lint_setup_py",
+ "blacken",
+ "mypy",
+ "mypy_samples",
+ "pytype",
+ "docs",
+]
+
+
+def default(session, install_extras=True):
+ """Default unit test session.
+
+ This is intended to be run **without** an interpreter set, so
+ that the current ``python`` (on the ``PATH``) or the version of
+ Python corresponding to the ``nox`` binary the ``PATH`` can
+ run the tests.
+ """
+
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+
+ # Install all test dependencies, then install local packages in-place.
+ session.install(
+ "pytest",
+ "google-cloud-testutils",
+ "pytest-cov",
+ "freezegun",
+ "-c",
+ constraints_path,
+ )
+
+ if install_extras and session.python in ["3.11", "3.12"]:
+ install_target = ".[bqstorage,ipywidgets,pandas,tqdm,opentelemetry]"
+ elif install_extras:
+ install_target = ".[all]"
+ else:
+ install_target = "."
+ session.install("-e", install_target, "-c", constraints_path)
+ session.run("python", "-m", "pip", "freeze")
+
+ # Run py.test against the unit tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ "-W default::PendingDeprecationWarning",
+ "--cov=google/cloud/bigquery",
+ "--cov=tests/unit",
+ "--cov-append",
+ "--cov-config=.coveragerc",
+ "--cov-report=",
+ "--cov-fail-under=0",
+ os.path.join("tests", "unit"),
+ *session.posargs,
+ )
+
+
+@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
+@_calculate_duration
+def unit(session):
+ """Run the unit test suite."""
+
+ default(session)
+
+
+@nox.session(python=[UNIT_TEST_PYTHON_VERSIONS[0], UNIT_TEST_PYTHON_VERSIONS[-1]])
+@_calculate_duration
+def unit_noextras(session):
+ """Run the unit test suite."""
+
+ # Install optional dependencies that are out-of-date to see that
+ # we fail gracefully.
+ # https://github.com/googleapis/python-bigquery/issues/933
+ #
+ # We only install this extra package on one of the two Python versions
+ # so that it continues to be an optional dependency.
+ # https://github.com/googleapis/python-bigquery/issues/1877
+ if session.python == UNIT_TEST_PYTHON_VERSIONS[0]:
+ session.install("pyarrow==1.0.0")
+
+ default(session, install_extras=False)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+@_calculate_duration
+def mypy(session):
+ """Run type checks with mypy."""
+
+ session.install("-e", ".[all]")
+ session.install(MYPY_VERSION)
+
+ # Just install the dependencies' type info directly, since "mypy --install-types"
+ # might require an additional pass.
+ session.install(
+ "types-protobuf",
+ "types-python-dateutil",
+ "types-requests",
+ "types-setuptools",
+ )
+ session.run("mypy", "-p", "google", "--show-traceback")
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+@_calculate_duration
+def pytype(session):
+ """Run type checks with pytype."""
+ # An indirect dependecy attrs==21.1.0 breaks the check, and installing a less
+ # recent version avoids the error until a possibly better fix is found.
+ # https://github.com/googleapis/python-bigquery/issues/655
+
+ session.install("attrs==20.3.0")
+ session.install("-e", ".[all]")
+ session.install(PYTYPE_VERSION)
+ # See https://github.com/google/pytype/issues/464
+ session.run("pytype", "-P", ".", "google/cloud/bigquery")
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+@_calculate_duration
+def system(session):
+ """Run the system test suite."""
+
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+
+ # Sanity check: Only run system tests if the environment variable is set.
+ if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
+ session.skip("Credentials must be set via environment variable.")
+
+ # Use pre-release gRPC for system tests.
+ # Exclude version 1.49.0rc1 which has a known issue.
+ # See https://github.com/grpc/grpc/pull/30642
+ session.install("--pre", "grpcio!=1.49.0rc1", "-c", constraints_path)
+
+ # Install all test dependencies, then install local packages in place.
+ session.install(
+ "pytest", "psutil", "google-cloud-testutils", "-c", constraints_path
+ )
+ if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") == "true":
+ # mTLS test requires pyopenssl and latest google-cloud-storage
+ session.install("google-cloud-storage", "pyopenssl")
+ else:
+ session.install("google-cloud-storage", "-c", constraints_path)
+
+ # Data Catalog needed for the column ACL test with a real Policy Tag.
+ session.install("google-cloud-datacatalog", "-c", constraints_path)
+
+ if session.python in ["3.11", "3.12"]:
+ extras = "[bqstorage,ipywidgets,pandas,tqdm,opentelemetry]"
+ else:
+ extras = "[all]"
+ session.install("-e", f".{extras}", "-c", constraints_path)
+
+ # print versions of all dependencies
+ session.run("python", "-m", "pip", "freeze")
+
+ # Run py.test against the system tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ "-W default::PendingDeprecationWarning",
+ os.path.join("tests", "system"),
+ *session.posargs,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+@_calculate_duration
+def mypy_samples(session):
+ """Run type checks with mypy."""
+
+ session.install("pytest")
+ for requirements_path in CURRENT_DIRECTORY.glob("samples/*/requirements.txt"):
+ session.install("-r", str(requirements_path))
+ session.install(MYPY_VERSION)
+
+ # requirements.txt might include this package. Install from source so that
+ # we can author samples with unreleased features.
+ session.install("-e", ".[all]")
+
+ # Just install the dependencies' type info directly, since "mypy --install-types"
+ # might require an additional pass.
+ session.install(
+ "types-mock",
+ "types-pytz",
+ "types-protobuf!=4.24.0.20240106", # This version causes an error: 'Module "google.oauth2" has no attribute "service_account"'
+ "types-python-dateutil",
+ "types-requests",
+ "types-setuptools",
+ )
+
+ session.install("typing-extensions") # for TypedDict in pre-3.8 Python versions
+
+ session.run(
+ "mypy",
+ "--config-file",
+ str(CURRENT_DIRECTORY / "samples" / "mypy.ini"),
+ "--no-incremental", # Required by warn-unused-configs from mypy.ini to work
+ "samples/",
+ )
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+@_calculate_duration
+def snippets(session):
+ """Run the snippets test suite."""
+
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+
+ # Install all test dependencies, then install local packages in place.
+ session.install("pytest", "google-cloud-testutils", "-c", constraints_path)
+ session.install("google-cloud-storage", "-c", constraints_path)
+ session.install("grpcio", "-c", constraints_path)
+
+ if session.python in ["3.11", "3.12"]:
+ extras = "[bqstorage,ipywidgets,pandas,tqdm,opentelemetry]"
+ else:
+ extras = "[all]"
+ session.install("-e", f".{extras}", "-c", constraints_path)
+
+ # Run py.test against the snippets tests.
+ # Skip tests in samples/snippets, as those are run in a different session
+ # using the nox config from that directory.
+ session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
+ session.run(
+ "py.test",
+ "samples",
+ "-W default::PendingDeprecationWarning",
+ "--ignore=samples/desktopapp",
+ "--ignore=samples/magics",
+ "--ignore=samples/geography",
+ "--ignore=samples/notebooks",
+ "--ignore=samples/snippets",
+ *session.posargs,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+@_calculate_duration
+def cover(session):
+ """Run the final coverage report.
+
+ This outputs the coverage report aggregating coverage from the unit
+ test runs (not system test runs), and then erases coverage data.
+ """
+
+ session.install("coverage", "pytest-cov")
+ session.run("coverage", "report", "--show-missing", "--fail-under=100")
+ session.run("coverage", "erase")
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+@_calculate_duration
+def prerelease_deps(session):
+ """Run all tests with prerelease versions of dependencies installed.
+
+ https://github.com/googleapis/python-bigquery/issues/95
+ """
+ # PyArrow prerelease packages are published to an alternative PyPI host.
+ # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages
+ session.install(
+ "--extra-index-url",
+ "https://pypi.fury.io/arrow-nightlies/",
+ "--prefer-binary",
+ "--pre",
+ "--upgrade",
+ "pyarrow",
+ )
+ session.install(
+ "--pre",
+ "--upgrade",
+ "IPython",
+ "ipykernel",
+ "ipywidgets",
+ "tqdm",
+ "git+https://github.com/pypa/packaging.git",
+ "pandas",
+ )
+
+ session.install(
+ "--pre",
+ "--upgrade",
+ "google-api-core",
+ "google-cloud-bigquery-storage",
+ "google-cloud-core",
+ "google-resumable-media",
+ # Exclude version 1.49.0rc1 which has a known issue. See https://github.com/grpc/grpc/pull/30642
+ "grpcio!=1.49.0rc1",
+ )
+ session.install(
+ "freezegun",
+ "google-cloud-datacatalog",
+ "google-cloud-storage",
+ "google-cloud-testutils",
+ "psutil",
+ "pytest",
+ "pytest-cov",
+ )
+
+ # Because we test minimum dependency versions on the minimum Python
+ # version, the first version we test with in the unit tests sessions has a
+ # constraints file containing all dependencies and extras.
+ with open(
+ CURRENT_DIRECTORY
+ / "testing"
+ / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt",
+ encoding="utf-8",
+ ) as constraints_file:
+ constraints_text = constraints_file.read()
+
+ # Ignore leading whitespace and comment lines.
+ deps = [
+ match.group(1)
+ for match in re.finditer(
+ r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE
+ )
+ ]
+
+ # We use --no-deps to ensure that pre-release versions aren't overwritten
+ # by the version ranges in setup.py.
+ session.install(*deps)
+ session.install("--no-deps", "-e", ".[all]")
+
+ # Print out prerelease package versions.
+ session.run("python", "-c", "import grpc; print(grpc.__version__)")
+ session.run("python", "-c", "import pandas; print(pandas.__version__)")
+ session.run("python", "-c", "import pyarrow; print(pyarrow.__version__)")
+ session.run("python", "-m", "pip", "freeze")
+
+ # Run all tests, except a few samples tests which require extra dependencies.
+ session.run(
+ "py.test",
+ "tests/unit",
+ "-W default::PendingDeprecationWarning",
+ )
+
+ session.run(
+ "py.test",
+ "tests/system",
+ "-W default::PendingDeprecationWarning",
+ )
+
+ session.run(
+ "py.test",
+ "samples/tests",
+ "-W default::PendingDeprecationWarning",
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+@_calculate_duration
+def lint(session):
+ """Run linters.
+
+ Returns a failure if the linters find linting errors or sufficiently
+ serious code quality issues.
+ """
+
+ session.install("flake8", BLACK_VERSION)
+ session.install("-e", ".")
+ session.run("flake8", os.path.join("google", "cloud", "bigquery"))
+ session.run("flake8", "tests")
+ session.run("flake8", os.path.join("docs", "samples"))
+ session.run("flake8", os.path.join("docs", "snippets.py"))
+ session.run("flake8", "benchmark")
+ session.run("black", "--check", *BLACK_PATHS)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+@_calculate_duration
+def lint_setup_py(session):
+ """Verify that setup.py is valid (including RST check)."""
+
+ session.install("docutils", "Pygments")
+ session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+@_calculate_duration
+def blacken(session):
+ """Run black.
+ Format code to uniform standard.
+ """
+
+ session.install(BLACK_VERSION)
+ session.run("black", *BLACK_PATHS)
+
+
+@nox.session(python="3.9")
+@_calculate_duration
+def docs(session):
+ """Build the docs."""
+
+ session.install(
+ # We need to pin to specific versions of the `sphinxcontrib-*` packages
+ # which still support sphinx 4.x.
+ # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344
+ # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345.
+ "sphinxcontrib-applehelp==1.0.4",
+ "sphinxcontrib-devhelp==1.0.2",
+ "sphinxcontrib-htmlhelp==2.0.1",
+ "sphinxcontrib-qthelp==1.0.3",
+ "sphinxcontrib-serializinghtml==1.1.5",
+ "sphinx==4.5.0",
+ "alabaster",
+ "recommonmark",
+ )
+ session.install("google-cloud-storage")
+ session.install("-e", ".[all]")
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-W", # warnings as errors
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
+
+
+@nox.session(python="3.10")
+@_calculate_duration
+def docfx(session):
+ """Build the docfx yaml files for this library."""
+
+ session.install("-e", ".")
+ session.install(
+ # We need to pin to specific versions of the `sphinxcontrib-*` packages
+ # which still support sphinx 4.x.
+ # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344
+ # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345.
+ "sphinxcontrib-applehelp==1.0.4",
+ "sphinxcontrib-devhelp==1.0.2",
+ "sphinxcontrib-htmlhelp==2.0.1",
+ "sphinxcontrib-qthelp==1.0.3",
+ "sphinxcontrib-serializinghtml==1.1.5",
+ "gcp-sphinx-docfx-yaml",
+ "alabaster",
+ "recommonmark",
+ )
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-D",
+ (
+ "extensions=sphinx.ext.autodoc,"
+ "sphinx.ext.autosummary,"
+ "docfx_yaml.extension,"
+ "sphinx.ext.intersphinx,"
+ "sphinx.ext.coverage,"
+ "sphinx.ext.napoleon,"
+ "sphinx.ext.todo,"
+ "sphinx.ext.viewcode,"
+ "recommonmark"
+ ),
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
diff --git a/testbed/googleapis__python-bigquery/owlbot.py b/testbed/googleapis__python-bigquery/owlbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..07805d11a7eb77eb235cdc64448105671ad43146
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/owlbot.py
@@ -0,0 +1,106 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This script is used to synthesize generated parts of this library."""
+from pathlib import Path
+import textwrap
+
+import synthtool as s
+from synthtool import gcp
+from synthtool.languages import python
+
+REPO_ROOT = Path(__file__).parent.absolute()
+
+default_version = "v2"
+
+for library in s.get_staging_dirs(default_version):
+ # Avoid breaking change due to change in field renames.
+ # https://github.com/googleapis/python-bigquery/issues/319
+ s.replace(
+ library / f"google/cloud/bigquery_{library.name}/types/standard_sql.py",
+ r"type_ ",
+ "type ",
+ )
+ # Patch docs issue
+ s.replace(
+ library / f"google/cloud/bigquery_{library.name}/types/model.py",
+ r"""\"predicted_\"""",
+ """`predicted_`""",
+ )
+ s.move(library / f"google/cloud/bigquery_{library.name}/types")
+s.remove_staging_dirs()
+
+common = gcp.CommonTemplates()
+
+# ----------------------------------------------------------------------------
+# Add templated files
+# ----------------------------------------------------------------------------
+templated_files = common.py_library(
+ cov_level=100,
+ samples=True,
+ microgenerator=True,
+ split_system_tests=True,
+ intersphinx_dependencies={
+ "dateutil": "https://dateutil.readthedocs.io/en/latest/",
+ "geopandas": "https://geopandas.org/",
+ "pandas": "https://pandas.pydata.org/pandas-docs/stable/",
+ },
+)
+
+# BigQuery has a custom multiprocessing note
+s.move(
+ templated_files,
+ excludes=[
+ "noxfile.py",
+ "docs/multiprocessing.rst",
+ "docs/index.rst",
+ ".coveragerc",
+ ".github/CODEOWNERS",
+ # Include custom SNIPPETS_TESTS job for performance.
+ # https://github.com/googleapis/python-bigquery/issues/191
+ ".kokoro/presubmit/presubmit.cfg",
+ ".kokoro/continuous/prerelease-deps.cfg",
+ ".github/workflows", # exclude gh actions as credentials are needed for tests
+ "README.rst",
+ ],
+)
+
+python.configure_previous_major_version_branches()
+# ----------------------------------------------------------------------------
+# Samples templates
+# ----------------------------------------------------------------------------
+
+python.py_samples()
+
+s.replace(
+ "docs/conf.py",
+ r'\{"members": True\}',
+ '{"members": True, "inherited-members": True}',
+)
+s.replace(
+ "docs/conf.py",
+ r"exclude_patterns = \[",
+ '\\g<0>\n "google/cloud/bigquery_v2/**", # Legacy proto-based types.',
+)
+
+# ----------------------------------------------------------------------------
+# pytype-related changes
+# ----------------------------------------------------------------------------
+
+# Add .pytype to .gitignore
+s.replace(".gitignore", r"\.pytest_cache", "\\g<0>\n.pytype")
+
+s.shell.run(["nox", "-s", "blacken"], hide_output=False)
+for noxfile in REPO_ROOT.glob("samples/**/noxfile.py"):
+ s.shell.run(["nox", "-s", "blacken"], cwd=noxfile.parent, hide_output=False)
diff --git a/testbed/googleapis__python-bigquery/pylint.config.py b/testbed/googleapis__python-bigquery/pylint.config.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d64b9d2f2562704d0b009821e0ede521fc34dd5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/pylint.config.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module is used to configure gcp-devrel-py-tools run-pylint."""
+
+# Library configuration
+
+# library_additions = {}
+# library_replacements = {}
+
+# Test configuration
+
+# test_additions = copy.deepcopy(library_additions)
+# test_replacements = copy.deepcopy(library_replacements)
diff --git a/testbed/googleapis__python-bigquery/renovate.json b/testbed/googleapis__python-bigquery/renovate.json
new file mode 100644
index 0000000000000000000000000000000000000000..39b2a0ec929660c4db978654ede22ce079c22b4a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/renovate.json
@@ -0,0 +1,12 @@
+{
+ "extends": [
+ "config:base",
+ "group:all",
+ ":preserveSemverRanges",
+ ":disableDependencyDashboard"
+ ],
+ "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py"],
+ "pip_requirements": {
+ "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"]
+ }
+}
diff --git a/testbed/googleapis__python-bigquery/samples/AUTHORING_GUIDE.md b/testbed/googleapis__python-bigquery/samples/AUTHORING_GUIDE.md
new file mode 100644
index 0000000000000000000000000000000000000000..8249522ffc2dd904072688c9948fefb42bde6e77
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/AUTHORING_GUIDE.md
@@ -0,0 +1 @@
+See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/AUTHORING_GUIDE.md
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/samples/browse_table_data.py b/testbed/googleapis__python-bigquery/samples/browse_table_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fba65aeb3c4e60171ad6f1bc312303433b5e782
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/browse_table_data.py
@@ -0,0 +1,56 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def browse_table_data(table_id: str) -> None:
+ # [START bigquery_browse_table]
+
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ # TODO(developer): Set table_id to the ID of the table to browse data rows.
+ # table_id = "your-project.your_dataset.your_table_name"
+
+ # Download all rows from a table.
+ rows_iter = client.list_rows(table_id) # Make an API request.
+
+ # Iterate over rows to make the API requests to fetch row data.
+ rows = list(rows_iter)
+ print("Downloaded {} rows from table {}".format(len(rows), table_id))
+
+ # Download at most 10 rows.
+ rows_iter = client.list_rows(table_id, max_results=10)
+ rows = list(rows_iter)
+ print("Downloaded {} rows from table {}".format(len(rows), table_id))
+
+ # Specify selected fields to limit the results to certain columns.
+ table = client.get_table(table_id) # Make an API request.
+ fields = table.schema[:2] # First two columns.
+ rows_iter = client.list_rows(table_id, selected_fields=fields, max_results=10)
+ print("Selected {} columns from table {}.".format(len(rows_iter.schema), table_id))
+
+ rows = list(rows_iter)
+ print("Downloaded {} rows from table {}".format(len(rows), table_id))
+
+ # Print row data in tabular format.
+ rows_iter = client.list_rows(table_id, max_results=10)
+ format_string = "{!s:<16} " * len(rows_iter.schema)
+ field_names = [field.name for field in rows_iter.schema]
+ print(format_string.format(*field_names)) # Prints column headers.
+
+ for row in rows_iter:
+ print(format_string.format(*row)) # Prints row data.
+ # [END bigquery_browse_table]
diff --git a/testbed/googleapis__python-bigquery/samples/client_list_jobs.py b/testbed/googleapis__python-bigquery/samples/client_list_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..335d2ecec7b75f7b68317c0f2886a6e4a1097371
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/client_list_jobs.py
@@ -0,0 +1,49 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def client_list_jobs() -> None:
+ # [START bigquery_list_jobs]
+
+ from google.cloud import bigquery
+
+ import datetime
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ # List the 10 most recent jobs in reverse chronological order.
+ # Omit the max_results parameter to list jobs from the past 6 months.
+ print("Last 10 jobs:")
+ for job in client.list_jobs(max_results=10): # API request(s)
+ print("{}".format(job.job_id))
+
+ # The following are examples of additional optional parameters:
+
+ # Use min_creation_time and/or max_creation_time to specify a time window.
+ print("Jobs from the last ten minutes:")
+ ten_mins_ago = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
+ for job in client.list_jobs(min_creation_time=ten_mins_ago):
+ print("{}".format(job.job_id))
+
+ # Use all_users to include jobs run by all users in the project.
+ print("Last 10 jobs run by all users:")
+ for job in client.list_jobs(max_results=10, all_users=True):
+ print("{} run by user: {}".format(job.job_id, job.user_email))
+
+ # Use state_filter to filter by job state.
+ print("Last 10 jobs done:")
+ for job in client.list_jobs(max_results=10, state_filter="DONE"):
+ print("{}".format(job.job_id))
+ # [END bigquery_list_jobs]
diff --git a/testbed/googleapis__python-bigquery/samples/client_load_partitioned_table.py b/testbed/googleapis__python-bigquery/samples/client_load_partitioned_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfdf248194b479de5e2d4c170f25c070cef8fe39
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/client_load_partitioned_table.py
@@ -0,0 +1,49 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def client_load_partitioned_table(table_id: str) -> None:
+ # [START bigquery_load_table_partitioned]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ # TODO(developer): Set table_id to the ID of the table to create.
+ # table_id = "your-project.your_dataset.your_table_name"
+
+ job_config = bigquery.LoadJobConfig(
+ schema=[
+ bigquery.SchemaField("name", "STRING"),
+ bigquery.SchemaField("post_abbr", "STRING"),
+ bigquery.SchemaField("date", "DATE"),
+ ],
+ skip_leading_rows=1,
+ time_partitioning=bigquery.TimePartitioning(
+ type_=bigquery.TimePartitioningType.DAY,
+ field="date", # Name of the column to use for partitioning.
+ expiration_ms=7776000000, # 90 days.
+ ),
+ )
+ uri = "gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"
+
+ load_job = client.load_table_from_uri(
+ uri, table_id, job_config=job_config
+ ) # Make an API request.
+
+ load_job.result() # Wait for the job to complete.
+
+ table = client.get_table(table_id)
+ print("Loaded {} rows to table {}".format(table.num_rows, table_id))
+ # [END bigquery_load_table_partitioned]
diff --git a/testbed/googleapis__python-bigquery/samples/client_query_add_column.py b/testbed/googleapis__python-bigquery/samples/client_query_add_column.py
new file mode 100644
index 0000000000000000000000000000000000000000..6aae5fce4c2ff13bd8e8f15299ab4cc2761a5eb6
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/client_query_add_column.py
@@ -0,0 +1,50 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def client_query_add_column(table_id: str) -> None:
+ # [START bigquery_add_column_query_append]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ # TODO(developer): Set table_id to the ID of the destination table.
+ # table_id = "your-project.your_dataset.your_table_name"
+
+ # Retrieves the destination table and checks the length of the schema.
+ table = client.get_table(table_id) # Make an API request.
+ print("Table {} contains {} columns".format(table_id, len(table.schema)))
+
+ # Configures the query to append the results to a destination table,
+ # allowing field addition.
+ job_config = bigquery.QueryJobConfig(
+ destination=table_id,
+ schema_update_options=[bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION],
+ write_disposition=bigquery.WriteDisposition.WRITE_APPEND,
+ )
+
+ # Start the query, passing in the extra configuration.
+ client.query_and_wait(
+ # In this example, the existing table contains only the 'full_name' and
+ # 'age' columns, while the results of this query will contain an
+ # additional 'favorite_color' column.
+ 'SELECT "Timmy" as full_name, 85 as age, "Blue" as favorite_color;',
+ job_config=job_config,
+ ) # Make an API request and wait for job to complete.
+
+ # Checks the updated length of the schema.
+ table = client.get_table(table_id) # Make an API request.
+ print("Table {} now contains {} columns".format(table_id, len(table.schema)))
+ # [END bigquery_add_column_query_append]
diff --git a/testbed/googleapis__python-bigquery/samples/client_query_relax_column.py b/testbed/googleapis__python-bigquery/samples/client_query_relax_column.py
new file mode 100644
index 0000000000000000000000000000000000000000..26dce888fea416a0fad0da440091d3fdeb8460d3
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/client_query_relax_column.py
@@ -0,0 +1,53 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def client_query_relax_column(table_id: str) -> None:
+ # [START bigquery_relax_column_query_append]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ # TODO(developer): Set table_id to the ID of the destination table.
+ # table_id = "your-project.your_dataset.your_table_name"
+
+ # Retrieves the destination table and checks the number of required fields.
+ table = client.get_table(table_id) # Make an API request.
+ original_required_fields = sum(field.mode == "REQUIRED" for field in table.schema)
+
+ # In this example, the existing table has 2 required fields.
+ print("{} fields in the schema are required.".format(original_required_fields))
+
+ # Configures the query to append the results to a destination table,
+ # allowing field relaxation.
+ job_config = bigquery.QueryJobConfig(
+ destination=table_id,
+ schema_update_options=[bigquery.SchemaUpdateOption.ALLOW_FIELD_RELAXATION],
+ write_disposition=bigquery.WriteDisposition.WRITE_APPEND,
+ )
+
+ # Start the query, passing in the extra configuration.
+ client.query_and_wait(
+ # In this example, the existing table contains 'full_name' and 'age' as
+ # required columns, but the query results will omit the second column.
+ 'SELECT "Beyonce" as full_name;',
+ job_config=job_config,
+ ) # Make an API request and wait for job to complete
+
+ # Checks the updated number of required fields.
+ table = client.get_table(table_id) # Make an API request.
+ current_required_fields = sum(field.mode == "REQUIRED" for field in table.schema)
+ print("{} fields in the schema are now required.".format(current_required_fields))
+ # [END bigquery_relax_column_query_append]
diff --git a/testbed/googleapis__python-bigquery/samples/client_query_w_array_params.py b/testbed/googleapis__python-bigquery/samples/client_query_w_array_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..25592a94a30a9f67263ccbb78d4ebd954496cf27
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/client_query_w_array_params.py
@@ -0,0 +1,42 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def client_query_w_array_params() -> None:
+ # [START bigquery_query_params_arrays]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ query = """
+ SELECT name, sum(number) as count
+ FROM `bigquery-public-data.usa_names.usa_1910_2013`
+ WHERE gender = @gender
+ AND state IN UNNEST(@states)
+ GROUP BY name
+ ORDER BY count DESC
+ LIMIT 10;
+ """
+ job_config = bigquery.QueryJobConfig(
+ query_parameters=[
+ bigquery.ScalarQueryParameter("gender", "STRING", "M"),
+ bigquery.ArrayQueryParameter("states", "STRING", ["WA", "WI", "WV", "WY"]),
+ ]
+ )
+ query_job = client.query(query, job_config=job_config) # Make an API request.
+
+ for row in query_job:
+ print("{}: \t{}".format(row.name, row.count))
+ # [END bigquery_query_params_arrays]
diff --git a/testbed/googleapis__python-bigquery/samples/desktopapp/__init__.py b/testbed/googleapis__python-bigquery/samples/desktopapp/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fbd93bb2ca4d982f578388ee47499e8a421f50e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/desktopapp/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/testbed/googleapis__python-bigquery/samples/desktopapp/conftest.py b/testbed/googleapis__python-bigquery/samples/desktopapp/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdc85a8522c13e6737296a63009bbf65643eef74
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/desktopapp/conftest.py
@@ -0,0 +1,23 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud import bigquery
+import pytest
+
+
+@pytest.fixture
+def bigquery_client_patch(
+ monkeypatch: pytest.MonkeyPatch, bigquery_client: bigquery.Client
+) -> None:
+ monkeypatch.setattr(bigquery, "Client", lambda: bigquery_client)
diff --git a/testbed/googleapis__python-bigquery/samples/desktopapp/mypy.ini b/testbed/googleapis__python-bigquery/samples/desktopapp/mypy.ini
new file mode 100644
index 0000000000000000000000000000000000000000..d27b6b599d82638b70297871a92d5e77080f599e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/desktopapp/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
+; We require type annotations in all samples.
+strict = True
+exclude = noxfile\.py
+warn_unused_configs = True
+
+[mypy-google.auth,google.oauth2,geojson,google_auth_oauthlib,IPython.*]
+ignore_missing_imports = True
diff --git a/testbed/googleapis__python-bigquery/samples/desktopapp/noxfile.py b/testbed/googleapis__python-bigquery/samples/desktopapp/noxfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7135946fd5e16ec56529107714432b2e42a84d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/desktopapp/noxfile.py
@@ -0,0 +1,293 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import glob
+import os
+from pathlib import Path
+import sys
+from typing import Callable, Dict, Optional
+
+import nox
+
+
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# DO NOT EDIT THIS FILE EVER!
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+
+BLACK_VERSION = "black==22.3.0"
+ISORT_VERSION = "isort==5.10.1"
+
+# Copy `noxfile_config.py` to your directory and modify it instead.
+
+# `TEST_CONFIG` dict is a configuration hook that allows users to
+# modify the test configurations. The values here should be in sync
+# with `noxfile_config.py`. Users will copy `noxfile_config.py` into
+# their directory and modify it.
+
+TEST_CONFIG = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": False,
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
+
+
+try:
+ # Ensure we can import noxfile_config in the project's directory.
+ sys.path.append(".")
+ from noxfile_config import TEST_CONFIG_OVERRIDE
+except ImportError as e:
+ print("No user noxfile_config found: detail: {}".format(e))
+ TEST_CONFIG_OVERRIDE = {}
+
+# Update the TEST_CONFIG with the user supplied values.
+TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
+
+
+def get_pytest_env_vars() -> Dict[str, str]:
+ """Returns a dict for pytest invocation."""
+ ret = {}
+
+ # Override the GCLOUD_PROJECT and the alias.
+ env_key = TEST_CONFIG["gcloud_project_env"]
+ # This should error out if not set.
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
+
+ # Apply user supplied envs.
+ ret.update(TEST_CONFIG["envs"])
+ return ret
+
+
+# DO NOT EDIT - automatically generated.
+# All versions used to test samples.
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
+
+# Any default versions that should be ignored.
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
+
+TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
+
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+#
+# Style Checks
+#
+
+
+# Linting with flake8.
+#
+# We ignore the following rules:
+# E203: whitespace before ‘:’
+# E266: too many leading ‘#’ for block comment
+# E501: line too long
+# I202: Additional newline in a section of imports
+#
+# We also need to specify the rules which are ignored by default:
+# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
+FLAKE8_COMMON_ARGS = [
+ "--show-source",
+ "--builtin=gettext",
+ "--max-complexity=20",
+ "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
+ "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
+ "--max-line-length=88",
+]
+
+
+@nox.session
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8")
+ else:
+ session.install("flake8", "flake8-annotations")
+
+ args = FLAKE8_COMMON_ARGS + [
+ ".",
+ ]
+ session.run("flake8", *args)
+
+
+#
+# Black
+#
+
+
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ session.run("black", *python_files)
+
+
+#
+# format = isort + black
+#
+
+
+@nox.session
+def format(session: nox.sessions.Session) -> None:
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run("isort", "--fss", *python_files)
+ session.run("black", *python_files)
+
+
+#
+# Sample Tests
+#
+
+
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob(
+ "**/test_*.py", recursive=True
+ )
+ test_list.extend(glob.glob("**/tests", recursive=True))
+
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ return
+
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ concurrent_args = []
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+ with open("requirements.txt") as rfile:
+ packages = rfile.read()
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
+ else:
+ session.install("-r", "requirements-test.txt")
+ with open("requirements-test.txt") as rtfile:
+ packages += rtfile.read()
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ if "pytest-parallel" in packages:
+ concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"])
+ elif "pytest-xdist" in packages:
+ concurrent_args.extend(["-n", "auto"])
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
+
+
+@nox.session(python=ALL_VERSIONS)
+def py(session: nox.sessions.Session) -> None:
+ """Runs py.test for a sample using the specified version of Python."""
+ if session.python in TESTED_VERSIONS:
+ _session_tests(session)
+ else:
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
+
+
+#
+# Readmegen
+#
+
+
+def _get_repo_root() -> Optional[str]:
+ """Returns the root folder of the project."""
+ # Get root of this repository. Assume we don't have directories nested deeper than 10 items.
+ p = Path(os.getcwd())
+ for i in range(10):
+ if p is None:
+ break
+ if Path(p / ".git").exists():
+ return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
+ p = p.parent
+ raise Exception("Unable to detect repository root.")
+
+
+GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")])
+
+
+@nox.session
+@nox.parametrize("path", GENERATED_READMES)
+def readmegen(session: nox.sessions.Session, path: str) -> None:
+ """(Re-)generates the readme for a sample."""
+ session.install("jinja2", "pyyaml")
+ dir_ = os.path.dirname(path)
+
+ if os.path.exists(os.path.join(dir_, "requirements.txt")):
+ session.install("-r", os.path.join(dir_, "requirements.txt"))
+
+ in_file = os.path.join(dir_, "README.rst.in")
+ session.run(
+ "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/desktopapp/noxfile_config.py b/testbed/googleapis__python-bigquery/samples/desktopapp/noxfile_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..315bd5be8cd96033e1df2b66d47b3cd307f18f46
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/desktopapp/noxfile_config.py
@@ -0,0 +1,40 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default TEST_CONFIG_OVERRIDE for python repos.
+
+# You can copy this file into your directory, then it will be inported from
+# the noxfile.py.
+
+# The source of truth:
+# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py
+
+TEST_CONFIG_OVERRIDE = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [
+ "2.7",
+ # TODO: Enable 3.10 once there is a geopandas/fiona release.
+ # https://github.com/Toblerity/Fiona/issues/1043
+ "3.10",
+ ],
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
diff --git a/testbed/googleapis__python-bigquery/samples/desktopapp/requirements-test.txt b/testbed/googleapis__python-bigquery/samples/desktopapp/requirements-test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1640e1a9502a5a2c6159a72c6b70b440273004f7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/desktopapp/requirements-test.txt
@@ -0,0 +1,4 @@
+google-cloud-testutils==1.4.0
+pytest===7.4.4; python_version == '3.7'
+pytest==8.3.3; python_version >= '3.8'
+mock==5.1.0
diff --git a/testbed/googleapis__python-bigquery/samples/desktopapp/requirements.txt b/testbed/googleapis__python-bigquery/samples/desktopapp/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..383829d7d756872e47b2a64ebaa55c24d4c71739
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/desktopapp/requirements.txt
@@ -0,0 +1,2 @@
+google-cloud-bigquery==3.26.0
+google-auth-oauthlib==1.2.1
diff --git a/testbed/googleapis__python-bigquery/samples/desktopapp/user_credentials.py b/testbed/googleapis__python-bigquery/samples/desktopapp/user_credentials.py
new file mode 100644
index 0000000000000000000000000000000000000000..68236d1260fe3814ab141618cfa1a14482f07997
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/desktopapp/user_credentials.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Command-line application to run a query using user credentials.
+
+You must supply a client secrets file, which would normally be bundled with
+your application.
+"""
+
+import argparse
+
+
+def main(project: str) -> None:
+ # [START bigquery_auth_user_flow]
+ from google_auth_oauthlib import flow
+
+ # A local server is used as the callback URL in the auth flow.
+ appflow = flow.InstalledAppFlow.from_client_secrets_file(
+ "client_secrets.json", scopes=["https://www.googleapis.com/auth/bigquery"]
+ )
+
+ # This launches a local server to be used as the callback URL in the desktop
+ # app auth flow. If you are accessing the application remotely, such as over
+ # SSH or a remote Jupyter notebook, this flow will not work. Use the
+ # `gcloud auth application-default login --no-browser` command or workload
+ # identity federation to get authentication tokens, instead.
+ #
+ appflow.run_local_server()
+
+ credentials = appflow.credentials
+ # [END bigquery_auth_user_flow]
+
+ # [START bigquery_auth_user_query]
+ from google.cloud import bigquery
+
+ # TODO: Uncomment the line below to set the `project` variable.
+ # project = 'user-project-id'
+ #
+ # The `project` variable defines the project to be billed for query
+ # processing. The user must have the bigquery.jobs.create permission on
+ # this project to run a query. See:
+ # https://cloud.google.com/bigquery/docs/access-control#permissions
+
+ client = bigquery.Client(project=project, credentials=credentials)
+
+ query_string = """SELECT name, SUM(number) as total
+ FROM `bigquery-public-data.usa_names.usa_1910_current`
+ WHERE name = 'William'
+ GROUP BY name;
+ """
+ results = client.query_and_wait(query_string)
+
+ # Print the results.
+ for row in results: # Wait for the job to complete.
+ print("{}: {}".format(row["name"], row["total"]))
+ # [END bigquery_auth_user_query]
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+ )
+ parser.add_argument("project", help="Project to use for BigQuery billing.")
+ args = parser.parse_args()
+ main(args.project)
diff --git a/testbed/googleapis__python-bigquery/samples/desktopapp/user_credentials_test.py b/testbed/googleapis__python-bigquery/samples/desktopapp/user_credentials_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..252b843c4ffa40d7175698e22689bfe8c05d8871
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/desktopapp/user_credentials_test.py
@@ -0,0 +1,52 @@
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+from typing import Iterator, Union
+from unittest import mock
+
+import google.auth
+import pytest
+
+from .user_credentials import main # type: ignore
+
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+
+
+if sys.version_info >= (3, 8):
+ # Python 3.8+ has an AsyncMock attribute in unittest.mock, but 3.7 does not
+ MockType = Union[mock.MagicMock, mock.AsyncMock]
+else:
+ # Other definitions and imports
+ MockType = Union[mock.MagicMock]
+
+
+@pytest.fixture
+def mock_flow() -> Iterator[MockType]:
+ flow_patch = mock.patch("google_auth_oauthlib.flow.InstalledAppFlow", autospec=True)
+
+ with flow_patch as flow_mock:
+ flow_mock.from_client_secrets_file.return_value = flow_mock
+ flow_mock.credentials = google.auth.default()[0]
+ yield flow_mock
+
+
+def test_auth_query_console(
+ mock_flow: MockType, capsys: pytest.CaptureFixture[str]
+) -> None:
+ main(PROJECT)
+ out, _ = capsys.readouterr()
+ # Fun fact: William P. Wood was the 1st director of the US Secret Service.
+ assert "William" in out
diff --git a/testbed/googleapis__python-bigquery/samples/geography/__init__.py b/testbed/googleapis__python-bigquery/samples/geography/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6334245aea5aa2deb2f00ec6bc3de455e9cc132
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/testbed/googleapis__python-bigquery/samples/geography/conftest.py b/testbed/googleapis__python-bigquery/samples/geography/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..14823d10a7f60ca89703b3265f7b6dc4fe7d0435
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/conftest.py
@@ -0,0 +1,58 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+from typing import Iterator
+import uuid
+
+from google.cloud import bigquery
+import pytest
+
+
+def temp_suffix() -> str:
+ now = datetime.datetime.now()
+ return f"{now.strftime('%Y%m%d%H%M%S')}_{uuid.uuid4().hex[:8]}"
+
+
+@pytest.fixture(scope="session")
+def bigquery_client() -> bigquery.Client:
+ bigquery_client = bigquery.Client()
+ return bigquery_client
+
+
+@pytest.fixture(scope="session")
+def project_id(bigquery_client: bigquery.Client) -> str:
+ return bigquery_client.project
+
+
+@pytest.fixture
+def dataset_id(bigquery_client: bigquery.Client) -> Iterator[str]:
+ dataset_id = f"geography_{temp_suffix()}"
+ bigquery_client.create_dataset(dataset_id)
+ yield dataset_id
+ bigquery_client.delete_dataset(dataset_id, delete_contents=True)
+
+
+@pytest.fixture
+def table_id(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id: str
+) -> Iterator[str]:
+ table_id = f"{project_id}.{dataset_id}.geography_{temp_suffix()}"
+ table = bigquery.Table(table_id)
+ table.schema = [
+ bigquery.SchemaField("geo", bigquery.SqlTypeNames.GEOGRAPHY),
+ ]
+ bigquery_client.create_table(table)
+ yield table_id
+ bigquery_client.delete_table(table_id)
diff --git a/testbed/googleapis__python-bigquery/samples/geography/insert_geojson.py b/testbed/googleapis__python-bigquery/samples/geography/insert_geojson.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a6f6c413f90934d6ed2f026c664dd6834a448f6
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/insert_geojson.py
@@ -0,0 +1,56 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Mapping, Optional, Sequence
+
+
+def insert_geojson(
+ override_values: Optional[Mapping[str, str]] = None
+) -> Sequence[Dict[str, object]]:
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_insert_geojson]
+ import geojson
+ from google.cloud import bigquery
+
+ bigquery_client = bigquery.Client()
+
+ # This example uses a table containing a column named "geo" with the
+ # GEOGRAPHY data type.
+ table_id = "my-project.my_dataset.my_table"
+ # [END bigquery_insert_geojson]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ table_id = override_values.get("table_id", table_id)
+ # [START bigquery_insert_geojson]
+
+ # Use the python-geojson library to generate GeoJSON of a line from LAX to
+ # JFK airports. Alternatively, you may define GeoJSON data directly, but it
+ # must be converted to a string before loading it into BigQuery.
+ my_geography = geojson.LineString([(-118.4085, 33.9416), (-73.7781, 40.6413)])
+ rows = [
+ # Convert GeoJSON data into a string.
+ {"geo": geojson.dumps(my_geography)}
+ ]
+
+ # table already exists and has a column
+ # named "geo" with data type GEOGRAPHY.
+ errors = bigquery_client.insert_rows_json(table_id, rows)
+ if errors:
+ raise RuntimeError(f"row insert failed: {errors}")
+ else:
+ print(f"wrote 1 row to {table_id}")
+ # [END bigquery_insert_geojson]
+ return errors
diff --git a/testbed/googleapis__python-bigquery/samples/geography/insert_geojson_test.py b/testbed/googleapis__python-bigquery/samples/geography/insert_geojson_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..507201872cc1d16cd5bc53b6cea81ce8faf507f8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/insert_geojson_test.py
@@ -0,0 +1,20 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import insert_geojson
+
+
+def test_insert_geojson(table_id: str) -> None:
+ errors = insert_geojson.insert_geojson(override_values={"table_id": table_id})
+ assert not errors
diff --git a/testbed/googleapis__python-bigquery/samples/geography/insert_wkt.py b/testbed/googleapis__python-bigquery/samples/geography/insert_wkt.py
new file mode 100644
index 0000000000000000000000000000000000000000..2923d2596ecee37d81693983b4166b68947ac99a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/insert_wkt.py
@@ -0,0 +1,58 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Mapping, Optional, Sequence
+
+
+def insert_wkt(
+ override_values: Optional[Mapping[str, str]] = None
+) -> Sequence[Dict[str, object]]:
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_insert_geography_wkt]
+ from google.cloud import bigquery
+ import shapely.geometry
+ import shapely.wkt
+
+ bigquery_client = bigquery.Client()
+
+ # This example uses a table containing a column named "geo" with the
+ # GEOGRAPHY data type.
+ table_id = "my-project.my_dataset.my_table"
+ # [END bigquery_insert_geography_wkt]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ table_id = override_values.get("table_id", table_id)
+ # [START bigquery_insert_geography_wkt]
+
+ # Use the Shapely library to generate WKT of a line from LAX to
+ # JFK airports. Alternatively, you may define WKT data directly.
+ my_geography = shapely.geometry.LineString(
+ [(-118.4085, 33.9416), (-73.7781, 40.6413)]
+ )
+ rows = [
+ # Convert data into a WKT string.
+ {"geo": shapely.wkt.dumps(my_geography)},
+ ]
+
+ # table already exists and has a column
+ # named "geo" with data type GEOGRAPHY.
+ errors = bigquery_client.insert_rows_json(table_id, rows)
+ if errors:
+ raise RuntimeError(f"row insert failed: {errors}")
+ else:
+ print(f"wrote 1 row to {table_id}")
+ # [END bigquery_insert_geography_wkt]
+ return errors
diff --git a/testbed/googleapis__python-bigquery/samples/geography/insert_wkt_test.py b/testbed/googleapis__python-bigquery/samples/geography/insert_wkt_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7c3d4ed3fa304a8d395643d5d965fc21224ffc9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/insert_wkt_test.py
@@ -0,0 +1,20 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import insert_wkt
+
+
+def test_insert_wkt(table_id: str) -> None:
+ errors = insert_wkt.insert_wkt(override_values={"table_id": table_id})
+ assert not errors
diff --git a/testbed/googleapis__python-bigquery/samples/geography/mypy.ini b/testbed/googleapis__python-bigquery/samples/geography/mypy.ini
new file mode 100644
index 0000000000000000000000000000000000000000..41898432f6b94ba9a19da45f939f2100dae38de9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
+; We require type annotations in all samples.
+strict = True
+exclude = noxfile\.py
+warn_unused_configs = True
+
+[mypy-geojson,pandas,shapely.*]
+ignore_missing_imports = True
diff --git a/testbed/googleapis__python-bigquery/samples/geography/noxfile.py b/testbed/googleapis__python-bigquery/samples/geography/noxfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7135946fd5e16ec56529107714432b2e42a84d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/noxfile.py
@@ -0,0 +1,293 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import glob
+import os
+from pathlib import Path
+import sys
+from typing import Callable, Dict, Optional
+
+import nox
+
+
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# DO NOT EDIT THIS FILE EVER!
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+
+BLACK_VERSION = "black==22.3.0"
+ISORT_VERSION = "isort==5.10.1"
+
+# Copy `noxfile_config.py` to your directory and modify it instead.
+
+# `TEST_CONFIG` dict is a configuration hook that allows users to
+# modify the test configurations. The values here should be in sync
+# with `noxfile_config.py`. Users will copy `noxfile_config.py` into
+# their directory and modify it.
+
+TEST_CONFIG = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": False,
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
+
+
+try:
+ # Ensure we can import noxfile_config in the project's directory.
+ sys.path.append(".")
+ from noxfile_config import TEST_CONFIG_OVERRIDE
+except ImportError as e:
+ print("No user noxfile_config found: detail: {}".format(e))
+ TEST_CONFIG_OVERRIDE = {}
+
+# Update the TEST_CONFIG with the user supplied values.
+TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
+
+
+def get_pytest_env_vars() -> Dict[str, str]:
+ """Returns a dict for pytest invocation."""
+ ret = {}
+
+ # Override the GCLOUD_PROJECT and the alias.
+ env_key = TEST_CONFIG["gcloud_project_env"]
+ # This should error out if not set.
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
+
+ # Apply user supplied envs.
+ ret.update(TEST_CONFIG["envs"])
+ return ret
+
+
+# DO NOT EDIT - automatically generated.
+# All versions used to test samples.
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
+
+# Any default versions that should be ignored.
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
+
+TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
+
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+#
+# Style Checks
+#
+
+
+# Linting with flake8.
+#
+# We ignore the following rules:
+# E203: whitespace before ‘:’
+# E266: too many leading ‘#’ for block comment
+# E501: line too long
+# I202: Additional newline in a section of imports
+#
+# We also need to specify the rules which are ignored by default:
+# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
+FLAKE8_COMMON_ARGS = [
+ "--show-source",
+ "--builtin=gettext",
+ "--max-complexity=20",
+ "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
+ "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
+ "--max-line-length=88",
+]
+
+
+@nox.session
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8")
+ else:
+ session.install("flake8", "flake8-annotations")
+
+ args = FLAKE8_COMMON_ARGS + [
+ ".",
+ ]
+ session.run("flake8", *args)
+
+
+#
+# Black
+#
+
+
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ session.run("black", *python_files)
+
+
+#
+# format = isort + black
+#
+
+
+@nox.session
+def format(session: nox.sessions.Session) -> None:
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run("isort", "--fss", *python_files)
+ session.run("black", *python_files)
+
+
+#
+# Sample Tests
+#
+
+
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob(
+ "**/test_*.py", recursive=True
+ )
+ test_list.extend(glob.glob("**/tests", recursive=True))
+
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ return
+
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ concurrent_args = []
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+ with open("requirements.txt") as rfile:
+ packages = rfile.read()
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
+ else:
+ session.install("-r", "requirements-test.txt")
+ with open("requirements-test.txt") as rtfile:
+ packages += rtfile.read()
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ if "pytest-parallel" in packages:
+ concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"])
+ elif "pytest-xdist" in packages:
+ concurrent_args.extend(["-n", "auto"])
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
+
+
+@nox.session(python=ALL_VERSIONS)
+def py(session: nox.sessions.Session) -> None:
+ """Runs py.test for a sample using the specified version of Python."""
+ if session.python in TESTED_VERSIONS:
+ _session_tests(session)
+ else:
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
+
+
+#
+# Readmegen
+#
+
+
+def _get_repo_root() -> Optional[str]:
+ """Returns the root folder of the project."""
+ # Get root of this repository. Assume we don't have directories nested deeper than 10 items.
+ p = Path(os.getcwd())
+ for i in range(10):
+ if p is None:
+ break
+ if Path(p / ".git").exists():
+ return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
+ p = p.parent
+ raise Exception("Unable to detect repository root.")
+
+
+GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")])
+
+
+@nox.session
+@nox.parametrize("path", GENERATED_READMES)
+def readmegen(session: nox.sessions.Session, path: str) -> None:
+ """(Re-)generates the readme for a sample."""
+ session.install("jinja2", "pyyaml")
+ dir_ = os.path.dirname(path)
+
+ if os.path.exists(os.path.join(dir_, "requirements.txt")):
+ session.install("-r", os.path.join(dir_, "requirements.txt"))
+
+ in_file = os.path.join(dir_, "README.rst.in")
+ session.run(
+ "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/geography/noxfile_config.py b/testbed/googleapis__python-bigquery/samples/geography/noxfile_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..315bd5be8cd96033e1df2b66d47b3cd307f18f46
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/noxfile_config.py
@@ -0,0 +1,40 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default TEST_CONFIG_OVERRIDE for python repos.
+
+# You can copy this file into your directory, then it will be inported from
+# the noxfile.py.
+
+# The source of truth:
+# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py
+
+TEST_CONFIG_OVERRIDE = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [
+ "2.7",
+ # TODO: Enable 3.10 once there is a geopandas/fiona release.
+ # https://github.com/Toblerity/Fiona/issues/1043
+ "3.10",
+ ],
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
diff --git a/testbed/googleapis__python-bigquery/samples/geography/requirements-test.txt b/testbed/googleapis__python-bigquery/samples/geography/requirements-test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1ccebd9cd20e69bdee9aeed9bcbcb7ab4ca5a1ed
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/requirements-test.txt
@@ -0,0 +1,3 @@
+pytest===7.4.4; python_version == '3.7'
+pytest==8.3.3; python_version >= '3.8'
+mock==5.1.0
diff --git a/testbed/googleapis__python-bigquery/samples/geography/requirements.txt b/testbed/googleapis__python-bigquery/samples/geography/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1089dc1957ab02a50660d32023d6fcb7826b7d0e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/requirements.txt
@@ -0,0 +1,59 @@
+attrs==24.2.0
+certifi==2024.8.30
+cffi===1.15.1; python_version == '3.7'
+cffi==1.17.1; python_version >= '3.8'
+charset-normalizer==3.3.2
+click==8.1.7
+click-plugins==1.1.1
+cligj==0.7.2
+dataclasses==0.8; python_version < '3.7'
+db-dtypes==1.3.0
+Fiona===1.9.6; python_version == '3.7'
+Fiona==1.10.1; python_version >= '3.8'
+geojson==3.1.0
+geopandas===0.10.2; python_version == '3.7'
+geopandas===0.13.2; python_version == '3.8'
+geopandas==1.0.1; python_version >= '3.9'
+google-api-core==2.20.0
+google-auth==2.35.0
+google-cloud-bigquery==3.26.0
+google-cloud-bigquery-storage==2.26.0
+google-cloud-core==2.4.1
+google-crc32c===1.5.0; python_version < '3.9'
+google-crc32c==1.6.0; python_version >= '3.9'
+google-resumable-media==2.7.2
+googleapis-common-protos==1.65.0
+grpcio===1.62.2; python_version == '3.7'
+grpcio==1.66.1; python_version >= '3.8'
+idna==3.10
+munch==4.0.0
+mypy-extensions==1.0.0
+packaging===24.0; python_version == '3.7'
+packaging==24.1; python_version >= '3.8'
+pandas===1.3.5; python_version == '3.7'
+pandas===2.0.3; python_version == '3.8'
+pandas==2.2.3; python_version >= '3.9'
+proto-plus==1.24.0
+pyarrow==12.0.1; python_version == '3.7'
+pyarrow==17.0.0; python_version >= '3.8'
+pyasn1===0.5.1; python_version == '3.7'
+pyasn1==0.6.1; python_version >= '3.8'
+pyasn1-modules===0.3.0; python_version == '3.7'
+pyasn1-modules==0.4.1; python_version >= '3.8'
+pycparser===2.21; python_version == '3.7'
+pycparser==2.22; python_version >= '3.8'
+pyparsing==3.1.4
+python-dateutil==2.9.0.post0
+pytz==2024.2
+PyYAML===6.0.1; python_version == '3.7'
+PyYAML==6.0.2; python_version >= '3.8'
+requests==2.31.0; python_version == '3.7'
+requests==2.32.3; python_version >= '3.8'
+rsa==4.9
+Shapely==2.0.6
+six==1.16.0
+typing-extensions===4.7.1; python_version == '3.7'
+typing-extensions==4.12.2; python_version >= '3.8'
+typing-inspect==0.9.0
+urllib3===1.26.18; python_version == '3.7'
+urllib3==2.2.3; python_version >= '3.8'
diff --git a/testbed/googleapis__python-bigquery/samples/geography/to_geodataframe.py b/testbed/googleapis__python-bigquery/samples/geography/to_geodataframe.py
new file mode 100644
index 0000000000000000000000000000000000000000..630d8d0bf61ccb93d7cd1dc3758f4dc7aecb951e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/to_geodataframe.py
@@ -0,0 +1,38 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+if typing.TYPE_CHECKING:
+ import pandas
+
+
+client: bigquery.Client = bigquery.Client()
+
+
+def get_austin_service_requests_as_geography() -> "pandas.DataFrame":
+ # [START bigquery_query_results_geodataframe]
+
+ sql = """
+ SELECT created_date, complaint_description,
+ ST_GEOGPOINT(longitude, latitude) as location
+ FROM bigquery-public-data.austin_311.311_service_requests
+ LIMIT 10
+ """
+
+ df = client.query_and_wait(sql).to_geodataframe()
+ # [END bigquery_query_results_geodataframe]
+ return df
diff --git a/testbed/googleapis__python-bigquery/samples/geography/to_geodataframe_test.py b/testbed/googleapis__python-bigquery/samples/geography/to_geodataframe_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..7499d7001d5fe19382e45bae1cf2d710165dc31d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/geography/to_geodataframe_test.py
@@ -0,0 +1,25 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from .to_geodataframe import get_austin_service_requests_as_geography
+
+
+def test_get_austin_service_requests_as_geography() -> None:
+ geopandas = pytest.importorskip("geopandas")
+ df = get_austin_service_requests_as_geography()
+ assert isinstance(df, geopandas.GeoDataFrame)
+ assert len(list(df)) == 3 # verify the number of columns
+ assert len(df) == 10 # verify the number of rows
diff --git a/testbed/googleapis__python-bigquery/samples/magics/__init__.py b/testbed/googleapis__python-bigquery/samples/magics/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fbd93bb2ca4d982f578388ee47499e8a421f50e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/testbed/googleapis__python-bigquery/samples/magics/_helpers.py b/testbed/googleapis__python-bigquery/samples/magics/_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7248ee3d67eb75af29b8fb1a16edfa19aa5db88
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/_helpers.py
@@ -0,0 +1,21 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def strip_region_tags(sample_text: str) -> str:
+ """Remove blank lines and region tags from sample text"""
+ magic_lines = [
+ line for line in sample_text.split("\n") if len(line) > 0 and "# [" not in line
+ ]
+ return "\n".join(magic_lines)
diff --git a/testbed/googleapis__python-bigquery/samples/magics/conftest.py b/testbed/googleapis__python-bigquery/samples/magics/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..55ea30f90d70d80e0c15730fd6d482c67ab2500b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/conftest.py
@@ -0,0 +1,44 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+from typing import Iterator
+
+import pytest
+
+if typing.TYPE_CHECKING:
+ from IPython.core.interactiveshell import TerminalInteractiveShell
+
+interactiveshell = pytest.importorskip("IPython.terminal.interactiveshell")
+tools = pytest.importorskip("IPython.testing.tools")
+
+
+@pytest.fixture(scope="session")
+def ipython() -> "TerminalInteractiveShell":
+ config = tools.default_config()
+ config.TerminalInteractiveShell.simple_prompt = True
+ shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
+ return shell
+
+
+@pytest.fixture(autouse=True)
+def ipython_interactive(
+ ipython: "TerminalInteractiveShell",
+) -> Iterator["TerminalInteractiveShell"]:
+ """Activate IPython's builtin hooks
+
+ for the duration of the test scope.
+ """
+ with ipython.builtin_trap:
+ yield ipython
diff --git a/testbed/googleapis__python-bigquery/samples/magics/mypy.ini b/testbed/googleapis__python-bigquery/samples/magics/mypy.ini
new file mode 100644
index 0000000000000000000000000000000000000000..af328dc5eba9437a50abb443f71113854831cf38
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
+; We require type annotations in all samples.
+strict = True
+exclude = noxfile\.py
+warn_unused_configs = True
+
+[mypy-IPython.*,nox,noxfile_config,pandas]
+ignore_missing_imports = True
diff --git a/testbed/googleapis__python-bigquery/samples/magics/noxfile.py b/testbed/googleapis__python-bigquery/samples/magics/noxfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7135946fd5e16ec56529107714432b2e42a84d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/noxfile.py
@@ -0,0 +1,293 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import glob
+import os
+from pathlib import Path
+import sys
+from typing import Callable, Dict, Optional
+
+import nox
+
+
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# DO NOT EDIT THIS FILE EVER!
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+
+BLACK_VERSION = "black==22.3.0"
+ISORT_VERSION = "isort==5.10.1"
+
+# Copy `noxfile_config.py` to your directory and modify it instead.
+
+# `TEST_CONFIG` dict is a configuration hook that allows users to
+# modify the test configurations. The values here should be in sync
+# with `noxfile_config.py`. Users will copy `noxfile_config.py` into
+# their directory and modify it.
+
+TEST_CONFIG = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": False,
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
+
+
+try:
+ # Ensure we can import noxfile_config in the project's directory.
+ sys.path.append(".")
+ from noxfile_config import TEST_CONFIG_OVERRIDE
+except ImportError as e:
+ print("No user noxfile_config found: detail: {}".format(e))
+ TEST_CONFIG_OVERRIDE = {}
+
+# Update the TEST_CONFIG with the user supplied values.
+TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
+
+
+def get_pytest_env_vars() -> Dict[str, str]:
+ """Returns a dict for pytest invocation."""
+ ret = {}
+
+ # Override the GCLOUD_PROJECT and the alias.
+ env_key = TEST_CONFIG["gcloud_project_env"]
+ # This should error out if not set.
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
+
+ # Apply user supplied envs.
+ ret.update(TEST_CONFIG["envs"])
+ return ret
+
+
+# DO NOT EDIT - automatically generated.
+# All versions used to test samples.
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
+
+# Any default versions that should be ignored.
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
+
+TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
+
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+#
+# Style Checks
+#
+
+
+# Linting with flake8.
+#
+# We ignore the following rules:
+# E203: whitespace before ‘:’
+# E266: too many leading ‘#’ for block comment
+# E501: line too long
+# I202: Additional newline in a section of imports
+#
+# We also need to specify the rules which are ignored by default:
+# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
+FLAKE8_COMMON_ARGS = [
+ "--show-source",
+ "--builtin=gettext",
+ "--max-complexity=20",
+ "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
+ "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
+ "--max-line-length=88",
+]
+
+
+@nox.session
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8")
+ else:
+ session.install("flake8", "flake8-annotations")
+
+ args = FLAKE8_COMMON_ARGS + [
+ ".",
+ ]
+ session.run("flake8", *args)
+
+
+#
+# Black
+#
+
+
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ session.run("black", *python_files)
+
+
+#
+# format = isort + black
+#
+
+
+@nox.session
+def format(session: nox.sessions.Session) -> None:
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run("isort", "--fss", *python_files)
+ session.run("black", *python_files)
+
+
+#
+# Sample Tests
+#
+
+
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob(
+ "**/test_*.py", recursive=True
+ )
+ test_list.extend(glob.glob("**/tests", recursive=True))
+
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ return
+
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ concurrent_args = []
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+ with open("requirements.txt") as rfile:
+ packages = rfile.read()
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
+ else:
+ session.install("-r", "requirements-test.txt")
+ with open("requirements-test.txt") as rtfile:
+ packages += rtfile.read()
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ if "pytest-parallel" in packages:
+ concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"])
+ elif "pytest-xdist" in packages:
+ concurrent_args.extend(["-n", "auto"])
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
+
+
+@nox.session(python=ALL_VERSIONS)
+def py(session: nox.sessions.Session) -> None:
+ """Runs py.test for a sample using the specified version of Python."""
+ if session.python in TESTED_VERSIONS:
+ _session_tests(session)
+ else:
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
+
+
+#
+# Readmegen
+#
+
+
+def _get_repo_root() -> Optional[str]:
+ """Returns the root folder of the project."""
+ # Get root of this repository. Assume we don't have directories nested deeper than 10 items.
+ p = Path(os.getcwd())
+ for i in range(10):
+ if p is None:
+ break
+ if Path(p / ".git").exists():
+ return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
+ p = p.parent
+ raise Exception("Unable to detect repository root.")
+
+
+GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")])
+
+
+@nox.session
+@nox.parametrize("path", GENERATED_READMES)
+def readmegen(session: nox.sessions.Session, path: str) -> None:
+ """(Re-)generates the readme for a sample."""
+ session.install("jinja2", "pyyaml")
+ dir_ = os.path.dirname(path)
+
+ if os.path.exists(os.path.join(dir_, "requirements.txt")):
+ session.install("-r", os.path.join(dir_, "requirements.txt"))
+
+ in_file = os.path.join(dir_, "README.rst.in")
+ session.run(
+ "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/magics/noxfile_config.py b/testbed/googleapis__python-bigquery/samples/magics/noxfile_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..982751b8bfd156e7dd4d7815b506690396aa0152
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/noxfile_config.py
@@ -0,0 +1,37 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default TEST_CONFIG_OVERRIDE for python repos.
+
+# You can copy this file into your directory, then it will be inported from
+# the noxfile.py.
+
+# The source of truth:
+# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py
+
+TEST_CONFIG_OVERRIDE = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [
+ "2.7",
+ ],
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
diff --git a/testbed/googleapis__python-bigquery/samples/magics/query.py b/testbed/googleapis__python-bigquery/samples/magics/query.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ac947db0d09044cea67870338e4cee1d055b8ac
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/query.py
@@ -0,0 +1,42 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import IPython
+
+from . import _helpers
+
+if typing.TYPE_CHECKING:
+ import pandas
+
+
+def query() -> "pandas.DataFrame":
+ ip = IPython.get_ipython()
+ ip.extension_manager.load_extension("bigquery_magics")
+
+ sample = """
+ # [START bigquery_jupyter_query]
+ %%bigquery
+ SELECT name, SUM(number) as count
+ FROM `bigquery-public-data.usa_names.usa_1910_current`
+ GROUP BY name
+ ORDER BY count DESC
+ LIMIT 3
+ # [END bigquery_jupyter_query]
+ """
+ result = ip.run_cell(_helpers.strip_region_tags(sample))
+ result.raise_error() # Throws an exception if the cell failed.
+ df = ip.user_ns["_"] # Retrieves last returned object in notebook session
+ return df
diff --git a/testbed/googleapis__python-bigquery/samples/magics/query_params_scalars.py b/testbed/googleapis__python-bigquery/samples/magics/query_params_scalars.py
new file mode 100644
index 0000000000000000000000000000000000000000..74f665acbb8b97f801b12d36eec53d1832468e4e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/query_params_scalars.py
@@ -0,0 +1,43 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import IPython
+
+from . import _helpers
+
+if typing.TYPE_CHECKING:
+ import pandas
+
+
+def query_with_parameters() -> "pandas.DataFrame":
+ ip = IPython.get_ipython()
+ ip.extension_manager.load_extension("bigquery_magics")
+
+ sample = """
+ # [START bigquery_jupyter_query_params_scalars]
+ %%bigquery --params {"corpus_name": "hamlet", "limit": 10}
+ SELECT word, SUM(word_count) as count
+ FROM `bigquery-public-data.samples.shakespeare`
+ WHERE corpus = @corpus_name
+ GROUP BY word
+ ORDER BY count DESC
+ LIMIT @limit
+ # [END bigquery_jupyter_query_params_scalars]
+ """
+ result = ip.run_cell(_helpers.strip_region_tags(sample))
+ result.raise_error() # Throws an exception if the cell failed.
+ df = ip.user_ns["_"] # Retrieves last returned object in notebook session
+ return df
diff --git a/testbed/googleapis__python-bigquery/samples/magics/query_params_scalars_test.py b/testbed/googleapis__python-bigquery/samples/magics/query_params_scalars_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f481cbe92499b2acdc355984469d9b3a468367f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/query_params_scalars_test.py
@@ -0,0 +1,23 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pandas
+
+from . import query_params_scalars
+
+
+def test_query_with_parameters() -> None:
+ df = query_params_scalars.query_with_parameters()
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 10
diff --git a/testbed/googleapis__python-bigquery/samples/magics/query_test.py b/testbed/googleapis__python-bigquery/samples/magics/query_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..1aaa9c1bb3bd344a8855550ce85d4ee013f5d928
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/query_test.py
@@ -0,0 +1,23 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pandas
+
+from . import query
+
+
+def test_query() -> None:
+ df = query.query()
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 3
diff --git a/testbed/googleapis__python-bigquery/samples/magics/requirements-test.txt b/testbed/googleapis__python-bigquery/samples/magics/requirements-test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1640e1a9502a5a2c6159a72c6b70b440273004f7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/requirements-test.txt
@@ -0,0 +1,4 @@
+google-cloud-testutils==1.4.0
+pytest===7.4.4; python_version == '3.7'
+pytest==8.3.3; python_version >= '3.8'
+mock==5.1.0
diff --git a/testbed/googleapis__python-bigquery/samples/magics/requirements.txt b/testbed/googleapis__python-bigquery/samples/magics/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6386fb6d2462590219792ecc675b763209f548ab
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/magics/requirements.txt
@@ -0,0 +1,10 @@
+bigquery_magics==0.4.0
+db-dtypes==1.3.0
+google.cloud.bigquery==3.26.0
+google-cloud-bigquery-storage==2.26.0
+ipython===7.31.1; python_version == '3.7'
+ipython===8.0.1; python_version == '3.8'
+ipython===8.18.1; python_version >= '3.9'
+pandas===1.3.5; python_version == '3.7'
+pandas===2.0.3; python_version == '3.8'
+pandas==2.2.3; python_version >= '3.9'
diff --git a/testbed/googleapis__python-bigquery/samples/notebooks/__init__.py b/testbed/googleapis__python-bigquery/samples/notebooks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fbd93bb2ca4d982f578388ee47499e8a421f50e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/notebooks/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/testbed/googleapis__python-bigquery/samples/notebooks/conftest.py b/testbed/googleapis__python-bigquery/samples/notebooks/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdc85a8522c13e6737296a63009bbf65643eef74
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/notebooks/conftest.py
@@ -0,0 +1,23 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud import bigquery
+import pytest
+
+
+@pytest.fixture
+def bigquery_client_patch(
+ monkeypatch: pytest.MonkeyPatch, bigquery_client: bigquery.Client
+) -> None:
+ monkeypatch.setattr(bigquery, "Client", lambda: bigquery_client)
diff --git a/testbed/googleapis__python-bigquery/samples/notebooks/jupyter_tutorial_test.py b/testbed/googleapis__python-bigquery/samples/notebooks/jupyter_tutorial_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c2cf9390f879612cebdfaef2b50b903a423eb8e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/notebooks/jupyter_tutorial_test.py
@@ -0,0 +1,175 @@
+# Copyright 2018 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+from typing import Iterator
+
+import pytest
+
+if typing.TYPE_CHECKING:
+ from IPython.terminal.interactiveshell import TerminalInteractiveShell
+
+IPython = pytest.importorskip("IPython")
+interactiveshell = pytest.importorskip("IPython.terminal.interactiveshell")
+tools = pytest.importorskip("IPython.testing.tools")
+matplotlib = pytest.importorskip("matplotlib")
+
+# Ignore semicolon lint warning because semicolons are used in notebooks
+# flake8: noqa E703
+
+
+@pytest.fixture(scope="session")
+def ipython() -> "TerminalInteractiveShell":
+ config = tools.default_config()
+ config.TerminalInteractiveShell.simple_prompt = True
+ shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
+ return shell
+
+
+@pytest.fixture()
+def ipython_interactive(
+ request: pytest.FixtureRequest, ipython: "TerminalInteractiveShell"
+) -> Iterator["TerminalInteractiveShell"]:
+ """Activate IPython's builtin hooks
+
+ for the duration of the test scope.
+ """
+ with ipython.builtin_trap:
+ yield ipython
+
+
+def _strip_region_tags(sample_text: str) -> str:
+ """Remove blank lines and region tags from sample text"""
+ magic_lines = [
+ line for line in sample_text.split("\n") if len(line) > 0 and "# [" not in line
+ ]
+ return "\n".join(magic_lines)
+
+
+def test_jupyter_tutorial(ipython: "TerminalInteractiveShell") -> None:
+ matplotlib.use("agg")
+ ip = IPython.get_ipython()
+ ip.extension_manager.load_extension("bigquery_magics")
+
+ sample = """
+ # [START bigquery_jupyter_magic_gender_by_year]
+ %%bigquery
+ SELECT
+ source_year AS year,
+ COUNT(is_male) AS birth_count
+ FROM `bigquery-public-data.samples.natality`
+ GROUP BY year
+ ORDER BY year DESC
+ LIMIT 15
+ # [END bigquery_jupyter_magic_gender_by_year]
+ """
+ result = ip.run_cell(_strip_region_tags(sample))
+ result.raise_error() # Throws an exception if the cell failed.
+
+ sample = """
+ # [START bigquery_jupyter_magic_gender_by_year_var]
+ %%bigquery total_births
+ SELECT
+ source_year AS year,
+ COUNT(is_male) AS birth_count
+ FROM `bigquery-public-data.samples.natality`
+ GROUP BY year
+ ORDER BY year DESC
+ LIMIT 15
+ # [END bigquery_jupyter_magic_gender_by_year_var]
+ """
+ result = ip.run_cell(_strip_region_tags(sample))
+ result.raise_error() # Throws an exception if the cell failed.
+
+ assert "total_births" in ip.user_ns # verify that variable exists
+ total_births = ip.user_ns["total_births"]
+ # [START bigquery_jupyter_plot_births_by_year]
+ total_births.plot(kind="bar", x="year", y="birth_count")
+ # [END bigquery_jupyter_plot_births_by_year]
+
+ sample = """
+ # [START bigquery_jupyter_magic_gender_by_weekday]
+ %%bigquery births_by_weekday
+ SELECT
+ wday,
+ SUM(CASE WHEN is_male THEN 1 ELSE 0 END) AS male_births,
+ SUM(CASE WHEN is_male THEN 0 ELSE 1 END) AS female_births
+ FROM `bigquery-public-data.samples.natality`
+ WHERE wday IS NOT NULL
+ GROUP BY wday
+ ORDER BY wday ASC
+ # [END bigquery_jupyter_magic_gender_by_weekday]
+ """
+ result = ip.run_cell(_strip_region_tags(sample))
+ result.raise_error() # Throws an exception if the cell failed.
+
+ assert "births_by_weekday" in ip.user_ns # verify that variable exists
+ births_by_weekday = ip.user_ns["births_by_weekday"]
+ # [START bigquery_jupyter_plot_births_by_weekday]
+ births_by_weekday.plot(x="wday")
+ # [END bigquery_jupyter_plot_births_by_weekday]
+
+ # [START bigquery_jupyter_import_and_client]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+ # [END bigquery_jupyter_import_and_client]
+
+ # [START bigquery_jupyter_query_plurality_by_year]
+ sql = """
+ SELECT
+ plurality,
+ COUNT(1) AS count,
+ year
+ FROM
+ `bigquery-public-data.samples.natality`
+ WHERE
+ NOT IS_NAN(plurality) AND plurality > 1
+ GROUP BY
+ plurality, year
+ ORDER BY
+ count DESC
+ """
+ df = client.query(sql).to_dataframe()
+ df.head()
+ # [END bigquery_jupyter_query_plurality_by_year]
+
+ # [START bigquery_jupyter_plot_plurality_by_year]
+ pivot_table = df.pivot(index="year", columns="plurality", values="count")
+ pivot_table.plot(kind="bar", stacked=True, figsize=(15, 7))
+ # [END bigquery_jupyter_plot_plurality_by_year]
+
+ # [START bigquery_jupyter_query_births_by_gestation]
+ sql = """
+ SELECT
+ gestation_weeks,
+ COUNT(1) AS count
+ FROM
+ `bigquery-public-data.samples.natality`
+ WHERE
+ NOT IS_NAN(gestation_weeks) AND gestation_weeks <> 99
+ GROUP BY
+ gestation_weeks
+ ORDER BY
+ gestation_weeks
+ """
+ df = client.query(sql).to_dataframe()
+ # [END bigquery_jupyter_query_births_by_gestation]
+
+ # [START bigquery_jupyter_plot_births_by_gestation]
+ ax = df.plot(kind="bar", x="gestation_weeks", y="count", figsize=(15, 7))
+ ax.set_title("Count of Births by Gestation Weeks")
+ ax.set_xlabel("Gestation Weeks")
+ ax.set_ylabel("Count")
+ # [END bigquery_jupyter_plot_births_by_gestation]
diff --git a/testbed/googleapis__python-bigquery/samples/notebooks/mypy.ini b/testbed/googleapis__python-bigquery/samples/notebooks/mypy.ini
new file mode 100644
index 0000000000000000000000000000000000000000..dea60237bd35a7cb161321ec72a155fac5748f89
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/notebooks/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
+; We require type annotations in all samples.
+strict = True
+exclude = noxfile\.py
+warn_unused_configs = True
+
+[mypy-IPython.*,nox,noxfile_config,pandas]
+ignore_missing_imports = True
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/samples/notebooks/noxfile.py b/testbed/googleapis__python-bigquery/samples/notebooks/noxfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7135946fd5e16ec56529107714432b2e42a84d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/notebooks/noxfile.py
@@ -0,0 +1,293 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import glob
+import os
+from pathlib import Path
+import sys
+from typing import Callable, Dict, Optional
+
+import nox
+
+
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# DO NOT EDIT THIS FILE EVER!
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+
+BLACK_VERSION = "black==22.3.0"
+ISORT_VERSION = "isort==5.10.1"
+
+# Copy `noxfile_config.py` to your directory and modify it instead.
+
+# `TEST_CONFIG` dict is a configuration hook that allows users to
+# modify the test configurations. The values here should be in sync
+# with `noxfile_config.py`. Users will copy `noxfile_config.py` into
+# their directory and modify it.
+
+TEST_CONFIG = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": False,
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
+
+
+try:
+ # Ensure we can import noxfile_config in the project's directory.
+ sys.path.append(".")
+ from noxfile_config import TEST_CONFIG_OVERRIDE
+except ImportError as e:
+ print("No user noxfile_config found: detail: {}".format(e))
+ TEST_CONFIG_OVERRIDE = {}
+
+# Update the TEST_CONFIG with the user supplied values.
+TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
+
+
+def get_pytest_env_vars() -> Dict[str, str]:
+ """Returns a dict for pytest invocation."""
+ ret = {}
+
+ # Override the GCLOUD_PROJECT and the alias.
+ env_key = TEST_CONFIG["gcloud_project_env"]
+ # This should error out if not set.
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
+
+ # Apply user supplied envs.
+ ret.update(TEST_CONFIG["envs"])
+ return ret
+
+
+# DO NOT EDIT - automatically generated.
+# All versions used to test samples.
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
+
+# Any default versions that should be ignored.
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
+
+TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
+
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+#
+# Style Checks
+#
+
+
+# Linting with flake8.
+#
+# We ignore the following rules:
+# E203: whitespace before ‘:’
+# E266: too many leading ‘#’ for block comment
+# E501: line too long
+# I202: Additional newline in a section of imports
+#
+# We also need to specify the rules which are ignored by default:
+# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
+FLAKE8_COMMON_ARGS = [
+ "--show-source",
+ "--builtin=gettext",
+ "--max-complexity=20",
+ "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
+ "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
+ "--max-line-length=88",
+]
+
+
+@nox.session
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8")
+ else:
+ session.install("flake8", "flake8-annotations")
+
+ args = FLAKE8_COMMON_ARGS + [
+ ".",
+ ]
+ session.run("flake8", *args)
+
+
+#
+# Black
+#
+
+
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ session.run("black", *python_files)
+
+
+#
+# format = isort + black
+#
+
+
+@nox.session
+def format(session: nox.sessions.Session) -> None:
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run("isort", "--fss", *python_files)
+ session.run("black", *python_files)
+
+
+#
+# Sample Tests
+#
+
+
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob(
+ "**/test_*.py", recursive=True
+ )
+ test_list.extend(glob.glob("**/tests", recursive=True))
+
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ return
+
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ concurrent_args = []
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+ with open("requirements.txt") as rfile:
+ packages = rfile.read()
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
+ else:
+ session.install("-r", "requirements-test.txt")
+ with open("requirements-test.txt") as rtfile:
+ packages += rtfile.read()
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ if "pytest-parallel" in packages:
+ concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"])
+ elif "pytest-xdist" in packages:
+ concurrent_args.extend(["-n", "auto"])
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
+
+
+@nox.session(python=ALL_VERSIONS)
+def py(session: nox.sessions.Session) -> None:
+ """Runs py.test for a sample using the specified version of Python."""
+ if session.python in TESTED_VERSIONS:
+ _session_tests(session)
+ else:
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
+
+
+#
+# Readmegen
+#
+
+
+def _get_repo_root() -> Optional[str]:
+ """Returns the root folder of the project."""
+ # Get root of this repository. Assume we don't have directories nested deeper than 10 items.
+ p = Path(os.getcwd())
+ for i in range(10):
+ if p is None:
+ break
+ if Path(p / ".git").exists():
+ return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
+ p = p.parent
+ raise Exception("Unable to detect repository root.")
+
+
+GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")])
+
+
+@nox.session
+@nox.parametrize("path", GENERATED_READMES)
+def readmegen(session: nox.sessions.Session, path: str) -> None:
+ """(Re-)generates the readme for a sample."""
+ session.install("jinja2", "pyyaml")
+ dir_ = os.path.dirname(path)
+
+ if os.path.exists(os.path.join(dir_, "requirements.txt")):
+ session.install("-r", os.path.join(dir_, "requirements.txt"))
+
+ in_file = os.path.join(dir_, "README.rst.in")
+ session.run(
+ "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/notebooks/noxfile_config.py b/testbed/googleapis__python-bigquery/samples/notebooks/noxfile_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..315bd5be8cd96033e1df2b66d47b3cd307f18f46
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/notebooks/noxfile_config.py
@@ -0,0 +1,40 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default TEST_CONFIG_OVERRIDE for python repos.
+
+# You can copy this file into your directory, then it will be inported from
+# the noxfile.py.
+
+# The source of truth:
+# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py
+
+TEST_CONFIG_OVERRIDE = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [
+ "2.7",
+ # TODO: Enable 3.10 once there is a geopandas/fiona release.
+ # https://github.com/Toblerity/Fiona/issues/1043
+ "3.10",
+ ],
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
diff --git a/testbed/googleapis__python-bigquery/samples/notebooks/requirements-test.txt b/testbed/googleapis__python-bigquery/samples/notebooks/requirements-test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1640e1a9502a5a2c6159a72c6b70b440273004f7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/notebooks/requirements-test.txt
@@ -0,0 +1,4 @@
+google-cloud-testutils==1.4.0
+pytest===7.4.4; python_version == '3.7'
+pytest==8.3.3; python_version >= '3.8'
+mock==5.1.0
diff --git a/testbed/googleapis__python-bigquery/samples/notebooks/requirements.txt b/testbed/googleapis__python-bigquery/samples/notebooks/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7463e1afced692dbcfb5883bb33b66ad96b2b1a8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/notebooks/requirements.txt
@@ -0,0 +1,13 @@
+bigquery-magics==0.4.0
+db-dtypes==1.3.0
+google-cloud-bigquery==3.26.0
+google-cloud-bigquery-storage==2.26.0
+ipython===7.31.1; python_version == '3.7'
+ipython===8.0.1; python_version == '3.8'
+ipython===8.18.1; python_version >= '3.9'
+matplotlib===3.5.3; python_version == '3.7'
+matplotlib===3.7.4; python_version == '3.8'
+matplotlib==3.9.2; python_version >= '3.9'
+pandas===1.3.5; python_version == '3.7'
+pandas===2.0.3; python_version == '3.8'
+pandas==2.2.3; python_version >= '3.9'
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/.gitignore b/testbed/googleapis__python-bigquery/samples/snippets/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0dc05ffadec21794f4d1a54e75a8c6ff1e256b14
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/.gitignore
@@ -0,0 +1,2 @@
+client_secrets.json
+service_account.json
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/README.rst b/testbed/googleapis__python-bigquery/samples/snippets/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b5865a6ce9c17b954928e941de6843123e0d72e6
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/README.rst
@@ -0,0 +1,149 @@
+.. This file is automatically generated. Do not edit this file directly.
+
+Google BigQuery Python Samples
+===============================================================================
+
+.. image:: https://gstatic.com/cloudssh/images/open-btn.png
+ :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigquery/cloud-client/README.rst
+
+
+This directory contains samples for Google BigQuery. `Google BigQuery`_ is Google's fully managed, petabyte scale, low cost analytics data warehouse. BigQuery is NoOps—there is no infrastructure to manage and you don't need a database administrator—so you can focus on analyzing data to find meaningful insights, use familiar SQL, and take advantage of our pay-as-you-go model.
+
+
+
+
+.. _Google BigQuery: https://cloud.google.com/bigquery/docs
+
+
+To run the sample, you need to have the `BigQuery Admin` role.
+
+
+
+Setup
+-------------------------------------------------------------------------------
+
+
+Authentication
+++++++++++++++
+
+This sample requires you to have authentication setup. Refer to the
+`Authentication Getting Started Guide`_ for instructions on setting up
+credentials for applications.
+
+.. _Authentication Getting Started Guide:
+ https://cloud.google.com/docs/authentication/getting-started
+
+Install Dependencies
+++++++++++++++++++++
+
+#. Clone python-docs-samples and change directory to the sample directory you want to use.
+
+ .. code-block:: bash
+
+ $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git
+
+#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions.
+
+ .. _Python Development Environment Setup Guide:
+ https://cloud.google.com/python/setup
+
+#. Create a virtualenv. Samples are compatible with Python 3.7+.
+
+ .. code-block:: bash
+
+ $ virtualenv env
+ $ source env/bin/activate
+
+#. Install the dependencies needed to run the samples.
+
+ .. code-block:: bash
+
+ $ pip install -r requirements.txt
+
+.. _pip: https://pip.pypa.io/
+.. _virtualenv: https://virtualenv.pypa.io/
+
+Samples
+-------------------------------------------------------------------------------
+
+Quickstart
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. image:: https://gstatic.com/cloudssh/images/open-btn.png
+ :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigquery/cloud-client/quickstart.py,bigquery/cloud-client/README.rst
+
+
+
+
+To run this sample:
+
+.. code-block:: bash
+
+ $ python quickstart.py
+
+
+Simple Application
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. image:: https://gstatic.com/cloudssh/images/open-btn.png
+ :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigquery/cloud-client/simple_app.py,bigquery/cloud-client/README.rst
+
+
+
+
+To run this sample:
+
+.. code-block:: bash
+
+ $ python simple_app.py
+
+
+User Credentials
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. image:: https://gstatic.com/cloudssh/images/open-btn.png
+ :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigquery/cloud-client/user_credentials.py,bigquery/cloud-client/README.rst
+
+
+
+
+To run this sample:
+
+.. code-block:: bash
+
+ $ python user_credentials.py
+
+ usage: user_credentials.py [-h] [--launch-browser] project
+
+ Command-line application to run a query using user credentials.
+
+ You must supply a client secrets file, which would normally be bundled with
+ your application.
+
+ positional arguments:
+ project Project to use for BigQuery billing.
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --launch-browser Use a local server flow to authenticate.
+
+
+
+
+
+The client library
+-------------------------------------------------------------------------------
+
+This sample uses the `Google Cloud Client Library for Python`_.
+You can read the documentation for more details on API usage and use GitHub
+to `browse the source`_ and `report issues`_.
+
+.. _Google Cloud Client Library for Python:
+ https://googlecloudplatform.github.io/google-cloud-python/
+.. _browse the source:
+ https://github.com/GoogleCloudPlatform/google-cloud-python
+.. _report issues:
+ https://github.com/GoogleCloudPlatform/google-cloud-python/issues
+
+
+.. _Google Cloud SDK: https://cloud.google.com/sdk/
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/README.rst.in b/testbed/googleapis__python-bigquery/samples/snippets/README.rst.in
new file mode 100644
index 0000000000000000000000000000000000000000..74b7fa940376ebfa22d54828a210fe84fe04484e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/README.rst.in
@@ -0,0 +1,31 @@
+# This file is used to generate README.rst
+
+product:
+ name: Google BigQuery
+ short_name: BigQuery
+ url: https://cloud.google.com/bigquery/docs
+ description: >
+ `Google BigQuery`_ is Google's fully managed, petabyte scale, low cost
+ analytics data warehouse. BigQuery is NoOps—there is no infrastructure to
+ manage and you don't need a database administrator—so you can focus on
+ analyzing data to find meaningful insights, use familiar SQL, and take
+ advantage of our pay-as-you-go model.
+
+required_role: BigQuery Admin
+
+setup:
+- auth
+- install_deps
+
+samples:
+- name: Quickstart
+ file: quickstart.py
+- name: Simple Application
+ file: simple_app.py
+- name: User Credentials
+ file: user_credentials.py
+ show_help: true
+
+cloud_client_library: true
+
+folder: bigquery/cloud-client
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/authenticate_service_account.py b/testbed/googleapis__python-bigquery/samples/snippets/authenticate_service_account.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a8c9557dc9b16ad019c18ea6b676a7c55476012
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/authenticate_service_account.py
@@ -0,0 +1,52 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import typing
+
+if typing.TYPE_CHECKING:
+ from google.cloud import bigquery
+
+
+def main() -> "bigquery.Client":
+ key_path = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
+
+ # [START bigquery_client_json_credentials]
+ from google.cloud import bigquery
+ from google.oauth2 import service_account
+
+ # TODO(developer): Set key_path to the path to the service account key
+ # file.
+ # key_path = "path/to/service_account.json"
+
+ credentials = service_account.Credentials.from_service_account_file(
+ key_path,
+ scopes=["https://www.googleapis.com/auth/cloud-platform"],
+ )
+
+ # Alternatively, use service_account.Credentials.from_service_account_info()
+ # to set credentials directly via a json object rather than set a filepath
+ # TODO(developer): Set key_json to the content of the service account key file.
+ # credentials = service_account.Credentials.from_service_account_info(key_json)
+
+ client = bigquery.Client(
+ credentials=credentials,
+ project=credentials.project_id,
+ )
+ # [END bigquery_client_json_credentials]
+ return client
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/authenticate_service_account_test.py b/testbed/googleapis__python-bigquery/samples/snippets/authenticate_service_account_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbdd2d0649ffe8916ce9604f678abc3ccc81343b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/authenticate_service_account_test.py
@@ -0,0 +1,39 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+from typing import Any
+
+import google.auth
+
+import authenticate_service_account # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def mock_credentials(*args: Any, **kwargs: Any) -> google.auth.credentials.Credentials:
+ credentials, _ = google.auth.default(
+ ["https://www.googleapis.com/auth/cloud-platform"]
+ )
+ return credentials
+
+
+def test_main(monkeypatch: "pytest.MonkeyPatch") -> None:
+ monkeypatch.setattr(
+ "google.oauth2.service_account.Credentials.from_service_account_file",
+ mock_credentials,
+ )
+ client = authenticate_service_account.main()
+ assert client is not None
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/authorized_view_tutorial.py b/testbed/googleapis__python-bigquery/samples/snippets/authorized_view_tutorial.py
new file mode 100644
index 0000000000000000000000000000000000000000..f52170bc60a80eeb3954079ea2d3d1196396b568
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/authorized_view_tutorial.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+
+# Copyright 2018 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Optional
+
+
+def run_authorized_view_tutorial(
+ override_values: Optional[Dict[str, str]] = None
+) -> None:
+ # Note to user: This is a group email for testing purposes. Replace with
+ # your own group email address when running this code.
+ analyst_group_email = "example-analyst-group@google.com"
+
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_authorized_view_tutorial]
+ # Create a source dataset
+ # [START bigquery_avt_create_source_dataset]
+ from google.cloud import bigquery
+ from google.cloud.bigquery.enums import EntityTypes
+
+ client = bigquery.Client()
+ source_dataset_id = "github_source_data"
+ source_dataset_id_full = "{}.{}".format(client.project, source_dataset_id)
+
+ # [END bigquery_authorized_view_tutorial]
+ # [END bigquery_avt_create_source_dataset]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ source_dataset_id = override_values.get("source_dataset_id", source_dataset_id)
+ source_dataset_id_full = "{}.{}".format(client.project, source_dataset_id)
+ # [START bigquery_authorized_view_tutorial]
+ # [START bigquery_avt_create_source_dataset]
+
+ source_dataset = bigquery.Dataset(source_dataset_id_full)
+ # Specify the geographic location where the dataset should reside.
+ source_dataset.location = "US"
+ source_dataset = client.create_dataset(source_dataset) # API request
+ # [END bigquery_avt_create_source_dataset]
+
+ # Populate a source table
+ # [START bigquery_avt_create_source_table]
+ source_table_id = "github_contributors"
+ job_config = bigquery.QueryJobConfig()
+ job_config.destination = source_dataset.table(source_table_id)
+ sql = """
+ SELECT commit, author, committer, repo_name
+ FROM `bigquery-public-data.github_repos.commits`
+ LIMIT 1000
+ """
+ client.query_and_wait(
+ sql,
+ # Location must match that of the dataset(s) referenced in the query
+ # and of the destination table.
+ location="US",
+ job_config=job_config,
+ ) # API request - starts the query and waits for query to finish
+ # [END bigquery_avt_create_source_table]
+
+ # Create a separate dataset to store your view
+ # [START bigquery_avt_create_shared_dataset]
+ shared_dataset_id = "shared_views"
+ shared_dataset_id_full = "{}.{}".format(client.project, shared_dataset_id)
+
+ # [END bigquery_authorized_view_tutorial]
+ # [END bigquery_avt_create_shared_dataset]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ shared_dataset_id = override_values.get("shared_dataset_id", shared_dataset_id)
+ shared_dataset_id_full = "{}.{}".format(client.project, shared_dataset_id)
+ # [START bigquery_authorized_view_tutorial]
+ # [START bigquery_avt_create_shared_dataset]
+
+ shared_dataset = bigquery.Dataset(shared_dataset_id_full)
+ shared_dataset.location = "US"
+ shared_dataset = client.create_dataset(shared_dataset) # API request
+ # [END bigquery_avt_create_shared_dataset]
+
+ # Create the view in the new dataset
+ # [START bigquery_avt_create_view]
+ shared_view_id = "github_analyst_view"
+ view = bigquery.Table(shared_dataset.table(shared_view_id))
+ sql_template = """
+ SELECT
+ commit, author.name as author,
+ committer.name as committer, repo_name
+ FROM
+ `{}.{}.{}`
+ """
+ view.view_query = sql_template.format(
+ client.project, source_dataset_id, source_table_id
+ )
+ view = client.create_table(view) # API request
+ # [END bigquery_avt_create_view]
+
+ # Assign access controls to the dataset containing the view
+ # [START bigquery_avt_shared_dataset_access]
+ # analyst_group_email = 'data_analysts@example.com'
+ access_entries = shared_dataset.access_entries
+ access_entries.append(
+ bigquery.AccessEntry("READER", EntityTypes.GROUP_BY_EMAIL, analyst_group_email)
+ )
+ shared_dataset.access_entries = access_entries
+ shared_dataset = client.update_dataset(
+ shared_dataset, ["access_entries"]
+ ) # API request
+ # [END bigquery_avt_shared_dataset_access]
+
+ # Authorize the view to access the source dataset
+ # [START bigquery_avt_source_dataset_access]
+ access_entries = source_dataset.access_entries
+ access_entries.append(
+ bigquery.AccessEntry(None, EntityTypes.VIEW, view.reference.to_api_repr())
+ )
+ source_dataset.access_entries = access_entries
+ source_dataset = client.update_dataset(
+ source_dataset, ["access_entries"]
+ ) # API request
+ # [END bigquery_avt_source_dataset_access]
+ # [END bigquery_authorized_view_tutorial]
+
+
+if __name__ == "__main__":
+ run_authorized_view_tutorial()
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/authorized_view_tutorial_test.py b/testbed/googleapis__python-bigquery/samples/snippets/authorized_view_tutorial_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..04f6312d3316410ef154998b68cc28d0ea0d455f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/authorized_view_tutorial_test.py
@@ -0,0 +1,76 @@
+# Copyright 2018 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Iterator, List
+
+from google.cloud import bigquery
+import pytest
+
+import authorized_view_tutorial # type: ignore
+from conftest import prefixer # type: ignore
+
+
+@pytest.fixture(scope="module")
+def client() -> bigquery.Client:
+ return bigquery.Client()
+
+
+@pytest.fixture
+def datasets_to_delete(client: bigquery.Client) -> Iterator[List[str]]:
+ doomed: List[str] = []
+ yield doomed
+ for item in doomed:
+ client.delete_dataset(item, delete_contents=True, not_found_ok=True)
+
+
+def test_authorized_view_tutorial(
+ client: bigquery.Client, datasets_to_delete: List[str]
+) -> None:
+ override_values = {
+ "source_dataset_id": f"{prefixer.create_prefix()}_authorized_view_tutorial",
+ "shared_dataset_id": f"{prefixer.create_prefix()}_authorized_view_tutorial_shared_views",
+ }
+ source_dataset_ref = "{}.{}".format(
+ client.project, override_values["source_dataset_id"]
+ )
+ shared_dataset_ref = "{}.{}".format(
+ client.project, override_values["shared_dataset_id"]
+ )
+ datasets_to_delete.extend(
+ [override_values["source_dataset_id"], override_values["shared_dataset_id"]]
+ )
+
+ authorized_view_tutorial.run_authorized_view_tutorial(override_values)
+
+ source_dataset = client.get_dataset(source_dataset_ref)
+ shared_dataset = client.get_dataset(shared_dataset_ref)
+ analyst_email = "example-analyst-group@google.com"
+ analyst_entries = [
+ entry
+ for entry in shared_dataset.access_entries
+ if entry.entity_id == analyst_email
+ ]
+ assert len(analyst_entries) == 1
+ assert analyst_entries[0].role == "READER"
+
+ authorized_view_entries = [
+ entry for entry in source_dataset.access_entries if entry.entity_type == "view"
+ ]
+ expected_view_ref = {
+ "projectId": client.project,
+ "datasetId": override_values["shared_dataset_id"],
+ "tableId": "github_analyst_view",
+ }
+ assert len(authorized_view_entries) == 1
+ assert authorized_view_entries[0].entity_id == expected_view_ref
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/client_query.py b/testbed/googleapis__python-bigquery/samples/snippets/client_query.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccae2e8bded067021d1705623a52efceb689e6f5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/client_query.py
@@ -0,0 +1,37 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def client_query() -> None:
+ # [START bigquery_query]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ query = """
+ SELECT name, SUM(number) as total_people
+ FROM `bigquery-public-data.usa_names.usa_1910_2013`
+ WHERE state = 'TX'
+ GROUP BY name, state
+ ORDER BY total_people DESC
+ LIMIT 20
+ """
+ rows = client.query_and_wait(query) # Make an API request.
+
+ print("The query data:")
+ for row in rows:
+ # Row values can be accessed by field name or index.
+ print("name={}, count={}".format(row[0], row["total_people"]))
+ # [END bigquery_query]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/client_query_test.py b/testbed/googleapis__python-bigquery/samples/snippets/client_query_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bc83a2309793bd5f0182dc341194e3f08f8e577
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/client_query_test.py
@@ -0,0 +1,38 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import client_query # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query(capsys: "pytest.CaptureFixture[str]") -> None:
+ client_query.client_query()
+ out, _ = capsys.readouterr()
+ assert "The query data:" in out
+ assert "name=James, count=272793" in out
+
+
+def test_client_query_job_optional(
+ capsys: "pytest.CaptureFixture[str]", monkeypatch: "pytest.MonkeyPatch"
+) -> None:
+ monkeypatch.setenv("QUERY_PREVIEW_ENABLED", "true")
+
+ client_query.client_query()
+ out, _ = capsys.readouterr()
+ assert "The query data:" in out
+ assert "name=James, count=272793" in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/conftest.py b/testbed/googleapis__python-bigquery/samples/snippets/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..f53509d44cdb6a2f524fdc69c79b01e265bb30e0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/conftest.py
@@ -0,0 +1,118 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Iterator
+
+from google.cloud import bigquery
+import pytest
+import test_utils.prefixer
+
+prefixer = test_utils.prefixer.Prefixer("python-bigquery", "samples/snippets")
+
+
+@pytest.fixture(scope="session", autouse=True)
+def cleanup_datasets(bigquery_client: bigquery.Client) -> None:
+ for dataset in bigquery_client.list_datasets():
+ if prefixer.should_cleanup(dataset.dataset_id):
+ bigquery_client.delete_dataset(
+ dataset, delete_contents=True, not_found_ok=True
+ )
+
+
+@pytest.fixture(scope="session")
+def bigquery_client() -> bigquery.Client:
+ bigquery_client = bigquery.Client()
+ return bigquery_client
+
+
+@pytest.fixture(scope="session")
+def project_id(bigquery_client: bigquery.Client) -> str:
+ return bigquery_client.project
+
+
+@pytest.fixture(scope="session")
+def dataset_id(bigquery_client: bigquery.Client, project_id: str) -> Iterator[str]:
+ dataset_id = prefixer.create_prefix()
+ full_dataset_id = f"{project_id}.{dataset_id}"
+ dataset = bigquery.Dataset(full_dataset_id)
+ bigquery_client.create_dataset(dataset)
+ yield dataset_id
+ bigquery_client.delete_dataset(dataset, delete_contents=True, not_found_ok=True)
+
+
+@pytest.fixture
+def table_id(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id: str
+) -> Iterator[str]:
+ table_id = prefixer.create_prefix()
+ full_table_id = f"{project_id}.{dataset_id}.{table_id}"
+ table = bigquery.Table(
+ full_table_id, schema=[bigquery.SchemaField("string_col", "STRING")]
+ )
+ bigquery_client.create_table(table)
+ yield full_table_id
+ bigquery_client.delete_table(table, not_found_ok=True)
+
+
+@pytest.fixture(scope="session")
+def entity_id(bigquery_client: bigquery.Client, dataset_id: str) -> str:
+ return "cloud-developer-relations@google.com"
+
+
+@pytest.fixture(scope="session")
+def dataset_id_us_east1(
+ bigquery_client: bigquery.Client,
+ project_id: str,
+) -> Iterator[str]:
+ dataset_id = prefixer.create_prefix()
+ full_dataset_id = f"{project_id}.{dataset_id}"
+ dataset = bigquery.Dataset(full_dataset_id)
+ dataset.location = "us-east1"
+ bigquery_client.create_dataset(dataset)
+ yield dataset_id
+ bigquery_client.delete_dataset(dataset, delete_contents=True, not_found_ok=True)
+
+
+@pytest.fixture(scope="session")
+def table_id_us_east1(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id_us_east1: str
+) -> Iterator[str]:
+ table_id = prefixer.create_prefix()
+ full_table_id = f"{project_id}.{dataset_id_us_east1}.{table_id}"
+ table = bigquery.Table(
+ full_table_id, schema=[bigquery.SchemaField("string_col", "STRING")]
+ )
+ bigquery_client.create_table(table)
+ yield full_table_id
+ bigquery_client.delete_table(table, not_found_ok=True)
+
+
+@pytest.fixture
+def random_table_id(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id: str
+) -> Iterator[str]:
+ """Create a new table ID each time, so random_table_id can be used as
+ target for load jobs.
+ """
+ random_table_id = prefixer.create_prefix()
+ full_table_id = f"{project_id}.{dataset_id}.{random_table_id}"
+ yield full_table_id
+ bigquery_client.delete_table(full_table_id, not_found_ok=True)
+
+
+@pytest.fixture
+def bigquery_client_patch(
+ monkeypatch: pytest.MonkeyPatch, bigquery_client: bigquery.Client
+) -> None:
+ monkeypatch.setattr(bigquery, "Client", lambda: bigquery_client)
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_iam_policy_test.py b/testbed/googleapis__python-bigquery/samples/snippets/create_iam_policy_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..c41ced2cd36ac7e2bcf67f75dd3e7f30bf6e41a7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_iam_policy_test.py
@@ -0,0 +1,44 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def test_create_iam_policy(table_id: str):
+ your_table_id = table_id
+
+ # [START bigquery_create_iam_policy]
+ from google.cloud import bigquery
+
+ bqclient = bigquery.Client()
+
+ policy = bqclient.get_iam_policy(
+ your_table_id, # e.g. "project.dataset.table"
+ )
+
+ analyst_email = "example-analyst-group@google.com"
+ binding = {
+ "role": "roles/bigquery.dataViewer",
+ "members": {f"group:{analyst_email}"},
+ }
+ policy.bindings.append(binding)
+
+ updated_policy = bqclient.set_iam_policy(
+ your_table_id, # e.g. "project.dataset.table"
+ policy,
+ )
+
+ for binding in updated_policy.bindings:
+ print(repr(binding))
+ # [END bigquery_create_iam_policy]
+
+ assert binding in updated_policy.bindings
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_partitioned_table.py b/testbed/googleapis__python-bigquery/samples/snippets/create_partitioned_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..0277d7d0f29434780739644463490ff96f908468
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_partitioned_table.py
@@ -0,0 +1,45 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def create_partitioned_table(table_id):
+ your_fully_qualified_table_id = table_id
+
+ # [START bigquery_create_table_partitioned]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # Use format "your-project.your_dataset.your_table_name" for table_id
+ table_id = your_fully_qualified_table_id
+ schema = [
+ bigquery.SchemaField("name", "STRING"),
+ bigquery.SchemaField("post_abbr", "STRING"),
+ bigquery.SchemaField("date", "DATE"),
+ ]
+ table = bigquery.Table(table_id, schema=schema)
+ table.time_partitioning = bigquery.TimePartitioning(
+ type_=bigquery.TimePartitioningType.DAY,
+ field="date", # name of column to use for partitioning
+ expiration_ms=1000 * 60 * 60 * 24 * 90,
+ ) # 90 days
+
+ table = client.create_table(table)
+
+ print(
+ f"Created table {table.project}.{table.dataset_id}.{table.table_id}, "
+ f"partitioned on column {table.time_partitioning.field}."
+ )
+ # [END bigquery_create_table_partitioned]
+ return table
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_partitioned_table_test.py b/testbed/googleapis__python-bigquery/samples/snippets/create_partitioned_table_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4d7ec20e5180a3445f2836af86b2047ae8fd410
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_partitioned_table_test.py
@@ -0,0 +1,34 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import create_partitioned_table # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_partitioned_table(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+) -> None:
+ table = create_partitioned_table.create_partitioned_table(random_table_id)
+
+ out, _ = capsys.readouterr()
+ assert "Created" in out
+ assert random_table_id in out
+
+ assert table.time_partitioning.type_ == "DAY"
+ assert table.time_partitioning.field == "date"
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_cmek.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_cmek.py
new file mode 100644
index 0000000000000000000000000000000000000000..011c56d4e8e3ecbc7e16f28c79a06b1a0f3ae603
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_cmek.py
@@ -0,0 +1,46 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def create_table_cmek(table_id: str, kms_key_name: str) -> None:
+ orig_table_id = table_id
+ orig_key_name = kms_key_name
+ # [START bigquery_create_table_cmek]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change table_id to the full name of the table you want to create.
+ table_id = "your-project.your_dataset.your_table_name"
+
+ # Set the encryption key to use for the table.
+ # TODO: Replace this key with a key you have created in Cloud KMS.
+ kms_key_name = "projects/your-project/locations/us/keyRings/test/cryptoKeys/test"
+
+ # [END bigquery_create_table_cmek]
+
+ table_id = orig_table_id
+ kms_key_name = orig_key_name
+
+ # [START bigquery_create_table_cmek]
+ table = bigquery.Table(table_id)
+ table.encryption_configuration = bigquery.EncryptionConfiguration(
+ kms_key_name=kms_key_name
+ )
+ table = client.create_table(table) # API request
+
+ print(f"Created {table_id}.")
+ print(f"Key: {table.encryption_configuration.kms_key_name}.")
+
+ # [END bigquery_create_table_cmek]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_cmek_test.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_cmek_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8626b84cca527cd98efe7016b393fd60ddb5fe0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_cmek_test.py
@@ -0,0 +1,36 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import create_table_cmek # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_table(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+) -> None:
+ kms_key_name = (
+ "projects/cloud-samples-tests/locations/us/keyRings/test/cryptoKeys/test"
+ )
+
+ create_table_cmek.create_table_cmek(random_table_id, kms_key_name)
+
+ out, _ = capsys.readouterr()
+ assert "Created" in out
+ assert random_table_id in out
+ assert kms_key_name in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_data_configuration.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_data_configuration.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbb15d40a27f7e2c5f84443ef6cefce468c54c01
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_data_configuration.py
@@ -0,0 +1,70 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def create_table_external_data_configuration(
+ table_id: str,
+) -> None:
+ """Create a table using an external data source"""
+ orig_table_id = table_id
+ # [START bigquery_query_external_gcs_perm]
+ # [START bigquery_create_table_external_data_configuration]
+ # [START bigquery_create_external_table_definition]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ # TODO(developer): Set table_id to the ID of the table to create.
+ table_id = "your-project.your_dataset.your_table_name"
+ # [END bigquery_create_table_external_data_configuration]
+ # [END bigquery_query_external_gcs_perm]
+ table_id = orig_table_id
+ # [START bigquery_query_external_gcs_perm]
+ # [START bigquery_create_table_external_data_configuration]
+
+ # TODO(developer): Set the external source format of your table.
+ # Note that the set of allowed values for external data sources is
+ # different than the set used for loading data (see :class:`~google.cloud.bigquery.job.SourceFormat`).
+ external_source_format = "AVRO"
+
+ # TODO(developer): Set the source_uris to point to your data in Google Cloud
+ source_uris = [
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/a-twitter.avro",
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/b-twitter.avro",
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/c-twitter.avro",
+ ]
+
+ # Create ExternalConfig object with external source format
+ external_config = bigquery.ExternalConfig(external_source_format)
+ # Set source_uris that point to your data in Google Cloud
+ external_config.source_uris = source_uris
+
+ # TODO(developer) You have the option to set a reference_file_schema_uri, which points to
+ # a reference file for the table schema
+ reference_file_schema_uri = "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/b-twitter.avro"
+
+ external_config.reference_file_schema_uri = reference_file_schema_uri
+ # [END bigquery_create_external_table_definition]
+
+ table = bigquery.Table(table_id)
+ # Set the external data configuration of the table
+ table.external_data_configuration = external_config
+ table = client.create_table(table) # Make an API request.
+
+ print(
+ f"Created table with external source format {table.external_data_configuration.source_format}"
+ )
+ # [END bigquery_create_table_external_data_configuration]
+ # [END bigquery_query_external_gcs_perm]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_data_configuration_test.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_data_configuration_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf81a75f9b7c3317b71b4d825640137a4eb1cf63
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_data_configuration_test.py
@@ -0,0 +1,31 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import create_table_external_data_configuration # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_table_external_data_configuration(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+) -> None:
+ create_table_external_data_configuration.create_table_external_data_configuration(
+ random_table_id
+ )
+ out, _ = capsys.readouterr()
+ assert "Created table with external source format AVRO" in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_hive_partitioned.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_hive_partitioned.py
new file mode 100644
index 0000000000000000000000000000000000000000..aecf8ca4c13099656e2afbb1c49a76a45c7ea34a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_hive_partitioned.py
@@ -0,0 +1,78 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+if typing.TYPE_CHECKING:
+ from google.cloud import bigquery
+
+
+def create_table_external_hive_partitioned(table_id: str) -> "bigquery.Table":
+ original_table_id = table_id
+ # [START bigquery_create_table_external_hivepartitioned]
+ # Demonstrates creating an external table with hive partitioning.
+
+ # TODO(developer): Set table_id to the ID of the table to create.
+ table_id = "your-project.your_dataset.your_table_name"
+
+ # TODO(developer): Set source uri.
+ # Example file:
+ # gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/dt=2020-11-15/file1.parquet
+ uri = "gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/*"
+
+ # TODO(developer): Set source uri prefix.
+ source_uri_prefix = (
+ "gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/"
+ )
+
+ # [END bigquery_create_table_external_hivepartitioned]
+ table_id = original_table_id
+ # [START bigquery_create_table_external_hivepartitioned]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ # Configure the external data source.
+ external_config = bigquery.ExternalConfig("PARQUET")
+ external_config.source_uris = [uri]
+ external_config.autodetect = True
+
+ # Configure partitioning options.
+ hive_partitioning_opts = bigquery.HivePartitioningOptions()
+
+ # The layout of the files in here is compatible with the layout requirements for hive partitioning,
+ # so we can add an optional Hive partitioning configuration to leverage the object paths for deriving
+ # partitioning column information.
+
+ # For more information on how partitions are extracted, see:
+ # https://cloud.google.com/bigquery/docs/hive-partitioned-queries-gcs
+
+ # We have a "/dt=YYYY-MM-DD/" path component in our example files as documented above.
+ # Autolayout will expose this as a column named "dt" of type DATE.
+ hive_partitioning_opts.mode = "AUTO"
+ hive_partitioning_opts.require_partition_filter = True
+ hive_partitioning_opts.source_uri_prefix = source_uri_prefix
+
+ external_config.hive_partitioning = hive_partitioning_opts
+
+ table = bigquery.Table(table_id)
+ table.external_data_configuration = external_config
+
+ table = client.create_table(table) # Make an API request.
+ print(
+ "Created table {}.{}.{}".format(table.project, table.dataset_id, table.table_id)
+ )
+ # [END bigquery_create_table_external_hivepartitioned]
+ return table
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_hive_partitioned_test.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_hive_partitioned_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b8cbe1c357975e96893df055510e5c712f8a8f4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_external_hive_partitioned_test.py
@@ -0,0 +1,40 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import create_table_external_hive_partitioned # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_table_external_hive_partitioned(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ table = (
+ create_table_external_hive_partitioned.create_table_external_hive_partitioned(
+ random_table_id
+ )
+ )
+
+ out, _ = capsys.readouterr()
+ hive_partioning = table.external_data_configuration.hive_partitioning
+ assert "Created table {}".format(random_table_id) in out
+ assert (
+ hive_partioning.source_uri_prefix
+ == "gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/"
+ )
+ assert hive_partioning.require_partition_filter is True
+ assert hive_partioning.mode == "AUTO"
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_schema_from_json.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_schema_from_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..b866e2ebe64bd64e83ddfa2ae27f97bdcc07913f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_schema_from_json.py
@@ -0,0 +1,42 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pathlib
+
+
+def create_table(table_id: str) -> None:
+ orig_table_id = table_id
+ current_directory = pathlib.Path(__file__).parent
+ orig_schema_path = str(current_directory / "schema.json")
+ # [START bigquery_schema_file_create]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change table_id to the full name of the table you want to create.
+ table_id = "your-project.your_dataset.your_table_name"
+ # TODO(dev): Change schema_path variable to the path of your schema file.
+ schema_path = "path/to/schema.json"
+ # [END bigquery_schema_file_create]
+ table_id = orig_table_id
+ schema_path = orig_schema_path
+
+ # [START bigquery_schema_file_create]
+ # To load a schema file use the schema_from_json method.
+ schema = client.schema_from_json(schema_path)
+
+ table = bigquery.Table(table_id, schema=schema)
+ table = client.create_table(table) # API request
+ print(f"Created table {table_id}.")
+ # [END bigquery_schema_file_create]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_schema_from_json_test.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_schema_from_json_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e725d3ccfd681ccba2c73a0dae1219a146ecadfa
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_schema_from_json_test.py
@@ -0,0 +1,31 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import create_table_schema_from_json # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_table(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+) -> None:
+ create_table_schema_from_json.create_table(random_table_id)
+
+ out, _ = capsys.readouterr()
+ assert "Created" in out
+ assert random_table_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_snapshot.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_snapshot.py
new file mode 100644
index 0000000000000000000000000000000000000000..846495e5cfe9b968f1c91cb0213df8938af5f9ba
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_snapshot.py
@@ -0,0 +1,43 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def create_table_snapshot(source_table_id: str, snapshot_table_id: str) -> None:
+ original_source_table_id = source_table_id
+ original_snapshot_table_id = snapshot_table_id
+ # [START bigquery_create_table_snapshot]
+ from google.cloud import bigquery
+
+ # TODO(developer): Set table_id to the ID of the table to create.
+ source_table_id = "your-project.your_dataset.your_table_name"
+ snapshot_table_id = "your-project.your_dataset.snapshot_table_name"
+ # [END bigquery_create_table_snapshot]
+ source_table_id = original_source_table_id
+ snapshot_table_id = original_snapshot_table_id
+ # [START bigquery_create_table_snapshot]
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+ copy_config = bigquery.CopyJobConfig()
+ copy_config.operation_type = bigquery.OperationType.SNAPSHOT
+
+ copy_job = client.copy_table(
+ sources=source_table_id,
+ destination=snapshot_table_id,
+ job_config=copy_config,
+ )
+ copy_job.result()
+
+ print("Created table snapshot {}".format(snapshot_table_id))
+ # [END bigquery_create_table_snapshot]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/create_table_snapshot_test.py b/testbed/googleapis__python-bigquery/samples/snippets/create_table_snapshot_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..17ef24d26c50d6186bdbaa69f79488738469f8f4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/create_table_snapshot_test.py
@@ -0,0 +1,32 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import create_table_snapshot # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_table_snapshot(
+ capsys: "pytest.CaptureFixture[str]",
+ table_id: str,
+ random_table_id: str,
+) -> None:
+ create_table_snapshot.create_table_snapshot(table_id, random_table_id)
+
+ out, _ = capsys.readouterr()
+
+ assert "Created table snapshot {}".format(random_table_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/dataset_access_test.py b/testbed/googleapis__python-bigquery/samples/snippets/dataset_access_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3a53b084458c88122639775c361391c43ef5720
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/dataset_access_test.py
@@ -0,0 +1,59 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import revoke_dataset_access # type: ignore
+import update_dataset_access # type: ignore
+
+if typing.TYPE_CHECKING:
+ from google.cloud import bigquery
+ import pytest
+
+
+def test_dataset_access_permissions(
+ capsys: "pytest.CaptureFixture[str]",
+ dataset_id: str,
+ entity_id: str,
+ bigquery_client: "bigquery.Client",
+) -> None:
+ original_dataset = bigquery_client.get_dataset(dataset_id)
+ update_dataset_access.update_dataset_access(dataset_id, entity_id)
+ full_dataset_id = "{}.{}".format(
+ original_dataset.project, original_dataset.dataset_id
+ )
+
+ out, err = capsys.readouterr()
+ assert (
+ "Updated dataset '{}' with modified user permissions.".format(full_dataset_id)
+ in out
+ )
+
+ updated_dataset = bigquery_client.get_dataset(dataset_id)
+ updated_dataset_entries = list(updated_dataset.access_entries)
+ updated_dataset_entity_ids = {entry.entity_id for entry in updated_dataset_entries}
+ assert entity_id in updated_dataset_entity_ids
+ revoke_dataset_access.revoke_dataset_access(dataset_id, entity_id)
+ revoked_dataset = bigquery_client.get_dataset(dataset_id)
+ revoked_dataset_entries = list(revoked_dataset.access_entries)
+
+ full_dataset_id = f"{updated_dataset.project}.{updated_dataset.dataset_id}"
+ out, err = capsys.readouterr()
+ assert (
+ f"Revoked dataset access for '{entity_id}' to ' dataset '{full_dataset_id}.'"
+ in out
+ )
+ assert len(revoked_dataset_entries) == len(updated_dataset_entries) - 1
+ revoked_dataset_entity_ids = {entry.entity_id for entry in revoked_dataset_entries}
+ assert entity_id not in revoked_dataset_entity_ids
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/delete_job.py b/testbed/googleapis__python-bigquery/samples/snippets/delete_job.py
new file mode 100644
index 0000000000000000000000000000000000000000..2aeb53849961934d67ff16f110613110a40a006c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/delete_job.py
@@ -0,0 +1,44 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def delete_job_metadata(job_id: str, location: str) -> None:
+ orig_job_id = job_id
+ orig_location = location
+ # [START bigquery_delete_job]
+ from google.api_core import exceptions
+ from google.cloud import bigquery
+
+ # TODO(developer): Set the job ID to the ID of the job whose metadata you
+ # wish to delete.
+ job_id = "abcd-efgh-ijkl-mnop"
+
+ # TODO(developer): Set the location to the region or multi-region
+ # containing the job.
+ location = "us-east1"
+
+ # [END bigquery_delete_job]
+ job_id = orig_job_id
+ location = orig_location
+
+ # [START bigquery_delete_job]
+ client = bigquery.Client()
+
+ client.delete_job_metadata(job_id, location=location)
+
+ try:
+ client.get_job(job_id, location=location)
+ except exceptions.NotFound:
+ print(f"Job metadata for job {location}:{job_id} was deleted.")
+ # [END bigquery_delete_job]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/delete_job_test.py b/testbed/googleapis__python-bigquery/samples/snippets/delete_job_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..88eeae1ed2af21afebc8cd89aee2de745283357b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/delete_job_test.py
@@ -0,0 +1,41 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+import delete_job # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_delete_job_metadata(
+ capsys: "pytest.CaptureFixture[str]",
+ bigquery_client: bigquery.Client,
+ table_id_us_east1: str,
+) -> None:
+ query_job: bigquery.QueryJob = bigquery_client.query(
+ f"SELECT COUNT(*) FROM `{table_id_us_east1}`",
+ location="us-east1",
+ )
+ query_job.result()
+ assert query_job.job_id is not None
+
+ delete_job.delete_job_metadata(query_job.job_id, "us-east1")
+
+ out, _ = capsys.readouterr()
+ assert "deleted" in out
+ assert f"us-east1:{query_job.job_id}" in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/delete_label_table.py b/testbed/googleapis__python-bigquery/samples/snippets/delete_label_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e9eaaf8f78cbbb31e07e208e6d0095bbfc27290
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/delete_label_table.py
@@ -0,0 +1,43 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud import bigquery
+
+
+def delete_label_table(table_id: str, label_key: str) -> bigquery.Table:
+ orig_table_id = table_id
+ orig_label_key = label_key
+ # [START bigquery_delete_label_table]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change table_id to the full name of the table you wish to delete from.
+ table_id = "your-project.your_dataset.your_table_name"
+ # TODO(dev): Change label_key to the name of the label you want to remove.
+ label_key = "color"
+ # [END bigquery_delete_label_table]
+ table_id = orig_table_id
+ label_key = orig_label_key
+ # [START bigquery_delete_label_table]
+ table = client.get_table(table_id) # API request
+
+ # To delete a label from a table, set its value to None
+ table.labels[label_key] = None
+
+ table = client.update_table(table, ["labels"]) # API request
+
+ print(f"Deleted label '{label_key}' from {table_id}.")
+ # [END bigquery_delete_label_table]
+ return table
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/delete_label_table_test.py b/testbed/googleapis__python-bigquery/samples/snippets/delete_label_table_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..01e538ae30160a064a91255721787f23c4324cba
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/delete_label_table_test.py
@@ -0,0 +1,33 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import delete_label_table # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_delete_label_table(
+ capsys: "pytest.CaptureFixture[str]",
+ table_id: str,
+) -> None:
+ table = delete_label_table.delete_label_table(table_id, "color")
+
+ out, _ = capsys.readouterr()
+ assert "Deleted" in out
+ assert "color" in out
+ assert table_id in out
+ assert table.labels is None or "color" not in table.labels
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/get_table_labels.py b/testbed/googleapis__python-bigquery/samples/snippets/get_table_labels.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cfbd4ee2d21c6204a8d2e857ce03b3728a47690
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/get_table_labels.py
@@ -0,0 +1,39 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def get_table_labels(table_id: str) -> None:
+ orig_table_id = table_id
+ # [START bigquery_get_table_labels]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change table_id to the full name of the table you want to create.
+ table_id = "your-project.your_dataset.your_table_name"
+
+ # [END bigquery_get_table_labels]
+ table_id = orig_table_id
+
+ # [START bigquery_get_table_labels]
+ table = client.get_table(table_id) # API Request
+
+ # View table labels
+ print(f"Table ID: {table_id}.")
+ if table.labels:
+ for label, value in table.labels.items():
+ print(f"\t{label}: {value}")
+ else:
+ print("\tTable has no labels defined.")
+ # [END bigquery_get_table_labels]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/get_table_labels_test.py b/testbed/googleapis__python-bigquery/samples/snippets/get_table_labels_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e910d6a655b869f29654dc611f45dcc56ffbc37c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/get_table_labels_test.py
@@ -0,0 +1,49 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+import get_table_labels # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_get_table_labels(
+ capsys: "pytest.CaptureFixture[str]",
+ table_id: str,
+ bigquery_client: bigquery.Client,
+) -> None:
+ table = bigquery_client.get_table(table_id)
+ table.labels = {"color": "green"}
+ bigquery_client.update_table(table, ["labels"])
+
+ get_table_labels.get_table_labels(table_id)
+
+ out, _ = capsys.readouterr()
+ assert table_id in out
+ assert "color" in out
+
+
+def test_get_table_labels_no_label(
+ capsys: "pytest.CaptureFixture[str]",
+ table_id: str,
+) -> None:
+ get_table_labels.get_table_labels(table_id)
+
+ out, _ = capsys.readouterr()
+ assert "no labels defined" in out
+ assert table_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/get_table_make_schema.py b/testbed/googleapis__python-bigquery/samples/snippets/get_table_make_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..f870b42dee011bf80daf8a8ccb16a71e06f6f019
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/get_table_make_schema.py
@@ -0,0 +1,47 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def get_table_make_schema(table_id: str, schema_path: str) -> None:
+ orig_table_id = table_id
+ orig_schema_path = schema_path
+ # [START bigquery_schema_file_get]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change the table_id variable to the full name of the
+ # table you want to get schema from.
+ table_id = "your-project.your_dataset.your_table_name"
+
+ # TODO(dev): Change schema_path variable to the path
+ # of your schema file.
+ schema_path = "path/to/schema.json"
+ # [END bigquery_schema_file_get]
+ table_id = orig_table_id
+ schema_path = orig_schema_path
+ # [START bigquery_schema_file_get]
+ table = client.get_table(table_id) # Make an API request.
+
+ # Write a schema file to schema_path with the schema_to_json method.
+ client.schema_to_json(table.schema, schema_path)
+
+ with open(schema_path, "r", encoding="utf-8") as schema_file:
+ schema_contents = schema_file.read()
+
+ # View table properties
+ print(f"Got table '{table.project}.{table.dataset_id}.{table.table_id}'.")
+ print(f"Table schema: {schema_contents}")
+
+ # [END bigquery_schema_file_get]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/get_table_make_schema_test.py b/testbed/googleapis__python-bigquery/samples/snippets/get_table_make_schema_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1a1623bb24ccc558462d6370626dd01fe124cd6
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/get_table_make_schema_test.py
@@ -0,0 +1,36 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import get_table_make_schema # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pathlib
+
+ import pytest
+
+
+def test_get_table_make_schema(
+ capsys: "pytest.CaptureFixture[str]",
+ table_id: str,
+ tmp_path: "pathlib.Path",
+) -> None:
+ schema_path = str(tmp_path / "test_schema.json")
+
+ get_table_make_schema.get_table_make_schema(table_id, schema_path)
+
+ out, _ = capsys.readouterr()
+ assert "Got table" in out
+ assert table_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/label_table.py b/testbed/googleapis__python-bigquery/samples/snippets/label_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fce08d62afe40ce62a685a834e19bf64cf28c4d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/label_table.py
@@ -0,0 +1,37 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def label_table(table_id: str) -> None:
+ orig_table_id = table_id
+ # [START bigquery_label_table]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change table_id to the full name of the table you want to create.
+ table_id = "your-project.your_dataset.your_table_name"
+
+ # [END bigquery_label_table]
+ table_id = orig_table_id
+ # [START bigquery_label_table]
+ table = client.get_table(table_id) # API request
+
+ labels = {"color": "green"}
+ table.labels = labels
+
+ table = client.update_table(table, ["labels"]) # API request
+
+ print(f"Added {table.labels} to {table_id}.")
+ # [END bigquery_label_table]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/label_table_test.py b/testbed/googleapis__python-bigquery/samples/snippets/label_table_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..49f5406ab4afc8d6bde86da6755b19d31c81786a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/label_table_test.py
@@ -0,0 +1,31 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import label_table # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_label_table(
+ capsys: "pytest.CaptureFixture[str]",
+ table_id: str,
+) -> None:
+ label_table.label_table(table_id)
+
+ out, _ = capsys.readouterr()
+ assert "color" in out
+ assert table_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/load_table_schema_from_json.py b/testbed/googleapis__python-bigquery/samples/snippets/load_table_schema_from_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f1f854301d5efc0b9c3cb609db7c5354db73ff0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/load_table_schema_from_json.py
@@ -0,0 +1,60 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pathlib
+
+
+def load_table(table_id: str) -> None:
+ orig_uri = "gs://cloud-samples-data/bigquery/us-states/us-states.csv"
+ orig_table_id = table_id
+ current_directory = pathlib.Path(__file__).parent
+ orig_schema_path = str(current_directory / "schema_us_states.json")
+ # [START bigquery_schema_file_load]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change uri variable to the path of your data file.
+ uri = "gs://your-bucket/path/to/your-file.csv"
+ # TODO(dev): Change table_id to the full name of the table you want to create.
+ table_id = "your-project.your_dataset.your_table"
+ # TODO(dev): Change schema_path variable to the path of your schema file.
+ schema_path = "path/to/schema.json"
+ # [END bigquery_schema_file_load]
+ uri = orig_uri
+ table_id = orig_table_id
+ schema_path = orig_schema_path
+ # [START bigquery_schema_file_load]
+ # To load a schema file use the schema_from_json method.
+ schema = client.schema_from_json(schema_path)
+
+ job_config = bigquery.LoadJobConfig(
+ # To use the schema you loaded pass it into the
+ # LoadJobConfig constructor.
+ schema=schema,
+ skip_leading_rows=1,
+ )
+
+ # Pass the job_config object to the load_table_from_file,
+ # load_table_from_json, or load_table_from_uri method
+ # to use the schema on a new table.
+ load_job = client.load_table_from_uri(
+ uri, table_id, job_config=job_config
+ ) # Make an API request.
+
+ load_job.result() # Waits for the job to complete.
+
+ destination_table = client.get_table(table_id) # Make an API request.
+ print(f"Loaded {destination_table.num_rows} rows to {table_id}.")
+ # [END bigquery_schema_file_load]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/load_table_schema_from_json_test.py b/testbed/googleapis__python-bigquery/samples/snippets/load_table_schema_from_json_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..745793cd7569811cf3b83a4d100c0855ca180ec3
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/load_table_schema_from_json_test.py
@@ -0,0 +1,31 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import load_table_schema_from_json # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+) -> None:
+ load_table_schema_from_json.load_table(random_table_id)
+
+ out, _ = capsys.readouterr()
+ assert "Loaded" in out
+ assert random_table_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/load_table_uri_firestore.py b/testbed/googleapis__python-bigquery/samples/snippets/load_table_uri_firestore.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c33fd0ff5760a2fc0c7a87c68c2399f2732d1c9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/load_table_uri_firestore.py
@@ -0,0 +1,55 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def load_table_uri_firestore(table_id: str) -> None:
+ orig_table_id = table_id
+ # [START bigquery_load_table_gcs_firestore]
+ # TODO(developer): Set table_id to the ID of the table to create.
+ table_id = "your-project.your_dataset.your_table_name"
+
+ # TODO(developer): Set uri to the path of the kind export metadata
+ uri = (
+ "gs://cloud-samples-data/bigquery/us-states"
+ "/2021-07-02T16:04:48_70344/all_namespaces/kind_us-states"
+ "/all_namespaces_kind_us-states.export_metadata"
+ )
+
+ # TODO(developer): Set projection_fields to a list of document properties
+ # to import. Leave unset or set to `None` for all fields.
+ projection_fields = ["name", "post_abbr"]
+
+ # [END bigquery_load_table_gcs_firestore]
+ table_id = orig_table_id
+
+ # [START bigquery_load_table_gcs_firestore]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ job_config = bigquery.LoadJobConfig(
+ source_format=bigquery.SourceFormat.DATASTORE_BACKUP,
+ projection_fields=projection_fields,
+ )
+
+ load_job = client.load_table_from_uri(
+ uri, table_id, job_config=job_config
+ ) # Make an API request.
+
+ load_job.result() # Waits for the job to complete.
+
+ destination_table = client.get_table(table_id)
+ print("Loaded {} rows.".format(destination_table.num_rows))
+ # [END bigquery_load_table_gcs_firestore]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/load_table_uri_firestore_test.py b/testbed/googleapis__python-bigquery/samples/snippets/load_table_uri_firestore_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e19378a04a6cb561d4f8207f7941973fd3e7eb21
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/load_table_uri_firestore_test.py
@@ -0,0 +1,28 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import load_table_uri_firestore # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_firestore(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_firestore.load_table_uri_firestore(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/manage_job_cancel.py b/testbed/googleapis__python-bigquery/samples/snippets/manage_job_cancel.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cbdef4501e9eefee55eccf01e7bee24d40a5553
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/manage_job_cancel.py
@@ -0,0 +1,28 @@
+# Copyright 2016-2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START bigquery_cancel_job]
+from google.cloud import bigquery
+
+
+def cancel_job(
+ client: bigquery.Client,
+ location: str = "us",
+ job_id: str = "abcd-efgh-ijkl-mnop",
+) -> None:
+ job = client.cancel_job(job_id, location=location)
+ print(f"{job.location}:{job.job_id} cancelled")
+
+
+# [END bigquery_cancel_job]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/manage_job_get.py b/testbed/googleapis__python-bigquery/samples/snippets/manage_job_get.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca7ffc0c9bf181ee79b2f693267f62a831516e87
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/manage_job_get.py
@@ -0,0 +1,35 @@
+# Copyright 2016-2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START bigquery_get_job]
+from google.cloud import bigquery
+
+
+def get_job(
+ client: bigquery.Client,
+ location: str = "us",
+ job_id: str = "abcd-efgh-ijkl-mnop",
+) -> None:
+ job = client.get_job(job_id, location=location)
+
+ # All job classes have "location" and "job_id" string properties.
+ # Use these properties for job operations such as "cancel_job" and
+ # "delete_job".
+ print(f"{job.location}:{job.job_id}")
+ print(f"Type: {job.job_type}")
+ print(f"State: {job.state}")
+ print(f"Created: {job.created.isoformat()}")
+
+
+# [END bigquery_get_job]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/manage_job_test.py b/testbed/googleapis__python-bigquery/samples/snippets/manage_job_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ef4be2e02a058155d9205a86b36ff1f25e67149
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/manage_job_test.py
@@ -0,0 +1,39 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud import bigquery
+import pytest
+
+import manage_job_cancel # type: ignore
+import manage_job_get # type: ignore
+
+
+def test_manage_job(capsys: pytest.CaptureFixture[str]) -> None:
+ client = bigquery.Client()
+ sql = """
+ SELECT corpus
+ FROM `bigquery-public-data.samples.shakespeare`
+ GROUP BY corpus;
+ """
+ location = "us"
+ job = client.query(sql, location=location)
+
+ manage_job_cancel.cancel_job(client, location=location, job_id=job.job_id)
+ out, _ = capsys.readouterr()
+ assert f"{job.location}:{job.job_id} cancelled" in out
+
+ manage_job_get.get_job(client, location=location, job_id=job.job_id)
+ out, _ = capsys.readouterr()
+ assert f"{job.location}:{job.job_id}" in out
+ assert "Type: query" in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/materialized_view.py b/testbed/googleapis__python-bigquery/samples/snippets/materialized_view.py
new file mode 100644
index 0000000000000000000000000000000000000000..a47ee5b81a65c4cf6713960edeae589f14a737be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/materialized_view.py
@@ -0,0 +1,106 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+from typing import Dict, Optional
+
+if typing.TYPE_CHECKING:
+ from google.cloud import bigquery
+
+
+def create_materialized_view(
+ override_values: Optional[Dict[str, str]] = None
+) -> "bigquery.Table":
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_create_materialized_view]
+ from google.cloud import bigquery
+
+ bigquery_client = bigquery.Client()
+
+ view_id = "my-project.my_dataset.my_materialized_view"
+ base_table_id = "my-project.my_dataset.my_base_table"
+ # [END bigquery_create_materialized_view]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ view_id = override_values.get("view_id", view_id)
+ base_table_id = override_values.get("base_table_id", base_table_id)
+ # [START bigquery_create_materialized_view]
+ view = bigquery.Table(view_id)
+ view.mview_query = f"""
+ SELECT product_id, SUM(clicks) AS sum_clicks
+ FROM `{base_table_id}`
+ GROUP BY 1
+ """
+
+ # Make an API request to create the materialized view.
+ view = bigquery_client.create_table(view)
+ print(f"Created {view.table_type}: {str(view.reference)}")
+ # [END bigquery_create_materialized_view]
+ return view
+
+
+def update_materialized_view(
+ override_values: Optional[Dict[str, str]] = None
+) -> "bigquery.Table":
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_update_materialized_view]
+ import datetime
+
+ from google.cloud import bigquery
+
+ bigquery_client = bigquery.Client()
+
+ view_id = "my-project.my_dataset.my_materialized_view"
+ # [END bigquery_update_materialized_view]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ view_id = override_values.get("view_id", view_id)
+ # [START bigquery_update_materialized_view]
+ view = bigquery.Table(view_id)
+ view.mview_enable_refresh = True
+ view.mview_refresh_interval = datetime.timedelta(hours=1)
+
+ # Make an API request to update the materialized view.
+ view = bigquery_client.update_table(
+ view,
+ # Pass in a list of any fields you need to modify.
+ ["mview_enable_refresh", "mview_refresh_interval"],
+ )
+ print(f"Updated {view.table_type}: {str(view.reference)}")
+ # [END bigquery_update_materialized_view]
+ return view
+
+
+def delete_materialized_view(override_values: Optional[Dict[str, str]] = None) -> None:
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_delete_materialized_view]
+ from google.cloud import bigquery
+
+ bigquery_client = bigquery.Client()
+
+ view_id = "my-project.my_dataset.my_materialized_view"
+ # [END bigquery_delete_materialized_view]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ view_id = override_values.get("view_id", view_id)
+ # [START bigquery_delete_materialized_view]
+ # Make an API request to delete the materialized view.
+ bigquery_client.delete_table(view_id)
+ # [END bigquery_delete_materialized_view]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/materialized_view_test.py b/testbed/googleapis__python-bigquery/samples/snippets/materialized_view_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b464af6f332d9478f4be011bf3ec3ce09e4c18f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/materialized_view_test.py
@@ -0,0 +1,96 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+from typing import Iterator
+import uuid
+
+from google.api_core import exceptions
+from google.cloud import bigquery
+import pytest
+
+import materialized_view # type: ignore
+from conftest import prefixer # type: ignore
+
+
+def temp_suffix() -> str:
+ now = datetime.datetime.now()
+ return f"{now.strftime('%Y%m%d%H%M%S')}_{uuid.uuid4().hex[:8]}"
+
+
+@pytest.fixture(autouse=True)
+def bigquery_client_patch(
+ monkeypatch: pytest.MonkeyPatch, bigquery_client: bigquery.Client
+) -> None:
+ monkeypatch.setattr(bigquery, "Client", lambda: bigquery_client)
+
+
+@pytest.fixture(scope="module")
+def dataset_id(bigquery_client: bigquery.Client) -> Iterator[str]:
+ dataset_id = f"{prefixer.create_prefix()}_materialized_view"
+ bigquery_client.create_dataset(dataset_id)
+ yield dataset_id
+ bigquery_client.delete_dataset(dataset_id, delete_contents=True)
+
+
+@pytest.fixture(scope="module")
+def base_table_id(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id: str
+) -> Iterator[str]:
+ base_table_id = f"{project_id}.{dataset_id}.base_{temp_suffix()}"
+ # Schema from materialized views guide:
+ # https://cloud.google.com/bigquery/docs/materialized-views#create
+ base_table = bigquery.Table(base_table_id)
+ base_table.schema = [
+ bigquery.SchemaField("product_id", bigquery.SqlTypeNames.INT64),
+ bigquery.SchemaField("clicks", bigquery.SqlTypeNames.INT64),
+ ]
+ bigquery_client.create_table(base_table)
+ yield base_table_id
+ bigquery_client.delete_table(base_table_id)
+
+
+@pytest.fixture(scope="module")
+def view_id(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id: str
+) -> Iterator[str]:
+ view_id = f"{project_id}.{dataset_id}.mview_{temp_suffix()}"
+ yield view_id
+ bigquery_client.delete_table(view_id, not_found_ok=True)
+
+
+def test_materialized_view(
+ capsys: pytest.CaptureFixture[str],
+ bigquery_client: bigquery.Client,
+ base_table_id: str,
+ view_id: str,
+) -> None:
+ override_values = {
+ "base_table_id": base_table_id,
+ "view_id": view_id,
+ }
+ view = materialized_view.create_materialized_view(override_values)
+ assert base_table_id in view.mview_query
+ out, _ = capsys.readouterr()
+ assert view_id in out
+
+ view = materialized_view.update_materialized_view(override_values)
+ assert view.mview_enable_refresh
+ assert view.mview_refresh_interval == datetime.timedelta(hours=1)
+ out, _ = capsys.readouterr()
+ assert view_id in out
+
+ materialized_view.delete_materialized_view(override_values)
+ with pytest.raises(exceptions.NotFound):
+ bigquery_client.get_table(view_id)
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/mypy.ini b/testbed/googleapis__python-bigquery/samples/snippets/mypy.ini
new file mode 100644
index 0000000000000000000000000000000000000000..3cc4b8965a685115128493476da439950aae91b5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
+; We require type annotations in all samples.
+strict = True
+exclude = noxfile\.py
+warn_unused_configs = True
+
+[mypy-google.auth,google.oauth2,google_auth_oauthlib,IPython.*,test_utils.*]
+ignore_missing_imports = True
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/natality_tutorial.py b/testbed/googleapis__python-bigquery/samples/snippets/natality_tutorial.py
new file mode 100644
index 0000000000000000000000000000000000000000..df9fc15bed49698e1f281dfa2b3fc29222797788
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/natality_tutorial.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+
+# Copyright 2018 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Optional
+
+
+def run_natality_tutorial(override_values: Optional[Dict[str, str]] = None) -> None:
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_query_natality_tutorial]
+ """Create a Google BigQuery linear regression input table.
+
+ In the code below, the following actions are taken:
+ * A new dataset is created "natality_regression."
+ * A query is run against the public dataset,
+ bigquery-public-data.samples.natality, selecting only the data of
+ interest to the regression, the output of which is stored in a new
+ "regression_input" table.
+ * The output table is moved over the wire to the user's default project via
+ the built-in BigQuery Connector for Spark that bridges BigQuery and
+ Cloud Dataproc.
+ """
+
+ from google.cloud import bigquery
+
+ # Create a new Google BigQuery client using Google Cloud Platform project
+ # defaults.
+ client = bigquery.Client()
+
+ # Prepare a reference to a new dataset for storing the query results.
+ dataset_id = "natality_regression"
+ dataset_id_full = f"{client.project}.{dataset_id}"
+ # [END bigquery_query_natality_tutorial]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ dataset_id = override_values.get("dataset_id", dataset_id)
+ dataset_id_full = f"{client.project}.{dataset_id}"
+ # [START bigquery_query_natality_tutorial]
+
+ dataset = bigquery.Dataset(dataset_id_full)
+
+ # Create the new BigQuery dataset.
+ dataset = client.create_dataset(dataset)
+
+ # Configure the query job.
+ job_config = bigquery.QueryJobConfig()
+
+ # Set the destination table to where you want to store query results.
+ # As of google-cloud-bigquery 1.11.0, a fully qualified table ID can be
+ # used in place of a TableReference.
+ job_config.destination = f"{dataset_id_full}.regression_input"
+
+ # Set up a query in Standard SQL, which is the default for the BigQuery
+ # Python client library.
+ # The query selects the fields of interest.
+ query = """
+ SELECT
+ weight_pounds, mother_age, father_age, gestation_weeks,
+ weight_gain_pounds, apgar_5min
+ FROM
+ `bigquery-public-data.samples.natality`
+ WHERE
+ weight_pounds IS NOT NULL
+ AND mother_age IS NOT NULL
+ AND father_age IS NOT NULL
+ AND gestation_weeks IS NOT NULL
+ AND weight_gain_pounds IS NOT NULL
+ AND apgar_5min IS NOT NULL
+ """
+
+ # Run the query.
+ client.query_and_wait(query, job_config=job_config) # Waits for the query to finish
+ # [END bigquery_query_natality_tutorial]
+
+
+if __name__ == "__main__":
+ run_natality_tutorial()
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/natality_tutorial_test.py b/testbed/googleapis__python-bigquery/samples/snippets/natality_tutorial_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..603d142f25a6aade595f607ce4fb24959fe969a3
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/natality_tutorial_test.py
@@ -0,0 +1,49 @@
+# Copyright 2018 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Iterator, List
+
+from google.cloud import bigquery
+import pytest
+
+import natality_tutorial # type: ignore
+from conftest import prefixer # type: ignore
+
+
+@pytest.fixture(scope="module")
+def client() -> bigquery.Client:
+ return bigquery.Client()
+
+
+@pytest.fixture
+def datasets_to_delete(client: bigquery.Client) -> Iterator[List[str]]:
+ doomed: List[str] = []
+ yield doomed
+ for item in doomed:
+ client.delete_dataset(item, delete_contents=True)
+
+
+def test_natality_tutorial(
+ client: bigquery.Client, datasets_to_delete: List[str]
+) -> None:
+ override_values = {"dataset_id": f"{prefixer.create_prefix()}_natality_tutorial"}
+ datasets_to_delete.append(override_values["dataset_id"])
+
+ natality_tutorial.run_natality_tutorial(override_values)
+
+ table_ref = "{}.{}.{}".format(
+ client.project, override_values["dataset_id"], "regression_input"
+ )
+ table = client.get_table(table_ref)
+ assert table.num_rows > 0
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/nested_repeated_schema.py b/testbed/googleapis__python-bigquery/samples/snippets/nested_repeated_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d55860ccec3ca0003b532b9665aebe018695c13
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/nested_repeated_schema.py
@@ -0,0 +1,54 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def nested_schema(table_id: str) -> None:
+ orig_table_id = table_id
+ # [START bigquery_nested_repeated_schema]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change table_id to the full name of the table you want to create.
+ table_id = "your-project.your_dataset.your_table_name"
+
+ schema = [
+ bigquery.SchemaField("id", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("first_name", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("last_name", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("dob", "DATE", mode="NULLABLE"),
+ bigquery.SchemaField(
+ "addresses",
+ "RECORD",
+ mode="REPEATED",
+ fields=[
+ bigquery.SchemaField("status", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("address", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("city", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("state", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("zip", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("numberOfYears", "STRING", mode="NULLABLE"),
+ ],
+ ),
+ ]
+ # [END bigquery_nested_repeated_schema]
+
+ table_id = orig_table_id
+
+ # [START bigquery_nested_repeated_schema]
+ table = bigquery.Table(table_id, schema=schema)
+ table = client.create_table(table) # API request
+
+ print(f"Created table {table.project}.{table.dataset_id}.{table.table_id}.")
+ # [END bigquery_nested_repeated_schema]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/nested_repeated_schema_test.py b/testbed/googleapis__python-bigquery/samples/snippets/nested_repeated_schema_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..67815dcf6ea69ee156a6c6a5562fab7e9c9aeef1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/nested_repeated_schema_test.py
@@ -0,0 +1,31 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import nested_repeated_schema # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_table(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+) -> None:
+ nested_repeated_schema.nested_schema(random_table_id)
+
+ out, _ = capsys.readouterr()
+ assert "Created" in out
+ assert random_table_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/noxfile.py b/testbed/googleapis__python-bigquery/samples/snippets/noxfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7135946fd5e16ec56529107714432b2e42a84d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/noxfile.py
@@ -0,0 +1,293 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import glob
+import os
+from pathlib import Path
+import sys
+from typing import Callable, Dict, Optional
+
+import nox
+
+
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# DO NOT EDIT THIS FILE EVER!
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+
+BLACK_VERSION = "black==22.3.0"
+ISORT_VERSION = "isort==5.10.1"
+
+# Copy `noxfile_config.py` to your directory and modify it instead.
+
+# `TEST_CONFIG` dict is a configuration hook that allows users to
+# modify the test configurations. The values here should be in sync
+# with `noxfile_config.py`. Users will copy `noxfile_config.py` into
+# their directory and modify it.
+
+TEST_CONFIG = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": False,
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
+
+
+try:
+ # Ensure we can import noxfile_config in the project's directory.
+ sys.path.append(".")
+ from noxfile_config import TEST_CONFIG_OVERRIDE
+except ImportError as e:
+ print("No user noxfile_config found: detail: {}".format(e))
+ TEST_CONFIG_OVERRIDE = {}
+
+# Update the TEST_CONFIG with the user supplied values.
+TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
+
+
+def get_pytest_env_vars() -> Dict[str, str]:
+ """Returns a dict for pytest invocation."""
+ ret = {}
+
+ # Override the GCLOUD_PROJECT and the alias.
+ env_key = TEST_CONFIG["gcloud_project_env"]
+ # This should error out if not set.
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
+
+ # Apply user supplied envs.
+ ret.update(TEST_CONFIG["envs"])
+ return ret
+
+
+# DO NOT EDIT - automatically generated.
+# All versions used to test samples.
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
+
+# Any default versions that should be ignored.
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
+
+TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
+
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+#
+# Style Checks
+#
+
+
+# Linting with flake8.
+#
+# We ignore the following rules:
+# E203: whitespace before ‘:’
+# E266: too many leading ‘#’ for block comment
+# E501: line too long
+# I202: Additional newline in a section of imports
+#
+# We also need to specify the rules which are ignored by default:
+# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
+FLAKE8_COMMON_ARGS = [
+ "--show-source",
+ "--builtin=gettext",
+ "--max-complexity=20",
+ "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
+ "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
+ "--max-line-length=88",
+]
+
+
+@nox.session
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8")
+ else:
+ session.install("flake8", "flake8-annotations")
+
+ args = FLAKE8_COMMON_ARGS + [
+ ".",
+ ]
+ session.run("flake8", *args)
+
+
+#
+# Black
+#
+
+
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ session.run("black", *python_files)
+
+
+#
+# format = isort + black
+#
+
+
+@nox.session
+def format(session: nox.sessions.Session) -> None:
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run("isort", "--fss", *python_files)
+ session.run("black", *python_files)
+
+
+#
+# Sample Tests
+#
+
+
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob(
+ "**/test_*.py", recursive=True
+ )
+ test_list.extend(glob.glob("**/tests", recursive=True))
+
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ return
+
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ concurrent_args = []
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+ with open("requirements.txt") as rfile:
+ packages = rfile.read()
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
+ else:
+ session.install("-r", "requirements-test.txt")
+ with open("requirements-test.txt") as rtfile:
+ packages += rtfile.read()
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ if "pytest-parallel" in packages:
+ concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"])
+ elif "pytest-xdist" in packages:
+ concurrent_args.extend(["-n", "auto"])
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
+
+
+@nox.session(python=ALL_VERSIONS)
+def py(session: nox.sessions.Session) -> None:
+ """Runs py.test for a sample using the specified version of Python."""
+ if session.python in TESTED_VERSIONS:
+ _session_tests(session)
+ else:
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
+
+
+#
+# Readmegen
+#
+
+
+def _get_repo_root() -> Optional[str]:
+ """Returns the root folder of the project."""
+ # Get root of this repository. Assume we don't have directories nested deeper than 10 items.
+ p = Path(os.getcwd())
+ for i in range(10):
+ if p is None:
+ break
+ if Path(p / ".git").exists():
+ return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
+ p = p.parent
+ raise Exception("Unable to detect repository root.")
+
+
+GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")])
+
+
+@nox.session
+@nox.parametrize("path", GENERATED_READMES)
+def readmegen(session: nox.sessions.Session, path: str) -> None:
+ """(Re-)generates the readme for a sample."""
+ session.install("jinja2", "pyyaml")
+ dir_ = os.path.dirname(path)
+
+ if os.path.exists(os.path.join(dir_, "requirements.txt")):
+ session.install("-r", os.path.join(dir_, "requirements.txt"))
+
+ in_file = os.path.join(dir_, "README.rst.in")
+ session.run(
+ "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/noxfile_config.py b/testbed/googleapis__python-bigquery/samples/snippets/noxfile_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d2e02346071f6f996a71cbd100ab60e65076ff1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/noxfile_config.py
@@ -0,0 +1,35 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default TEST_CONFIG_OVERRIDE for python repos.
+
+# You can copy this file into your directory, then it will be inported from
+# the noxfile.py.
+
+# The source of truth:
+# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py
+
+TEST_CONFIG_OVERRIDE = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": ["2.7"],
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/quickstart.py b/testbed/googleapis__python-bigquery/samples/snippets/quickstart.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f7f05c7357e8abd2ce8c549f994aba481c54c4b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/quickstart.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Optional
+
+
+def run_quickstart(override_values: Optional[Dict[str, str]] = None) -> None:
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_quickstart]
+ # Imports the Google Cloud client library
+ from google.cloud import bigquery
+
+ # Instantiates a client
+ bigquery_client = bigquery.Client()
+
+ # The name for the new dataset
+ dataset_id = "my_new_dataset"
+
+ # [END bigquery_quickstart]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ dataset_id = override_values.get("dataset_id", dataset_id)
+ # [START bigquery_quickstart]
+
+ # Prepares a reference to the new dataset
+ dataset_id_full = "{}.{}".format(bigquery_client.project, dataset_id)
+ dataset = bigquery.Dataset(dataset_id_full)
+
+ # Creates the new dataset
+ dataset = bigquery_client.create_dataset(dataset)
+
+ print("Dataset {} created.".format(dataset.dataset_id))
+ # [END bigquery_quickstart]
+
+
+if __name__ == "__main__":
+ run_quickstart()
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/quickstart_test.py b/testbed/googleapis__python-bigquery/samples/snippets/quickstart_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..74a02a83a077bed4b76f7593aad989d070439723
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/quickstart_test.py
@@ -0,0 +1,51 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Iterator, List
+
+from google.cloud import bigquery
+import pytest
+
+import quickstart # type: ignore
+from conftest import prefixer # type: ignore
+
+# Must match the dataset listed in quickstart.py (there's no easy way to
+# extract this).
+DATASET_ID = "my_new_dataset"
+
+
+@pytest.fixture(scope="module")
+def client() -> bigquery.Client:
+ return bigquery.Client()
+
+
+@pytest.fixture
+def datasets_to_delete(client: bigquery.Client) -> Iterator[List[str]]:
+ doomed: List[str] = []
+ yield doomed
+ for item in doomed:
+ client.delete_dataset(item, delete_contents=True)
+
+
+def test_quickstart(
+ capsys: "pytest.CaptureFixture[str]",
+ client: bigquery.Client,
+ datasets_to_delete: List[str],
+) -> None:
+ override_values = {"dataset_id": f"{prefixer.create_prefix()}_quickstart"}
+ datasets_to_delete.append(override_values["dataset_id"])
+
+ quickstart.run_quickstart(override_values)
+ out, _ = capsys.readouterr()
+ assert override_values["dataset_id"] in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/relax_column.py b/testbed/googleapis__python-bigquery/samples/snippets/relax_column.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcd79cee829a61a6b330830de9b36e6249822c64
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/relax_column.py
@@ -0,0 +1,52 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud import bigquery
+
+
+def relax_column(table_id: str) -> bigquery.Table:
+ orig_table_id = table_id
+
+ # [START bigquery_relax_column]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change table_id to full name of the table you want to create.
+ table_id = "your-project.your_dataset.your_table"
+
+ # [END bigquery_relax_column]
+ table_id = orig_table_id
+
+ # [START bigquery_relax_column]
+ table = client.get_table(table_id)
+ new_schema = []
+ for field in table.schema:
+ if field.mode != "REQUIRED":
+ new_schema.append(field)
+ else:
+ # SchemaField properties cannot be edited after initialization.
+ # To make changes, construct new SchemaField objects.
+ new_field = field.to_api_repr()
+ new_field["mode"] = "NULLABLE"
+ relaxed_field = bigquery.SchemaField.from_api_repr(new_field)
+ new_schema.append(relaxed_field)
+
+ table.schema = new_schema
+ table = client.update_table(table, ["schema"])
+
+ print(f"Updated {table_id} schema: {table.schema}.")
+
+ # [END bigquery_relax_column]
+ return table
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/relax_column_test.py b/testbed/googleapis__python-bigquery/samples/snippets/relax_column_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ede1c3ab7f89bb028a48e0a8ca81bd39a7e6578b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/relax_column_test.py
@@ -0,0 +1,46 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+import relax_column # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_relax_column(
+ capsys: "pytest.CaptureFixture[str]",
+ bigquery_client: bigquery.Client,
+ random_table_id: str,
+) -> None:
+ table = bigquery.Table(
+ random_table_id,
+ schema=[
+ bigquery.SchemaField("string_col", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("string_col2", "STRING", mode="REQUIRED"),
+ ],
+ )
+
+ bigquery_client.create_table(table)
+ table = relax_column.relax_column(random_table_id)
+
+ out, _ = capsys.readouterr()
+
+ assert all(field.mode == "NULLABLE" for field in table.schema)
+ assert "REQUIRED" not in out
+ assert "NULLABLE" in out
+ assert random_table_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/requirements-test.txt b/testbed/googleapis__python-bigquery/samples/snippets/requirements-test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bb0b2a6bff9d9101b295b199be2e7d9ef38abbd0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/requirements-test.txt
@@ -0,0 +1,5 @@
+# samples/snippets should be runnable with no "extras"
+google-cloud-testutils==1.4.0
+pytest===7.4.4; python_version == '3.7'
+pytest==8.3.3; python_version >= '3.8'
+mock==5.1.0
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/requirements.txt b/testbed/googleapis__python-bigquery/samples/snippets/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..65ce0be9fa50e8f0881c17b248c9f5e639775ba5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/requirements.txt
@@ -0,0 +1,2 @@
+# samples/snippets should be runnable with no "extras"
+google-cloud-bigquery==3.26.0
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/revoke_dataset_access.py b/testbed/googleapis__python-bigquery/samples/snippets/revoke_dataset_access.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8cb731ac4976d573886ac28e564160c9edff8d3
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/revoke_dataset_access.py
@@ -0,0 +1,52 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def revoke_dataset_access(dataset_id: str, entity_id: str) -> None:
+ original_dataset_id = dataset_id
+ original_entity_id = entity_id
+
+ # [START bigquery_revoke_dataset_access]
+
+ # TODO(developer): Set dataset_id to the ID of the dataset to fetch.
+ dataset_id = "your-project.your_dataset"
+
+ # TODO(developer): Set entity_id to the ID of the email or group from whom you are revoking access.
+ entity_id = "user-or-group-to-remove@example.com"
+ # [END bigquery_revoke_dataset_access]
+ dataset_id = original_dataset_id
+ entity_id = original_entity_id
+ # [START bigquery_revoke_dataset_access]
+
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ dataset = client.get_dataset(dataset_id) # Make an API request.
+
+ entries = list(dataset.access_entries)
+ dataset.access_entries = [
+ entry for entry in entries if entry.entity_id != entity_id
+ ]
+
+ dataset = client.update_dataset(
+ dataset,
+ # Update just the `access_entries` property of the dataset.
+ ["access_entries"],
+ ) # Make an API request.
+
+ full_dataset_id = f"{dataset.project}.{dataset.dataset_id}"
+ print(f"Revoked dataset access for '{entity_id}' to ' dataset '{full_dataset_id}.'")
+ # [END bigquery_revoke_dataset_access]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/schema.json b/testbed/googleapis__python-bigquery/samples/snippets/schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..bd2164dad35968d8a71ab4d3cb03e6c2c2277f46
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/schema.json
@@ -0,0 +1,20 @@
+[
+ {
+ "name": "qtr",
+ "type": "STRING",
+ "mode": "REQUIRED",
+ "description": "quarter"
+ },
+ {
+ "name": "rep",
+ "type": "STRING",
+ "mode": "NULLABLE",
+ "description": "sales representative"
+ },
+ {
+ "name": "sales",
+ "type": "FLOAT",
+ "mode": "NULLABLE",
+ "defaultValueExpression": "2.55"
+ }
+]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/schema_us_states.json b/testbed/googleapis__python-bigquery/samples/snippets/schema_us_states.json
new file mode 100644
index 0000000000000000000000000000000000000000..7f2ccc2775248ae9d41fad59e8d4bc0698a4bf7d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/schema_us_states.json
@@ -0,0 +1,12 @@
+[
+ {
+ "name": "name",
+ "type": "STRING",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "post_abbr",
+ "type": "STRING",
+ "mode": "NULLABLE"
+ }
+]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/simple_app.py b/testbed/googleapis__python-bigquery/samples/snippets/simple_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..8281e1877a7039ff80ec9a02f438a4152ffe4a48
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/simple_app.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Simple application that performs a query with BigQuery."""
+# [START bigquery_simple_app_all]
+# [START bigquery_simple_app_deps]
+from google.cloud import bigquery
+
+# [END bigquery_simple_app_deps]
+
+
+def query_stackoverflow() -> None:
+ # [START bigquery_simple_app_client]
+ client = bigquery.Client()
+ # [END bigquery_simple_app_client]
+ # [START bigquery_simple_app_query]
+ results = client.query_and_wait(
+ """
+ SELECT
+ CONCAT(
+ 'https://stackoverflow.com/questions/',
+ CAST(id as STRING)) as url,
+ view_count
+ FROM `bigquery-public-data.stackoverflow.posts_questions`
+ WHERE tags like '%google-bigquery%'
+ ORDER BY view_count DESC
+ LIMIT 10"""
+ ) # Waits for job to complete.
+ # [END bigquery_simple_app_query]
+
+ # [START bigquery_simple_app_print]
+ for row in results:
+ print("{} : {} views".format(row.url, row.view_count))
+ # [END bigquery_simple_app_print]
+
+
+if __name__ == "__main__":
+ query_stackoverflow()
+# [END bigquery_simple_app_all]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/simple_app_test.py b/testbed/googleapis__python-bigquery/samples/snippets/simple_app_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bf0bb49c127298a4e9c49a5724ee3951d18af5e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/simple_app_test.py
@@ -0,0 +1,26 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import simple_app # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_query_stackoverflow(capsys: "pytest.CaptureFixture[str]") -> None:
+ simple_app.query_stackoverflow()
+ out, _ = capsys.readouterr()
+ assert "views" in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/test_update_with_dml.py b/testbed/googleapis__python-bigquery/samples/snippets/test_update_with_dml.py
new file mode 100644
index 0000000000000000000000000000000000000000..d03114a361fe39ca7f4323d971b17e52a3a2ffd5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/test_update_with_dml.py
@@ -0,0 +1,42 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Iterator
+
+from google.cloud import bigquery
+import pytest
+
+from conftest import prefixer # type: ignore
+import update_with_dml # type: ignore
+
+
+@pytest.fixture
+def table_id(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id: str
+) -> Iterator[str]:
+ table_id = f"{prefixer.create_prefix()}_update_with_dml"
+ yield table_id
+ full_table_id = f"{project_id}.{dataset_id}.{table_id}"
+ bigquery_client.delete_table(full_table_id, not_found_ok=True)
+
+
+def test_update_with_dml(
+ bigquery_client_patch: None, dataset_id: str, table_id: str
+) -> None:
+ override_values = {
+ "dataset_id": dataset_id,
+ "table_id": table_id,
+ }
+ num_rows = update_with_dml.run_sample(override_values=override_values)
+ assert num_rows > 0
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/update_dataset_access.py b/testbed/googleapis__python-bigquery/samples/snippets/update_dataset_access.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b3293ea55701519f1460e853e9fc1de09674ab9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/update_dataset_access.py
@@ -0,0 +1,76 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def update_dataset_access(dataset_id: str, entity_id: str) -> None:
+ original_dataset_id = dataset_id
+ original_entity_id = entity_id
+
+ # [START bigquery_update_dataset_access]
+
+ # TODO(developer): Set dataset_id to the ID of the dataset to fetch.
+ dataset_id = "your-project.your_dataset"
+
+ # TODO(developer): Set entity_id to the ID of the email or group from whom
+ # you are adding access. Alternatively, to the JSON REST API representation
+ # of the entity, such as a view's table reference.
+ entity_id = "user-or-group-to-add@example.com"
+
+ from google.cloud.bigquery.enums import EntityTypes
+
+ # TODO(developer): Set entity_type to the type of entity you are granting access to.
+ # Common types include:
+ #
+ # * "userByEmail" -- A single user or service account. For example "fred@example.com"
+ # * "groupByEmail" -- A group of users. For example "example@googlegroups.com"
+ # * "view" -- An authorized view. For example
+ # {"projectId": "p", "datasetId": "d", "tableId": "v"}
+ #
+ # For a complete reference, see the REST API reference documentation:
+ # https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#Dataset.FIELDS.access
+ entity_type = EntityTypes.GROUP_BY_EMAIL
+
+ # TODO(developer): Set role to a one of the "Basic roles for datasets"
+ # described here:
+ # https://cloud.google.com/bigquery/docs/access-control-basic-roles#dataset-basic-roles
+ role = "READER"
+ # [END bigquery_update_dataset_access]
+ dataset_id = original_dataset_id
+ entity_id = original_entity_id
+ # [START bigquery_update_dataset_access]
+
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ dataset = client.get_dataset(dataset_id) # Make an API request.
+
+ entries = list(dataset.access_entries)
+ entries.append(
+ bigquery.AccessEntry(
+ role=role,
+ entity_type=entity_type,
+ entity_id=entity_id,
+ )
+ )
+ dataset.access_entries = entries
+
+ dataset = client.update_dataset(dataset, ["access_entries"]) # Make an API request.
+
+ full_dataset_id = "{}.{}".format(dataset.project, dataset.dataset_id)
+ print(
+ "Updated dataset '{}' with modified user permissions.".format(full_dataset_id)
+ )
+ # [END bigquery_update_dataset_access]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/update_table_expiration.py b/testbed/googleapis__python-bigquery/samples/snippets/update_table_expiration.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf944800fd190668f4936db8812bff63348b2d85
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/update_table_expiration.py
@@ -0,0 +1,45 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+
+def update_table_expiration(table_id, expiration):
+ orig_table_id = table_id
+ orig_expiration = expiration
+
+ # [START bigquery_update_table_expiration]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # TODO(dev): Change table_id to the full name of the table you want to update.
+ table_id = "your-project.your_dataset.your_table_name"
+
+ # TODO(dev): Set table to expire for desired days days from now.
+ expiration = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
+ days=5
+ )
+ # [END bigquery_update_table_expiration]
+
+ table_id = orig_table_id
+ expiration = orig_expiration
+
+ # [START bigquery_update_table_expiration]
+ table = client.get_table(table_id) # Make an API request.
+ table.expires = expiration
+ table = client.update_table(table, ["expires"]) # API request
+
+ print(f"Updated {table_id}, expires {table.expires}.")
+ # [END bigquery_update_table_expiration]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/update_table_expiration_test.py b/testbed/googleapis__python-bigquery/samples/snippets/update_table_expiration_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed68a8c2c80cf9343955ae4b87457bb808e9e215
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/update_table_expiration_test.py
@@ -0,0 +1,43 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import typing
+
+import update_table_expiration # type: ignore
+
+if typing.TYPE_CHECKING:
+ import pathlib
+
+ import pytest
+
+
+def test_update_table_expiration(
+ capsys: "pytest.CaptureFixture[str]",
+ table_id: str,
+ tmp_path: "pathlib.Path",
+) -> None:
+ # This was not needed for function, only for test
+ expiration = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
+ days=5
+ )
+
+ update_table_expiration.update_table_expiration(table_id, expiration)
+
+ out, _ = capsys.readouterr()
+ assert "Updated" in out
+ assert table_id in out
+ assert str(expiration.day) in out
+ assert str(expiration.month) in out
+ assert str(expiration.year) in out
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/update_with_dml.py b/testbed/googleapis__python-bigquery/samples/snippets/update_with_dml.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d0294eadec46d738c065f09388f021253ef59d4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/update_with_dml.py
@@ -0,0 +1,88 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START bigquery_update_with_dml]
+import pathlib
+from typing import Dict, Optional
+
+from google.cloud import bigquery
+from google.cloud.bigquery import enums
+
+
+def load_from_newline_delimited_json(
+ client: bigquery.Client,
+ filepath: pathlib.Path,
+ project_id: str,
+ dataset_id: str,
+ table_id: str,
+) -> None:
+ full_table_id = f"{project_id}.{dataset_id}.{table_id}"
+ job_config = bigquery.LoadJobConfig()
+ job_config.source_format = enums.SourceFormat.NEWLINE_DELIMITED_JSON
+ job_config.schema = [
+ bigquery.SchemaField("id", enums.SqlTypeNames.STRING),
+ bigquery.SchemaField("user_id", enums.SqlTypeNames.INTEGER),
+ bigquery.SchemaField("login_time", enums.SqlTypeNames.TIMESTAMP),
+ bigquery.SchemaField("logout_time", enums.SqlTypeNames.TIMESTAMP),
+ bigquery.SchemaField("ip_address", enums.SqlTypeNames.STRING),
+ ]
+
+ with open(filepath, "rb") as json_file:
+ load_job = client.load_table_from_file(
+ json_file, full_table_id, job_config=job_config
+ )
+
+ # Wait for load job to finish.
+ load_job.result()
+
+
+def update_with_dml(
+ client: bigquery.Client, project_id: str, dataset_id: str, table_id: str
+) -> int:
+ query_text = f"""
+ UPDATE `{project_id}.{dataset_id}.{table_id}`
+ SET ip_address = REGEXP_REPLACE(ip_address, r"(\\.[0-9]+)$", ".0")
+ WHERE TRUE
+ """
+ query_job = client.query(query_text)
+
+ # Wait for query job to finish.
+ query_job.result()
+
+ assert query_job.num_dml_affected_rows is not None
+
+ print(f"DML query modified {query_job.num_dml_affected_rows} rows.")
+ return query_job.num_dml_affected_rows
+
+
+def run_sample(override_values: Optional[Dict[str, str]] = None) -> int:
+ if override_values is None:
+ override_values = {}
+
+ client = bigquery.Client()
+ filepath = pathlib.Path(__file__).parent / "user_sessions_data.json"
+ project_id = client.project
+ dataset_id = "sample_db"
+ table_id = "UserSessions"
+ # [END bigquery_update_with_dml]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ dataset_id = override_values.get("dataset_id", dataset_id)
+ table_id = override_values.get("table_id", table_id)
+ # [START bigquery_update_with_dml]
+ load_from_newline_delimited_json(client, filepath, project_id, dataset_id, table_id)
+ return update_with_dml(client, project_id, dataset_id, table_id)
+
+
+# [END bigquery_update_with_dml]
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/user_sessions_data.json b/testbed/googleapis__python-bigquery/samples/snippets/user_sessions_data.json
new file mode 100644
index 0000000000000000000000000000000000000000..7ea3715adf5fbfc2cf6d72561ac7e48b93114263
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/user_sessions_data.json
@@ -0,0 +1,10 @@
+{"id":"2ad525d6-c832-4c3d-b7fe-59d104885519","user_id":"38","login_time":"1.47766087E9","logout_time":"1.477661109E9","ip_address":"192.0.2.12"}
+{"id":"53d65e20-6ea9-4650-98d9-a2111fbd1122","user_id":"88","login_time":"1.47707544E9","logout_time":"1.477075519E9","ip_address":"192.0.2.88"}
+{"id":"5e6c3021-d5e7-4ccd-84b2-adfa9176d13d","user_id":"39","login_time":"1.474022869E9","logout_time":"1.474022961E9","ip_address":"203.0.113.52"}
+{"id":"6196eefa-1498-4567-8ef0-498845b888d9","user_id":"52","login_time":"1.478604612E9","logout_time":"1.478604691E9","ip_address":"203.0.113.169"}
+{"id":"70656dc5-7e0f-49cf-9e00-f06ed93c1f5b","user_id":"46","login_time":"1.474089924E9","logout_time":"1.474090227E9","ip_address":"192.0.2.10"}
+{"id":"aafa5eef-ad49-49a7-9a0f-fbc7fd639bd3","user_id":"40","login_time":"1.478031161E9","logout_time":"1.478031388E9","ip_address":"203.0.113.18"}
+{"id":"d2792fc2-24dd-4260-9456-3fbe6cdfdd90","user_id":"5","login_time":"1.481259081E9","logout_time":"1.481259247E9","ip_address":"192.0.2.140"}
+{"id":"d835dc49-32f9-4790-b4eb-dddee62e0dcc","user_id":"62","login_time":"1.478892977E9","logout_time":"1.478893219E9","ip_address":"203.0.113.83"}
+{"id":"f4a0d3c7-351f-471c-8e11-e093e7a6ce75","user_id":"89","login_time":"1.459031555E9","logout_time":"1.459031831E9","ip_address":"203.0.113.233"}
+{"id":"f6e9f526-5b22-4679-9c3e-56a636e815bb","user_id":"97","login_time":"1.482426034E9","logout_time":"1.482426415E9","ip_address":"203.0.113.167"}
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/view.py b/testbed/googleapis__python-bigquery/samples/snippets/view.py
new file mode 100644
index 0000000000000000000000000000000000000000..30e719c794b605545e2e31bb06c829c68ec88feb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/view.py
@@ -0,0 +1,198 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+from typing import Dict, Optional, Tuple
+
+try:
+ from typing import TypedDict
+except ImportError:
+ from typing_extensions import TypedDict
+
+if typing.TYPE_CHECKING:
+ from google.cloud import bigquery
+
+
+class OverridesDict(TypedDict, total=False):
+ analyst_group_email: str
+ view_dataset_id: str
+ view_id: str
+ view_reference: Dict[str, str]
+ source_dataset_id: str
+ source_id: str
+
+
+def create_view(override_values: Optional[Dict[str, str]] = None) -> "bigquery.Table":
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_create_view]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ view_id = "my-project.my_dataset.my_view"
+ source_id = "my-project.my_dataset.my_table"
+ # [END bigquery_create_view]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ view_id = override_values.get("view_id", view_id)
+ source_id = override_values.get("source_id", source_id)
+ # [START bigquery_create_view]
+ view = bigquery.Table(view_id)
+
+ # The source table in this example is created from a CSV file in Google
+ # Cloud Storage located at
+ # `gs://cloud-samples-data/bigquery/us-states/us-states.csv`. It contains
+ # 50 US states, while the view returns only those states with names
+ # starting with the letter 'W'.
+ view.view_query = f"SELECT name, post_abbr FROM `{source_id}` WHERE name LIKE 'W%'"
+
+ # Make an API request to create the view.
+ view = client.create_table(view)
+ print(f"Created {view.table_type}: {str(view.reference)}")
+ # [END bigquery_create_view]
+ return view
+
+
+def get_view(override_values: Optional[Dict[str, str]] = None) -> "bigquery.Table":
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_get_view]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ view_id = "my-project.my_dataset.my_view"
+ # [END bigquery_get_view]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ view_id = override_values.get("view_id", view_id)
+ # [START bigquery_get_view]
+ # Make an API request to get the table resource.
+ view = client.get_table(view_id)
+
+ # Display view properties
+ print(f"Retrieved {view.table_type}: {str(view.reference)}")
+ print(f"View Query:\n{view.view_query}")
+ # [END bigquery_get_view]
+ return view
+
+
+def update_view(override_values: Optional[Dict[str, str]] = None) -> "bigquery.Table":
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_update_view_query]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ view_id = "my-project.my_dataset.my_view"
+ source_id = "my-project.my_dataset.my_table"
+ # [END bigquery_update_view_query]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ view_id = override_values.get("view_id", view_id)
+ source_id = override_values.get("source_id", source_id)
+ # [START bigquery_update_view_query]
+ view = bigquery.Table(view_id)
+
+ # The source table in this example is created from a CSV file in Google
+ # Cloud Storage located at
+ # `gs://cloud-samples-data/bigquery/us-states/us-states.csv`. It contains
+ # 50 US states, while the view returns only those states with names
+ # starting with the letter 'M'.
+ view.view_query = f"SELECT name, post_abbr FROM `{source_id}` WHERE name LIKE 'M%'"
+
+ # Make an API request to update the query property of the view.
+ view = client.update_table(view, ["view_query"])
+ print(f"Updated {view.table_type}: {str(view.reference)}")
+ # [END bigquery_update_view_query]
+ return view
+
+
+def grant_access(
+ override_values: Optional[OverridesDict] = None,
+) -> Tuple["bigquery.Dataset", "bigquery.Dataset"]:
+ if override_values is None:
+ override_values = {}
+
+ # [START bigquery_grant_view_access]
+ from google.cloud import bigquery
+
+ client = bigquery.Client()
+
+ # To use a view, the analyst requires ACLs to both the view and the source
+ # table. Create an authorized view to allow an analyst to use a view
+ # without direct access permissions to the source table.
+ view_dataset_id = "my-project.my_view_dataset"
+ # [END bigquery_grant_view_access]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ view_dataset_id = override_values.get("view_dataset_id", view_dataset_id)
+ # [START bigquery_grant_view_access]
+ # Make an API request to get the view dataset ACLs.
+ view_dataset = client.get_dataset(view_dataset_id)
+
+ analyst_group_email = "example-analyst-group@google.com"
+ # [END bigquery_grant_view_access]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ analyst_group_email = override_values.get(
+ "analyst_group_email", analyst_group_email
+ )
+ # [START bigquery_grant_view_access]
+ access_entries = view_dataset.access_entries
+ access_entries.append(
+ bigquery.AccessEntry("READER", "groupByEmail", analyst_group_email)
+ )
+ view_dataset.access_entries = access_entries
+
+ # Make an API request to update the ACLs property of the view dataset.
+ view_dataset = client.update_dataset(view_dataset, ["access_entries"])
+ print(f"Access to view: {view_dataset.access_entries}")
+
+ # Group members of "data_analysts@example.com" now have access to the view,
+ # but they require access to the source table to use it. To remove this
+ # restriction, authorize the view to access the source dataset.
+ source_dataset_id = "my-project.my_source_dataset"
+ # [END bigquery_grant_view_access]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ source_dataset_id = override_values.get("source_dataset_id", source_dataset_id)
+ # [START bigquery_grant_view_access]
+ # Make an API request to set the source dataset ACLs.
+ source_dataset = client.get_dataset(source_dataset_id)
+
+ view_reference = {
+ "projectId": "my-project",
+ "datasetId": "my_view_dataset",
+ "tableId": "my_authorized_view",
+ }
+ # [END bigquery_grant_view_access]
+ # To facilitate testing, we replace values with alternatives
+ # provided by the testing harness.
+ view_reference = override_values.get("view_reference", view_reference)
+ # [START bigquery_grant_view_access]
+ access_entries = source_dataset.access_entries
+ access_entries.append(bigquery.AccessEntry(None, "view", view_reference))
+ source_dataset.access_entries = access_entries
+
+ # Make an API request to update the ACLs property of the source dataset.
+ source_dataset = client.update_dataset(source_dataset, ["access_entries"])
+ print(f"Access to source: {source_dataset.access_entries}")
+ # [END bigquery_grant_view_access]
+ return view_dataset, source_dataset
diff --git a/testbed/googleapis__python-bigquery/samples/snippets/view_test.py b/testbed/googleapis__python-bigquery/samples/snippets/view_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..d46595695cd68a1c487ba2bf870f624c70004fed
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/snippets/view_test.py
@@ -0,0 +1,130 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+from typing import Iterator
+import uuid
+
+from google.cloud import bigquery
+import pytest
+
+import view # type: ignore
+from conftest import prefixer # type: ignore
+
+
+def temp_suffix() -> str:
+ now = datetime.datetime.now()
+ return f"{now.strftime('%Y%m%d%H%M%S')}_{uuid.uuid4().hex[:8]}"
+
+
+@pytest.fixture(autouse=True)
+def bigquery_client_patch(
+ monkeypatch: pytest.MonkeyPatch, bigquery_client: bigquery.Client
+) -> None:
+ monkeypatch.setattr(bigquery, "Client", lambda: bigquery_client)
+
+
+@pytest.fixture(scope="module")
+def view_dataset_id(bigquery_client: bigquery.Client, project_id: str) -> Iterator[str]:
+ dataset_id = f"{project_id}.view_{temp_suffix()}"
+ bigquery_client.create_dataset(dataset_id)
+ yield dataset_id
+ bigquery_client.delete_dataset(dataset_id, delete_contents=True)
+
+
+@pytest.fixture(scope="module")
+def view_id(bigquery_client: bigquery.Client, view_dataset_id: str) -> Iterator[str]:
+ view_id = f"{view_dataset_id}.my_view"
+ yield view_id
+ bigquery_client.delete_table(view_id, not_found_ok=True)
+
+
+@pytest.fixture(scope="module")
+def source_dataset_id(
+ bigquery_client: bigquery.Client, project_id: str
+) -> Iterator[str]:
+ dataset_id = f"{prefixer.create_prefix()}_view"
+ bigquery_client.create_dataset(dataset_id)
+ yield dataset_id
+ bigquery_client.delete_dataset(dataset_id, delete_contents=True)
+
+
+@pytest.fixture(scope="module")
+def source_table_id(
+ bigquery_client: bigquery.Client, source_dataset_id: str
+) -> Iterator[str]:
+ source_table_id = f"{source_dataset_id}.us_states"
+ job_config = bigquery.LoadJobConfig(
+ schema=[
+ bigquery.SchemaField("name", "STRING"),
+ bigquery.SchemaField("post_abbr", "STRING"),
+ ],
+ skip_leading_rows=1,
+ )
+ load_job = bigquery_client.load_table_from_uri(
+ "gs://cloud-samples-data/bigquery/us-states/us-states.csv",
+ source_table_id,
+ job_config=job_config,
+ )
+ load_job.result()
+ yield source_table_id
+ bigquery_client.delete_table(source_table_id, not_found_ok=True)
+
+
+def test_view(
+ capsys: pytest.CaptureFixture[str],
+ view_id: str,
+ view_dataset_id: str,
+ source_table_id: str,
+ source_dataset_id: str,
+) -> None:
+ override_values = {
+ "view_id": view_id,
+ "source_id": source_table_id,
+ }
+ got = view.create_view(override_values)
+ assert source_table_id in got.view_query
+ out, _ = capsys.readouterr()
+ assert view_id in out
+
+ got = view.get_view(override_values)
+ assert source_table_id in got.view_query
+ assert "'W%'" in got.view_query
+ out, _ = capsys.readouterr()
+ assert view_id in out
+ assert source_table_id in out
+ assert "'W%'" in out
+
+ got = view.update_view(override_values)
+ assert source_table_id in got.view_query
+ assert "'M%'" in got.view_query
+ out, _ = capsys.readouterr()
+ assert view_id in out
+
+ project_id, dataset_id, table_id = view_id.split(".")
+ overrides: view.OverridesDict = {
+ "view_dataset_id": view_dataset_id,
+ "source_dataset_id": source_dataset_id,
+ "view_reference": {
+ "projectId": project_id,
+ "datasetId": dataset_id,
+ "tableId": table_id,
+ },
+ }
+ view_dataset, source_dataset = view.grant_access(overrides)
+ assert len(view_dataset.access_entries) != 0
+ assert len(source_dataset.access_entries) != 0
+ out, _ = capsys.readouterr()
+ assert "example-analyst-group@google.com" in out
+ assert table_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/table_insert_rows.py b/testbed/googleapis__python-bigquery/samples/table_insert_rows.py
new file mode 100644
index 0000000000000000000000000000000000000000..d680b4c1e3bb64dc68052929311477cd8a6e64d0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/table_insert_rows.py
@@ -0,0 +1,36 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def table_insert_rows(table_id: str) -> None:
+ # [START bigquery_table_insert_rows]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ # TODO(developer): Set table_id to the ID of table to append to.
+ # table_id = "your-project.your_dataset.your_table"
+
+ rows_to_insert = [
+ {"full_name": "Phred Phlyntstone", "age": 32},
+ {"full_name": "Wylma Phlyntstone", "age": 29},
+ ]
+
+ errors = client.insert_rows_json(table_id, rows_to_insert) # Make an API request.
+ if errors == []:
+ print("New rows have been added.")
+ else:
+ print("Encountered errors while inserting rows: {}".format(errors))
+ # [END bigquery_table_insert_rows]
diff --git a/testbed/googleapis__python-bigquery/samples/tests/__init__.py b/testbed/googleapis__python-bigquery/samples/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/googleapis__python-bigquery/samples/tests/conftest.py b/testbed/googleapis__python-bigquery/samples/tests/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdf52b38835b8733e3e1b0b3cd330d81864702e3
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/conftest.py
@@ -0,0 +1,183 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+from typing import Iterator, List
+from unittest import mock
+import uuid
+
+import google.auth
+import pytest
+
+from google.cloud import bigquery
+
+
+@pytest.fixture(scope="session", autouse=True)
+def client() -> bigquery.Client:
+ credentials, project = google.auth.default(
+ scopes=[
+ "https://www.googleapis.com/auth/drive",
+ "https://www.googleapis.com/auth/bigquery",
+ ]
+ )
+ real_client = bigquery.Client(credentials=credentials, project=project)
+ mock_client = mock.create_autospec(bigquery.Client)
+ mock_client.return_value = real_client
+ bigquery.Client = mock_client # type: ignore
+ return real_client
+
+
+@pytest.fixture
+def random_table_id(dataset_id: str) -> str:
+ now = datetime.datetime.now()
+ random_table_id = "example_table_{}_{}".format(
+ now.strftime("%Y%m%d%H%M%S"), uuid.uuid4().hex[:8]
+ )
+ return "{}.{}".format(dataset_id, random_table_id)
+
+
+@pytest.fixture
+def avro_source_uris() -> List[str]:
+ avro_source_uris = [
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/a-twitter.avro",
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/b-twitter.avro",
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/c-twitter.avro",
+ ]
+ return avro_source_uris
+
+
+@pytest.fixture
+def reference_file_schema_uri() -> str:
+ reference_file_schema_uri = "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/b-twitter.avro"
+ return reference_file_schema_uri
+
+
+@pytest.fixture
+def random_dataset_id(client: bigquery.Client) -> Iterator[str]:
+ now = datetime.datetime.now()
+ random_dataset_id = "example_dataset_{}_{}".format(
+ now.strftime("%Y%m%d%H%M%S"), uuid.uuid4().hex[:8]
+ )
+ yield "{}.{}".format(client.project, random_dataset_id)
+ client.delete_dataset(random_dataset_id, delete_contents=True, not_found_ok=True)
+
+
+@pytest.fixture
+def random_routine_id(dataset_id: str) -> str:
+ now = datetime.datetime.now()
+ random_routine_id = "example_routine_{}_{}".format(
+ now.strftime("%Y%m%d%H%M%S"), uuid.uuid4().hex[:8]
+ )
+ return "{}.{}".format(dataset_id, random_routine_id)
+
+
+@pytest.fixture
+def dataset_id(client: bigquery.Client) -> Iterator[str]:
+ now = datetime.datetime.now()
+ dataset_id = "python_dataset_sample_{}_{}".format(
+ now.strftime("%Y%m%d%H%M%S"), uuid.uuid4().hex[:8]
+ )
+ dataset = client.create_dataset(dataset_id)
+ yield "{}.{}".format(dataset.project, dataset.dataset_id)
+ client.delete_dataset(dataset, delete_contents=True, not_found_ok=True)
+
+
+@pytest.fixture
+def table_id(client: bigquery.Client, dataset_id: str) -> Iterator[str]:
+ now = datetime.datetime.now()
+ table_id = "python_table_sample_{}_{}".format(
+ now.strftime("%Y%m%d%H%M%S"), uuid.uuid4().hex[:8]
+ )
+
+ table = bigquery.Table("{}.{}".format(dataset_id, table_id))
+ table = client.create_table(table)
+ yield "{}.{}.{}".format(table.project, table.dataset_id, table.table_id)
+ client.delete_table(table, not_found_ok=True)
+
+
+@pytest.fixture
+def table_with_schema_id(client: bigquery.Client, dataset_id: str) -> Iterator[str]:
+ now = datetime.datetime.now()
+ table_id = "python_table_with_schema_{}_{}".format(
+ now.strftime("%Y%m%d%H%M%S"), uuid.uuid4().hex[:8]
+ )
+ schema = [
+ bigquery.SchemaField("full_name", "STRING"),
+ bigquery.SchemaField("age", "INTEGER"),
+ ]
+ table = bigquery.Table("{}.{}".format(dataset_id, table_id), schema=schema)
+ table = client.create_table(table)
+ yield "{}.{}.{}".format(table.project, table.dataset_id, table.table_id)
+ client.delete_table(table, not_found_ok=True)
+
+
+@pytest.fixture
+def table_with_data_id() -> str:
+ return "bigquery-public-data.samples.shakespeare"
+
+
+@pytest.fixture
+def routine_id(client: bigquery.Client, dataset_id: str) -> Iterator[str]:
+ now = datetime.datetime.now()
+ routine_id = "python_routine_sample_{}_{}".format(
+ now.strftime("%Y%m%d%H%M%S"), uuid.uuid4().hex[:8]
+ )
+
+ routine = bigquery.Routine("{}.{}".format(dataset_id, routine_id))
+ routine.type_ = "SCALAR_FUNCTION"
+ routine.language = "SQL"
+ routine.body = "x * 3"
+ routine.arguments = [
+ bigquery.RoutineArgument(
+ name="x",
+ data_type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ ),
+ )
+ ]
+
+ routine = client.create_routine(routine)
+ yield "{}.{}.{}".format(routine.project, routine.dataset_id, routine.routine_id)
+ client.delete_routine(routine, not_found_ok=True)
+
+
+@pytest.fixture
+def model_id(client: bigquery.Client, dataset_id: str) -> str:
+ model_id = "{}.{}".format(dataset_id, uuid.uuid4().hex)
+
+ # The only way to create a model resource is via SQL.
+ # Use a very small dataset (2 points), to train a model quickly.
+ sql = """
+ CREATE MODEL `{}`
+ OPTIONS (
+ model_type='linear_reg',
+ max_iterations=1,
+ learn_rate=0.4,
+ learn_rate_strategy='constant'
+ ) AS (
+ SELECT 'a' AS f1, 2.0 AS label
+ UNION ALL
+ SELECT 'b' AS f1, 3.8 AS label
+ )
+ """.format(
+ model_id
+ )
+
+ client.query_and_wait(sql)
+ return model_id
+
+
+@pytest.fixture
+def kms_key_name() -> str:
+ return "projects/cloud-samples-tests/locations/us/keyRings/test/cryptoKeys/test"
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_add_empty_column.py b/testbed/googleapis__python-bigquery/samples/tests/test_add_empty_column.py
new file mode 100644
index 0000000000000000000000000000000000000000..95d5546217a8dfd95e26b7843ebedd8723b48c37
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_add_empty_column.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import add_empty_column
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_add_empty_column(capsys: "pytest.CaptureFixture[str]", table_id: str) -> None:
+ add_empty_column.add_empty_column(table_id)
+ out, err = capsys.readouterr()
+ assert "A new column has been added." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_browse_table_data.py b/testbed/googleapis__python-bigquery/samples/tests/test_browse_table_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..670eb7ccfaf84813786059d5966f35f34fda8f8a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_browse_table_data.py
@@ -0,0 +1,33 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import browse_table_data
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_browse_table_data(
+ capsys: "pytest.CaptureFixture[str]", table_with_data_id: str
+) -> None:
+ browse_table_data.browse_table_data(table_with_data_id)
+ out, err = capsys.readouterr()
+ assert "Downloaded 164656 rows from table {}".format(table_with_data_id) in out
+ assert "Downloaded 10 rows from table {}".format(table_with_data_id) in out
+ assert "Selected 2 columns from table {}".format(table_with_data_id) in out
+ assert "Downloaded 10 rows from table {}".format(table_with_data_id) in out
+ assert "word" in out
+ assert "LVII" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_list_jobs.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_list_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bb1bbd1985eab26e7e2fd38f33487ddb034cd08
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_list_jobs.py
@@ -0,0 +1,37 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_list_jobs
+from .. import create_job
+
+if typing.TYPE_CHECKING:
+ from google.cloud import bigquery
+ import pytest
+
+
+def test_client_list_jobs(
+ capsys: "pytest.CaptureFixture[str]", client: "bigquery.Client"
+) -> None:
+ job = create_job.create_job()
+ client.cancel_job(job.job_id)
+ job.cancel()
+ client_list_jobs.client_list_jobs()
+ out, err = capsys.readouterr()
+ assert "Started job: {}".format(job.job_id) in out
+ assert "Last 10 jobs:" in out
+ assert "Jobs from the last ten minutes:" in out
+ assert "Last 10 jobs run by all users:" in out
+ assert "Last 10 jobs done:" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_load_partitioned_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_load_partitioned_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f6564afa858e0a21a3a7ba03982c17202d14017
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_load_partitioned_table.py
@@ -0,0 +1,28 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_load_partitioned_table
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_load_partitioned_table(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ client_load_partitioned_table.client_load_partitioned_table(random_table_id)
+ out, err = capsys.readouterr()
+ assert "Loaded 50 rows to table {}".format(random_table_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_add_column.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_add_column.py
new file mode 100644
index 0000000000000000000000000000000000000000..c80f195a5744f5cd5e768b1a4a84dc53eacabf24
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_add_column.py
@@ -0,0 +1,38 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+from .. import client_query_add_column
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_add_column(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str, client: bigquery.Client
+) -> None:
+ schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+
+ client.create_table(bigquery.Table(random_table_id, schema=schema))
+
+ client_query_add_column.client_query_add_column(random_table_id)
+ out, err = capsys.readouterr()
+ assert "Table {} contains 2 columns".format(random_table_id) in out
+ assert "Table {} now contains 3 columns".format(random_table_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_batch.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_batch.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1e0e264755ad4fd5a9bafeb7137f77c3b157d54
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_batch.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_batch
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_batch(capsys: "pytest.CaptureFixture[str]") -> None:
+ job = client_query_batch.client_query_batch()
+ out, err = capsys.readouterr()
+ assert "Job {} is currently in state {}".format(job.job_id, job.state) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..1487f6e65b2b0a6569cfacfe220f22621492a037
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table.py
@@ -0,0 +1,28 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_destination_table
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_destination_table(
+ capsys: "pytest.CaptureFixture[str]", table_id: str
+) -> None:
+ client_query_destination_table.client_query_destination_table(table_id)
+ out, err = capsys.readouterr()
+ assert "Query results loaded to the table {}".format(table_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table_clustered.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table_clustered.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a1e5bcd4dffc3dc4d156e55cb4437c1834f810b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table_clustered.py
@@ -0,0 +1,33 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_destination_table_clustered
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_destination_table_clustered(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ client_query_destination_table_clustered.client_query_destination_table_clustered(
+ random_table_id
+ )
+ out, err = capsys.readouterr()
+ assert (
+ "The destination table is written using the cluster_fields configuration."
+ in out
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table_cmek.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table_cmek.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cb76be8eae75e0a51cfbd564ee018e922ff3b37
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table_cmek.py
@@ -0,0 +1,30 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_destination_table_cmek
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_destination_table_cmek(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str, kms_key_name: str
+) -> None:
+ client_query_destination_table_cmek.client_query_destination_table_cmek(
+ random_table_id, kms_key_name
+ )
+ out, err = capsys.readouterr()
+ assert "The destination table is written using the encryption configuration" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table_legacy.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table_legacy.py
new file mode 100644
index 0000000000000000000000000000000000000000..78a199beac4e77909f8c12cae7f70a605b8ba062
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_destination_table_legacy.py
@@ -0,0 +1,30 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_destination_table_legacy
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_destination_table_legacy(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ client_query_destination_table_legacy.client_query_destination_table_legacy(
+ random_table_id
+ )
+ out, err = capsys.readouterr()
+ assert "Query results loaded to the table {}".format(random_table_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_dry_run.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_dry_run.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfc8100a1ae466b0c8a94c37fcdaf203622e206d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_dry_run.py
@@ -0,0 +1,29 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_dry_run
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_dry_run(capsys: "pytest.CaptureFixture[str]") -> None:
+ query_job = client_query_dry_run.client_query_dry_run()
+ out, err = capsys.readouterr()
+ assert "This query will process" in out
+ assert query_job.state == "DONE"
+ assert query_job.dry_run
+ assert query_job.total_bytes_processed > 0
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_legacy_sql.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_legacy_sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..98303cde9314385c12ee3b3897af0af909c30e1a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_legacy_sql.py
@@ -0,0 +1,27 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import typing
+
+from .. import client_query_legacy_sql
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_legacy_sql(capsys: "pytest.CaptureFixture[str]") -> None:
+ client_query_legacy_sql.client_query_legacy_sql()
+ out, err = capsys.readouterr()
+ assert re.search(r"(Row[\w(){}:', ]+)$", out)
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_relax_column.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_relax_column.py
new file mode 100644
index 0000000000000000000000000000000000000000..0df8463bed504e0432ed5e760262aeb0878c7478
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_relax_column.py
@@ -0,0 +1,40 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+from .. import client_query_relax_column
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_relax_column(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+ client: bigquery.Client,
+) -> None:
+ schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+
+ client.create_table(bigquery.Table(random_table_id, schema=schema))
+
+ client_query_relax_column.client_query_relax_column(random_table_id)
+ out, err = capsys.readouterr()
+ assert "2 fields in the schema are required." in out
+ assert "0 fields in the schema are now required." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_shortmode.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_shortmode.py
new file mode 100644
index 0000000000000000000000000000000000000000..41132f24cbea77af7e56af8c452fa27c48c63646
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_shortmode.py
@@ -0,0 +1,26 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_shortmode
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_shortmode(capsys: "pytest.CaptureFixture[str]") -> None:
+ client_query_shortmode.client_query_shortmode()
+ out, err = capsys.readouterr()
+ assert "Query was run" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_array_params.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_array_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..c302712fee28aae86150dc87d6ba87786249d0eb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_array_params.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_w_array_params
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_w_array_params(capsys: "pytest.CaptureFixture[str]") -> None:
+ client_query_w_array_params.client_query_w_array_params()
+ out, err = capsys.readouterr()
+ assert "James" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_named_params.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_named_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4d66be4181f7b9a7ba1b74587f2139cfd2d7c61
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_named_params.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_w_named_params
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_w_named_params(capsys: "pytest.CaptureFixture[str]") -> None:
+ client_query_w_named_params.client_query_w_named_params()
+ out, err = capsys.readouterr()
+ assert "the" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_positional_params.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_positional_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..61df76aaae07bccfed632ce35974199d73fada82
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_positional_params.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_w_positional_params
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_w_positional_params(capsys: "pytest.CaptureFixture[str]") -> None:
+ client_query_w_positional_params.client_query_w_positional_params()
+ out, err = capsys.readouterr()
+ assert "the" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_struct_params.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_struct_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..5eea993ced9ba3f549efaf05e573ae12a7fc01df
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_struct_params.py
@@ -0,0 +1,27 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_w_struct_params
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_w_struct_params(capsys: "pytest.CaptureFixture[str]") -> None:
+ client_query_w_struct_params.client_query_w_struct_params()
+ out, err = capsys.readouterr()
+ assert "1" in out
+ assert "foo" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_timestamp_params.py b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_timestamp_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..8147d4a96535b82a7242f2c3590f4a577a15bfba
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_client_query_w_timestamp_params.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import client_query_w_timestamp_params
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_client_query_w_timestamp_params(capsys: "pytest.CaptureFixture[str]") -> None:
+ client_query_w_timestamp_params.client_query_w_timestamp_params()
+ out, err = capsys.readouterr()
+ assert "2016, 12, 7, 9, 0" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_copy_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_copy_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..3953e31625abf803a50f22fb5fdd221863882c44
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_copy_table.py
@@ -0,0 +1,37 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import pytest
+
+from .. import copy_table
+
+if typing.TYPE_CHECKING:
+ from google.cloud import bigquery
+
+
+def test_copy_table(
+ capsys: "pytest.CaptureFixture[str]",
+ table_with_data_id: str,
+ random_table_id: str,
+ client: "bigquery.Client",
+) -> None:
+ copy_table.copy_table(table_with_data_id, random_table_id)
+ out, err = capsys.readouterr()
+ assert "A copy of the table created." in out
+ assert (
+ client.get_table(random_table_id).num_rows
+ == client.get_table(table_with_data_id).num_rows
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_copy_table_cmek.py b/testbed/googleapis__python-bigquery/samples/tests/test_copy_table_cmek.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cac15723aee961d2e53406f9ba0cd2e497808ab
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_copy_table_cmek.py
@@ -0,0 +1,28 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from .. import copy_table_cmek
+
+
+def test_copy_table_cmek(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+ table_with_data_id: str,
+ kms_key_name: str,
+) -> None:
+ copy_table_cmek.copy_table_cmek(random_table_id, table_with_data_id, kms_key_name)
+ out, err = capsys.readouterr()
+ assert "A copy of the table created" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_copy_table_multiple_source.py b/testbed/googleapis__python-bigquery/samples/tests/test_copy_table_multiple_source.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d7991c91af7f9923d4c5ee09433686eab549eac
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_copy_table_multiple_source.py
@@ -0,0 +1,61 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import typing
+
+from google.cloud import bigquery
+
+from .. import copy_table_multiple_source
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_copy_table_multiple_source(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+ random_dataset_id: str,
+ client: bigquery.Client,
+) -> None:
+ dataset = bigquery.Dataset(random_dataset_id)
+ dataset.location = "US"
+ dataset = client.create_dataset(dataset)
+ table_data = {"table1": b"Washington,WA", "table2": b"California,CA"}
+ for table_id, data in table_data.items():
+ table_ref = dataset.table(table_id)
+ job_config = bigquery.LoadJobConfig(
+ schema=[
+ bigquery.SchemaField("name", "STRING"),
+ bigquery.SchemaField("post_abbr", "STRING"),
+ ]
+ )
+ body = io.BytesIO(data)
+ client.load_table_from_file(
+ body, table_ref, location="US", job_config=job_config
+ ).result()
+
+ table_ids = [
+ "{}.table1".format(random_dataset_id),
+ "{}.table2".format(random_dataset_id),
+ ]
+
+ copy_table_multiple_source.copy_table_multiple_source(random_table_id, table_ids)
+ dest_table = client.get_table(random_table_id)
+ out, err = capsys.readouterr()
+ assert (
+ "The tables {} have been appended to {}".format(table_ids, random_table_id)
+ in out
+ )
+ assert dest_table.num_rows > 0
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_create_dataset.py b/testbed/googleapis__python-bigquery/samples/tests/test_create_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecf5ef129e7390dca81fc35947cf61d48fb7cf92
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_create_dataset.py
@@ -0,0 +1,28 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import create_dataset
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_dataset(
+ capsys: "pytest.CaptureFixture[str]", random_dataset_id: str
+) -> None:
+ create_dataset.create_dataset(random_dataset_id)
+ out, err = capsys.readouterr()
+ assert "Created dataset {}".format(random_dataset_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_create_job.py b/testbed/googleapis__python-bigquery/samples/tests/test_create_job.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e6621e91b59245c974ae30332b41465e567cbd9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_create_job.py
@@ -0,0 +1,30 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import create_job
+
+if typing.TYPE_CHECKING:
+ import pytest
+ from google.cloud import bigquery
+
+
+def test_create_job(
+ capsys: "pytest.CaptureFixture[str]", client: "bigquery.Client"
+) -> None:
+ query_job = create_job.create_job()
+ client.cancel_job(query_job.job_id, location=query_job.location)
+ out, err = capsys.readouterr()
+ assert "Started job: {}".format(query_job.job_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_create_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_create_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..98a0fa936ed18862875c72f5521ef15823dcdefd
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_create_table.py
@@ -0,0 +1,28 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import create_table
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_table(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ create_table.create_table(random_table_id)
+ out, err = capsys.readouterr()
+ assert "Created table {}".format(random_table_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_create_table_clustered.py b/testbed/googleapis__python-bigquery/samples/tests/test_create_table_clustered.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3e483441354f5cc52412dc9d6d7b6712d0db359
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_create_table_clustered.py
@@ -0,0 +1,29 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import create_table_clustered
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_table_clustered(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ table = create_table_clustered.create_table_clustered(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Created clustered table {}".format(random_table_id) in out
+ assert table.clustering_fields == ["city", "zipcode"]
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_create_table_range_partitioned.py b/testbed/googleapis__python-bigquery/samples/tests/test_create_table_range_partitioned.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c06b66fe18a36db4d36a0c5b7772ebe9277d966
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_create_table_range_partitioned.py
@@ -0,0 +1,34 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import create_table_range_partitioned
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_table_range_partitioned(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ table = create_table_range_partitioned.create_table_range_partitioned(
+ random_table_id
+ )
+ out, _ = capsys.readouterr()
+ assert "Created table {}".format(random_table_id) in out
+ assert table.range_partitioning.field == "zipcode"
+ assert table.range_partitioning.range_.start == 0
+ assert table.range_partitioning.range_.end == 100000
+ assert table.range_partitioning.range_.interval == 10
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_dataset_exists.py b/testbed/googleapis__python-bigquery/samples/tests/test_dataset_exists.py
new file mode 100644
index 0000000000000000000000000000000000000000..744122e370cf22d3278da92c5037598b09743ea9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_dataset_exists.py
@@ -0,0 +1,37 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+from .. import dataset_exists
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_dataset_exists(
+ capsys: "pytest.CaptureFixture[str]",
+ random_dataset_id: str,
+ client: bigquery.Client,
+) -> None:
+ dataset_exists.dataset_exists(random_dataset_id)
+ out, err = capsys.readouterr()
+ assert "Dataset {} is not found".format(random_dataset_id) in out
+ dataset = bigquery.Dataset(random_dataset_id)
+ dataset = client.create_dataset(dataset)
+ dataset_exists.dataset_exists(random_dataset_id)
+ out, err = capsys.readouterr()
+ assert "Dataset {} already exists".format(random_dataset_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_dataset_label_samples.py b/testbed/googleapis__python-bigquery/samples/tests/test_dataset_label_samples.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec9ff922847d09f46c87b8546c0dacab72cab078
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_dataset_label_samples.py
@@ -0,0 +1,39 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import delete_dataset_labels
+from .. import get_dataset_labels
+from .. import label_dataset
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_dataset_label_samples(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str
+) -> None:
+ label_dataset.label_dataset(dataset_id)
+ out, err = capsys.readouterr()
+ assert "Labels added to {}".format(dataset_id) in out
+
+ get_dataset_labels.get_dataset_labels(dataset_id)
+ out, err = capsys.readouterr()
+ assert "color: green" in out
+
+ dataset = delete_dataset_labels.delete_dataset_labels(dataset_id)
+ out, err = capsys.readouterr()
+ assert "Labels deleted from {}".format(dataset_id) in out
+ assert dataset.labels.get("color") is None
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_delete_dataset.py b/testbed/googleapis__python-bigquery/samples/tests/test_delete_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2a77c475246be8333edee818dee96db1d02316a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_delete_dataset.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import delete_dataset
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_delete_dataset(capsys: "pytest.CaptureFixture[str]", dataset_id: str) -> None:
+ delete_dataset.delete_dataset(dataset_id)
+ out, err = capsys.readouterr()
+ assert "Deleted dataset '{}'.".format(dataset_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_delete_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_delete_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ba5622e8618a5ee910fc6b8a3d79f25ca7c7c32
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_delete_table.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import delete_table
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_delete_table(capsys: "pytest.CaptureFixture[str]", table_id: str) -> None:
+ delete_table.delete_table(table_id)
+ out, err = capsys.readouterr()
+ assert "Deleted table '{}'.".format(table_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_download_public_data.py b/testbed/googleapis__python-bigquery/samples/tests/test_download_public_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..02c2c6f9cbb513961d8387d21c36708f423eb2d7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_download_public_data.py
@@ -0,0 +1,40 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import pytest
+
+from .. import download_public_data
+
+pytest.importorskip("google.cloud.bigquery_storage_v1")
+
+
+def test_download_public_data(
+ caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture[str]
+) -> None:
+ # Enable debug-level logging to verify the BigQuery Storage API is used.
+ caplog.set_level(logging.DEBUG)
+
+ download_public_data.download_public_data()
+ out, _ = capsys.readouterr()
+ assert "year" in out
+ assert "gender" in out
+ assert "name" in out
+
+ assert any(
+ "Started reading table 'bigquery-public-data.usa_names.usa_1910_current' with BQ Storage API session"
+ in message
+ for message in caplog.messages
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_get_dataset.py b/testbed/googleapis__python-bigquery/samples/tests/test_get_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..07c7a28b763d2409e7da292dc463f268390383b7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_get_dataset.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import get_dataset
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_get_dataset(capsys: "pytest.CaptureFixture[str]", dataset_id: str) -> None:
+ get_dataset.get_dataset(dataset_id)
+ out, err = capsys.readouterr()
+ assert dataset_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_get_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_get_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..edf09762d985b7b5b0f5f91f2829e2c2df65e99a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_get_table.py
@@ -0,0 +1,43 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+from .. import get_table
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_get_table(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str, client: bigquery.Client
+) -> None:
+ schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+
+ table = bigquery.Table(random_table_id, schema)
+ table.description = "Sample Table"
+ table = client.create_table(table)
+
+ get_table.get_table(random_table_id)
+ out, err = capsys.readouterr()
+ assert "Got table '{}'.".format(random_table_id) in out
+ assert "full_name" in out
+ assert "Table description: Sample Table" in out
+ assert "Table has 0 rows" in out
+ client.delete_table(table, not_found_ok=True)
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_list_datasets.py b/testbed/googleapis__python-bigquery/samples/tests/test_list_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..f51fe18f1ba3d60150a59d04f739d00c38c89d65
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_list_datasets.py
@@ -0,0 +1,29 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import list_datasets
+
+if typing.TYPE_CHECKING:
+ import pytest
+ from google.cloud import bigquery
+
+
+def test_list_datasets(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str, client: "bigquery.Client"
+) -> None:
+ list_datasets.list_datasets()
+ out, err = capsys.readouterr()
+ assert "Datasets in project {}:".format(client.project) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_list_datasets_by_label.py b/testbed/googleapis__python-bigquery/samples/tests/test_list_datasets_by_label.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee6b9a999065301b0f31407e9f47a138877da0e6
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_list_datasets_by_label.py
@@ -0,0 +1,32 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import list_datasets_by_label
+
+if typing.TYPE_CHECKING:
+ import pytest
+ from google.cloud import bigquery
+
+
+def test_list_datasets_by_label(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str, client: "bigquery.Client"
+) -> None:
+ dataset = client.get_dataset(dataset_id)
+ dataset.labels = {"color": "green"}
+ dataset = client.update_dataset(dataset, ["labels"])
+ list_datasets_by_label.list_datasets_by_label()
+ out, err = capsys.readouterr()
+ assert dataset_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_list_tables.py b/testbed/googleapis__python-bigquery/samples/tests/test_list_tables.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8a66b656eb9cc0eb57bdb92cc8605b368c57224
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_list_tables.py
@@ -0,0 +1,29 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import list_tables
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_list_tables(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str, table_id: str
+) -> None:
+ list_tables.list_tables(dataset_id)
+ out, err = capsys.readouterr()
+ assert "Tables contained in '{}':".format(dataset_id) in out
+ assert table_id in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_clustered.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_clustered.py
new file mode 100644
index 0000000000000000000000000000000000000000..89059271a28c622babdc9337be17142541a060d9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_clustered.py
@@ -0,0 +1,36 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_clustered
+
+if typing.TYPE_CHECKING:
+ import pytest
+ from google.cloud import bigquery
+
+
+def test_load_table_clustered(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+ client: "bigquery.Client",
+) -> None:
+ table = load_table_clustered.load_table_clustered(random_table_id)
+
+ out, _ = capsys.readouterr()
+ assert "rows and 4 columns" in out
+
+ rows = list(client.list_rows(table)) # Make an API request.
+ assert len(rows) > 0
+ assert table.clustering_fields == ["origin", "destination"]
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_dataframe.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_dataframe.py
new file mode 100644
index 0000000000000000000000000000000000000000..4aa872fa42bfa9fd6e47ec79051f804eb8aaaa88
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_dataframe.py
@@ -0,0 +1,80 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+import pytest
+
+from .. import load_table_dataframe
+
+if typing.TYPE_CHECKING:
+ from google.cloud import bigquery
+
+
+pandas = pytest.importorskip("pandas")
+pyarrow = pytest.importorskip("pyarrow")
+
+
+def test_load_table_dataframe(
+ capsys: pytest.CaptureFixture[str],
+ client: "bigquery.Client",
+ random_table_id: str,
+) -> None:
+ table = load_table_dataframe.load_table_dataframe(random_table_id)
+ out, _ = capsys.readouterr()
+ expected_column_names = [
+ "wikidata_id",
+ "title",
+ "release_year",
+ "length_minutes",
+ "release_date",
+ "dvd_release",
+ ]
+ assert "Loaded 4 rows and {} columns".format(len(expected_column_names)) in out
+
+ column_names = [field.name for field in table.schema]
+ assert column_names == expected_column_names
+ column_types = [field.field_type for field in table.schema]
+ assert column_types == [
+ "STRING",
+ "STRING",
+ "INTEGER",
+ "FLOAT",
+ "TIMESTAMP",
+ "DATETIME",
+ ]
+
+ df = client.list_rows(table).to_dataframe()
+ df.sort_values("release_year", inplace=True)
+ assert df["title"].tolist() == [
+ "And Now for Something Completely Different",
+ "Monty Python and the Holy Grail",
+ "Life of Brian",
+ "The Meaning of Life",
+ ]
+ assert df["release_year"].tolist() == [1971, 1975, 1979, 1983]
+ assert df["length_minutes"].tolist() == [88.0, 91.5, 94.25, 112.5]
+ assert df["release_date"].tolist() == [
+ pandas.Timestamp("1971-09-28T22:59:07+00:00"),
+ pandas.Timestamp("1975-04-09T22:59:02+00:00"),
+ pandas.Timestamp("1979-08-18T03:59:05+00:00"),
+ pandas.Timestamp("1983-05-09T11:00:00+00:00"),
+ ]
+ assert df["dvd_release"].tolist() == [
+ pandas.Timestamp("2003-10-22T10:00:00"),
+ pandas.Timestamp("2002-07-16T09:00:00"),
+ pandas.Timestamp("2008-01-14T08:00:00"),
+ pandas.Timestamp("2002-01-22T07:00:00"),
+ ]
+ assert df["wikidata_id"].tolist() == ["Q16403", "Q25043", "Q24953", "Q24980"]
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_file.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..95b06c7f6fd771631a334e266aae650a43e6ce93
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_file.py
@@ -0,0 +1,44 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import typing
+
+from google.cloud import bigquery
+
+from .. import load_table_file
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_file(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str, client: bigquery.Client
+) -> None:
+ samples_test_dir = os.path.abspath(os.path.dirname(__file__))
+ file_path = os.path.join(
+ samples_test_dir, "..", "..", "tests", "data", "people.csv"
+ )
+ table = load_table_file.load_table_file(file_path, random_table_id)
+
+ out, _ = capsys.readouterr()
+ assert "Loaded 2 rows and 2 columns" in out
+
+ rows = list(client.list_rows(table)) # Make an API request.
+ assert len(rows) == 2
+ # Order is not preserved, so compare individually
+ row1 = bigquery.Row(("Wylma Phlyntstone", 29), {"full_name": 0, "age": 1})
+ assert row1 in rows
+ row2 = bigquery.Row(("Phred Phlyntstone", 32), {"full_name": 0, "age": 1})
+ assert row2 in rows
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_autodetect_csv.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_autodetect_csv.py
new file mode 100644
index 0000000000000000000000000000000000000000..46b5937139ebf5c25ca9a2ff51910d1e636c0bae
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_autodetect_csv.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_autodetect_csv
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_autodetect_csv(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_autodetect_csv.load_table_uri_autodetect_csv(random_table_id)
+ out, err = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_autodetect_json.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_autodetect_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..43bf4e1b397f10a51ba39e745c8c3bf5ec0bd414
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_autodetect_json.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_autodetect_json
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_autodetect_csv(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_autodetect_json.load_table_uri_autodetect_json(random_table_id)
+ out, err = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_avro.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_avro.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0be44aca76a116d1979fe65e276fe507a204c45
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_avro.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_avro
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_avro(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_avro.load_table_uri_avro(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_cmek.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_cmek.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ae8689f975d61afb277bdf29d87f23a1403ff12
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_cmek.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_cmek
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_cmek(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str, kms_key_name: str
+) -> None:
+ load_table_uri_cmek.load_table_uri_cmek(random_table_id, kms_key_name)
+ out, _ = capsys.readouterr()
+ assert "A table loaded with encryption configuration key" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_csv.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_csv.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b4c733e8f1d447f5d0f26d7a70b909a2fa3087f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_csv.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_csv
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_csv(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_csv.load_table_uri_csv(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_json.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..751c3867acf26e6338d351f0bd349729e428f28d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_json.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_json
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_json(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_json.load_table_uri_json(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_orc.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_orc.py
new file mode 100644
index 0000000000000000000000000000000000000000..23d8288b731070d059fdddeae9f32ec7505b54ad
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_orc.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_orc
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_orc(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_orc.load_table_uri_orc(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_parquet.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_parquet.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee7682388cc77ee6de61f1c9ffeabab7dc631f19
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_parquet.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_parquet
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_json(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_parquet.load_table_uri_parquet(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_avro.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_avro.py
new file mode 100644
index 0000000000000000000000000000000000000000..19b62fe7e063028446aaa49271a41d3b6c2357f2
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_avro.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_truncate_avro
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_truncate_avro(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_truncate_avro.load_table_uri_truncate_avro(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_csv.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_csv.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bc467cd04fadb337ed0c56e210c36279b047c2a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_csv.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_truncate_csv
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_truncate_csv(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_truncate_csv.load_table_uri_truncate_csv(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_json.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdf96454be57795188f7409b0467621cf4c15d9a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_json.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_truncate_json
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_truncate_json(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_truncate_json.load_table_uri_truncate_json(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_orc.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_orc.py
new file mode 100644
index 0000000000000000000000000000000000000000..041923da911cae10fc29b34bab30bbf4c20f14a5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_orc.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_truncate_orc
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_truncate_orc(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_truncate_orc.load_table_uri_truncate_orc(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_parquet.py b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_parquet.py
new file mode 100644
index 0000000000000000000000000000000000000000..2139f316ff835444655b0f020986b8fdfd7d7ed9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_load_table_uri_truncate_parquet.py
@@ -0,0 +1,28 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import load_table_uri_truncate_parquet
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_load_table_uri_truncate_parquet(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str
+) -> None:
+ load_table_uri_truncate_parquet.load_table_uri_truncate_parquet(random_table_id)
+ out, _ = capsys.readouterr()
+ assert "Loaded 50 rows." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_model_samples.py b/testbed/googleapis__python-bigquery/samples/tests/test_model_samples.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed82dd678c2f104779586f523aeefb3e7b00a9f1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_model_samples.py
@@ -0,0 +1,46 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import delete_model
+from .. import get_model
+from .. import list_models
+from .. import update_model
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_model_samples(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str, model_id: str
+) -> None:
+ """Since creating a model is a long operation, test all model samples in
+ the same test, following a typical end-to-end flow.
+ """
+ get_model.get_model(model_id)
+ out, err = capsys.readouterr()
+ assert model_id in out
+
+ list_models.list_models(dataset_id)
+ out, err = capsys.readouterr()
+ assert "Models contained in '{}':".format(dataset_id) in out
+
+ update_model.update_model(model_id)
+ out, err = capsys.readouterr()
+ assert "This model was modified from a Python program." in out
+
+ delete_model.delete_model(model_id)
+ out, err = capsys.readouterr()
+ assert "Deleted model '{}'.".format(model_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_query_external_gcs_temporary_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_query_external_gcs_temporary_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..75b3ce6d806b02af79db6ef3efcf0abb28fb4b23
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_query_external_gcs_temporary_table.py
@@ -0,0 +1,28 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import query_external_gcs_temporary_table
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_query_external_gcs_temporary_table(
+ capsys: "pytest.CaptureFixture[str]",
+) -> None:
+ query_external_gcs_temporary_table.query_external_gcs_temporary_table()
+ out, err = capsys.readouterr()
+ assert "There are 4 states with names starting with W." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_query_external_sheets_permanent_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_query_external_sheets_permanent_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a4c21330a5c5adf8604da502e8e11fec8d6863d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_query_external_sheets_permanent_table.py
@@ -0,0 +1,30 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import query_external_sheets_permanent_table
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_query_external_sheets_permanent_table(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str
+) -> None:
+ query_external_sheets_permanent_table.query_external_sheets_permanent_table(
+ dataset_id
+ )
+ out, err = capsys.readouterr()
+ assert "There are 2 states with names starting with W in the selected range." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_query_external_sheets_temporary_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_query_external_sheets_temporary_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ada205663ce9e0701027c0c574b21245c9e7640
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_query_external_sheets_temporary_table.py
@@ -0,0 +1,28 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import query_external_sheets_temporary_table
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_query_external_sheets_temporary_table(
+ capsys: "pytest.CaptureFixture[str]",
+) -> None:
+ query_external_sheets_temporary_table.query_external_sheets_temporary_table()
+ out, err = capsys.readouterr()
+ assert "There are 2 states with names starting with W in the selected range." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_query_no_cache.py b/testbed/googleapis__python-bigquery/samples/tests/test_query_no_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..fffa5dac7cad690ccaec1b2fc2e1a3bbb736da62
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_query_no_cache.py
@@ -0,0 +1,27 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import typing
+
+from .. import query_no_cache
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_query_no_cache(capsys: "pytest.CaptureFixture[str]") -> None:
+ query_no_cache.query_no_cache()
+ out, err = capsys.readouterr()
+ assert re.search(r"(Row[\w(){}:', ]+)$", out)
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_query_pagination.py b/testbed/googleapis__python-bigquery/samples/tests/test_query_pagination.py
new file mode 100644
index 0000000000000000000000000000000000000000..adc9463999e5b443954cfaf9782f2cd3dde4c50c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_query_pagination.py
@@ -0,0 +1,27 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import query_pagination
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_query_pagination(capsys: "pytest.CaptureFixture[str]") -> None:
+ query_pagination.query_pagination()
+ out, _ = capsys.readouterr()
+ assert "The query data:" in out
+ assert "name=James, count=4942431" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_query_script.py b/testbed/googleapis__python-bigquery/samples/tests/test_query_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..50c9730246b8462f01ec193adce1f38ce283dd25
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_query_script.py
@@ -0,0 +1,32 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import query_script
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_query_script(capsys: "pytest.CaptureFixture[str]") -> None:
+ query_script.query_script()
+ out, _ = capsys.readouterr()
+ assert "Script created 2 child jobs." in out
+ assert (
+ "53 of the top 100 names from year 2000 also appear in Shakespeare's works."
+ in out
+ )
+ assert "produced 53 row(s)" in out
+ assert "produced 1 row(s)" in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_query_to_arrow.py b/testbed/googleapis__python-bigquery/samples/tests/test_query_to_arrow.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fc8571e9ecbc4bb2735048612664b9358b8e993
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_query_to_arrow.py
@@ -0,0 +1,29 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from .. import query_to_arrow
+
+pyarrow = pytest.importorskip("pyarrow")
+
+
+def test_query_to_arrow(capsys: "pytest.CaptureFixture[str]") -> None:
+ arrow_table = query_to_arrow.query_to_arrow()
+ out, err = capsys.readouterr()
+ assert "Downloaded 8 rows, 2 columns." in out
+ arrow_schema = arrow_table.schema
+ assert arrow_schema.names == ["race", "participant"]
+ assert pyarrow.types.is_string(arrow_schema.types[0])
+ assert pyarrow.types.is_struct(arrow_schema.types[1])
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_routine_samples.py b/testbed/googleapis__python-bigquery/samples/tests/test_routine_samples.py
new file mode 100644
index 0000000000000000000000000000000000000000..57bca074a7cd453513520a9873f2e36649d590ba
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_routine_samples.py
@@ -0,0 +1,112 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_create_routine(
+ capsys: "pytest.CaptureFixture[str]", random_routine_id: str
+) -> None:
+ from .. import create_routine
+
+ create_routine.create_routine(random_routine_id)
+ out, err = capsys.readouterr()
+ assert "Created routine {}".format(random_routine_id) in out
+
+
+def test_create_routine_ddl(
+ capsys: "pytest.CaptureFixture[str]",
+ random_routine_id: str,
+ client: bigquery.Client,
+) -> None:
+ from .. import create_routine_ddl
+
+ create_routine_ddl.create_routine_ddl(random_routine_id)
+ routine = client.get_routine(random_routine_id)
+ out, err = capsys.readouterr()
+
+ assert "Created routine {}".format(random_routine_id) in out
+ assert routine.type_ == "SCALAR_FUNCTION"
+ assert routine.language == "SQL"
+ expected_arguments = [
+ bigquery.RoutineArgument(
+ name="arr",
+ data_type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.ARRAY,
+ array_element_type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.STRUCT,
+ struct_type=bigquery.StandardSqlStructType(
+ fields=[
+ bigquery.StandardSqlField(
+ name="name",
+ type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.STRING
+ ),
+ ),
+ bigquery.StandardSqlField(
+ name="val",
+ type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ ),
+ ),
+ ]
+ ),
+ ),
+ ),
+ )
+ ]
+ assert routine.arguments == expected_arguments
+
+
+def test_list_routines(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str, routine_id: str
+) -> None:
+ from .. import list_routines
+
+ list_routines.list_routines(dataset_id)
+ out, err = capsys.readouterr()
+ assert "Routines contained in dataset {}:".format(dataset_id) in out
+ assert routine_id in out
+
+
+def test_get_routine(capsys: "pytest.CaptureFixture[str]", routine_id: str) -> None:
+ from .. import get_routine
+
+ get_routine.get_routine(routine_id)
+ out, err = capsys.readouterr()
+ assert "Routine '{}':".format(routine_id) in out
+ assert "Type: 'SCALAR_FUNCTION'" in out
+ assert "Language: 'SQL'" in out
+ assert "Name: 'x'" in out
+ assert "type_kind=" in out
+
+
+def test_delete_routine(capsys: "pytest.CaptureFixture[str]", routine_id: str) -> None:
+ from .. import delete_routine
+
+ delete_routine.delete_routine(routine_id)
+ out, err = capsys.readouterr()
+ assert "Deleted routine {}.".format(routine_id) in out
+
+
+def test_update_routine(routine_id: str) -> None:
+ from .. import update_routine
+
+ routine = update_routine.update_routine(routine_id)
+ assert routine.body == "x * 4"
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_table_exists.py b/testbed/googleapis__python-bigquery/samples/tests/test_table_exists.py
new file mode 100644
index 0000000000000000000000000000000000000000..35cf61cc89514fbc7ba693b87eff97a405225838
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_table_exists.py
@@ -0,0 +1,35 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+from .. import table_exists
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_table_exists(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str, client: bigquery.Client
+) -> None:
+ table_exists.table_exists(random_table_id)
+ out, err = capsys.readouterr()
+ assert "Table {} is not found.".format(random_table_id) in out
+ table = bigquery.Table(random_table_id)
+ table = client.create_table(table)
+ table_exists.table_exists(random_table_id)
+ out, err = capsys.readouterr()
+ assert "Table {} already exists.".format(random_table_id) in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_table_insert_rows.py b/testbed/googleapis__python-bigquery/samples/tests/test_table_insert_rows.py
new file mode 100644
index 0000000000000000000000000000000000000000..13400d69cd5479a006c5700e45d1445ff1f226e0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_table_insert_rows.py
@@ -0,0 +1,40 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+from .. import table_insert_rows
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_table_insert_rows(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+ client: bigquery.Client,
+) -> None:
+ schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+
+ table = bigquery.Table(random_table_id, schema=schema)
+ table = client.create_table(table)
+
+ table_insert_rows.table_insert_rows(random_table_id)
+ out, err = capsys.readouterr()
+ assert "New rows have been added." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_table_insert_rows_explicit_none_insert_ids.py b/testbed/googleapis__python-bigquery/samples/tests/test_table_insert_rows_explicit_none_insert_ids.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6bfbf392753a466083cece51d07c36bef981859
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_table_insert_rows_explicit_none_insert_ids.py
@@ -0,0 +1,38 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+from .. import table_insert_rows_explicit_none_insert_ids as mut
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_table_insert_rows_explicit_none_insert_ids(
+ capsys: "pytest.CaptureFixture[str]", random_table_id: str, client: bigquery.Client
+) -> None:
+ schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+
+ table = bigquery.Table(random_table_id, schema=schema)
+ table = client.create_table(table)
+
+ mut.table_insert_rows_explicit_none_insert_ids(random_table_id)
+ out, err = capsys.readouterr()
+ assert "New rows have been added." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_undelete_table.py b/testbed/googleapis__python-bigquery/samples/tests/test_undelete_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..08841ad729223361ad7789f586be98e84bb46803
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_undelete_table.py
@@ -0,0 +1,35 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import undelete_table
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_undelete_table(
+ capsys: "pytest.CaptureFixture[str]",
+ table_with_schema_id: str,
+ random_table_id: str,
+) -> None:
+ undelete_table.undelete_table(table_with_schema_id, random_table_id)
+ out, _ = capsys.readouterr()
+ assert (
+ "Copied data from deleted table {} to {}".format(
+ table_with_schema_id, random_table_id
+ )
+ in out
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_access.py b/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_access.py
new file mode 100644
index 0000000000000000000000000000000000000000..f17634fb0a21e776bb4299c515330416d32ab232
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_access.py
@@ -0,0 +1,30 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import update_dataset_access
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_update_dataset_access(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str
+) -> None:
+ update_dataset_access.update_dataset_access(dataset_id)
+ out, err = capsys.readouterr()
+ assert (
+ "Updated dataset '{}' with modified user permissions.".format(dataset_id) in out
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_default_partition_expiration.py b/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_default_partition_expiration.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dd0d9296c8ed962d2fe3601b055b93d109f8a39
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_default_partition_expiration.py
@@ -0,0 +1,37 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import update_dataset_default_partition_expiration
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_update_dataset_default_partition_expiration(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str
+) -> None:
+ ninety_days_ms = 90 * 24 * 60 * 60 * 1000 # in milliseconds
+
+ update_dataset_default_partition_expiration.update_dataset_default_partition_expiration(
+ dataset_id
+ )
+ out, _ = capsys.readouterr()
+ assert (
+ "Updated dataset {} with new default partition expiration {}".format(
+ dataset_id, ninety_days_ms
+ )
+ in out
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_default_table_expiration.py b/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_default_table_expiration.py
new file mode 100644
index 0000000000000000000000000000000000000000..24df5446d712e0c7a4926ad24794b200abb9cce4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_default_table_expiration.py
@@ -0,0 +1,35 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import update_dataset_default_table_expiration
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_update_dataset_default_table_expiration(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str
+) -> None:
+ one_day_ms = 24 * 60 * 60 * 1000 # in milliseconds
+
+ update_dataset_default_table_expiration.update_dataset_default_table_expiration(
+ dataset_id
+ )
+ out, err = capsys.readouterr()
+ assert (
+ "Updated dataset {} with new expiration {}".format(dataset_id, one_day_ms)
+ in out
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_description.py b/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_description.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d76337dccc2c955722c65dc4af1676f2771923a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_update_dataset_description.py
@@ -0,0 +1,28 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from .. import update_dataset_description
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_update_dataset_description(
+ capsys: "pytest.CaptureFixture[str]", dataset_id: str
+) -> None:
+ update_dataset_description.update_dataset_description(dataset_id)
+ out, err = capsys.readouterr()
+ assert "Updated description." in out
diff --git a/testbed/googleapis__python-bigquery/samples/tests/test_update_table_require_partition_filter.py b/testbed/googleapis__python-bigquery/samples/tests/test_update_table_require_partition_filter.py
new file mode 100644
index 0000000000000000000000000000000000000000..c86a227691b48c87aad13bf7747a901d42f2df74
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/tests/test_update_table_require_partition_filter.py
@@ -0,0 +1,43 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from google.cloud import bigquery
+
+from .. import update_table_require_partition_filter
+
+if typing.TYPE_CHECKING:
+ import pytest
+
+
+def test_update_table_require_partition_filter(
+ capsys: "pytest.CaptureFixture[str]",
+ random_table_id: str,
+ client: bigquery.Client,
+) -> None:
+ # Make a partitioned table.
+ schema = [bigquery.SchemaField("transaction_timestamp", "TIMESTAMP")]
+ table = bigquery.Table(random_table_id, schema=schema)
+ table.time_partitioning = bigquery.TimePartitioning(field="transaction_timestamp")
+ table = client.create_table(table)
+
+ update_table_require_partition_filter.update_table_require_partition_filter(
+ random_table_id
+ )
+ out, _ = capsys.readouterr()
+ assert (
+ "Updated table '{}' with require_partition_filter=True".format(random_table_id)
+ in out
+ )
diff --git a/testbed/googleapis__python-bigquery/samples/update_dataset_access.py b/testbed/googleapis__python-bigquery/samples/update_dataset_access.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fb21aff27b9489f93458592b2aa84c46ef5afad
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/samples/update_dataset_access.py
@@ -0,0 +1,44 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def update_dataset_access(dataset_id: str) -> None:
+ # [START bigquery_update_dataset_access]
+ from google.cloud import bigquery
+
+ # Construct a BigQuery client object.
+ client = bigquery.Client()
+
+ # TODO(developer): Set dataset_id to the ID of the dataset to fetch.
+ # dataset_id = 'your-project.your_dataset'
+
+ dataset = client.get_dataset(dataset_id) # Make an API request.
+
+ entry = bigquery.AccessEntry(
+ role="READER",
+ entity_type="groupByEmail",
+ entity_id="cloud-developer-relations@google.com",
+ )
+
+ entries = list(dataset.access_entries)
+ entries.append(entry)
+ dataset.access_entries = entries
+
+ dataset = client.update_dataset(dataset, ["access_entries"]) # Make an API request.
+
+ full_dataset_id = "{}.{}".format(dataset.project, dataset.dataset_id)
+ print(
+ "Updated dataset '{}' with modified user permissions.".format(full_dataset_id)
+ )
+ # [END bigquery_update_dataset_access]
diff --git a/testbed/googleapis__python-bigquery/scripts/decrypt-secrets.sh b/testbed/googleapis__python-bigquery/scripts/decrypt-secrets.sh
new file mode 100644
index 0000000000000000000000000000000000000000..120b0ddc43642d9be721745b311302ef25aebc41
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/scripts/decrypt-secrets.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Copyright 2024 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ROOT=$( dirname "$DIR" )
+
+# Work from the project root.
+cd $ROOT
+
+# Prevent it from overriding files.
+# We recommend that sample authors use their own service account files and cloud project.
+# In that case, they are supposed to prepare these files by themselves.
+if [[ -f "testing/test-env.sh" ]] || \
+ [[ -f "testing/service-account.json" ]] || \
+ [[ -f "testing/client-secrets.json" ]]; then
+ echo "One or more target files exist, aborting."
+ exit 1
+fi
+
+# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources.
+PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}"
+
+gcloud secrets versions access latest --secret="python-docs-samples-test-env" \
+ --project="${PROJECT_ID}" \
+ > testing/test-env.sh
+gcloud secrets versions access latest \
+ --secret="python-docs-samples-service-account" \
+ --project="${PROJECT_ID}" \
+ > testing/service-account.json
+gcloud secrets versions access latest \
+ --secret="python-docs-samples-client-secrets" \
+ --project="${PROJECT_ID}" \
+ > testing/client-secrets.json
diff --git a/testbed/googleapis__python-bigquery/scripts/readme-gen/templates/auth_api_key.tmpl.rst b/testbed/googleapis__python-bigquery/scripts/readme-gen/templates/auth_api_key.tmpl.rst
new file mode 100644
index 0000000000000000000000000000000000000000..11957ce2714a5749fa07c793fbf3e8222d7479c6
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/scripts/readme-gen/templates/auth_api_key.tmpl.rst
@@ -0,0 +1,14 @@
+Authentication
+++++++++++++++
+
+Authentication for this service is done via an `API Key`_. To obtain an API
+Key:
+
+1. Open the `Cloud Platform Console`_
+2. Make sure that billing is enabled for your project.
+3. From the **Credentials** page, create a new **API Key** or use an existing
+ one for your project.
+
+.. _API Key:
+ https://developers.google.com/api-client-library/python/guide/aaa_apikeys
+.. _Cloud Console: https://console.cloud.google.com/project?_
diff --git a/testbed/googleapis__python-bigquery/scripts/readme-gen/templates/install_deps.tmpl.rst b/testbed/googleapis__python-bigquery/scripts/readme-gen/templates/install_deps.tmpl.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6f069c6c87a55d7aa0aafa62df5b4e76fcacd8c4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/scripts/readme-gen/templates/install_deps.tmpl.rst
@@ -0,0 +1,29 @@
+Install Dependencies
+++++++++++++++++++++
+
+#. Clone python-docs-samples and change directory to the sample directory you want to use.
+
+ .. code-block:: bash
+
+ $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git
+
+#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions.
+
+ .. _Python Development Environment Setup Guide:
+ https://cloud.google.com/python/setup
+
+#. Create a virtualenv. Samples are compatible with Python 3.7+.
+
+ .. code-block:: bash
+
+ $ virtualenv env
+ $ source env/bin/activate
+
+#. Install the dependencies needed to run the samples.
+
+ .. code-block:: bash
+
+ $ pip install -r requirements.txt
+
+.. _pip: https://pip.pypa.io/
+.. _virtualenv: https://virtualenv.pypa.io/
diff --git a/testbed/googleapis__python-bigquery/scripts/readme-gen/templates/install_portaudio.tmpl.rst b/testbed/googleapis__python-bigquery/scripts/readme-gen/templates/install_portaudio.tmpl.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5ea33d18c00cfa606b49e4a3be879925d7896885
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/scripts/readme-gen/templates/install_portaudio.tmpl.rst
@@ -0,0 +1,35 @@
+Install PortAudio
++++++++++++++++++
+
+Install `PortAudio`_. This is required by the `PyAudio`_ library to stream
+audio from your computer's microphone. PyAudio depends on PortAudio for cross-platform compatibility, and is installed differently depending on the
+platform.
+
+* For Mac OS X, you can use `Homebrew`_::
+
+ brew install portaudio
+
+ **Note**: if you encounter an error when running `pip install` that indicates
+ it can't find `portaudio.h`, try running `pip install` with the following
+ flags::
+
+ pip install --global-option='build_ext' \
+ --global-option='-I/usr/local/include' \
+ --global-option='-L/usr/local/lib' \
+ pyaudio
+
+* For Debian / Ubuntu Linux::
+
+ apt-get install portaudio19-dev python-all-dev
+
+* Windows may work without having to install PortAudio explicitly (it will get
+ installed with PyAudio).
+
+For more details, see the `PyAudio installation`_ page.
+
+
+.. _PyAudio: https://people.csail.mit.edu/hubert/pyaudio/
+.. _PortAudio: http://www.portaudio.com/
+.. _PyAudio installation:
+ https://people.csail.mit.edu/hubert/pyaudio/#downloads
+.. _Homebrew: http://brew.sh
diff --git a/testbed/googleapis__python-bigquery/setup.cfg b/testbed/googleapis__python-bigquery/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..37b63aa49e3a4b2c5b6530b92c8973dffd4803d2
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/setup.cfg
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+[bdist_wheel]
+universal = 1
+
+[pytype]
+python_version = 3.8
+inputs =
+ google/cloud/
+exclude =
+ tests/
+ google/cloud/bigquery_v2/ # Legacy proto-based types.
+output = .pytype/
+disable =
+ # There's some issue with finding some pyi files, thus disabling.
+ # The issue https://github.com/google/pytype/issues/150 is closed, but the
+ # error still occurs for some reason.
+ pyi-error
diff --git a/testbed/googleapis__python-bigquery/setup.py b/testbed/googleapis__python-bigquery/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..617685543ca9b7d0d2e398eb4fbff4ee27c47e5d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/setup.py
@@ -0,0 +1,142 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import os
+
+import setuptools
+
+
+# Package metadata.
+
+name = "google-cloud-bigquery"
+description = "Google BigQuery API client library"
+
+# Should be one of:
+# 'Development Status :: 3 - Alpha'
+# 'Development Status :: 4 - Beta'
+# 'Development Status :: 5 - Production/Stable'
+release_status = "Development Status :: 5 - Production/Stable"
+dependencies = [
+ "google-api-core[grpc] >= 2.11.1, <3.0.0dev",
+ "google-auth >= 2.14.1, <3.0.0dev",
+ "google-cloud-core >= 2.4.1, <3.0.0dev",
+ "google-resumable-media >= 2.0.0, < 3.0dev",
+ "packaging >= 20.0.0",
+ "python-dateutil >= 2.7.3, <3.0dev",
+ "requests >= 2.21.0, < 3.0.0dev",
+]
+pyarrow_dependency = "pyarrow >= 3.0.0"
+extras = {
+ # bqstorage had a period where it was a required dependency, and has been
+ # moved back to optional due to bloat. See
+ # https://github.com/googleapis/python-bigquery/issues/1196 for more background.
+ "bqstorage": [
+ "google-cloud-bigquery-storage >= 2.6.0, <3.0.0dev",
+ # Due to an issue in pip's dependency resolver, the `grpc` extra is not
+ # installed, even though `google-cloud-bigquery-storage` specifies it
+ # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
+ # See: https://github.com/googleapis/python-bigquery/issues/83 The
+ # grpc.Channel.close() method isn't added until 1.32.0.
+ # https://github.com/grpc/grpc/pull/15254
+ "grpcio >= 1.47.0, < 2.0dev",
+ "grpcio >= 1.49.1, < 2.0dev; python_version>='3.11'",
+ pyarrow_dependency,
+ ],
+ "pandas": [
+ "pandas>=1.1.0",
+ pyarrow_dependency,
+ "db-dtypes>=0.3.0,<2.0.0dev",
+ "importlib_metadata>=1.0.0; python_version<'3.8'",
+ ],
+ "ipywidgets": [
+ "ipywidgets>=7.7.0",
+ "ipykernel>=6.0.0",
+ ],
+ "geopandas": ["geopandas>=0.9.0, <1.0dev", "Shapely>=1.8.4, <3.0.0dev"],
+ "ipython": [
+ "bigquery-magics >= 0.1.0",
+ ],
+ "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
+ "opentelemetry": [
+ "opentelemetry-api >= 1.1.0",
+ "opentelemetry-sdk >= 1.1.0",
+ "opentelemetry-instrumentation >= 0.20b0",
+ ],
+ "bigquery_v2": [
+ "proto-plus >= 1.22.3, <2.0.0dev",
+ "protobuf>=3.20.2,<6.0.0dev,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", # For the legacy proto-based types.
+ ],
+}
+
+all_extras = []
+
+for extra in extras:
+ all_extras.extend(extras[extra])
+
+extras["all"] = all_extras
+
+# Setup boilerplate below this line.
+
+package_root = os.path.abspath(os.path.dirname(__file__))
+
+readme_filename = os.path.join(package_root, "README.rst")
+with io.open(readme_filename, encoding="utf-8") as readme_file:
+ readme = readme_file.read()
+
+version = {}
+with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
+ exec(fp.read(), version)
+version = version["__version__"]
+
+# Only include packages under the 'google' namespace. Do not include tests,
+# benchmarks, etc.
+packages = [
+ package
+ for package in setuptools.find_namespace_packages()
+ if package.startswith("google")
+]
+
+setuptools.setup(
+ name=name,
+ version=version,
+ description=description,
+ long_description=readme,
+ author="Google LLC",
+ author_email="googleapis-packages@google.com",
+ license="Apache 2.0",
+ url="https://github.com/googleapis/python-bigquery",
+ classifiers=[
+ release_status,
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Operating System :: OS Independent",
+ "Topic :: Internet",
+ ],
+ platforms="Posix; MacOS X; Windows",
+ packages=packages,
+ install_requires=dependencies,
+ extras_require=extras,
+ python_requires=">=3.7",
+ include_package_data=True,
+ zip_safe=False,
+)
diff --git a/testbed/googleapis__python-bigquery/testing/.gitignore b/testbed/googleapis__python-bigquery/testing/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b05fbd630881531732d3137a7ae6882b3881614e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/testing/.gitignore
@@ -0,0 +1,3 @@
+test-env.sh
+service-account.json
+client-secrets.json
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/testing/constraints-3.7.txt b/testbed/googleapis__python-bigquery/testing/constraints-3.7.txt
new file mode 100644
index 0000000000000000000000000000000000000000..55e63449f98bb96e1948c034ee89e755bf71aca4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/testing/constraints-3.7.txt
@@ -0,0 +1,36 @@
+# This constraints file is used to check that lower bounds
+# are correct in setup.py
+# List *all* library dependencies and extras in this file.
+# Pin the version to the lower bound.
+#
+# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
+# Then this file should have foo==1.14.0
+bigquery-magics==0.1.0
+db-dtypes==0.3.0
+geopandas==0.9.0
+google-api-core==2.11.1
+google-auth==2.14.1
+google-cloud-bigquery-storage==2.24.0
+google-cloud-core==2.4.1
+google-cloud-testutils==1.4.0
+google-crc32c==1.5.0
+google-resumable-media==2.0.0
+googleapis-common-protos==1.62.0
+grpcio==1.47.0
+grpcio-status==1.47.0
+ipywidgets==7.7.1
+ipython==7.23.1
+ipykernel==6.0.0
+opentelemetry-api==1.1.0
+opentelemetry-instrumentation==0.20b0
+opentelemetry-sdk==1.1.0
+packaging==20.0.0
+pandas==1.1.0
+proto-plus==1.22.3
+protobuf==3.20.2
+pyarrow==3.0.0
+python-dateutil==2.7.3
+requests==2.21.0
+Shapely==1.8.4
+six==1.13.0
+tqdm==4.7.4
diff --git a/testbed/googleapis__python-bigquery/testing/constraints-3.8.txt b/testbed/googleapis__python-bigquery/testing/constraints-3.8.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e5e73c5c7e2d1eefa37bd3998c3a3b9b317b4234
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/testing/constraints-3.8.txt
@@ -0,0 +1,2 @@
+grpcio==1.47.0
+pandas==1.2.0
diff --git a/testbed/googleapis__python-bigquery/testing/constraints-3.9.txt b/testbed/googleapis__python-bigquery/testing/constraints-3.9.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d4c302867578b8bc5291ee01c82c689bcec08c62
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/testing/constraints-3.9.txt
@@ -0,0 +1,8 @@
+# This constraints file is used to make sure that the latest dependency versions
+# we claim to support in setup.py are indeed installed in test sessions in the most
+# recent Python version supported (3.9 at the time of writing - 2021-05-05).
+#
+# NOTE: Not comprehensive yet, will eventually be maintained semi-automatically by
+# the renovate bot.
+grpcio==1.47.0
+pyarrow>=4.0.0
diff --git a/testbed/googleapis__python-bigquery/tests/__init__.py b/testbed/googleapis__python-bigquery/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8e1c3845db5b44e0d5727e3354929c81d631f15
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-bigquery/tests/data/characters.json b/testbed/googleapis__python-bigquery/tests/data/characters.json
new file mode 100644
index 0000000000000000000000000000000000000000..d3863681019641c5fa60abdb0f434f4cfb207a43
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/data/characters.json
@@ -0,0 +1,68 @@
+[
+ {
+ "Age" : "111",
+ "Spells" : [],
+ "Name" : "Bilbo",
+ "Weight" : 67.2,
+ "TeaTime" : "10:00:00",
+ "NextVacation" : "2017-09-22",
+ "FavoriteTime" : "2031-04-01T05:09:27",
+ "IsMagic" : false
+ },
+ {
+ "Age" : "1000",
+ "Name" : "Gandalf",
+ "Spells" : [
+ {
+ "Name" : "Skydragon",
+ "Properties" : [
+ {
+ "Power" : 1,
+ "Name" : "Flying"
+ },
+ {
+ "Name" : "Creature",
+ "Power" : 1
+ },
+ {
+ "Power" : 11,
+ "Name" : "Explodey"
+ }
+ ],
+ "LastUsed" : "2015-10-31 23:59:56 UTC",
+ "Icon" : "iVBORw0KGgoAAAANSUhEUgAAAB4AAAAgCAYAAAAFQMh/AAAAAXNSR0IArs4c6QAAA9lJREFUSA21lk9OVEEQxvsRDImoiMG9mLjjCG5mEg7gEfQGsIcF7p0EDsBBSJiNO7ZsFRZqosb/QkSj7fer7ur33sw8GDFUUq+7q6vqq6qu7pkQzqG4EeI521e7FePVgM9cGPYwhCi6UO8qFOK+YY+Br66ujsmmxb84Yzwp6zCsxjJfWVkxnMsEMGuWHZ9Wcz11cM48hkq0vLwc1tbW4mAwqDpcdIqnMmgF0JMv2CiGnZ2dcHR0FA4PD8Pe3t5U/tx6bCSlb+JT8XfxT3HsUek0Li0tRdjWl+z6iRF+FNA1hXPDQ/IMNyRg3s8bD/OaZS+VP+9cOLSa64cA34oXZWagDkRzAaJxXaE+ufc4rCN7LrazZ2+8+STtpAL8WYDvpTaHKlkB2iQARMvb2+H27m4YaL7zaDtUw1BZAASi6T8T2UZnPZV2pvnJfCH5p8bewcGB6TrIfz8wBZgHQ83kjpuj6RBYQpuo09Tvmpd7TPe+ktZN8cKwS92KWXGuaqWowlYEwthtMcWOZUNJc8at+zuF/Xkqo69baS7P+AvWjYwJ4jyHXXsEnd74ZO/Pq+uXUuv6WNlso6cvnDsZB1V/unJab3D1/KrJDw9NCM9wHf2FK2ejTKMejnBHfGtfH7LGGCdQDqaqJgfgzWjXK1nYV4jRbPGnxUT7cqUaZfJrVZeOm9QmB21L6xXgbu/ScsYusJFMoU0x2fsamRJOd6kOYDRLUxv94ENZe8+0gM+0dyz+KgU7X8rLHHCIOZyrna4y6ykIu0YCs02TBXmk3PZssmEgaTxTo83xjCIjoE21h0Yah3MrV4+9kR8MaabGze+9NEILGAFE5nMOiiA32KnAr/sb7tED3nzlzC4dB38WMC+EjaqHfqvUKHi2gJPdWQ6AbH8hgyQ7QY6jvjj3QZWvX6pUAtduTX5Dss96Q7NI9RQRJeeKvRFbt0v2gb1Gx/PooJsztn1c1DqpAU3Hde2dB2aEHBhjgOFjMeDvxLafjQ3YZQSgOcHJZX611H45sGLHWvYTz9hiURlpNoBZvxb/Ft9lAQ1DmBfUiR+j1hAPkMBTE9L9+zLva1QvGFHurRBaZ5xLVitoBviiRkD/sIMDztKA5FA0b9/0OclzO2/XAQymJ0TcghZwEo9/AX8gMeAJMOvIsWWt5bwCoiFhVSllrdH0t5Q1JHAFlKJNkvTVdn2GHb9KdmacMT+d/Os05imJUccRX2YuZ93Sxf0Ilc4DPDeAq5SAvFEAY94cQc6BA26dzb4HWAJI4DPmQE5KCVUyvb2FcDZem7JdT2ggKUP3xX6n9XNq1DpzSf4Cy4ZqSlmM8d8AAAAASUVORK5CYII=",
+ "DiscoveredBy" : "Firebreather"
+ }
+ ],
+ "NextVacation" : "2666-06-06",
+ "TeaTime" : "15:00:00",
+ "Weight" : 198.6,
+ "FavoriteTime" : "2001-12-19T23:59:59",
+ "FavoriteNumber" : "3.141592654",
+ "IsMagic" : true
+ },
+ {
+ "Weight" : 128.3,
+ "TeaTime" : "12:00:00",
+ "NextVacation" : "2017-03-14",
+ "IsMagic" : true,
+ "FavoriteTime" : "2000-10-31T23:27:46",
+ "Age" : "17",
+ "FavoriteNumber" : "13",
+ "Spells" : [
+ {
+ "LastUsed" : "2017-02-14 12:07:23 UTC",
+ "Properties" : [
+ {
+ "Name" : "Makes you look crazy",
+ "Power" : 1
+ }
+ ],
+ "Icon" : "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAAAXNSR0IArs4c6QAAABxpRE9UAAAAAgAAAAAAAAAgAAAAKAAAACAAAAAgAAABxj2CfowAAAGSSURBVHgB7Jc9TsNAEIX3JDkCPUV6KlpKFHEGlD4nyA04ACUXQKTgCEipUnKGNEbP0otentayicZ24SlWs7tjO/N9u/5J2b2+NUtuZcnwYE8BuQPyGZAPwXwLLPk5kG+BJa9+fgfkh1B+CeancL4F8i2Q/wWm/S/w+XFoTseftn0dvhu0OXfhpM+AGvzcEiYVAFisPqE9zrETJhHAlXfg2lglMK9z0f3RBfB+ZyRUV3x+erzsEIjjOBqc1xtNAIrvguybV3A9lkVHxlEE6GrrPb/ZvAySwlUnfCmlPQ+R8JCExvGtcRQBLFwj4FGkznX1VYDKPG/f2/MjwCksXACgdNUxJjwK9xwl4JihOwTFR0kIF+CABEPRnvsvPFctMoYKqAFSAFaMwB4pp3Y+bodIYL9WmIAaIOHxo7W8wiHvAjTvhUeNwwSgeAeAABbqOewC5hBdwFD4+9+7puzXV9fS6/b1wwT4tsaYAhwOOQdUQch5vgZCeAhAv3ZM31yYAAUgvApQQQ6n5w6FB/RVe1jdJOAPAAD//1eMQwoAAAGQSURBVO1UMU4DQQy8X9AgWopIUINEkS4VlJQo4gvwAV7AD3gEH4iSgidESpWSXyyZExP5lr0c7K5PsXBhec/2+jzjuWtent9CLdtu1mG5+gjz+WNr7IsY7eH+tvO+xfuqk4vz7CH91edFaF5v9nb6dBKm13edvrL+0Lk5lMzJkQDeJSkkgHF6mR8CHwMHCQR/NAQQGD0BAlwK4FCefQiefq+A2Vn29tG7igLAfmwcnJu/nJy3BMQkMN9HEPr8AL3bfBv7Bp+7/SoExMDjZwKEJwmyhnnmQIQEBIlz2x0iKoAvJkAC6TsTIH6MqRrEWUMSZF2zAwqT4Eu/e6pzFAIkmNSZ4OFT+VYBIIF//UqbJwnF/4DU0GwOn8r/JQYCpPGufEfJuZiA37ycQw/5uFeqPq4pfR6FADmkBCXjfWdZj3NfXW58dAJyB9W65wRoMWulryvAyqa05nQFaDFrpa8rwMqmtOZ0BWgxa6WvK8DKprTmdAVoMWulryvAyqa05nQFaDFrpa8rwMqmtOb89wr4AtQ4aPoL6yVpAAAAAElFTkSuQmCC",
+ "Name" : "Talking cats",
+ "DiscoveredBy" : "Salem"
+ }
+ ],
+ "Name" : "Sabrina"
+ }
+]
diff --git a/testbed/googleapis__python-bigquery/tests/data/characters.jsonl b/testbed/googleapis__python-bigquery/tests/data/characters.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..42b5bdc6a15249e40608fd47468b83d7e5e55612
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/data/characters.jsonl
@@ -0,0 +1,3 @@
+{"Name":"Bilbo","Age":"111","Weight":67.2,"IsMagic":false,"Spells":[],"TeaTime":"10:00:00","NextVacation":"2017-09-22","FavoriteTime":"2031-04-01T05:09:27","FavoriteNumber":"111"}
+{"Name":"Gandalf","Age":"1000","Weight":198.6,"IsMagic":true,"Spells":[{"Name": "Skydragon", "Icon":"iVBORw0KGgoAAAANSUhEUgAAAB4AAAAgCAYAAAAFQMh/AAAAAXNSR0IArs4c6QAAA9lJREFUSA21lk9OVEEQxvsRDImoiMG9mLjjCG5mEg7gEfQGsIcF7p0EDsBBSJiNO7ZsFRZqosb/QkSj7fer7ur33sw8GDFUUq+7q6vqq6qu7pkQzqG4EeI521e7FePVgM9cGPYwhCi6UO8qFOK+YY+Br66ujsmmxb84Yzwp6zCsxjJfWVkxnMsEMGuWHZ9Wcz11cM48hkq0vLwc1tbW4mAwqDpcdIqnMmgF0JMv2CiGnZ2dcHR0FA4PD8Pe3t5U/tx6bCSlb+JT8XfxT3HsUek0Li0tRdjWl+z6iRF+FNA1hXPDQ/IMNyRg3s8bD/OaZS+VP+9cOLSa64cA34oXZWagDkRzAaJxXaE+ufc4rCN7LrazZ2+8+STtpAL8WYDvpTaHKlkB2iQARMvb2+H27m4YaL7zaDtUw1BZAASi6T8T2UZnPZV2pvnJfCH5p8bewcGB6TrIfz8wBZgHQ83kjpuj6RBYQpuo09Tvmpd7TPe+ktZN8cKwS92KWXGuaqWowlYEwthtMcWOZUNJc8at+zuF/Xkqo69baS7P+AvWjYwJ4jyHXXsEnd74ZO/Pq+uXUuv6WNlso6cvnDsZB1V/unJab3D1/KrJDw9NCM9wHf2FK2ejTKMejnBHfGtfH7LGGCdQDqaqJgfgzWjXK1nYV4jRbPGnxUT7cqUaZfJrVZeOm9QmB21L6xXgbu/ScsYusJFMoU0x2fsamRJOd6kOYDRLUxv94ENZe8+0gM+0dyz+KgU7X8rLHHCIOZyrna4y6ykIu0YCs02TBXmk3PZssmEgaTxTo83xjCIjoE21h0Yah3MrV4+9kR8MaabGze+9NEILGAFE5nMOiiA32KnAr/sb7tED3nzlzC4dB38WMC+EjaqHfqvUKHi2gJPdWQ6AbH8hgyQ7QY6jvjj3QZWvX6pUAtduTX5Dss96Q7NI9RQRJeeKvRFbt0v2gb1Gx/PooJsztn1c1DqpAU3Hde2dB2aEHBhjgOFjMeDvxLafjQ3YZQSgOcHJZX611H45sGLHWvYTz9hiURlpNoBZvxb/Ft9lAQ1DmBfUiR+j1hAPkMBTE9L9+zLva1QvGFHurRBaZ5xLVitoBviiRkD/sIMDztKA5FA0b9/0OclzO2/XAQymJ0TcghZwEo9/AX8gMeAJMOvIsWWt5bwCoiFhVSllrdH0t5Q1JHAFlKJNkvTVdn2GHb9KdmacMT+d/Os05imJUccRX2YuZ93Sxf0Ilc4DPDeAq5SAvFEAY94cQc6BA26dzb4HWAJI4DPmQE5KCVUyvb2FcDZem7JdT2ggKUP3xX6n9XNq1DpzSf4Cy4ZqSlmM8d8AAAAASUVORK5CYII=","DiscoveredBy":"Firebreather","Properties":[{"Name":"Flying","Power":1},{"Name":"Creature","Power":1},{"Name":"Explodey","Power":11}],"LastUsed":"2015-10-31 23:59:56 UTC"}],"TeaTime":"15:00:00","NextVacation":"2666-06-06","FavoriteTime":"2001-12-19T23:59:59","FavoriteNumber":"1.618033989"}
+{"Name":"Sabrina","Age":"17","Weight":128.3,"IsMagic":true,"Spells":[{"Name": "Talking cats", "Icon":"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAAAXNSR0IArs4c6QAAABxpRE9UAAAAAgAAAAAAAAAgAAAAKAAAACAAAAAgAAABxj2CfowAAAGSSURBVHgB7Jc9TsNAEIX3JDkCPUV6KlpKFHEGlD4nyA04ACUXQKTgCEipUnKGNEbP0otentayicZ24SlWs7tjO/N9u/5J2b2+NUtuZcnwYE8BuQPyGZAPwXwLLPk5kG+BJa9+fgfkh1B+CeancL4F8i2Q/wWm/S/w+XFoTseftn0dvhu0OXfhpM+AGvzcEiYVAFisPqE9zrETJhHAlXfg2lglMK9z0f3RBfB+ZyRUV3x+erzsEIjjOBqc1xtNAIrvguybV3A9lkVHxlEE6GrrPb/ZvAySwlUnfCmlPQ+R8JCExvGtcRQBLFwj4FGkznX1VYDKPG/f2/MjwCksXACgdNUxJjwK9xwl4JihOwTFR0kIF+CABEPRnvsvPFctMoYKqAFSAFaMwB4pp3Y+bodIYL9WmIAaIOHxo7W8wiHvAjTvhUeNwwSgeAeAABbqOewC5hBdwFD4+9+7puzXV9fS6/b1wwT4tsaYAhwOOQdUQch5vgZCeAhAv3ZM31yYAAUgvApQQQ6n5w6FB/RVe1jdJOAPAAD//1eMQwoAAAGQSURBVO1UMU4DQQy8X9AgWopIUINEkS4VlJQo4gvwAV7AD3gEH4iSgidESpWSXyyZExP5lr0c7K5PsXBhec/2+jzjuWtent9CLdtu1mG5+gjz+WNr7IsY7eH+tvO+xfuqk4vz7CH91edFaF5v9nb6dBKm13edvrL+0Lk5lMzJkQDeJSkkgHF6mR8CHwMHCQR/NAQQGD0BAlwK4FCefQiefq+A2Vn29tG7igLAfmwcnJu/nJy3BMQkMN9HEPr8AL3bfBv7Bp+7/SoExMDjZwKEJwmyhnnmQIQEBIlz2x0iKoAvJkAC6TsTIH6MqRrEWUMSZF2zAwqT4Eu/e6pzFAIkmNSZ4OFT+VYBIIF//UqbJwnF/4DU0GwOn8r/JQYCpPGufEfJuZiA37ycQw/5uFeqPq4pfR6FADmkBCXjfWdZj3NfXW58dAJyB9W65wRoMWulryvAyqa05nQFaDFrpa8rwMqmtOZ0BWgxa6WvK8DKprTmdAVoMWulryvAyqa05nQFaDFrpa8rwMqmtOb89wr4AtQ4aPoL6yVpAAAAAElFTkSuQmCC","DiscoveredBy":"Salem","Properties":[{"Name":"Makes you look crazy","Power":1}],"LastUsed":"2017-02-14 12:07:23 UTC"}],"TeaTime":"12:00:00","NextVacation":"2017-03-14","FavoriteTime":"2000-10-31T23:27:46","FavoriteNumber":"13"}
diff --git a/testbed/googleapis__python-bigquery/tests/data/colors.avro b/testbed/googleapis__python-bigquery/tests/data/colors.avro
new file mode 100644
index 0000000000000000000000000000000000000000..e0133fd027f49093d638b1f2d82e70948306b2dd
Binary files /dev/null and b/testbed/googleapis__python-bigquery/tests/data/colors.avro differ
diff --git a/testbed/googleapis__python-bigquery/tests/data/people.csv b/testbed/googleapis__python-bigquery/tests/data/people.csv
new file mode 100644
index 0000000000000000000000000000000000000000..d3c7d063892a479c2990073a88794b1423f397f1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/data/people.csv
@@ -0,0 +1,3 @@
+full_name,age
+Phred Phlyntstone,32
+Wylma Phlyntstone,29
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/tests/data/scalars.csv b/testbed/googleapis__python-bigquery/tests/data/scalars.csv
new file mode 100644
index 0000000000000000000000000000000000000000..7af97583fa348e5f60baa275579b5ff17aef8f43
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/data/scalars.csv
@@ -0,0 +1,2 @@
+"[2020-01-01, 2020-02-01)"
+
diff --git a/testbed/googleapis__python-bigquery/tests/data/scalars.jsonl b/testbed/googleapis__python-bigquery/tests/data/scalars.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e06139e5c8488e158dfe3411647d3599060feaaf
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/data/scalars.jsonl
@@ -0,0 +1,2 @@
+{"bool_col": true, "bytes_col": "SGVsbG8sIFdvcmxkIQ==", "date_col": "2021-07-21", "datetime_col": "2021-07-21 11:39:45", "geography_col": "POINT(-122.0838511 37.3860517)", "int64_col": "123456789", "interval_col": "P7Y11M9DT4H15M37.123456S", "numeric_col": "1.23456789", "bignumeric_col": "10.111213141516171819", "float64_col": "1.25", "rowindex": 0, "string_col": "Hello, World!", "time_col": "11:41:43.07616", "timestamp_col": "2021-07-21T17:43:43.945289Z"}
+{"bool_col": null, "bytes_col": null, "date_col": null, "datetime_col": null, "geography_col": null, "int64_col": null, "interval_col": null, "numeric_col": null, "bignumeric_col": null, "float64_col": null, "rowindex": 1, "string_col": null, "time_col": null, "timestamp_col": null}
diff --git a/testbed/googleapis__python-bigquery/tests/data/scalars_extreme.jsonl b/testbed/googleapis__python-bigquery/tests/data/scalars_extreme.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..d0a33fdba066e1dee5a59714be876d0bb21c0f75
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/data/scalars_extreme.jsonl
@@ -0,0 +1,5 @@
+{"bool_col": true, "bytes_col": "DQo=\n", "date_col": "9999-12-31", "datetime_col": "9999-12-31 23:59:59.999999", "geography_col": "POINT(-135.0000 90.0000)", "int64_col": "9223372036854775807", "interval_col": "P-10000Y0M-3660000DT-87840000H0M0S", "numeric_col": "9.9999999999999999999999999999999999999E+28", "bignumeric_col": "9.999999999999999999999999999999999999999999999999999999999999999999999999999E+37", "float64_col": "+inf", "rowindex": 0, "string_col": "Hello, World", "time_col": "23:59:59.999999", "timestamp_col": "9999-12-31T23:59:59.999999Z"}
+{"bool_col": false, "bytes_col": "8J+Zgw==\n", "date_col": "0001-01-01", "datetime_col": "0001-01-01 00:00:00", "geography_col": "POINT(45.0000 -90.0000)", "int64_col": "-9223372036854775808", "interval_col": "P10000Y0M3660000DT87840000H0M0S", "numeric_col": "-9.9999999999999999999999999999999999999E+28", "bignumeric_col": "-9.999999999999999999999999999999999999999999999999999999999999999999999999999E+37", "float64_col": "-inf", "rowindex": 1, "string_col": "Hello, World", "time_col": "00:00:00", "timestamp_col": "0001-01-01T00:00:00.000000Z"}
+{"bool_col": true, "bytes_col": "AA==\n", "date_col": "1900-01-01", "datetime_col": "1900-01-01 00:00:00", "geography_col": "POINT(-180.0000 0.0000)", "int64_col": "-1", "interval_col": "P0Y0M0DT0H0M0.000001S", "numeric_col": "0.000000001", "bignumeric_col": "-0.00000000000000000000000000000000000001", "float64_col": "nan", "rowindex": 2, "string_col": "こんにちは", "time_col": "00:00:00.000001", "timestamp_col": "1900-01-01T00:00:00.000000Z"}
+{"bool_col": false, "bytes_col": "", "date_col": "1970-01-01", "datetime_col": "1970-01-01 00:00:00", "geography_col": "POINT(0 0)", "int64_col": "0", "interval_col": "P0Y0M0DT0H0M0S", "numeric_col": "0.0", "bignumeric_col": "0.0", "float64_col": 0.0, "rowindex": 3, "string_col": "", "time_col": "12:00:00", "timestamp_col": "1970-01-01T00:00:00.000000Z"}
+{"bool_col": null, "bytes_col": null, "date_col": null, "datetime_col": null, "geography_col": null, "int64_col": null, "interval_col": null, "numeric_col": null, "bignumeric_col": null, "float64_col": null, "rowindex": 4, "string_col": null, "time_col": null, "timestamp_col": null}
diff --git a/testbed/googleapis__python-bigquery/tests/data/scalars_schema.json b/testbed/googleapis__python-bigquery/tests/data/scalars_schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..676d37d56141b6dd7ab5ce06c672044d846b0189
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/data/scalars_schema.json
@@ -0,0 +1,72 @@
+[
+ {
+ "mode": "NULLABLE",
+ "name": "bool_col",
+ "type": "BOOLEAN"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "bignumeric_col",
+ "type": "BIGNUMERIC"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "bytes_col",
+ "type": "BYTES"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "date_col",
+ "type": "DATE"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "datetime_col",
+ "type": "DATETIME"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "float64_col",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "geography_col",
+ "type": "GEOGRAPHY"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "int64_col",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "interval_col",
+ "type": "INTERVAL"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "numeric_col",
+ "type": "NUMERIC"
+ },
+ {
+ "mode": "REQUIRED",
+ "name": "rowindex",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "string_col",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "time_col",
+ "type": "TIME"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "timestamp_col",
+ "type": "TIMESTAMP"
+ }
+]
diff --git a/testbed/googleapis__python-bigquery/tests/data/scalars_schema_csv.json b/testbed/googleapis__python-bigquery/tests/data/scalars_schema_csv.json
new file mode 100644
index 0000000000000000000000000000000000000000..82b878d95fcc517a9fd3081c29de42c78c7c513e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/data/scalars_schema_csv.json
@@ -0,0 +1,10 @@
+[
+ {
+ "mode" : "NULLABLE",
+ "name" : "range_date",
+ "type" : "RANGE",
+ "rangeElementType": {
+ "type": "DATE"
+ }
+ }
+ ]
\ No newline at end of file
diff --git a/testbed/googleapis__python-bigquery/tests/data/schema.json b/testbed/googleapis__python-bigquery/tests/data/schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..29542e82da983ee3a52128b6bcd9abebd0ba0760
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/data/schema.json
@@ -0,0 +1,96 @@
+{
+ "fields" : [
+ {
+ "type" : "STRING",
+ "name" : "Name",
+ "mode" : "NULLABLE"
+ },
+ {
+ "name" : "Age",
+ "mode" : "NULLABLE",
+ "type" : "INTEGER"
+ },
+ {
+ "type" : "FLOAT",
+ "name" : "Weight",
+ "mode" : "NULLABLE"
+ },
+ {
+ "mode" : "NULLABLE",
+ "name" : "IsMagic",
+ "type" : "BOOLEAN"
+ },
+ {
+ "name" : "Spells",
+ "fields" : [
+ {
+ "mode" : "NULLABLE",
+ "name" : "Name",
+ "type" : "STRING"
+ },
+ {
+ "mode" : "NULLABLE",
+ "name" : "LastUsed",
+ "type" : "TIMESTAMP"
+ },
+ {
+ "type" : "STRING",
+ "mode" : "NULLABLE",
+ "name" : "DiscoveredBy"
+ },
+ {
+ "name" : "Properties",
+ "fields" : [
+ {
+ "name" : "Name",
+ "mode" : "NULLABLE",
+ "type" : "STRING"
+ },
+ {
+ "type" : "FLOAT",
+ "name" : "Power",
+ "mode" : "NULLABLE"
+ }
+ ],
+ "mode" : "REPEATED",
+ "type" : "RECORD"
+ },
+ {
+ "mode" : "NULLABLE",
+ "name" : "Icon",
+ "type" : "BYTES"
+ }
+ ],
+ "mode" : "REPEATED",
+ "type" : "RECORD"
+ },
+ {
+ "type" : "TIME",
+ "mode" : "NULLABLE",
+ "name" : "TeaTime"
+ },
+ {
+ "type" : "DATE",
+ "name" : "NextVacation",
+ "mode" : "NULLABLE"
+ },
+ {
+ "mode" : "NULLABLE",
+ "name" : "FavoriteTime",
+ "type" : "DATETIME"
+ },
+ {
+ "mode" : "NULLABLE",
+ "name" : "FavoriteNumber",
+ "type" : "NUMERIC"
+ },
+ {
+ "mode" : "NULLABLE",
+ "name" : "TimeRange",
+ "type" : "RANGE",
+ "rangeElementType": {
+ "type": "DATETIME"
+ }
+ }
+ ]
+}
diff --git a/testbed/googleapis__python-bigquery/tests/scrub_datasets.py b/testbed/googleapis__python-bigquery/tests/scrub_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a8ab3e7b7c549030a25c1cda3bb442dbbbd6887
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/scrub_datasets.py
@@ -0,0 +1,25 @@
+import re
+import sys
+
+from google.api_core.exceptions import NotFound
+from google.cloud.bigquery import Client
+
+
+def main(prefixes):
+ client = Client()
+
+ pattern = re.compile("|".join("^{}.*$".format(prefix) for prefix in prefixes))
+
+ ds_items = list(client.list_datasets())
+ for dataset in ds_items:
+ ds_id = dataset.dataset_id
+ if pattern.match(ds_id):
+ print("Deleting dataset: {}".format(ds_id))
+ try:
+ client.delete_dataset(dataset.reference, delete_contents=True)
+ except NotFound:
+ print(" NOT FOUND")
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/testbed/googleapis__python-bigquery/tests/system/__init__.py b/testbed/googleapis__python-bigquery/tests/system/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fbd93bb2ca4d982f578388ee47499e8a421f50e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/testbed/googleapis__python-bigquery/tests/system/conftest.py b/testbed/googleapis__python-bigquery/tests/system/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..8efa042af7ec191555d5300b6c3b52fac55ae7be
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/conftest.py
@@ -0,0 +1,174 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pathlib
+import random
+import re
+from typing import Tuple
+
+import pytest
+import test_utils.prefixer
+
+from google.cloud import bigquery
+from google.cloud.bigquery import enums
+from . import helpers
+
+
+prefixer = test_utils.prefixer.Prefixer("python-bigquery", "tests/system")
+
+DATA_DIR = pathlib.Path(__file__).parent.parent / "data"
+TOKYO_LOCATION = "asia-northeast1"
+
+
+@pytest.fixture(scope="session", autouse=True)
+def cleanup_datasets(bigquery_client: bigquery.Client):
+ for dataset in bigquery_client.list_datasets():
+ if prefixer.should_cleanup(dataset.dataset_id):
+ bigquery_client.delete_dataset(
+ dataset, delete_contents=True, not_found_ok=True
+ )
+
+
+@pytest.fixture(scope="session")
+def bigquery_client():
+ return bigquery.Client()
+
+
+@pytest.fixture(scope="session")
+def project_id(bigquery_client: bigquery.Client):
+ return bigquery_client.project
+
+
+@pytest.fixture(scope="session")
+def bqstorage_client(bigquery_client):
+ from google.cloud import bigquery_storage
+
+ return bigquery_storage.BigQueryReadClient(credentials=bigquery_client._credentials)
+
+
+@pytest.fixture(scope="session")
+def dataset_id(bigquery_client):
+ dataset_id = prefixer.create_prefix()
+ bigquery_client.create_dataset(dataset_id)
+ yield dataset_id
+ bigquery_client.delete_dataset(dataset_id, delete_contents=True, not_found_ok=True)
+
+
+@pytest.fixture(scope="session")
+def dataset_id_tokyo(bigquery_client: bigquery.Client, project_id: str):
+ dataset_id = prefixer.create_prefix() + "_tokyo"
+ dataset = bigquery.Dataset(f"{project_id}.{dataset_id}")
+ dataset.location = TOKYO_LOCATION
+ bigquery_client.create_dataset(dataset)
+ yield dataset_id
+ bigquery_client.delete_dataset(dataset_id, delete_contents=True, not_found_ok=True)
+
+
+@pytest.fixture()
+def dataset_client(bigquery_client, dataset_id):
+ import google.cloud.bigquery.job
+
+ return bigquery.Client(
+ default_query_job_config=google.cloud.bigquery.job.QueryJobConfig(
+ default_dataset=f"{bigquery_client.project}.{dataset_id}",
+ )
+ )
+
+
+@pytest.fixture
+def table_id(dataset_id):
+ return f"{dataset_id}.table_{helpers.temp_suffix()}"
+
+
+def load_scalars_table(
+ bigquery_client: bigquery.Client,
+ project_id: str,
+ dataset_id: str,
+ data_path: str = "scalars.jsonl",
+ source_format=enums.SourceFormat.NEWLINE_DELIMITED_JSON,
+ schema_source="scalars_schema.json",
+) -> str:
+ schema = bigquery_client.schema_from_json(DATA_DIR / schema_source)
+ table_id = data_path.replace(".", "_") + hex(random.randrange(1000000))
+ job_config = bigquery.LoadJobConfig()
+ job_config.schema = schema
+ job_config.source_format = source_format
+ full_table_id = f"{project_id}.{dataset_id}.{table_id}"
+ with open(DATA_DIR / data_path, "rb") as data_file:
+ job = bigquery_client.load_table_from_file(
+ data_file, full_table_id, job_config=job_config
+ )
+ job.result()
+ return full_table_id
+
+
+@pytest.fixture(scope="session")
+def scalars_table(bigquery_client: bigquery.Client, project_id: str, dataset_id: str):
+ full_table_id = load_scalars_table(bigquery_client, project_id, dataset_id)
+ yield full_table_id
+ bigquery_client.delete_table(full_table_id, not_found_ok=True)
+
+
+@pytest.fixture(scope="session")
+def scalars_table_tokyo(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id_tokyo: str
+):
+ full_table_id = load_scalars_table(bigquery_client, project_id, dataset_id_tokyo)
+ yield full_table_id
+ bigquery_client.delete_table(full_table_id, not_found_ok=True)
+
+
+@pytest.fixture(scope="session")
+def scalars_extreme_table(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id: str
+):
+ full_table_id = load_scalars_table(
+ bigquery_client, project_id, dataset_id, data_path="scalars_extreme.jsonl"
+ )
+ yield full_table_id
+ bigquery_client.delete_table(full_table_id, not_found_ok=True)
+
+
+@pytest.fixture(scope="session", params=["US", TOKYO_LOCATION])
+def scalars_table_multi_location(
+ request, scalars_table: str, scalars_table_tokyo: str
+) -> Tuple[str, str]:
+ if request.param == "US":
+ full_table_id = scalars_table
+ elif request.param == TOKYO_LOCATION:
+ full_table_id = scalars_table_tokyo
+ else:
+ raise ValueError(f"got unexpected location: {request.param}")
+ return request.param, full_table_id
+
+
+@pytest.fixture(scope="session")
+def scalars_table_csv(
+ bigquery_client: bigquery.Client, project_id: str, dataset_id: str
+):
+ full_table_id = load_scalars_table(
+ bigquery_client,
+ project_id,
+ dataset_id,
+ data_path="scalars.csv",
+ source_format=enums.SourceFormat.CSV,
+ schema_source="scalars_schema_csv.json",
+ )
+ yield full_table_id
+ bigquery_client.delete_table(full_table_id, not_found_ok=True)
+
+
+@pytest.fixture
+def test_table_name(request, replace_non_anum=re.compile(r"[^a-zA-Z0-9_]").sub):
+ return replace_non_anum("_", request.node.name)
diff --git a/testbed/googleapis__python-bigquery/tests/system/helpers.py b/testbed/googleapis__python-bigquery/tests/system/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fd344eeb0710cce2a8f6759ddff1d3f705dca3c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/helpers.py
@@ -0,0 +1,106 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import decimal
+import uuid
+
+import google.api_core.exceptions
+import test_utils.retry
+
+from google.cloud._helpers import UTC
+
+
+_naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
+_naive_microseconds = datetime.datetime(2016, 12, 5, 12, 41, 9, 250000)
+_stamp = "%s %s" % (_naive.date().isoformat(), _naive.time().isoformat())
+_date = _naive.date().isoformat()
+_stamp_microseconds = _stamp + ".250000"
+_zoned = _naive.replace(tzinfo=UTC)
+_zoned_microseconds = _naive_microseconds.replace(tzinfo=UTC)
+_numeric = decimal.Decimal("123456789.123456789")
+
+
+# Examples of most data types to test with query() and DB-API.
+STANDARD_SQL_EXAMPLES = [
+ ("SELECT 1", 1),
+ ("SELECT 1.3", 1.3),
+ ("SELECT TRUE", True),
+ ('SELECT "ABC"', "ABC"),
+ ('SELECT CAST("foo" AS BYTES)', b"foo"),
+ ('SELECT TIMESTAMP "%s"' % (_stamp,), _zoned),
+ (
+ 'SELECT TIMESTAMP "%s"' % (_stamp_microseconds,),
+ _zoned_microseconds,
+ ),
+ ('SELECT DATETIME(TIMESTAMP "%s")' % (_stamp,), _naive),
+ (
+ 'SELECT DATETIME(TIMESTAMP "%s")' % (_stamp_microseconds,),
+ _naive_microseconds,
+ ),
+ ('SELECT DATE(TIMESTAMP "%s")' % (_stamp,), _naive.date()),
+ ('SELECT TIME(TIMESTAMP "%s")' % (_stamp,), _naive.time()),
+ ('SELECT NUMERIC "%s"' % (_numeric,), _numeric),
+ ("SELECT (1, 2)", {"_field_1": 1, "_field_2": 2}),
+ (
+ "SELECT ((1, 2), (3, 4), 5)",
+ {
+ "_field_1": {"_field_1": 1, "_field_2": 2},
+ "_field_2": {"_field_1": 3, "_field_2": 4},
+ "_field_3": 5,
+ },
+ ),
+ ("SELECT [1, 2, 3]", [1, 2, 3]),
+ (
+ "SELECT ([1, 2], 3, [4, 5])",
+ {"_field_1": [1, 2], "_field_2": 3, "_field_3": [4, 5]},
+ ),
+ (
+ "SELECT [(1, 2, 3), (4, 5, 6)]",
+ [
+ {"_field_1": 1, "_field_2": 2, "_field_3": 3},
+ {"_field_1": 4, "_field_2": 5, "_field_3": 6},
+ ],
+ ),
+ (
+ "SELECT [([1, 2, 3], 4), ([5, 6], 7)]",
+ [{"_field_1": [1, 2, 3], "_field_2": 4}, {"_field_1": [5, 6], "_field_2": 7}],
+ ),
+ ("SELECT ARRAY(SELECT STRUCT([1, 2]))", [{"_field_1": [1, 2]}]),
+ ("SELECT ST_GeogPoint(1, 2)", "POINT(1 2)"),
+ (
+ "SELECT RANGE '[UNBOUNDED, %s)'" % _date,
+ {"start": None, "end": _naive.date()},
+ ),
+]
+
+
+def temp_suffix():
+ now = datetime.datetime.now()
+ return f"{now.strftime('%Y%m%d%H%M%S')}_{uuid.uuid4().hex[:8]}"
+
+
+def _rate_limit_exceeded(forbidden):
+ """Predicate: pass only exceptions with 'rateLimitExceeded' as reason."""
+ return any(error["reason"] == "rateLimitExceeded" for error in forbidden._errors)
+
+
+# We need to wait to stay within the rate limits.
+# The alternative outcome is a 403 Forbidden response from upstream, which
+# they return instead of the more appropriate 429.
+# See https://cloud.google.com/bigquery/quota-policy
+retry_403 = test_utils.retry.RetryErrors(
+ google.api_core.exceptions.Forbidden,
+ error_predicate=_rate_limit_exceeded,
+)
diff --git a/testbed/googleapis__python-bigquery/tests/system/test_arrow.py b/testbed/googleapis__python-bigquery/tests/system/test_arrow.py
new file mode 100644
index 0000000000000000000000000000000000000000..82cf11f859d3816f6a077a38ce00146f75df5fb5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/test_arrow.py
@@ -0,0 +1,196 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""System tests for Arrow connector."""
+
+from typing import Optional
+
+import pyarrow
+import pytest
+
+from google.cloud import bigquery
+from google.cloud.bigquery import enums
+
+
+@pytest.mark.parametrize(
+ ("max_results", "scalars_table_name"),
+ (
+ (None, "scalars_table"), # Use BQ Storage API.
+ (10, "scalars_table"), # Use REST API.
+ (None, "scalars_extreme_table"), # Use BQ Storage API.
+ (10, "scalars_extreme_table"), # Use REST API.
+ ),
+)
+def test_list_rows_nullable_scalars_dtypes(
+ bigquery_client: bigquery.Client,
+ scalars_table: str,
+ scalars_extreme_table: str,
+ max_results: Optional[int],
+ scalars_table_name: str,
+):
+ table_id = scalars_table
+ if scalars_table_name == "scalars_extreme_table":
+ table_id = scalars_extreme_table
+
+ # TODO(GH#836): Avoid INTERVAL columns until they are supported by the
+ # BigQuery Storage API and pyarrow.
+ schema = [
+ bigquery.SchemaField("bool_col", enums.SqlTypeNames.BOOLEAN),
+ bigquery.SchemaField("bignumeric_col", enums.SqlTypeNames.BIGNUMERIC),
+ bigquery.SchemaField("bytes_col", enums.SqlTypeNames.BYTES),
+ bigquery.SchemaField("date_col", enums.SqlTypeNames.DATE),
+ bigquery.SchemaField("datetime_col", enums.SqlTypeNames.DATETIME),
+ bigquery.SchemaField("float64_col", enums.SqlTypeNames.FLOAT64),
+ bigquery.SchemaField("geography_col", enums.SqlTypeNames.GEOGRAPHY),
+ bigquery.SchemaField("int64_col", enums.SqlTypeNames.INT64),
+ bigquery.SchemaField("numeric_col", enums.SqlTypeNames.NUMERIC),
+ bigquery.SchemaField("string_col", enums.SqlTypeNames.STRING),
+ bigquery.SchemaField("time_col", enums.SqlTypeNames.TIME),
+ bigquery.SchemaField("timestamp_col", enums.SqlTypeNames.TIMESTAMP),
+ ]
+
+ arrow_table = bigquery_client.list_rows(
+ table_id,
+ max_results=max_results,
+ selected_fields=schema,
+ ).to_arrow()
+
+ schema = arrow_table.schema
+ bignumeric_type = schema.field("bignumeric_col").type
+ # 77th digit is partial.
+ # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#decimal_types
+ assert bignumeric_type.precision in {76, 77}
+ assert bignumeric_type.scale == 38
+
+ bool_type = schema.field("bool_col").type
+ assert bool_type.equals(pyarrow.bool_())
+
+ bytes_type = schema.field("bytes_col").type
+ assert bytes_type.equals(pyarrow.binary())
+
+ date_type = schema.field("date_col").type
+ assert date_type.equals(pyarrow.date32())
+
+ datetime_type = schema.field("datetime_col").type
+ assert datetime_type.unit == "us"
+ assert datetime_type.tz is None
+
+ float64_type = schema.field("float64_col").type
+ assert float64_type.equals(pyarrow.float64())
+
+ geography_type = schema.field("geography_col").type
+ assert geography_type.equals(pyarrow.string())
+
+ int64_type = schema.field("int64_col").type
+ assert int64_type.equals(pyarrow.int64())
+
+ numeric_type = schema.field("numeric_col").type
+ assert numeric_type.precision == 38
+ assert numeric_type.scale == 9
+
+ string_type = schema.field("string_col").type
+ assert string_type.equals(pyarrow.string())
+
+ time_type = schema.field("time_col").type
+ assert time_type.equals(pyarrow.time64("us"))
+
+ timestamp_type = schema.field("timestamp_col").type
+ assert timestamp_type.unit == "us"
+ assert timestamp_type.tz is not None
+
+
+@pytest.mark.parametrize("do_insert", [True, False])
+def test_arrow_extension_types_same_for_storage_and_REST_APIs_894(
+ dataset_client, test_table_name, do_insert
+):
+ types = dict(
+ astring=("STRING", "'x'"),
+ astring9=("STRING(9)", "'x'"),
+ abytes=("BYTES", "b'x'"),
+ abytes9=("BYTES(9)", "b'x'"),
+ anumeric=("NUMERIC", "42"),
+ anumeric9=("NUMERIC(9)", "42"),
+ anumeric92=("NUMERIC(9,2)", "42"),
+ abignumeric=("BIGNUMERIC", "42e30"),
+ abignumeric49=("BIGNUMERIC(37)", "42e30"),
+ abignumeric492=("BIGNUMERIC(37,2)", "42e30"),
+ abool=("BOOL", "true"),
+ adate=("DATE", "'2021-09-06'"),
+ adatetime=("DATETIME", "'2021-09-06T09:57:26'"),
+ ageography=("GEOGRAPHY", "ST_GEOGFROMTEXT('point(0 0)')"),
+ # Can't get arrow data for interval :(
+ # ainterval=('INTERVAL', "make_interval(1, 2, 3, 4, 5, 6)"),
+ aint64=("INT64", "42"),
+ afloat64=("FLOAT64", "42.0"),
+ astruct=("STRUCT", "struct(42)"),
+ atime=("TIME", "'1:2:3'"),
+ atimestamp=("TIMESTAMP", "'2021-09-06T09:57:26'"),
+ )
+ columns = ", ".join(f"{k} {t[0]}" for k, t in types.items())
+ dataset_client.query(f"create table {test_table_name} ({columns})").result()
+ if do_insert:
+ names = list(types)
+ values = ", ".join(types[name][1] for name in names)
+ names = ", ".join(names)
+ dataset_client.query(
+ f"insert into {test_table_name} ({names}) values ({values})"
+ ).result()
+ at = dataset_client.query(f"select * from {test_table_name}").result().to_arrow()
+ storage_api_metadata = {
+ at.field(i).name: at.field(i).metadata for i in range(at.num_columns)
+ }
+ at = (
+ dataset_client.query(f"select * from {test_table_name}")
+ .result()
+ .to_arrow(create_bqstorage_client=False)
+ )
+ rest_api_metadata = {
+ at.field(i).name: at.field(i).metadata for i in range(at.num_columns)
+ }
+
+ assert rest_api_metadata == storage_api_metadata
+ assert rest_api_metadata["adatetime"] == {
+ b"ARROW:extension:name": b"google:sqlType:datetime"
+ }
+ assert rest_api_metadata["ageography"] == {
+ b"ARROW:extension:name": b"google:sqlType:geography",
+ b"ARROW:extension:metadata": b'{"encoding": "WKT"}',
+ }
+
+
+def test_list_rows_range_csv(
+ bigquery_client: bigquery.Client,
+ scalars_table_csv: str,
+):
+ table_id = scalars_table_csv
+
+ schema = [
+ bigquery.SchemaField(
+ "range_date", enums.SqlTypeNames.RANGE, range_element_type="DATE"
+ ),
+ ]
+
+ arrow_table = bigquery_client.list_rows(
+ table_id,
+ selected_fields=schema,
+ ).to_arrow()
+
+ schema = arrow_table.schema
+
+ expected_type = pyarrow.struct(
+ [("start", pyarrow.date32()), ("end", pyarrow.date32())]
+ )
+
+ range_type = schema.field("range_date").type
+ assert range_type == expected_type
diff --git a/testbed/googleapis__python-bigquery/tests/system/test_client.py b/testbed/googleapis__python-bigquery/tests/system/test_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..95c679a149d63e97baa377529e722d9ef5b10df1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/test_client.py
@@ -0,0 +1,2645 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import copy
+import csv
+import datetime
+import decimal
+import io
+import json
+import operator
+import os
+import pathlib
+import time
+import unittest
+import uuid
+from typing import Optional
+
+from google.api_core.exceptions import PreconditionFailed
+from google.api_core.exceptions import BadRequest
+from google.api_core.exceptions import ClientError
+from google.api_core.exceptions import Conflict
+from google.api_core.exceptions import GoogleAPICallError
+from google.api_core.exceptions import NotFound
+from google.api_core.exceptions import InternalServerError
+from google.api_core.exceptions import ServiceUnavailable
+from google.api_core.exceptions import TooManyRequests
+from google.cloud import bigquery
+from google.cloud.bigquery.dataset import Dataset
+from google.cloud.bigquery.dataset import DatasetReference
+from google.cloud.bigquery.table import Table
+from google.cloud._helpers import UTC
+from google.cloud.bigquery import dbapi, enums
+from google.cloud import storage
+from google.cloud.datacatalog_v1 import types as datacatalog_types
+from google.cloud.datacatalog_v1 import PolicyTagManagerClient
+import psutil
+import pytest
+from test_utils.retry import RetryErrors
+from test_utils.retry import RetryInstanceState
+from test_utils.retry import RetryResult
+from test_utils.system import unique_resource_id
+
+from . import helpers
+
+
+JOB_TIMEOUT = 120 # 2 minutes
+DATA_PATH = pathlib.Path(__file__).parent.parent / "data"
+
+# Common table data used for many tests.
+ROWS = [
+ ("Phred Phlyntstone", 32),
+ ("Bharney Rhubble", 33),
+ ("Wylma Phlyntstone", 29),
+ ("Bhettye Rhubble", 27),
+]
+HEADER_ROW = ("Full Name", "Age")
+SCHEMA = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+]
+CLUSTERING_SCHEMA = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ bigquery.SchemaField("body_height_cm", "INTEGER", mode="REQUIRED"),
+ bigquery.SchemaField("date_of_birth", "DATE", mode="REQUIRED"),
+]
+TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA = [
+ bigquery.SchemaField("transaction_time", "TIMESTAMP", mode="REQUIRED"),
+ bigquery.SchemaField("transaction_id", "INTEGER", mode="REQUIRED"),
+ bigquery.SchemaField("user_email", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("store_code", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField(
+ "items",
+ "RECORD",
+ mode="REPEATED",
+ fields=[
+ bigquery.SchemaField("item_code", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("quantity", "INTEGER", mode="REQUIRED"),
+ bigquery.SchemaField("comments", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("expiration_date", "DATE", mode="REQUIRED"),
+ ],
+ ),
+]
+
+SOURCE_URIS_AVRO = [
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/a-twitter.avro",
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/b-twitter.avro",
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/c-twitter.avro",
+]
+SOURCE_URIS_PARQUET = [
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/a-twitter.parquet",
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/b-twitter.parquet",
+ "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/c-twitter.parquet",
+]
+REFERENCE_FILE_SCHEMA_URI_AVRO = "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/a-twitter.avro"
+REFERENCE_FILE_SCHEMA_URI_PARQUET = "gs://cloud-samples-data/bigquery/federated-formats-reference-file-schema/a-twitter.parquet"
+
+
+# The VPC-SC team maintains a mirror of the GCS bucket used for code
+# samples. The public bucket crosses the configured security boundary.
+# See: https://github.com/googleapis/google-cloud-python/issues/8550
+SAMPLES_BUCKET = os.environ.get("GCLOUD_TEST_SAMPLES_BUCKET", "cloud-samples-data")
+
+retry_storage_errors = RetryErrors(
+ (TooManyRequests, InternalServerError, ServiceUnavailable)
+)
+
+MTLS_TESTING = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") == "true"
+
+
+def _has_rows(result):
+ return len(result) > 0
+
+
+def _make_dataset_id(prefix):
+ return f"python_bigquery_tests_system_{prefix}{unique_resource_id()}"
+
+
+def _load_json_schema(filename="schema.json"):
+ from google.cloud.bigquery.table import _parse_schema_resource
+
+ json_filename = DATA_PATH / filename
+
+ with open(json_filename, "r") as schema_file:
+ return _parse_schema_resource(json.load(schema_file))
+
+
+class Config(object):
+ """Run-time configuration to be modified at set-up.
+
+ This is a mutable stand-in to allow test set-up to modify
+ global state.
+ """
+
+ CLIENT: Optional[bigquery.Client] = None
+ CURSOR = None
+
+
+def setUpModule():
+ Config.CLIENT = bigquery.Client()
+ Config.CURSOR = dbapi.connect(Config.CLIENT).cursor()
+
+
+class TestBigQuery(unittest.TestCase):
+ def setUp(self):
+ self.to_delete = []
+
+ def tearDown(self):
+ policy_tag_client = PolicyTagManagerClient()
+
+ def _still_in_use(bad_request):
+ return any(
+ error["reason"] == "resourceInUse" for error in bad_request._errors
+ )
+
+ retry_in_use = RetryErrors(BadRequest, error_predicate=_still_in_use)
+ retry_storage_errors_conflict = RetryErrors(
+ (Conflict, TooManyRequests, InternalServerError, ServiceUnavailable)
+ )
+ for doomed in self.to_delete:
+ if isinstance(doomed, storage.Bucket):
+ retry_storage_errors_conflict(doomed.delete)(force=True)
+ elif isinstance(doomed, (Dataset, bigquery.DatasetReference)):
+ retry_in_use(Config.CLIENT.delete_dataset)(doomed, delete_contents=True)
+ elif isinstance(doomed, (Table, bigquery.TableReference)):
+ retry_in_use(Config.CLIENT.delete_table)(doomed)
+ elif isinstance(doomed, datacatalog_types.Taxonomy):
+ policy_tag_client.delete_taxonomy(name=doomed.name)
+ else:
+ doomed.delete()
+
+ def test_get_service_account_email(self):
+ client = Config.CLIENT
+
+ got = client.get_service_account_email()
+
+ self.assertIsInstance(got, str)
+ self.assertIn("@", got)
+
+ def _create_bucket(self, bucket_name, location=None):
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ retry_storage_errors(storage_client.create_bucket)(
+ bucket_name, location=location
+ )
+ self.to_delete.append(bucket)
+
+ return bucket
+
+ def test_close_releases_open_sockets(self):
+ current_process = psutil.Process()
+ conn_count_start = len(current_process.connections())
+
+ client = Config.CLIENT
+ client.query(
+ """
+ SELECT
+ source_year AS year, COUNT(is_male) AS birth_count
+ FROM `bigquery-public-data.samples.natality`
+ GROUP BY year
+ ORDER BY year DESC
+ LIMIT 15
+ """
+ )
+
+ client.close()
+
+ conn_count_end = len(current_process.connections())
+ self.assertLessEqual(conn_count_end, conn_count_start)
+
+ def test_create_dataset(self):
+ DATASET_ID = _make_dataset_id("create_dataset")
+ dataset = self.temp_dataset(DATASET_ID)
+
+ self.assertTrue(_dataset_exists(dataset))
+ self.assertEqual(dataset.dataset_id, DATASET_ID)
+ self.assertEqual(dataset.project, Config.CLIENT.project)
+ self.assertIs(dataset.is_case_insensitive, False)
+
+ def test_create_dataset_case_sensitive(self):
+ DATASET_ID = _make_dataset_id("create_cs_dataset")
+ dataset = self.temp_dataset(DATASET_ID, is_case_insensitive=False)
+ self.assertIs(dataset.is_case_insensitive, False)
+
+ def test_create_dataset_case_insensitive(self):
+ DATASET_ID = _make_dataset_id("create_ci_dataset")
+ dataset = self.temp_dataset(DATASET_ID, is_case_insensitive=True)
+ self.assertIs(dataset.is_case_insensitive, True)
+
+ def test_create_dataset_max_time_travel_hours(self):
+ DATASET_ID = _make_dataset_id("create_ci_dataset")
+ dataset = self.temp_dataset(DATASET_ID, max_time_travel_hours=24 * 2)
+ self.assertEqual(int(dataset.max_time_travel_hours), 24 * 2)
+
+ def test_get_dataset(self):
+ dataset_id = _make_dataset_id("get_dataset")
+ client = Config.CLIENT
+ project = client.project
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ dataset_arg = Dataset(dataset_ref)
+ dataset_arg.friendly_name = "Friendly"
+ dataset_arg.description = "Description"
+ dataset = helpers.retry_403(client.create_dataset)(dataset_arg)
+ self.to_delete.append(dataset)
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+
+ # Get with a reference.
+ got = client.get_dataset(dataset_ref)
+ self.assertEqual(got.friendly_name, "Friendly")
+ self.assertEqual(got.description, "Description")
+
+ # Get with a string.
+ got = client.get_dataset(dataset_id)
+ self.assertEqual(got.friendly_name, "Friendly")
+ self.assertEqual(got.description, "Description")
+
+ # Get with a fully-qualified string.
+ got = client.get_dataset("{}.{}".format(client.project, dataset_id))
+ self.assertEqual(got.friendly_name, "Friendly")
+ self.assertEqual(got.description, "Description")
+
+ def test_create_dataset_with_default_rounding_mode(self):
+ DATASET_ID = _make_dataset_id("create_dataset_rounding_mode")
+ dataset = self.temp_dataset(DATASET_ID, default_rounding_mode="ROUND_HALF_EVEN")
+
+ self.assertTrue(_dataset_exists(dataset))
+ self.assertEqual(dataset.default_rounding_mode, "ROUND_HALF_EVEN")
+
+ def test_update_dataset(self):
+ dataset = self.temp_dataset(_make_dataset_id("update_dataset"))
+ self.assertTrue(_dataset_exists(dataset))
+ self.assertIsNone(dataset.friendly_name)
+ self.assertIsNone(dataset.description)
+ self.assertEqual(dataset.labels, {})
+ self.assertIs(dataset.is_case_insensitive, False)
+
+ dataset.friendly_name = "Friendly"
+ dataset.description = "Description"
+ dataset.labels = {"priority": "high", "color": "blue"}
+ dataset.is_case_insensitive = True
+ ds2 = Config.CLIENT.update_dataset(
+ dataset, ("friendly_name", "description", "labels", "is_case_insensitive")
+ )
+ self.assertEqual(ds2.friendly_name, "Friendly")
+ self.assertEqual(ds2.description, "Description")
+ self.assertEqual(ds2.labels, {"priority": "high", "color": "blue"})
+ self.assertIs(ds2.is_case_insensitive, True)
+
+ ds2.labels = {
+ "color": "green", # change
+ "shape": "circle", # add
+ "priority": None, # delete
+ }
+ ds3 = Config.CLIENT.update_dataset(ds2, ["labels"])
+ self.assertEqual(ds3.labels, {"color": "green", "shape": "circle"})
+
+ # If we try to update using d2 again, it will fail because the
+ # previous update changed the ETag.
+ ds2.description = "no good"
+ with self.assertRaises(PreconditionFailed):
+ Config.CLIENT.update_dataset(ds2, ["description"])
+
+ def test_list_datasets(self):
+ datasets_to_create = [
+ "new" + unique_resource_id(),
+ "newer" + unique_resource_id(),
+ "newest" + unique_resource_id(),
+ ]
+ for dataset_id in datasets_to_create:
+ self.temp_dataset(dataset_id)
+
+ # Retrieve the datasets.
+ iterator = Config.CLIENT.list_datasets()
+ all_datasets = list(iterator)
+ self.assertIsNone(iterator.next_page_token)
+ created = [
+ dataset
+ for dataset in all_datasets
+ if dataset.dataset_id in datasets_to_create
+ and dataset.project == Config.CLIENT.project
+ ]
+ self.assertEqual(len(created), len(datasets_to_create))
+
+ def test_list_datasets_w_project(self):
+ # Retrieve datasets from a different project.
+ iterator = Config.CLIENT.list_datasets(project="bigquery-public-data")
+ all_datasets = frozenset([dataset.dataset_id for dataset in iterator])
+ self.assertIn("usa_names", all_datasets)
+
+ def test_create_table(self):
+ dataset = self.temp_dataset(_make_dataset_id("create_table"))
+ table_id = "test_table"
+ table_arg = Table(dataset.table(table_id), schema=SCHEMA)
+ self.assertFalse(_table_exists(table_arg))
+
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ self.assertTrue(_table_exists(table))
+ self.assertEqual(table.table_id, table_id)
+
+ def test_create_tables_in_case_insensitive_dataset(self):
+ ci_dataset = self.temp_dataset(
+ _make_dataset_id("create_table"), is_case_insensitive=True
+ )
+ table_arg = Table(ci_dataset.table("test_table2"), schema=SCHEMA)
+ tablemc_arg = Table(ci_dataset.table("Test_taBLe2")) # same name, in Mixed Case
+
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ self.assertTrue(_table_exists(table_arg))
+ self.assertTrue(_table_exists(tablemc_arg))
+ self.assertIs(ci_dataset.is_case_insensitive, True)
+
+ def test_create_tables_in_case_sensitive_dataset(self):
+ ci_dataset = self.temp_dataset(
+ _make_dataset_id("create_table"), is_case_insensitive=False
+ )
+ table_arg = Table(ci_dataset.table("test_table3"), schema=SCHEMA)
+ tablemc_arg = Table(ci_dataset.table("Test_taBLe3")) # same name, in Mixed Case
+
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ self.assertTrue(_table_exists(table_arg))
+ self.assertFalse(_table_exists(tablemc_arg))
+ self.assertIs(ci_dataset.is_case_insensitive, False)
+
+ def test_create_tables_in_default_sensitivity_dataset(self):
+ dataset = self.temp_dataset(_make_dataset_id("create_table"))
+ table_arg = Table(dataset.table("test_table4"), schema=SCHEMA)
+ tablemc_arg = Table(
+ dataset.table("Test_taBLe4")
+ ) # same name, in MC (Mixed Case)
+
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ self.assertTrue(_table_exists(table_arg))
+ self.assertFalse(_table_exists(tablemc_arg))
+ self.assertIs(dataset.is_case_insensitive, False)
+
+ def test_create_table_with_real_custom_policy(self):
+ from google.cloud.bigquery.schema import PolicyTagList
+
+ policy_tag_client = PolicyTagManagerClient()
+ taxonomy_parent = f"projects/{Config.CLIENT.project}/locations/us"
+
+ new_taxonomy = datacatalog_types.Taxonomy(
+ display_name="Custom test taxonomy" + unique_resource_id(),
+ description="This taxonomy is ony used for a test.",
+ activated_policy_types=[
+ datacatalog_types.Taxonomy.PolicyType.FINE_GRAINED_ACCESS_CONTROL
+ ],
+ )
+
+ taxonomy = policy_tag_client.create_taxonomy(
+ parent=taxonomy_parent, taxonomy=new_taxonomy
+ )
+ self.to_delete.insert(0, taxonomy)
+
+ parent_policy_tag = policy_tag_client.create_policy_tag(
+ parent=taxonomy.name,
+ policy_tag=datacatalog_types.PolicyTag(
+ display_name="Parent policy tag", parent_policy_tag=None
+ ),
+ )
+ child_policy_tag = policy_tag_client.create_policy_tag(
+ parent=taxonomy.name,
+ policy_tag=datacatalog_types.PolicyTag(
+ display_name="Child policy tag",
+ parent_policy_tag=parent_policy_tag.name,
+ ),
+ )
+
+ dataset = self.temp_dataset(
+ _make_dataset_id("create_table_with_real_custom_policy")
+ )
+ table_id = "test_table"
+ policy_1 = PolicyTagList(names=[parent_policy_tag.name])
+ policy_2 = PolicyTagList(names=[child_policy_tag.name])
+
+ schema = [
+ bigquery.SchemaField(
+ "first_name", "STRING", mode="REQUIRED", policy_tags=policy_1
+ ),
+ bigquery.SchemaField(
+ "age", "INTEGER", mode="REQUIRED", policy_tags=policy_2
+ ),
+ ]
+ table_arg = Table(dataset.table(table_id), schema=schema)
+ self.assertFalse(_table_exists(table_arg))
+
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ self.assertTrue(_table_exists(table))
+ self.assertCountEqual(
+ list(table.schema[0].policy_tags.names), [parent_policy_tag.name]
+ )
+ self.assertCountEqual(
+ list(table.schema[1].policy_tags.names), [child_policy_tag.name]
+ )
+
+ def test_create_table_with_default_value_expression(self):
+ dataset = self.temp_dataset(
+ _make_dataset_id("create_table_with_default_value_expression")
+ )
+
+ table_id = "test_table"
+ timestamp_field_name = "timestamp_field_with_default_value_expression"
+
+ string_default_val_expression = "'FOO'"
+ timestamp_default_val_expression = "CURRENT_TIMESTAMP"
+
+ schema = [
+ bigquery.SchemaField(
+ "username",
+ "STRING",
+ default_value_expression=string_default_val_expression,
+ ),
+ bigquery.SchemaField(
+ timestamp_field_name,
+ "TIMESTAMP",
+ default_value_expression=timestamp_default_val_expression,
+ ),
+ ]
+ table_arg = Table(dataset.table(table_id), schema=schema)
+ self.assertFalse(_table_exists(table_arg))
+
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ self.assertTrue(_table_exists(table))
+
+ # Fetch the created table and its metadata to verify that the default
+ # value expression is assigned to fields
+ remote_table = Config.CLIENT.get_table(table)
+ remote_schema = remote_table.schema
+ self.assertEqual(remote_schema, schema)
+
+ for field in remote_schema:
+ if field.name == string_default_val_expression:
+ self.assertEqual("'FOO'", field.default_value_expression)
+ if field.name == timestamp_default_val_expression:
+ self.assertEqual("CURRENT_TIMESTAMP", field.default_value_expression)
+
+ # Insert rows into the created table to verify default values are populated
+ # when value is not provided
+ NOW_SECONDS = 1448911495.484366
+ NOW = datetime.datetime.utcfromtimestamp(NOW_SECONDS).replace(tzinfo=UTC)
+
+ # Rows to insert. Row #1 will have default `TIMESTAMP` defaultValueExpression CURRENT_TIME
+ # Row #2 will have default `STRING` defaultValueExpression "'FOO"
+ ROWS = [{"username": "john_doe"}, {timestamp_field_name: NOW}]
+
+ errors = Config.CLIENT.insert_rows(table, ROWS)
+ self.assertEqual(len(errors), 0)
+
+ # Get list of inserted rows
+ row_1, row_2 = [row for row in list(Config.CLIENT.list_rows(table))]
+
+ # Assert that row values are populated with default value expression
+ self.assertIsInstance(row_1.get(timestamp_field_name), datetime.datetime)
+ self.assertEqual("FOO", row_2.get("username"))
+
+ def test_create_table_w_time_partitioning_w_clustering_fields(self):
+ from google.cloud.bigquery.table import TimePartitioning
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ dataset = self.temp_dataset(_make_dataset_id("create_table_tp_cf"))
+ table_id = "test_table"
+ table_arg = Table(
+ dataset.table(table_id), schema=TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA
+ )
+ self.assertFalse(_table_exists(table_arg))
+
+ table_arg.time_partitioning = TimePartitioning(field="transaction_time")
+
+ table_arg.clustering_fields = ["user_email", "store_code"]
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ self.assertTrue(_table_exists(table))
+ self.assertEqual(table.table_id, table_id)
+ time_partitioning = table.time_partitioning
+ self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)
+ self.assertEqual(time_partitioning.field, "transaction_time")
+ self.assertEqual(table.clustering_fields, ["user_email", "store_code"])
+
+ def test_delete_dataset_with_string(self):
+ dataset_id = _make_dataset_id("delete_table_true_with_string")
+ project = Config.CLIENT.project
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ helpers.retry_403(Config.CLIENT.create_dataset)(Dataset(dataset_ref))
+ self.assertTrue(_dataset_exists(dataset_ref))
+ Config.CLIENT.delete_dataset(dataset_id)
+ self.assertFalse(_dataset_exists(dataset_ref))
+
+ def test_delete_dataset_delete_contents_true(self):
+ dataset_id = _make_dataset_id("delete_table_true_with_content")
+ project = Config.CLIENT.project
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ dataset = helpers.retry_403(Config.CLIENT.create_dataset)(Dataset(dataset_ref))
+
+ table_id = "test_table"
+ table_arg = Table(dataset.table(table_id), schema=SCHEMA)
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ Config.CLIENT.delete_dataset(dataset, delete_contents=True)
+
+ self.assertFalse(_table_exists(table))
+
+ def test_delete_dataset_delete_contents_false(self):
+ from google.api_core import exceptions
+
+ dataset = self.temp_dataset(_make_dataset_id("delete_table_false"))
+ table_id = "test_table"
+ table_arg = Table(dataset.table(table_id), schema=SCHEMA)
+
+ helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ with self.assertRaises(exceptions.BadRequest):
+ Config.CLIENT.delete_dataset(dataset)
+
+ def test_get_table_w_public_dataset(self):
+ public = "bigquery-public-data"
+ dataset_id = "samples"
+ table_id = "shakespeare"
+ table_ref = DatasetReference(public, dataset_id).table(table_id)
+
+ # Get table with reference.
+ table = Config.CLIENT.get_table(table_ref)
+ self.assertEqual(table.table_id, table_id)
+ self.assertEqual(table.dataset_id, dataset_id)
+ self.assertEqual(table.project, public)
+ schema_names = [field.name for field in table.schema]
+ self.assertEqual(schema_names, ["word", "word_count", "corpus", "corpus_date"])
+
+ # Get table with string.
+ table = Config.CLIENT.get_table("{}.{}.{}".format(public, dataset_id, table_id))
+ self.assertEqual(table.table_id, table_id)
+ self.assertEqual(table.dataset_id, dataset_id)
+ self.assertEqual(table.project, public)
+
+ def test_list_partitions(self):
+ table_ref = DatasetReference(
+ "bigquery-public-data", "ethereum_blockchain"
+ ).table("blocks")
+ all_rows = Config.CLIENT.list_partitions(table_ref)
+ self.assertIn("20180801", all_rows)
+ self.assertGreater(len(all_rows), 1000)
+
+ def test_list_tables(self):
+ dataset_id = _make_dataset_id("list_tables")
+ dataset = self.temp_dataset(dataset_id)
+ # Retrieve tables before any are created for the dataset.
+ iterator = Config.CLIENT.list_tables(dataset)
+ all_tables = list(iterator)
+ self.assertEqual(all_tables, [])
+ self.assertIsNone(iterator.next_page_token)
+
+ # Insert some tables to be listed.
+ tables_to_create = [
+ "new" + unique_resource_id(),
+ "newer" + unique_resource_id(),
+ "newest" + unique_resource_id(),
+ ]
+ for table_name in tables_to_create:
+ table = Table(dataset.table(table_name), schema=SCHEMA)
+ created_table = helpers.retry_403(Config.CLIENT.create_table)(table)
+ self.to_delete.insert(0, created_table)
+
+ # Retrieve the tables.
+ iterator = Config.CLIENT.list_tables(dataset)
+ all_tables = list(iterator)
+ self.assertIsNone(iterator.next_page_token)
+ created = [
+ table
+ for table in all_tables
+ if (table.table_id in tables_to_create and table.dataset_id == dataset_id)
+ ]
+ self.assertEqual(len(created), len(tables_to_create))
+
+ # List tables with a string ID.
+ iterator = Config.CLIENT.list_tables(dataset_id)
+ self.assertGreater(len(list(iterator)), 0)
+
+ # List tables with a fully-qualified string ID.
+ iterator = Config.CLIENT.list_tables(
+ "{}.{}".format(Config.CLIENT.project, dataset_id)
+ )
+ self.assertGreater(len(list(iterator)), 0)
+
+ def test_update_table(self):
+ dataset = self.temp_dataset(_make_dataset_id("update_table"))
+
+ TABLE_NAME = "test_table"
+ table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
+ self.assertFalse(_table_exists(table_arg))
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+ self.assertTrue(_table_exists(table))
+ self.assertIsNone(table.friendly_name)
+ self.assertIsNone(table.description)
+ self.assertEqual(table.labels, {})
+ table.friendly_name = "Friendly"
+ table.description = "Description"
+ table.labels = {"priority": "high", "color": "blue"}
+
+ table2 = Config.CLIENT.update_table(
+ table, ["friendly_name", "description", "labels"]
+ )
+
+ self.assertEqual(table2.friendly_name, "Friendly")
+ self.assertEqual(table2.description, "Description")
+ self.assertEqual(table2.labels, {"priority": "high", "color": "blue"})
+
+ table2.description = None
+ table2.labels = {
+ "color": "green", # change
+ "shape": "circle", # add
+ "priority": None, # delete
+ }
+ table3 = Config.CLIENT.update_table(table2, ["description", "labels"])
+ self.assertIsNone(table3.description)
+ self.assertEqual(table3.labels, {"color": "green", "shape": "circle"})
+
+ # If we try to update using table2 again, it will fail because the
+ # previous update changed the ETag.
+ table2.description = "no good"
+ with self.assertRaises(PreconditionFailed):
+ Config.CLIENT.update_table(table2, ["description"])
+
+ def test_update_table_schema(self):
+ dataset = self.temp_dataset(_make_dataset_id("update_table"))
+
+ TABLE_NAME = "test_table"
+ table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
+ self.assertFalse(_table_exists(table_arg))
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+ self.assertTrue(_table_exists(table))
+ voter = bigquery.SchemaField("voter", "BOOLEAN", mode="NULLABLE")
+ schema = table.schema
+ schema.append(voter)
+ table.schema = schema
+
+ updated_table = Config.CLIENT.update_table(table, ["schema"])
+
+ self.assertEqual(len(updated_table.schema), len(schema))
+ for found, expected in zip(updated_table.schema, schema):
+ self.assertEqual(found.name, expected.name)
+ self.assertEqual(found.field_type, expected.field_type)
+ self.assertEqual(found.mode, expected.mode)
+
+ def test_unset_table_schema_attributes(self):
+ from google.cloud.bigquery.schema import PolicyTagList
+
+ dataset = self.temp_dataset(_make_dataset_id("unset_policy_tags"))
+ table_id = "test_table"
+ policy_tags = PolicyTagList(
+ names=[
+ "projects/{}/locations/us/taxonomies/1/policyTags/2".format(
+ Config.CLIENT.project
+ ),
+ ]
+ )
+
+ schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField(
+ "secret_int",
+ "INTEGER",
+ mode="REQUIRED",
+ description="This field is numeric",
+ policy_tags=policy_tags,
+ ),
+ ]
+ table_arg = Table(dataset.table(table_id), schema=schema)
+ self.assertFalse(_table_exists(table_arg))
+
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ self.assertTrue(_table_exists(table))
+ self.assertEqual(policy_tags, table.schema[1].policy_tags)
+
+ # Amend the schema to replace the policy tags
+ new_schema = table.schema[:]
+ old_field = table.schema[1]
+ new_schema[1] = bigquery.SchemaField(
+ name=old_field.name,
+ field_type=old_field.field_type,
+ mode=old_field.mode,
+ description=None,
+ fields=old_field.fields,
+ policy_tags=PolicyTagList(),
+ )
+
+ table.schema = new_schema
+ updated_table = Config.CLIENT.update_table(table, ["schema"])
+
+ self.assertFalse(updated_table.schema[1].description) # Empty string or None.
+ # policyTags key expected to be missing from response.
+ self.assertIsNone(updated_table.schema[1].policy_tags)
+
+ def test_update_table_clustering_configuration(self):
+ dataset = self.temp_dataset(_make_dataset_id("update_table"))
+
+ TABLE_NAME = "test_table"
+ table_arg = Table(dataset.table(TABLE_NAME), schema=CLUSTERING_SCHEMA)
+ self.assertFalse(_table_exists(table_arg))
+
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+ self.assertTrue(_table_exists(table))
+
+ table.clustering_fields = ["full_name", "date_of_birth"]
+ table2 = Config.CLIENT.update_table(table, ["clustering_fields"])
+ self.assertEqual(table2.clustering_fields, ["full_name", "date_of_birth"])
+
+ table2.clustering_fields = None
+ table3 = Config.CLIENT.update_table(table2, ["clustering_fields"])
+ self.assertIsNone(table3.clustering_fields, None)
+
+ @staticmethod
+ def _fetch_single_page(table, selected_fields=None):
+ iterator = Config.CLIENT.list_rows(table, selected_fields=selected_fields)
+ page = next(iterator.pages)
+ return list(page)
+
+ def test_insert_rows_then_dump_table(self):
+ NOW_SECONDS = 1448911495.484366
+ NOW = datetime.datetime.utcfromtimestamp(NOW_SECONDS).replace(tzinfo=UTC)
+ ROWS = [
+ ("Phred Phlyntstone", 32, NOW),
+ ("Bharney Rhubble", 33, NOW + datetime.timedelta(seconds=10)),
+ ("Wylma Phlyntstone", 29, NOW + datetime.timedelta(seconds=20)),
+ ("Bhettye Rhubble", 27, None),
+ ]
+ ROW_IDS = range(len(ROWS))
+
+ dataset = self.temp_dataset(_make_dataset_id("insert_rows_then_dump"))
+ TABLE_ID = "test_table"
+ schema = [
+ bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ bigquery.SchemaField("now", "TIMESTAMP"),
+ ]
+ table_arg = Table(dataset.table(TABLE_ID), schema=schema)
+ self.assertFalse(_table_exists(table_arg))
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+ self.assertTrue(_table_exists(table))
+
+ errors = Config.CLIENT.insert_rows(table, ROWS, row_ids=ROW_IDS)
+ self.assertEqual(len(errors), 0)
+
+ rows = ()
+
+ # Allow for "warm up" before rows visible. See
+ # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
+ # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
+ retry = RetryResult(_has_rows, max_tries=8)
+ rows = retry(self._fetch_single_page)(table)
+ row_tuples = [r.values() for r in rows]
+ by_age = operator.itemgetter(1)
+ self.assertEqual(sorted(row_tuples, key=by_age), sorted(ROWS, key=by_age))
+
+ def test_load_table_from_local_avro_file_then_dump_table(self):
+ from google.cloud.bigquery.job import SourceFormat
+ from google.cloud.bigquery.job import WriteDisposition
+
+ TABLE_NAME = "test_table_avro"
+ ROWS = [
+ ("violet", 400),
+ ("indigo", 445),
+ ("blue", 475),
+ ("green", 510),
+ ("yellow", 570),
+ ("orange", 590),
+ ("red", 650),
+ ]
+
+ dataset = self.temp_dataset(_make_dataset_id("load_local_then_dump"))
+ table_ref = dataset.table(TABLE_NAME)
+ table = Table(table_ref)
+ self.to_delete.insert(0, table)
+
+ with open(DATA_PATH / "colors.avro", "rb") as avrof:
+ config = bigquery.LoadJobConfig()
+ config.source_format = SourceFormat.AVRO
+ config.write_disposition = WriteDisposition.WRITE_TRUNCATE
+ job = Config.CLIENT.load_table_from_file(
+ avrof, table_ref, job_config=config
+ )
+ # Retry until done.
+ job.result(timeout=JOB_TIMEOUT)
+
+ self.assertEqual(job.output_rows, len(ROWS))
+
+ table = Config.CLIENT.get_table(table)
+ rows = self._fetch_single_page(table)
+ row_tuples = [r.values() for r in rows]
+ by_wavelength = operator.itemgetter(1)
+ self.assertEqual(
+ sorted(row_tuples, key=by_wavelength), sorted(ROWS, key=by_wavelength)
+ )
+
+ def test_load_table_from_local_parquet_file_decimal_types(self):
+ from google.cloud.bigquery.enums import DecimalTargetType
+ from google.cloud.bigquery.job import SourceFormat
+ from google.cloud.bigquery.job import WriteDisposition
+
+ TABLE_NAME = "test_table_parquet"
+
+ expected_rows = [
+ (decimal.Decimal("123.999999999999"),),
+ (decimal.Decimal("99999999999999999999999999.999999999999"),),
+ ]
+
+ dataset = self.temp_dataset(_make_dataset_id("load_local_parquet_then_dump"))
+ table_ref = dataset.table(TABLE_NAME)
+ table = Table(table_ref)
+ self.to_delete.insert(0, table)
+
+ job_config = bigquery.LoadJobConfig()
+ job_config.source_format = SourceFormat.PARQUET
+ job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
+ job_config.decimal_target_types = [
+ DecimalTargetType.NUMERIC,
+ DecimalTargetType.BIGNUMERIC,
+ DecimalTargetType.STRING,
+ ]
+
+ with open(DATA_PATH / "numeric_38_12.parquet", "rb") as parquet_file:
+ job = Config.CLIENT.load_table_from_file(
+ parquet_file, table_ref, job_config=job_config
+ )
+
+ job.result(timeout=JOB_TIMEOUT) # Retry until done.
+
+ self.assertEqual(job.output_rows, len(expected_rows))
+
+ table = Config.CLIENT.get_table(table)
+ rows = self._fetch_single_page(table)
+ row_tuples = [r.values() for r in rows]
+ self.assertEqual(sorted(row_tuples), sorted(expected_rows))
+
+ # Forcing the NUMERIC type, however, should result in an error.
+ job_config.decimal_target_types = [DecimalTargetType.NUMERIC]
+
+ with open(DATA_PATH / "numeric_38_12.parquet", "rb") as parquet_file:
+ job = Config.CLIENT.load_table_from_file(
+ parquet_file, table_ref, job_config=job_config
+ )
+
+ with self.assertRaises(BadRequest) as exc_info:
+ job.result(timeout=JOB_TIMEOUT)
+
+ exc_msg = str(exc_info.exception)
+ self.assertIn("out of valid NUMERIC range", exc_msg)
+
+ def test_load_table_from_json_basic_use(self):
+ table_schema = (
+ bigquery.SchemaField("name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ bigquery.SchemaField("birthday", "DATE", mode="REQUIRED"),
+ bigquery.SchemaField("is_awesome", "BOOLEAN", mode="REQUIRED"),
+ )
+
+ json_rows = [
+ {"name": "John", "age": 18, "birthday": "2001-10-15", "is_awesome": False},
+ {"name": "Chuck", "age": 79, "birthday": "1940-03-10", "is_awesome": True},
+ ]
+
+ dataset_id = _make_dataset_id("bq_system_test")
+ self.temp_dataset(dataset_id)
+ table_id = "{}.{}.load_table_from_json_basic_use".format(
+ Config.CLIENT.project, dataset_id
+ )
+
+ # Create the table before loading so that schema mismatch errors are
+ # identified.
+ table = helpers.retry_403(Config.CLIENT.create_table)(
+ Table(table_id, schema=table_schema)
+ )
+ self.to_delete.insert(0, table)
+
+ job_config = bigquery.LoadJobConfig(schema=table_schema)
+ load_job = Config.CLIENT.load_table_from_json(
+ json_rows, table_id, job_config=job_config
+ )
+ load_job.result()
+
+ table = Config.CLIENT.get_table(table)
+ self.assertEqual(tuple(table.schema), table_schema)
+ self.assertEqual(table.num_rows, 2)
+
+ def test_load_table_from_json_schema_autodetect(self):
+ json_rows = [
+ {"name": "John", "age": 18, "birthday": "2001-10-15", "is_awesome": False},
+ {"name": "Chuck", "age": 79, "birthday": "1940-03-10", "is_awesome": True},
+ ]
+
+ dataset_id = _make_dataset_id("bq_system_test")
+ self.temp_dataset(dataset_id)
+ table_id = "{}.{}.load_table_from_json_basic_use".format(
+ Config.CLIENT.project, dataset_id
+ )
+
+ # Use schema with NULLABLE fields, because schema autodetection
+ # defaults to field mode NULLABLE.
+ table_schema = (
+ bigquery.SchemaField("name", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("age", "INTEGER", mode="NULLABLE"),
+ bigquery.SchemaField("birthday", "DATE", mode="NULLABLE"),
+ bigquery.SchemaField("is_awesome", "BOOLEAN", mode="NULLABLE"),
+ )
+ # create the table before loading so that the column order is predictable
+ table = helpers.retry_403(Config.CLIENT.create_table)(
+ Table(table_id, schema=table_schema)
+ )
+ self.to_delete.insert(0, table)
+
+ # do not pass an explicit job config to trigger automatic schema detection
+ load_job = Config.CLIENT.load_table_from_json(json_rows, table_id)
+ load_job.result()
+
+ table = Config.CLIENT.get_table(table)
+ self.assertEqual(tuple(table.schema), table_schema)
+ self.assertEqual(table.num_rows, 2)
+
+ # Autodetect makes best effort to infer the schema, but situations exist
+ # when the detected schema is wrong, and does not match existing schema.
+ # Thus the client sets autodetect = False when table exists and just uses
+ # the existing schema. This test case uses a special case where backend has
+ # no way to distinguish int from string.
+ def test_load_table_from_json_schema_autodetect_table_exists(self):
+ json_rows = [
+ {"name": "123", "age": 18, "birthday": "2001-10-15", "is_awesome": False},
+ {"name": "456", "age": 79, "birthday": "1940-03-10", "is_awesome": True},
+ ]
+
+ dataset_id = _make_dataset_id("bq_system_test")
+ self.temp_dataset(dataset_id)
+ table_id = "{}.{}.load_table_from_json_basic_use".format(
+ Config.CLIENT.project, dataset_id
+ )
+
+ # Use schema with NULLABLE fields, because schema autodetection
+ # defaults to field mode NULLABLE.
+ table_schema = (
+ bigquery.SchemaField("name", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("age", "INTEGER", mode="NULLABLE"),
+ bigquery.SchemaField("birthday", "DATE", mode="NULLABLE"),
+ bigquery.SchemaField("is_awesome", "BOOLEAN", mode="NULLABLE"),
+ )
+ # create the table before loading so that the column order is predictable
+ table = helpers.retry_403(Config.CLIENT.create_table)(
+ Table(table_id, schema=table_schema)
+ )
+ self.to_delete.insert(0, table)
+
+ # do not pass an explicit job config to trigger automatic schema detection
+ load_job = Config.CLIENT.load_table_from_json(json_rows, table_id)
+ load_job.result()
+
+ table = Config.CLIENT.get_table(table)
+ self.assertEqual(tuple(table.schema), table_schema)
+ self.assertEqual(table.num_rows, 2)
+
+ def test_load_avro_from_uri_then_dump_table(self):
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import SourceFormat
+ from google.cloud.bigquery.job import WriteDisposition
+
+ table_name = "test_table"
+ rows = [
+ ("violet", 400),
+ ("indigo", 445),
+ ("blue", 475),
+ ("green", 510),
+ ("yellow", 570),
+ ("orange", 590),
+ ("red", 650),
+ ]
+ with open(DATA_PATH / "colors.avro", "rb") as f:
+ GS_URL = self._write_avro_to_storage(
+ "bq_load_test" + unique_resource_id(), "colors.avro", f
+ )
+
+ dataset = self.temp_dataset(_make_dataset_id("bq_load_test"))
+ table_arg = dataset.table(table_name)
+ table = helpers.retry_403(Config.CLIENT.create_table)(Table(table_arg))
+ self.to_delete.insert(0, table)
+
+ config = bigquery.LoadJobConfig()
+ config.create_disposition = CreateDisposition.CREATE_NEVER
+ config.source_format = SourceFormat.AVRO
+ config.write_disposition = WriteDisposition.WRITE_EMPTY
+ job = Config.CLIENT.load_table_from_uri(GS_URL, table_arg, job_config=config)
+ job.result(timeout=JOB_TIMEOUT)
+ self.assertEqual(job.output_rows, len(rows))
+
+ table = Config.CLIENT.get_table(table)
+ fetched = self._fetch_single_page(table)
+ row_tuples = [r.values() for r in fetched]
+ self.assertEqual(
+ sorted(row_tuples, key=lambda x: x[1]), sorted(rows, key=lambda x: x[1])
+ )
+
+ def test_load_table_from_uri_then_dump_table(self):
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import SourceFormat
+ from google.cloud.bigquery.job import WriteDisposition
+
+ TABLE_ID = "test_table"
+ GS_URL = self._write_csv_to_storage(
+ "bq_load_test" + unique_resource_id(), "person_ages.csv", HEADER_ROW, ROWS
+ )
+
+ dataset = self.temp_dataset(_make_dataset_id("load_gcs_then_dump"))
+
+ table_arg = Table(dataset.table(TABLE_ID), schema=SCHEMA)
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ config = bigquery.LoadJobConfig()
+ config.create_disposition = CreateDisposition.CREATE_NEVER
+ config.skip_leading_rows = 1
+ config.source_format = SourceFormat.CSV
+ config.write_disposition = WriteDisposition.WRITE_EMPTY
+ job = Config.CLIENT.load_table_from_uri(
+ GS_URL, dataset.table(TABLE_ID), job_config=config
+ )
+
+ # Allow for 90 seconds of "warm up" before rows visible. See
+ # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
+ # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
+ retry = RetryInstanceState(_job_done, max_tries=8)
+ retry(job.reload)()
+
+ rows = self._fetch_single_page(table)
+ row_tuples = [r.values() for r in rows]
+ by_age = operator.itemgetter(1)
+ self.assertEqual(sorted(row_tuples, key=by_age), sorted(ROWS, key=by_age))
+
+ def test_load_table_from_file_w_explicit_location(self):
+ # Create a temporary bucket for extract files.
+ bucket_name = "bq_load_table_eu_extract_test" + unique_resource_id()
+ self._create_bucket(bucket_name, location="eu")
+
+ # Create a temporary dataset & table in the EU.
+ table_bytes = io.BytesIO(b"a,3\nb,2\nc,1\n")
+ client = Config.CLIENT
+ dataset = self.temp_dataset(_make_dataset_id("eu_load_file"), location="EU")
+ table_ref = dataset.table("letters")
+ job_config = bigquery.LoadJobConfig()
+ job_config.skip_leading_rows = 0
+ job_config.schema = [
+ bigquery.SchemaField("letter", "STRING"),
+ bigquery.SchemaField("value", "INTEGER"),
+ ]
+
+ # Load the file to an EU dataset with an EU load job.
+ load_job = client.load_table_from_file(
+ table_bytes, table_ref, location="EU", job_config=job_config
+ )
+ load_job.result()
+ job_id = load_job.job_id
+
+ # Can get the job from the EU.
+ load_job = client.get_job(load_job)
+ self.assertEqual(job_id, load_job.job_id)
+ self.assertEqual("EU", load_job.location)
+ self.assertTrue(load_job.exists())
+
+ # Cannot get the job from the US.
+ with self.assertRaises(NotFound):
+ client.get_job(job_id, location="US")
+
+ load_job_us = client.get_job(job_id)
+ load_job_us._properties["jobReference"]["location"] = "US"
+ self.assertFalse(load_job_us.exists())
+ with self.assertRaises(NotFound):
+ load_job_us.reload()
+
+ # Can cancel the job from the EU.
+ self.assertTrue(load_job.cancel())
+ load_job = client.cancel_job(load_job)
+ self.assertEqual(job_id, load_job.job_id)
+ self.assertEqual("EU", load_job.location)
+
+ # Cannot cancel the job from the US.
+ with self.assertRaises(ClientError):
+ client.cancel_job(job_id, location="US")
+ with self.assertRaises(ClientError):
+ load_job_us.cancel()
+
+ # Can list the table rows.
+ table = client.get_table(table_ref)
+ self.assertEqual(table.num_rows, 3)
+ rows = [(row.letter, row.value) for row in client.list_rows(table)]
+ self.assertEqual(list(sorted(rows)), [("a", 3), ("b", 2), ("c", 1)])
+
+ # Verify location behavior with queries
+ query_config = bigquery.QueryJobConfig()
+ query_config.dry_run = True
+
+ query_string = "SELECT * FROM `{}.letters` LIMIT 1".format(dataset.dataset_id)
+
+ eu_query = client.query(query_string, location="EU", job_config=query_config)
+ self.assertTrue(eu_query.done)
+
+ # Cannot query from US.
+ with self.assertRaises(GoogleAPICallError):
+ list(client.query(query_string, location="US", job_config=query_config))
+
+ # Cannot copy from US.
+ with self.assertRaises(GoogleAPICallError):
+ client.copy_table(
+ table_ref, dataset.table("letters2_us"), location="US"
+ ).result()
+
+ # Cannot extract from US.
+ with self.assertRaises(GoogleAPICallError):
+ client.extract_table(
+ table_ref, "gs://{}/letters-us.csv".format(bucket_name), location="US"
+ ).result()
+
+ def test_create_external_table_with_reference_file_schema_uri_avro(self):
+ client = Config.CLIENT
+ dataset_id = _make_dataset_id("external_reference_file_avro")
+ self.temp_dataset(dataset_id)
+ dataset_ref = bigquery.DatasetReference(client.project, dataset_id)
+ table_id = "test_ref_file_avro"
+ table_ref = bigquery.TableReference(dataset_ref=dataset_ref, table_id=table_id)
+
+ expected_schema = [
+ bigquery.SchemaField("username", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("tweet", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("timestamp", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("likes", "INTEGER", mode="NULLABLE"),
+ ]
+
+ # By default, the table should have the c-twitter schema because it is lexicographically last
+ # in the `SOURCE_URIs` list:
+ # a-twitter schema: (username, tweet, timestamp, likes)
+ # b-twitter schema: (username, tweet, timestamp)
+ # c-twitter schema: (username, tweet)
+
+ # Because `referenceFileSchemaUri` is set as a-twitter, the table will have a-twitter schema
+
+ # Create external data configuration
+ external_config = bigquery.ExternalConfig(bigquery.ExternalSourceFormat.AVRO)
+ external_config.source_uris = SOURCE_URIS_AVRO
+ external_config.reference_file_schema_uri = REFERENCE_FILE_SCHEMA_URI_AVRO
+
+ table = bigquery.Table(table_ref)
+ table.external_data_configuration = external_config
+
+ table = client.create_table(table)
+
+ # Get table created by the create_table API call
+ generated_table = client.get_table(table_ref)
+
+ self.assertEqual(generated_table.schema, expected_schema)
+ self.assertEqual(
+ generated_table.external_data_configuration._properties[
+ "referenceFileSchemaUri"
+ ],
+ REFERENCE_FILE_SCHEMA_URI_AVRO,
+ )
+
+ # Clean up test
+ self.to_delete.insert(0, generated_table)
+
+ def test_load_table_from_uri_with_reference_file_schema_uri_avro(self):
+ dataset_id = _make_dataset_id("test_reference_file_avro")
+ self.temp_dataset(dataset_id)
+ client = Config.CLIENT
+ dataset_ref = bigquery.DatasetReference(client.project, dataset_id)
+ table_id = "test_ref_file_avro"
+ table_ref = bigquery.TableReference(dataset_ref=dataset_ref, table_id=table_id)
+
+ expected_schema = [
+ bigquery.SchemaField("username", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("tweet", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("timestamp", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("likes", "INTEGER", mode="NULLABLE"),
+ ]
+
+ # By default, the table should have the c-twitter schema because it is lexicographically last
+ # in the `SOURCE_URIS` list:
+ # a-twitter schema: (username, tweet, timestamp, likes)
+ # b-twitter schema: (username, tweet, timestamp)
+ # c-twitter schema: (username, tweet)
+
+ # Because `referenceFileSchemaUri` is set as a-twitter, the table will have a-twitter schema
+
+ # Create load job configuration
+ load_job_config = bigquery.LoadJobConfig(
+ source_format=bigquery.SourceFormat.AVRO
+ )
+ load_job_config.reference_file_schema_uri = REFERENCE_FILE_SCHEMA_URI_AVRO
+
+ load_job = client.load_table_from_uri(
+ source_uris=SOURCE_URIS_AVRO,
+ destination=table_ref,
+ job_config=load_job_config,
+ )
+ # Wait for load job to complete
+ result = load_job.result()
+
+ # Get table created by the load job
+ generated_table = client.get_table(table_ref)
+ self.assertEqual(generated_table.schema, expected_schema)
+ self.assertEqual(
+ result._properties["configuration"]["load"]["referenceFileSchemaUri"],
+ REFERENCE_FILE_SCHEMA_URI_AVRO,
+ )
+
+ # Clean up test
+ self.to_delete.insert(0, generated_table)
+
+ def test_create_external_table_with_reference_file_schema_uri_parquet(self):
+ client = Config.CLIENT
+ dataset_id = _make_dataset_id("external_table_ref_file_parquet")
+ self.temp_dataset(dataset_id)
+ dataset_ref = bigquery.DatasetReference(client.project, dataset_id)
+ table_id = "test_ref_file_parquet"
+ table_ref = bigquery.TableReference(dataset_ref=dataset_ref, table_id=table_id)
+
+ expected_schema = [
+ bigquery.SchemaField("username", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("tweet", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("timestamp", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("likes", "INTEGER", mode="NULLABLE"),
+ ]
+
+ # By default, the table should have the c-twitter schema because it is lexicographically last
+ # in the `SOURCE_URIS` list:
+ # a-twitter schema: (username, tweet, timestamp, likes)
+ # b-twitter schema: (username, tweet, timestamp)
+ # c-twitter schema: (username, tweet)
+
+ # Because `referenceFileSchemaUri` is set as a-twitter, the table will have a-twitter schema
+
+ # Create external data configuration
+ external_config = bigquery.ExternalConfig(bigquery.ExternalSourceFormat.PARQUET)
+ external_config.source_uris = SOURCE_URIS_PARQUET
+ external_config.reference_file_schema_uri = REFERENCE_FILE_SCHEMA_URI_PARQUET
+
+ table = bigquery.Table(table_ref)
+ table.external_data_configuration = external_config
+
+ table = client.create_table(table)
+
+ # Get table created by the create_table API call
+ generated_table = client.get_table(table_ref)
+ self.assertEqual(generated_table.schema, expected_schema)
+ self.assertEqual(
+ generated_table.external_data_configuration._properties[
+ "referenceFileSchemaUri"
+ ],
+ REFERENCE_FILE_SCHEMA_URI_PARQUET,
+ )
+
+ # Clean up test
+ self.to_delete.insert(0, generated_table)
+
+ def test_load_table_from_uri_with_reference_file_schema_uri_parquet(self):
+ dataset_id = _make_dataset_id("test_reference_file_parquet")
+ self.temp_dataset(dataset_id)
+ client = Config.CLIENT
+ dataset_ref = bigquery.DatasetReference(client.project, dataset_id)
+ table_id = "test_ref_file_parquet"
+ table_ref = bigquery.TableReference(dataset_ref=dataset_ref, table_id=table_id)
+
+ expected_schema = [
+ bigquery.SchemaField("username", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("tweet", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("timestamp", "STRING", mode="NULLABLE"),
+ bigquery.SchemaField("likes", "INTEGER", mode="NULLABLE"),
+ ]
+
+ # By default, the table should have the c-twitter schema because it is lexicographically last
+ # in the `SOURCE_URIS` list:
+ # a-twitter schema: (username, tweet, timestamp, likes)
+ # b-twitter schema: (username, tweet, timestamp)
+ # c-twitter schema: (username, tweet)
+
+ # Because `referenceFileSchemaUri` is set as a-twitter, the table will have a-twitter schema
+
+ # Create load job configuration
+ load_job_config = bigquery.LoadJobConfig(
+ source_format=bigquery.SourceFormat.PARQUET
+ )
+ load_job_config.reference_file_schema_uri = REFERENCE_FILE_SCHEMA_URI_PARQUET
+
+ load_job = client.load_table_from_uri(
+ source_uris=SOURCE_URIS_PARQUET,
+ destination=table_ref,
+ job_config=load_job_config,
+ )
+ # Wait for load job to complete
+ result = load_job.result()
+
+ # Get table created by the load job
+ generated_table = client.get_table(table_ref)
+ self.assertEqual(generated_table.schema, expected_schema)
+ self.assertEqual(
+ result._properties["configuration"]["load"]["referenceFileSchemaUri"],
+ REFERENCE_FILE_SCHEMA_URI_PARQUET,
+ )
+
+ # Clean up test
+ self.to_delete.insert(0, generated_table)
+
+ def _write_csv_to_storage(self, bucket_name, blob_name, header_row, data_rows):
+ from google.cloud._testing import _NamedTemporaryFile
+
+ bucket = self._create_bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+
+ with _NamedTemporaryFile() as temp:
+ with open(temp.name, "w") as csv_write:
+ writer = csv.writer(csv_write)
+ writer.writerow(header_row)
+ writer.writerows(data_rows)
+
+ with open(temp.name, "rb") as csv_read:
+ retry_storage_errors(blob.upload_from_file)(
+ csv_read, content_type="text/csv"
+ )
+
+ self.to_delete.insert(0, blob)
+ return "gs://{}/{}".format(bucket_name, blob_name)
+
+ def _write_avro_to_storage(self, bucket_name, blob_name, avro_file):
+ bucket = self._create_bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+ retry_storage_errors(blob.upload_from_file)(
+ avro_file, content_type="application/x-avro-binary"
+ )
+ self.to_delete.insert(0, blob)
+ return "gs://{}/{}".format(bucket_name, blob_name)
+
+ def _load_table_for_extract_table(self, bucket, blob_name, table, rows):
+ from google.cloud._testing import _NamedTemporaryFile
+
+ blob = bucket.blob(blob_name)
+ with _NamedTemporaryFile() as temp:
+ with open(temp.name, "w") as csv_write:
+ writer = csv.writer(csv_write)
+ writer.writerow(HEADER_ROW)
+ writer.writerows(rows)
+
+ with open(temp.name, "rb") as csv_read:
+ retry_storage_errors(blob.upload_from_file)(
+ csv_read, content_type="text/csv"
+ )
+
+ self.to_delete.insert(0, blob)
+
+ dataset = self.temp_dataset(table.dataset_id)
+ table_ref = dataset.table(table.table_id)
+ config = bigquery.LoadJobConfig()
+ config.autodetect = True
+ gs_url = "gs://{}/{}".format(bucket.name, blob_name)
+ job = Config.CLIENT.load_table_from_uri(gs_url, table_ref, job_config=config)
+ # TODO(jba): do we need this retry now that we have job.result()?
+ # Allow for 90 seconds of "warm up" before rows visible. See
+ # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
+ # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
+ retry = RetryInstanceState(_job_done, max_tries=8)
+ retry(job.reload)()
+
+ def test_extract_table(self):
+ local_id = unique_resource_id()
+ bucket_name = "bq_extract_test" + local_id
+ source_blob_name = "person_ages.csv"
+ dataset_id = _make_dataset_id("load_gcs_then_extract")
+ table_id = "test_table"
+ project = Config.CLIENT.project
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ table_ref = dataset_ref.table(table_id)
+ table = Table(table_ref)
+ self.to_delete.insert(0, table)
+ bucket = self._create_bucket(bucket_name)
+ self._load_table_for_extract_table(bucket, source_blob_name, table_ref, ROWS)
+ destination_blob_name = "person_ages_out.csv"
+ destination = bucket.blob(destination_blob_name)
+ destination_uri = "gs://{}/person_ages_out.csv".format(bucket_name)
+
+ job = Config.CLIENT.extract_table(table_ref, destination_uri)
+ job.result(timeout=100)
+
+ self.to_delete.insert(0, destination)
+ got_bytes = retry_storage_errors(destination.download_as_bytes)()
+ got = got_bytes.decode("utf-8")
+ self.assertIn("Bharney Rhubble", got)
+
+ def test_copy_table(self):
+ # If we create a new table to copy from, the test won't work
+ # because the new rows will be stored in the streaming buffer,
+ # and copy jobs don't read the streaming buffer.
+ # We could wait for the streaming buffer to empty, but that could
+ # take minutes. Instead we copy a small public table.
+ source_dataset = DatasetReference("bigquery-public-data", "samples")
+ source_ref = source_dataset.table("shakespeare")
+ dest_dataset = self.temp_dataset(_make_dataset_id("copy_table"))
+ dest_ref = dest_dataset.table("destination_table")
+ job_config = bigquery.CopyJobConfig()
+ job = Config.CLIENT.copy_table(source_ref, dest_ref, job_config=job_config)
+ job.result()
+
+ dest_table = Config.CLIENT.get_table(dest_ref)
+ self.to_delete.insert(0, dest_table)
+ # Just check that we got some rows.
+ got_rows = self._fetch_single_page(dest_table)
+ self.assertTrue(len(got_rows) > 0)
+
+ def test_test_iam_permissions(self):
+ dataset = self.temp_dataset(_make_dataset_id("create_table"))
+ table_id = "test_table"
+ table_ref = Table(dataset.table(table_id))
+ self.assertFalse(_table_exists(table_ref))
+
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_ref)
+ self.to_delete.insert(0, table)
+
+ self.assertTrue(_table_exists(table))
+
+ # Test some default permissions.
+ permissions = [
+ "bigquery.tables.get",
+ "bigquery.tables.getData",
+ "bigquery.tables.update",
+ ]
+
+ response = Config.CLIENT.test_iam_permissions(table, [permissions])
+ self.assertEqual(set(response["permissions"]), set(permissions))
+
+ def test_job_cancel(self):
+ DATASET_ID = _make_dataset_id("job_cancel")
+ JOB_ID_PREFIX = "fetch_" + DATASET_ID
+ TABLE_NAME = "test_table"
+ QUERY = "SELECT * FROM %s.%s" % (DATASET_ID, TABLE_NAME)
+
+ dataset = self.temp_dataset(DATASET_ID)
+
+ table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ job = Config.CLIENT.query(QUERY, job_id_prefix=JOB_ID_PREFIX)
+ job.cancel()
+
+ retry = RetryInstanceState(_job_done, max_tries=8)
+ retry(job.reload)()
+
+ # The `cancel` API doesn't leave any reliable traces on
+ # the status of the job resource, so we can't really assert for
+ # them here. The best we can do is not that the API call didn't
+ # raise an error, and that the job completed (in the `retry()`
+ # above).
+
+ def test_job_labels(self):
+ DATASET_ID = _make_dataset_id("job_cancel")
+ JOB_ID_PREFIX = "fetch_" + DATASET_ID
+ QUERY = "SELECT 1 as one"
+
+ self.temp_dataset(DATASET_ID)
+
+ job_config = bigquery.QueryJobConfig(
+ labels={"custom_label": "label_value", "another_label": "foo123"}
+ )
+ job = Config.CLIENT.query(
+ QUERY, job_id_prefix=JOB_ID_PREFIX, job_config=job_config
+ )
+
+ expected_labels = {"custom_label": "label_value", "another_label": "foo123"}
+ self.assertEqual(job.labels, expected_labels)
+
+ def test_get_failed_job(self):
+ # issue 4246
+ from google.api_core.exceptions import BadRequest
+
+ JOB_ID = "invalid_{}".format(str(uuid.uuid4()))
+ QUERY = "SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);"
+ PARAM = bigquery.ScalarQueryParameter("ts_value", "TIMESTAMP", 1.4810976e9)
+
+ job_config = bigquery.QueryJobConfig()
+ job_config.query_parameters = [PARAM]
+
+ with self.assertRaises(BadRequest):
+ Config.CLIENT.query(QUERY, job_id=JOB_ID, job_config=job_config).result()
+
+ job = Config.CLIENT.get_job(JOB_ID)
+
+ with self.assertRaises(ValueError):
+ job.query_parameters
+
+ def test_query_w_legacy_sql_types(self):
+ naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
+ stamp = "%s %s" % (naive.date().isoformat(), naive.time().isoformat())
+ zoned = naive.replace(tzinfo=UTC)
+ examples = [
+ {"sql": "SELECT 1", "expected": 1},
+ {"sql": "SELECT 1.3", "expected": 1.3},
+ {"sql": "SELECT TRUE", "expected": True},
+ {"sql": 'SELECT "ABC"', "expected": "ABC"},
+ {"sql": 'SELECT CAST("foo" AS BYTES)', "expected": b"foo"},
+ {"sql": 'SELECT CAST("%s" AS TIMESTAMP)' % (stamp,), "expected": zoned},
+ ]
+ for example in examples:
+ job_config = bigquery.QueryJobConfig()
+ job_config.use_legacy_sql = True
+ rows = list(Config.CLIENT.query(example["sql"], job_config=job_config))
+ self.assertEqual(len(rows), 1)
+ self.assertEqual(len(rows[0]), 1)
+ self.assertEqual(rows[0][0], example["expected"])
+
+ def test_query_w_standard_sql_types(self):
+ for sql, expected in helpers.STANDARD_SQL_EXAMPLES:
+ rows = list(Config.CLIENT.query(sql))
+ self.assertEqual(len(rows), 1)
+ self.assertEqual(len(rows[0]), 1)
+ self.assertEqual(rows[0][0], expected)
+
+ def test_query_w_failed_query(self):
+ from google.api_core.exceptions import BadRequest
+
+ with self.assertRaises(BadRequest):
+ Config.CLIENT.query("invalid syntax;").result()
+
+ def test_query_w_wrong_config(self):
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ good_query = "SELECT 1;"
+ rows = list(Config.CLIENT.query("SELECT 1;").result())
+ assert rows[0][0] == 1
+
+ bad_config = LoadJobConfig()
+ bad_config.source_format = enums.SourceFormat.CSV
+ with self.assertRaises(Exception):
+ Config.CLIENT.query(good_query, job_config=bad_config).result()
+
+ def test_query_w_page_size(self):
+ page_size = 45
+ query_job = Config.CLIENT.query(
+ "SELECT word FROM `bigquery-public-data.samples.shakespeare`;",
+ job_id_prefix="test_query_w_page_size_",
+ )
+ iterator = query_job.result(page_size=page_size)
+ self.assertEqual(next(iterator.pages).num_items, page_size)
+
+ def test_query_w_start_index(self):
+ start_index = 164652
+ query_job = Config.CLIENT.query(
+ "SELECT word FROM `bigquery-public-data.samples.shakespeare`;",
+ job_id_prefix="test_query_w_start_index_",
+ )
+ result1 = query_job.result(start_index=start_index)
+ total_rows = result1.total_rows
+
+ self.assertEqual(result1.extra_params["startIndex"], start_index)
+ self.assertEqual(len(list(result1)), total_rows - start_index)
+
+ def test_dml_statistics(self):
+ table_schema = (
+ bigquery.SchemaField("foo", "STRING"),
+ bigquery.SchemaField("bar", "INTEGER"),
+ )
+
+ dataset_id = _make_dataset_id("bq_system_test")
+ self.temp_dataset(dataset_id)
+ table_id = "{}.{}.test_dml_statistics".format(Config.CLIENT.project, dataset_id)
+
+ # Create the table before loading so that the column order is deterministic.
+ table = helpers.retry_403(Config.CLIENT.create_table)(
+ Table(table_id, schema=table_schema)
+ )
+ self.to_delete.insert(0, table)
+
+ # Insert a few rows and check the stats.
+ sql = f"""
+ INSERT INTO `{table_id}`
+ VALUES ("one", 1), ("two", 2), ("three", 3), ("four", 4);
+ """
+ query_job = Config.CLIENT.query(sql)
+ query_job.result()
+
+ assert query_job.dml_stats is not None
+ assert query_job.dml_stats.inserted_row_count == 4
+ assert query_job.dml_stats.updated_row_count == 0
+ assert query_job.dml_stats.deleted_row_count == 0
+
+ # Update some of the rows.
+ sql = f"""
+ UPDATE `{table_id}`
+ SET bar = bar + 1
+ WHERE bar > 2;
+ """
+ query_job = Config.CLIENT.query(sql)
+ query_job.result()
+
+ assert query_job.dml_stats is not None
+ assert query_job.dml_stats.inserted_row_count == 0
+ assert query_job.dml_stats.updated_row_count == 2
+ assert query_job.dml_stats.deleted_row_count == 0
+
+ # Now delete a few rows and check the stats.
+ sql = f"""
+ DELETE FROM `{table_id}`
+ WHERE foo != "two";
+ """
+ query_job = Config.CLIENT.query(sql)
+ query_job.result()
+
+ assert query_job.dml_stats is not None
+ assert query_job.dml_stats.inserted_row_count == 0
+ assert query_job.dml_stats.updated_row_count == 0
+ assert query_job.dml_stats.deleted_row_count == 3
+
+ def test_transaction_info(self):
+ table_schema = (
+ bigquery.SchemaField("foo", "STRING"),
+ bigquery.SchemaField("bar", "INTEGER"),
+ )
+
+ dataset_id = _make_dataset_id("bq_system_test")
+ self.temp_dataset(dataset_id)
+ table_id = f"{Config.CLIENT.project}.{dataset_id}.test_dml_statistics"
+
+ # Create the table before loading so that the column order is deterministic.
+ table = helpers.retry_403(Config.CLIENT.create_table)(
+ Table(table_id, schema=table_schema)
+ )
+ self.to_delete.insert(0, table)
+
+ # Insert a few rows and check the stats.
+ sql = f"""
+ BEGIN TRANSACTION;
+ INSERT INTO `{table_id}`
+ VALUES ("one", 1), ("two", 2), ("three", 3), ("four", 4);
+
+ UPDATE `{table_id}`
+ SET bar = bar + 1
+ WHERE bar > 2;
+ COMMIT TRANSACTION;
+ """
+ query_job = Config.CLIENT.query(sql)
+ query_job.result()
+
+ child_jobs = Config.CLIENT.list_jobs(parent_job=query_job)
+ begin_transaction_job = next(iter(child_jobs))
+
+ # Transaction ID set by the server should be accessible on the child
+ # job responsible for `BEGIN TRANSACTION`. It is not expected to be
+ # present on the parent job itself.
+ # https://github.com/googleapis/python-bigquery/issues/975
+ assert begin_transaction_job.transaction_info is not None
+ assert begin_transaction_job.transaction_info.transaction_id != ""
+
+ def test_dbapi_w_standard_sql_types(self):
+ for sql, expected in helpers.STANDARD_SQL_EXAMPLES:
+ Config.CURSOR.execute(sql)
+ self.assertEqual(Config.CURSOR.rowcount, 1)
+ row = Config.CURSOR.fetchone()
+ self.assertEqual(len(row), 1)
+ self.assertEqual(row[0], expected)
+ row = Config.CURSOR.fetchone()
+ self.assertIsNone(row)
+
+ def test_dbapi_fetchall(self):
+ query = "SELECT * FROM UNNEST([(1, 2), (3, 4), (5, 6)])"
+
+ for arraysize in range(1, 5):
+ Config.CURSOR.execute(query)
+ self.assertEqual(Config.CURSOR.rowcount, 3, "expected 3 rows")
+ Config.CURSOR.arraysize = arraysize
+ rows = Config.CURSOR.fetchall()
+ row_tuples = [r.values() for r in rows]
+ self.assertEqual(row_tuples, [(1, 2), (3, 4), (5, 6)])
+
+ def test_dbapi_fetchall_from_script(self):
+ query = """
+ CREATE TEMP TABLE Example
+ (
+ x INT64,
+ y STRING
+ );
+
+ INSERT INTO Example
+ VALUES (5, 'foo'),
+ (6, 'bar'),
+ (7, 'baz');
+
+ SELECT *
+ FROM Example
+ ORDER BY x ASC;
+ """
+
+ Config.CURSOR.execute(query)
+ self.assertEqual(Config.CURSOR.rowcount, 3, "expected 3 rows")
+ rows = Config.CURSOR.fetchall()
+ row_tuples = [r.values() for r in rows]
+ self.assertEqual(row_tuples, [(5, "foo"), (6, "bar"), (7, "baz")])
+
+ def test_dbapi_fetch_w_bqstorage_client_large_result_set(self):
+ bigquery_storage = pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pyarrow")
+
+ bqstorage_client = bigquery_storage.BigQueryReadClient(
+ credentials=Config.CLIENT._credentials
+ )
+ cursor = dbapi.connect(Config.CLIENT, bqstorage_client).cursor()
+
+ cursor.execute(
+ """
+ SELECT id, `by`, timestamp
+ FROM `bigquery-public-data.hacker_news.full`
+ ORDER BY `id` ASC
+ LIMIT 100000
+ """
+ )
+
+ result_rows = [cursor.fetchone(), cursor.fetchone(), cursor.fetchone()]
+ field_name = operator.itemgetter(0)
+ fetched_data = [sorted(row.items(), key=field_name) for row in result_rows]
+ # Since DB API is not thread safe, only a single result stream should be
+ # requested by the BQ storage client, meaning that results should arrive
+ # in the sorted order.
+
+ expected_data = [
+ [
+ ("by", "pg"),
+ ("id", 1),
+ (
+ "timestamp",
+ datetime.datetime(
+ 2006, 10, 9, 18, 21, 51, tzinfo=datetime.timezone.utc
+ ),
+ ),
+ ],
+ [
+ ("by", "phyllis"),
+ ("id", 2),
+ (
+ "timestamp",
+ datetime.datetime(
+ 2006, 10, 9, 18, 30, 28, tzinfo=datetime.timezone.utc
+ ),
+ ),
+ ],
+ [
+ ("by", "phyllis"),
+ ("id", 3),
+ (
+ "timestamp",
+ datetime.datetime(
+ 2006, 10, 9, 18, 40, 33, tzinfo=datetime.timezone.utc
+ ),
+ ),
+ ],
+ ]
+
+ self.assertEqual(fetched_data, expected_data)
+
+ def test_dbapi_dry_run_query(self):
+ from google.cloud.bigquery.job import QueryJobConfig
+
+ query = """
+ SELECT country_name
+ FROM `bigquery-public-data.utility_us.country_code_iso`
+ WHERE country_name LIKE 'U%'
+ """
+
+ Config.CURSOR.execute(query, job_config=QueryJobConfig(dry_run=True))
+ self.assertEqual(Config.CURSOR.rowcount, 0, "expected no rows")
+
+ rows = Config.CURSOR.fetchall()
+
+ self.assertEqual(list(rows), [])
+
+ def test_dbapi_connection_does_not_leak_sockets(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ current_process = psutil.Process()
+ conn_count_start = len(current_process.connections())
+
+ # Provide no explicit clients, so that the connection will create and own them.
+ connection = dbapi.connect()
+ cursor = connection.cursor()
+
+ cursor.execute(
+ """
+ SELECT id, `by`, timestamp
+ FROM `bigquery-public-data.hacker_news.full`
+ ORDER BY `id` ASC
+ LIMIT 100000
+ """
+ )
+ rows = cursor.fetchall()
+ self.assertEqual(len(rows), 100000)
+
+ connection.close()
+ conn_count_end = len(current_process.connections())
+ self.assertLessEqual(conn_count_end, conn_count_start)
+
+ def _load_table_for_dml(self, rows, dataset_id, table_id):
+ from google.cloud._testing import _NamedTemporaryFile
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import SourceFormat
+ from google.cloud.bigquery.job import WriteDisposition
+
+ dataset = self.temp_dataset(dataset_id)
+ greeting = bigquery.SchemaField("greeting", "STRING", mode="NULLABLE")
+ table_ref = dataset.table(table_id)
+ table_arg = Table(table_ref, schema=[greeting])
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ with _NamedTemporaryFile() as temp:
+ with open(temp.name, "w") as csv_write:
+ writer = csv.writer(csv_write)
+ writer.writerow(("Greeting",))
+ writer.writerows(rows)
+
+ with open(temp.name, "rb") as csv_read:
+ config = bigquery.LoadJobConfig()
+ config.source_format = SourceFormat.CSV
+ config.skip_leading_rows = 1
+ config.create_disposition = CreateDisposition.CREATE_NEVER
+ config.write_disposition = WriteDisposition.WRITE_EMPTY
+ job = Config.CLIENT.load_table_from_file(
+ csv_read, table_ref, job_config=config
+ )
+
+ # Retry until done.
+ job.result(timeout=JOB_TIMEOUT)
+ self._fetch_single_page(table)
+
+ def test_query_w_dml(self):
+ dataset_name = _make_dataset_id("dml_query")
+ table_name = "test_table"
+ self._load_table_for_dml([("Hello World",)], dataset_name, table_name)
+ query_template = """UPDATE {}.{}
+ SET greeting = 'Guten Tag'
+ WHERE greeting = 'Hello World'
+ """
+
+ query_job = Config.CLIENT.query(
+ query_template.format(dataset_name, table_name),
+ job_id_prefix="test_query_w_dml_",
+ )
+ query_job.result()
+
+ self.assertEqual(query_job.num_dml_affected_rows, 1)
+
+ def test_dbapi_w_dml(self):
+ dataset_name = _make_dataset_id("dml_dbapi")
+ table_name = "test_table"
+ self._load_table_for_dml(
+ [("こんにちは",), ("Hello World",), ("Howdy!",)], dataset_name, table_name
+ )
+ query_template = """UPDATE {}.{}
+ SET greeting = 'Guten Tag'
+ WHERE greeting = 'Hello World'
+ """
+
+ Config.CURSOR.execute(
+ query_template.format(dataset_name, table_name),
+ job_id="test_dbapi_w_dml_{}".format(str(uuid.uuid4())),
+ )
+ self.assertEqual(Config.CURSOR.rowcount, 1)
+
+ def test_dbapi_w_query_parameters(self):
+ examples = [
+ {
+ "sql": "SELECT %(boolval)s",
+ "expected": True,
+ "query_parameters": {"boolval": True},
+ },
+ {
+ "sql": 'SELECT %(a "very" weird `name`)s',
+ "expected": True,
+ "query_parameters": {'a "very" weird `name`': True},
+ },
+ {
+ "sql": "SELECT %(select)s",
+ "expected": True,
+ "query_parameters": {"select": True}, # this name is a keyword
+ },
+ {"sql": "SELECT %s", "expected": False, "query_parameters": [False]},
+ {
+ "sql": "SELECT %(intval)s",
+ "expected": 123,
+ "query_parameters": {"intval": 123},
+ },
+ {
+ "sql": "SELECT %s",
+ "expected": -123456789,
+ "query_parameters": [-123456789],
+ },
+ {
+ "sql": "SELECT %(floatval)s",
+ "expected": 1.25,
+ "query_parameters": {"floatval": 1.25},
+ },
+ {
+ "sql": "SELECT LOWER(%(strval)s)",
+ "query_parameters": {"strval": "I Am A String"},
+ "expected": "i am a string",
+ },
+ {
+ "sql": "SELECT DATE_SUB(%(dateval)s, INTERVAL 1 DAY)",
+ "query_parameters": {"dateval": datetime.date(2017, 4, 2)},
+ "expected": datetime.date(2017, 4, 1),
+ },
+ {
+ "sql": "SELECT TIME_ADD(%(timeval)s, INTERVAL 4 SECOND)",
+ "query_parameters": {"timeval": datetime.time(12, 34, 56)},
+ "expected": datetime.time(12, 35, 0),
+ },
+ {
+ "sql": ("SELECT DATETIME_ADD(%(datetimeval)s, INTERVAL 53 SECOND)"),
+ "query_parameters": {
+ "datetimeval": datetime.datetime(2012, 3, 4, 5, 6, 7)
+ },
+ "expected": datetime.datetime(2012, 3, 4, 5, 7, 0),
+ },
+ {
+ "sql": "SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)",
+ "query_parameters": {
+ "zoned": datetime.datetime(2012, 3, 4, 5, 6, 7, tzinfo=UTC)
+ },
+ "expected": datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
+ },
+ {
+ "sql": "SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)",
+ "query_parameters": {
+ "zoned": datetime.datetime(2012, 3, 4, 5, 6, 7, 250000, tzinfo=UTC)
+ },
+ "expected": datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
+ },
+ ]
+ for example in examples:
+ msg = "sql: {} query_parameters: {}".format(
+ example["sql"], example["query_parameters"]
+ )
+
+ Config.CURSOR.execute(example["sql"], example["query_parameters"])
+
+ self.assertEqual(Config.CURSOR.rowcount, 1, msg=msg)
+ row = Config.CURSOR.fetchone()
+ self.assertEqual(len(row), 1, msg=msg)
+ self.assertEqual(row[0], example["expected"], msg=msg)
+ row = Config.CURSOR.fetchone()
+ self.assertIsNone(row, msg=msg)
+
+ def test_large_query_w_public_data(self):
+ PUBLIC = "bigquery-public-data"
+ DATASET_ID = "samples"
+ TABLE_NAME = "natality"
+ LIMIT = 1000
+ SQL = "SELECT * from `{}.{}.{}` LIMIT {}".format(
+ PUBLIC, DATASET_ID, TABLE_NAME, LIMIT
+ )
+
+ query_job = Config.CLIENT.query(SQL)
+
+ rows = list(query_job)
+ self.assertEqual(len(rows), LIMIT)
+
+ def test_query_future(self):
+ query_job = Config.CLIENT.query("SELECT 1")
+ iterator = query_job.result(timeout=JOB_TIMEOUT)
+ row_tuples = [r.values() for r in iterator]
+ self.assertEqual(row_tuples, [(1,)])
+
+ def test_query_iter(self):
+ import types
+
+ query_job = Config.CLIENT.query("SELECT 1")
+ self.assertIsInstance(iter(query_job), types.GeneratorType)
+ row_tuples = [r.values() for r in query_job]
+ self.assertEqual(row_tuples, [(1,)])
+
+ def test_insert_rows_nested_nested(self):
+ # See #2951
+ SF = bigquery.SchemaField
+ schema = [
+ SF("string_col", "STRING", mode="NULLABLE"),
+ SF(
+ "record_col",
+ "RECORD",
+ mode="NULLABLE",
+ fields=[
+ SF("nested_string", "STRING", mode="NULLABLE"),
+ SF("nested_repeated", "INTEGER", mode="REPEATED"),
+ SF(
+ "nested_record",
+ "RECORD",
+ mode="NULLABLE",
+ fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
+ ),
+ ],
+ ),
+ SF("json_col", "JSON"),
+ ]
+ record = {
+ "nested_string": "another string value",
+ "nested_repeated": [0, 1, 2],
+ "nested_record": {"nested_nested_string": "some deep insight"},
+ }
+ json_record = {
+ "json_array": [1, 2, 3],
+ "json_object": {"alpha": "abc", "num": 123},
+ }
+ to_insert = [("Some value", record, json_record)]
+ table_id = "test_table"
+ dataset = self.temp_dataset(_make_dataset_id("issue_2951"))
+ table_arg = Table(dataset.table(table_id), schema=schema)
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ Config.CLIENT.insert_rows(table, to_insert)
+
+ retry = RetryResult(_has_rows, max_tries=8)
+ rows = retry(self._fetch_single_page)(table)
+ row_tuples = [r.values() for r in rows]
+ self.assertEqual(row_tuples, to_insert)
+
+ def test_insert_rows_nested_nested_dictionary(self):
+ # See #2951
+ SF = bigquery.SchemaField
+ schema = [
+ SF("string_col", "STRING", mode="NULLABLE"),
+ SF(
+ "record_col",
+ "RECORD",
+ mode="NULLABLE",
+ fields=[
+ SF("nested_string", "STRING", mode="NULLABLE"),
+ SF("nested_repeated", "INTEGER", mode="REPEATED"),
+ SF(
+ "nested_record",
+ "RECORD",
+ mode="NULLABLE",
+ fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
+ ),
+ ],
+ ),
+ ]
+ record = {
+ "nested_string": "another string value",
+ "nested_repeated": [0, 1, 2],
+ "nested_record": {"nested_nested_string": "some deep insight"},
+ }
+ to_insert = [{"string_col": "Some value", "record_col": record}]
+ table_id = "test_table"
+ dataset = self.temp_dataset(_make_dataset_id("issue_2951"))
+ table_arg = Table(dataset.table(table_id), schema=schema)
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+
+ Config.CLIENT.insert_rows(table, to_insert)
+
+ retry = RetryResult(_has_rows, max_tries=8)
+ rows = retry(self._fetch_single_page)(table)
+ row_tuples = [r.values() for r in rows]
+ expected_rows = [("Some value", record)]
+ self.assertEqual(row_tuples, expected_rows)
+
+ @pytest.mark.skipif(
+ MTLS_TESTING, reason="mTLS testing has no permission to the max-value.js file"
+ )
+ def test_create_routine(self):
+ routine_name = "test_routine"
+ dataset = self.temp_dataset(_make_dataset_id("create_routine"))
+ float64_type = bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.FLOAT64
+ )
+ routine = bigquery.Routine(
+ dataset.routine(routine_name),
+ language="JAVASCRIPT",
+ type_="SCALAR_FUNCTION",
+ return_type=float64_type,
+ imported_libraries=[
+ "gs://{}/bigquery/udfs/max-value.js".format(SAMPLES_BUCKET)
+ ],
+ )
+ routine.arguments = [
+ bigquery.RoutineArgument(
+ name="arr",
+ data_type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.ARRAY,
+ array_element_type=float64_type,
+ ),
+ )
+ ]
+ routine.body = "return maxValue(arr)"
+ routine.determinism_level = bigquery.DeterminismLevel.DETERMINISTIC
+ query_string = "SELECT `{}`([-100.0, 3.14, 100.0, 42.0]) as max_value;".format(
+ str(routine.reference)
+ )
+
+ routine = helpers.retry_403(Config.CLIENT.create_routine)(routine)
+ query_job = helpers.retry_403(Config.CLIENT.query)(query_string)
+ rows = list(query_job.result())
+
+ assert len(rows) == 1
+ assert rows[0].max_value == 100.0
+
+ def test_create_routine_with_range(self):
+ routine_name = "routine_range"
+ dataset = self.temp_dataset(_make_dataset_id("routine_range"))
+
+ routine = bigquery.Routine(
+ dataset.routine(routine_name),
+ type_="SCALAR_FUNCTION",
+ language="SQL",
+ body="RANGE_START(x)",
+ arguments=[
+ bigquery.RoutineArgument(
+ name="x",
+ data_type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.RANGE,
+ range_element_type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.DATE
+ ),
+ ),
+ )
+ ],
+ return_type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.DATE
+ ),
+ )
+
+ query_string = (
+ "SELECT `{}`(RANGE '[2016-08-12, UNBOUNDED)') as range_start;".format(
+ str(routine.reference)
+ )
+ )
+
+ routine = helpers.retry_403(Config.CLIENT.create_routine)(routine)
+ query_job = helpers.retry_403(Config.CLIENT.query)(query_string)
+ rows = list(query_job.result())
+
+ assert len(rows) == 1
+ assert rows[0].range_start == datetime.date(2016, 8, 12)
+
+ def test_create_tvf_routine(self):
+ from google.cloud.bigquery import (
+ Routine,
+ RoutineArgument,
+ RoutineType,
+ StandardSqlTypeNames,
+ )
+
+ StandardSqlDataType = bigquery.StandardSqlDataType
+ StandardSqlField = bigquery.StandardSqlField
+ StandardSqlTableType = bigquery.StandardSqlTableType
+
+ INT64 = StandardSqlTypeNames.INT64
+ STRING = StandardSqlTypeNames.STRING
+
+ client = Config.CLIENT
+
+ dataset = self.temp_dataset(_make_dataset_id("create_tvf_routine"))
+ routine_ref = dataset.routine("test_tvf_routine")
+
+ routine_body = """
+ SELECT int_col, str_col
+ FROM (
+ UNNEST([1, 2, 3]) int_col
+ JOIN
+ (SELECT str_col FROM UNNEST(["one", "two", "three"]) str_col)
+ ON TRUE
+ )
+ WHERE int_col > threshold
+ """
+
+ return_table_type = StandardSqlTableType(
+ columns=[
+ StandardSqlField(
+ name="int_col",
+ type=StandardSqlDataType(type_kind=INT64),
+ ),
+ StandardSqlField(
+ name="str_col",
+ type=StandardSqlDataType(type_kind=STRING),
+ ),
+ ]
+ )
+
+ routine_args = [
+ RoutineArgument(
+ name="threshold",
+ data_type=StandardSqlDataType(type_kind=INT64),
+ )
+ ]
+
+ routine_def = Routine(
+ routine_ref,
+ type_=RoutineType.TABLE_VALUED_FUNCTION,
+ arguments=routine_args,
+ return_table_type=return_table_type,
+ body=routine_body,
+ )
+
+ # Create TVF routine.
+ client.delete_routine(routine_ref, not_found_ok=True)
+ routine = client.create_routine(routine_def)
+
+ assert routine.body == routine_body
+ assert routine.return_table_type == return_table_type
+ assert routine.arguments == routine_args
+
+ # Execute the routine to see if it's working as expected.
+ query_job = client.query(
+ f"""
+ SELECT int_col, str_col
+ FROM `{routine.reference}`(1)
+ ORDER BY int_col, str_col ASC
+ """
+ )
+
+ result_rows = [tuple(row) for row in query_job.result()]
+ expected = [
+ (2, "one"),
+ (2, "three"),
+ (2, "two"),
+ (3, "one"),
+ (3, "three"),
+ (3, "two"),
+ ]
+ assert result_rows == expected
+
+ def test_create_routine_w_data_governance(self):
+ routine_name = "routine_with_data_governance"
+ dataset = self.temp_dataset(_make_dataset_id("create_routine"))
+
+ routine = bigquery.Routine(
+ dataset.routine(routine_name),
+ type_="SCALAR_FUNCTION",
+ language="SQL",
+ body="x",
+ arguments=[
+ bigquery.RoutineArgument(
+ name="x",
+ data_type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ ),
+ )
+ ],
+ data_governance_type="DATA_MASKING",
+ return_type=bigquery.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ ),
+ )
+ routine_original = copy.deepcopy(routine)
+
+ client = Config.CLIENT
+ routine_new = client.create_routine(routine)
+
+ assert routine_new.reference == routine_original.reference
+ assert routine_new.type_ == routine_original.type_
+ assert routine_new.language == routine_original.language
+ assert routine_new.body == routine_original.body
+ assert routine_new.arguments == routine_original.arguments
+ assert routine_new.return_type == routine_original.return_type
+ assert routine_new.data_governance_type == routine_original.data_governance_type
+
+ def test_create_table_rows_fetch_nested_schema(self):
+ table_name = "test_table"
+ dataset = self.temp_dataset(_make_dataset_id("create_table_nested_schema"))
+ schema = _load_json_schema()
+ table_arg = Table(dataset.table(table_name), schema=schema)
+ table = helpers.retry_403(Config.CLIENT.create_table)(table_arg)
+ self.to_delete.insert(0, table)
+ self.assertTrue(_table_exists(table))
+ self.assertEqual(table.table_id, table_name)
+
+ to_insert = []
+ # Data is in "JSON Lines" format, see http://jsonlines.org/
+ json_filename = DATA_PATH / "characters.jsonl"
+ with open(json_filename) as rows_file:
+ for line in rows_file:
+ to_insert.append(json.loads(line))
+
+ errors = Config.CLIENT.insert_rows_json(table, to_insert)
+ self.assertEqual(len(errors), 0)
+
+ retry = RetryResult(_has_rows, max_tries=8)
+ fetched = retry(self._fetch_single_page)(table)
+ fetched_tuples = [f.values() for f in fetched]
+
+ self.assertEqual(len(fetched), len(to_insert))
+
+ for found, expected in zip(sorted(fetched_tuples), to_insert):
+ self.assertEqual(found[0], expected["Name"])
+ self.assertEqual(found[1], int(expected["Age"]))
+ self.assertEqual(found[2], expected["Weight"])
+ self.assertEqual(found[3], expected["IsMagic"])
+
+ self.assertEqual(len(found[4]), len(expected["Spells"]))
+ for f_spell, e_spell in zip(found[4], expected["Spells"]):
+ self.assertEqual(f_spell["Name"], e_spell["Name"])
+ parts = time.strptime(e_spell["LastUsed"], "%Y-%m-%d %H:%M:%S UTC")
+ e_used = datetime.datetime(*parts[0:6], tzinfo=UTC)
+ self.assertEqual(f_spell["LastUsed"], e_used)
+ self.assertEqual(f_spell["DiscoveredBy"], e_spell["DiscoveredBy"])
+ self.assertEqual(f_spell["Properties"], e_spell["Properties"])
+
+ e_icon = base64.standard_b64decode(e_spell["Icon"].encode("ascii"))
+ self.assertEqual(f_spell["Icon"], e_icon)
+
+ parts = time.strptime(expected["TeaTime"], "%H:%M:%S")
+ e_teatime = datetime.time(*parts[3:6])
+ self.assertEqual(found[5], e_teatime)
+
+ parts = time.strptime(expected["NextVacation"], "%Y-%m-%d")
+ e_nextvac = datetime.date(*parts[0:3])
+ self.assertEqual(found[6], e_nextvac)
+
+ parts = time.strptime(expected["FavoriteTime"], "%Y-%m-%dT%H:%M:%S")
+ e_favtime = datetime.datetime(*parts[0:6])
+ self.assertEqual(found[7], e_favtime)
+ self.assertEqual(found[8], decimal.Decimal(expected["FavoriteNumber"]))
+
+ def test_nested_table_to_arrow(self):
+ bigquery_storage = pytest.importorskip("google.cloud.bigquery_storage")
+ pyarrow = pytest.importorskip("pyarrow")
+ pyarrow.types = pytest.importorskip("pyarrow.types")
+ from google.cloud.bigquery.job import SourceFormat
+ from google.cloud.bigquery.job import WriteDisposition
+
+ SF = bigquery.SchemaField
+ schema = [
+ SF("string_col", "STRING", mode="NULLABLE"),
+ SF(
+ "record_col",
+ "RECORD",
+ mode="NULLABLE",
+ fields=[
+ SF("nested_string", "STRING", mode="NULLABLE"),
+ SF("nested_repeated", "INTEGER", mode="REPEATED"),
+ ],
+ ),
+ SF("float_col", "FLOAT", mode="NULLABLE"),
+ ]
+ record = {"nested_string": "another string value", "nested_repeated": [0, 1, 2]}
+ to_insert = [
+ {"string_col": "Some value", "record_col": record, "float_col": 3.14}
+ ]
+ rows = [json.dumps(row) for row in to_insert]
+ body = io.BytesIO("{}\n".format("\n".join(rows)).encode("ascii"))
+ table_id = "test_table"
+ dataset = self.temp_dataset(_make_dataset_id("nested_df"))
+ table = dataset.table(table_id)
+ self.to_delete.insert(0, table)
+ job_config = bigquery.LoadJobConfig()
+ job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
+ job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
+ job_config.schema = schema
+ # Load a table using a local JSON file from memory.
+ Config.CLIENT.load_table_from_file(body, table, job_config=job_config).result()
+ bqstorage_client = bigquery_storage.BigQueryReadClient(
+ credentials=Config.CLIENT._credentials
+ )
+
+ tbl = Config.CLIENT.list_rows(table, selected_fields=schema).to_arrow(
+ bqstorage_client=bqstorage_client
+ )
+
+ self.assertIsInstance(tbl, pyarrow.Table)
+ self.assertEqual(tbl.num_rows, 1)
+ self.assertEqual(tbl.num_columns, 3)
+ # Columns may not appear in the requested order.
+ self.assertTrue(pyarrow.types.is_float64(tbl.schema.field("float_col").type))
+ self.assertTrue(pyarrow.types.is_string(tbl.schema.field("string_col").type))
+ record_col = tbl.schema.field("record_col").type
+ self.assertTrue(pyarrow.types.is_struct(record_col))
+ self.assertEqual(record_col.num_fields, 2)
+ self.assertEqual(record_col[0].name, "nested_string")
+ self.assertTrue(pyarrow.types.is_string(record_col[0].type))
+ self.assertEqual(record_col[1].name, "nested_repeated")
+ self.assertTrue(pyarrow.types.is_list(record_col[1].type))
+ self.assertTrue(pyarrow.types.is_int64(record_col[1].type.value_type))
+
+ def temp_dataset(self, dataset_id, *args, **kwargs):
+ project = Config.CLIENT.project
+ dataset_ref = bigquery.DatasetReference(project, dataset_id)
+ dataset = Dataset(dataset_ref)
+ if kwargs.get("location"):
+ dataset.location = kwargs.get("location")
+ if kwargs.get("max_time_travel_hours"):
+ dataset.max_time_travel_hours = kwargs.get("max_time_travel_hours")
+ if kwargs.get("default_rounding_mode"):
+ dataset.default_rounding_mode = kwargs.get("default_rounding_mode")
+ if kwargs.get("is_case_insensitive"):
+ dataset.is_case_insensitive = kwargs.get("is_case_insensitive")
+ dataset = helpers.retry_403(Config.CLIENT.create_dataset)(dataset)
+ self.to_delete.append(dataset)
+ return dataset
+
+
+def _job_done(instance):
+ return instance.state.lower() == "done"
+
+
+def _dataset_exists(ds):
+ try:
+ Config.CLIENT.get_dataset(DatasetReference(ds.project, ds.dataset_id))
+ return True
+ except NotFound:
+ return False
+
+
+def _table_exists(t):
+ try:
+ tr = DatasetReference(t.project, t.dataset_id).table(t.table_id)
+ Config.CLIENT.get_table(tr)
+ return True
+ except NotFound:
+ return False
+
+
+def test_dbapi_create_view(dataset_id: str):
+ query = f"""
+ CREATE VIEW {dataset_id}.dbapi_create_view
+ AS SELECT name, SUM(number) AS total
+ FROM `bigquery-public-data.usa_names.usa_1910_2013`
+ GROUP BY name;
+ """
+
+ Config.CURSOR.execute(query)
+ assert Config.CURSOR.rowcount == 0, "expected 0 rows"
+
+
+def test_parameterized_types_round_trip(dataset_id: str):
+ client = Config.CLIENT
+ table_id = f"{dataset_id}.test_parameterized_types_round_trip"
+ fields = (
+ ("n", "NUMERIC"),
+ ("n9", "NUMERIC(9)"),
+ ("n92", "NUMERIC(9, 2)"),
+ ("bn", "BIGNUMERIC"),
+ ("bn9", "BIGNUMERIC(38)"),
+ ("bn92", "BIGNUMERIC(38, 22)"),
+ ("s", "STRING"),
+ ("s9", "STRING(9)"),
+ ("b", "BYTES"),
+ ("b9", "BYTES(9)"),
+ )
+ client.query(
+ "create table {} ({})".format(table_id, ", ".join(" ".join(f) for f in fields))
+ ).result()
+ table = client.get_table(table_id)
+ table_id2 = table_id + "2"
+ client.create_table(Table(f"{client.project}.{table_id2}", table.schema))
+ table2 = client.get_table(table_id2)
+
+ assert tuple(s._key()[:2] for s in table2.schema) == fields
+
+
+def test_table_snapshots(dataset_id: str):
+ from google.cloud.bigquery import CopyJobConfig
+ from google.cloud.bigquery import OperationType
+
+ client = Config.CLIENT
+
+ source_table_path = f"{client.project}.{dataset_id}.test_table"
+ snapshot_table_path = f"{source_table_path}_snapshot"
+
+ # Create the table before loading so that the column order is predictable.
+ schema = [
+ bigquery.SchemaField("foo", "INTEGER"),
+ bigquery.SchemaField("bar", "STRING"),
+ ]
+ source_table = helpers.retry_403(Config.CLIENT.create_table)(
+ Table(source_table_path, schema=schema)
+ )
+
+ # Populate the table with initial data.
+ rows = [{"foo": 1, "bar": "one"}, {"foo": 2, "bar": "two"}]
+ load_job = Config.CLIENT.load_table_from_json(rows, source_table)
+ load_job.result()
+
+ # Now create a snapshot before modifying the original table data.
+ copy_config = CopyJobConfig()
+ copy_config.operation_type = OperationType.SNAPSHOT
+
+ today = datetime.date.today()
+ destination_expiration_time = f"{today.year + 1}-01-01T00:00:00Z"
+
+ copy_config.destination_expiration_time = destination_expiration_time
+
+ copy_job = client.copy_table(
+ sources=source_table_path,
+ destination=snapshot_table_path,
+ job_config=copy_config,
+ )
+ copy_job.result()
+
+ # Modify data in original table.
+ sql = f'INSERT INTO `{source_table_path}`(foo, bar) VALUES (3, "three")'
+ query_job = client.query(sql)
+ query_job.result()
+
+ # List rows from the source table and compare them to rows from the snapshot.
+ rows_iter = client.list_rows(source_table_path)
+ rows = sorted(row.values() for row in rows_iter)
+ assert rows == [(1, "one"), (2, "two"), (3, "three")]
+
+ rows_iter = client.list_rows(snapshot_table_path)
+ rows = sorted(row.values() for row in rows_iter)
+ assert rows == [(1, "one"), (2, "two")]
+
+ # Now restore the table from the snapshot and it should again contain the old
+ # set of rows.
+ copy_config = CopyJobConfig()
+ copy_config.operation_type = OperationType.RESTORE
+ copy_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
+
+ copy_job = client.copy_table(
+ sources=snapshot_table_path,
+ destination=source_table_path,
+ job_config=copy_config,
+ )
+ copy_job.result()
+
+ rows_iter = client.list_rows(source_table_path)
+ rows = sorted(row.values() for row in rows_iter)
+ assert rows == [(1, "one"), (2, "two")]
+
+
+def test_table_clones(dataset_id: str):
+ from google.cloud.bigquery import CopyJobConfig
+ from google.cloud.bigquery import OperationType
+
+ client = Config.CLIENT
+
+ table_path_source = f"{client.project}.{dataset_id}.test_table_clone"
+ clone_table_path = f"{table_path_source}_clone"
+
+ # Create the table before loading so that the column order is predictable.
+ schema = [
+ bigquery.SchemaField("foo", "INTEGER"),
+ bigquery.SchemaField("bar", "STRING"),
+ ]
+ source_table = helpers.retry_403(Config.CLIENT.create_table)(
+ Table(table_path_source, schema=schema)
+ )
+
+ # Populate the table with initial data.
+ rows = [{"foo": 1, "bar": "one"}, {"foo": 2, "bar": "two"}]
+ load_job = Config.CLIENT.load_table_from_json(rows, source_table)
+ load_job.result()
+
+ # Now create a clone before modifying the original table data.
+ copy_config = CopyJobConfig()
+ copy_config.operation_type = OperationType.CLONE
+ copy_config.write_disposition = bigquery.WriteDisposition.WRITE_EMPTY
+
+ copy_job = client.copy_table(
+ sources=table_path_source,
+ destination=clone_table_path,
+ job_config=copy_config,
+ )
+ copy_job.result()
+
+ # List rows from the source table and compare them to rows from the clone.
+ rows_iter = client.list_rows(table_path_source)
+ rows = sorted(row.values() for row in rows_iter)
+ assert rows == [(1, "one"), (2, "two")]
+
+ rows_iter = client.list_rows(clone_table_path)
+ rows = sorted(row.values() for row in rows_iter)
+ assert rows == [(1, "one"), (2, "two")]
+
+ # Compare properties of the source and clone table.
+ source_table_props = client.get_table(table_path_source)
+ clone_table_props = client.get_table(clone_table_path)
+
+ assert source_table_props.schema == clone_table_props.schema
+ assert source_table_props.num_bytes == clone_table_props.num_bytes
+ assert source_table_props.num_rows == clone_table_props.num_rows
+ assert source_table_props.description == clone_table_props.description
diff --git a/testbed/googleapis__python-bigquery/tests/system/test_job_retry.py b/testbed/googleapis__python-bigquery/tests/system/test_job_retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..520545493290fe80a5b878b54d365517c79fbda5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/test_job_retry.py
@@ -0,0 +1,72 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import contextlib
+import threading
+import time
+
+import google.api_core.exceptions
+import google.cloud.bigquery
+import pytest
+
+
+def thread(func):
+ thread = threading.Thread(target=func, daemon=True)
+ thread.start()
+ return thread
+
+
+@pytest.mark.parametrize("job_retry_on_query", [True, False])
+def test_query_retry_539(bigquery_client, dataset_id, job_retry_on_query):
+ """
+ Test job_retry
+
+ See: https://github.com/googleapis/python-bigquery/issues/539
+ """
+ from google.api_core import exceptions
+ from google.api_core.retry import if_exception_type, Retry
+
+ table_name = f"{dataset_id}.t539"
+
+ # Without a custom retry, we fail:
+ with pytest.raises(google.api_core.exceptions.NotFound):
+ bigquery_client.query(f"select count(*) from {table_name}").result()
+
+ retry_notfound = Retry(predicate=if_exception_type(exceptions.NotFound))
+
+ job_retry = dict(job_retry=retry_notfound) if job_retry_on_query else {}
+ job = bigquery_client.query(f"select count(*) from {table_name}", **job_retry)
+ job_id = job.job_id
+
+ # We can already know that the job failed, but we're not supposed
+ # to find out until we call result, which is where retry happend
+ assert job.done()
+ assert job.exception() is not None
+
+ @thread
+ def create_table():
+ time.sleep(1) # Give the first retry attempt time to fail.
+ with contextlib.closing(google.cloud.bigquery.Client()) as client:
+ client.query(f"create table {table_name} (id int64)").result()
+
+ job_retry = {} if job_retry_on_query else dict(job_retry=retry_notfound)
+ [[count]] = list(job.result(**job_retry))
+ assert count == 0
+
+ # The job was retried, and thus got a new job id
+ assert job.job_id != job_id
+
+ # Make sure we don't leave a thread behind:
+ create_table.join()
+ bigquery_client.query(f"drop table {table_name}").result()
diff --git a/testbed/googleapis__python-bigquery/tests/system/test_list_rows.py b/testbed/googleapis__python-bigquery/tests/system/test_list_rows.py
new file mode 100644
index 0000000000000000000000000000000000000000..108b842cec8ffa24822c6f2ad7958873191eda3f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/test_list_rows.py
@@ -0,0 +1,134 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import decimal
+
+from dateutil import relativedelta
+
+from google.cloud import bigquery
+from google.cloud.bigquery import enums
+
+
+def test_list_rows_empty_table(bigquery_client: bigquery.Client, table_id: str):
+ from google.cloud.bigquery.table import RowIterator
+
+ table = bigquery_client.create_table(table_id)
+
+ # It's a bit silly to list rows for an empty table, but this does
+ # happen as the result of a DDL query from an IPython magic command.
+ rows = bigquery_client.list_rows(table)
+ assert isinstance(rows, RowIterator)
+ assert tuple(rows) == ()
+
+
+def test_list_rows_page_size(bigquery_client: bigquery.Client, table_id: str):
+ num_items = 7
+ page_size = 3
+ num_pages, num_last_page = divmod(num_items, page_size)
+
+ to_insert = [{"string_col": "item%d" % i, "rowindex": i} for i in range(num_items)]
+ bigquery_client.load_table_from_json(to_insert, table_id).result()
+
+ df = bigquery_client.list_rows(
+ table_id,
+ selected_fields=[bigquery.SchemaField("string_col", enums.SqlTypeNames.STRING)],
+ page_size=page_size,
+ )
+ pages = df.pages
+
+ for i in range(num_pages):
+ page = next(pages)
+ assert page.num_items == page_size
+ page = next(pages)
+ assert page.num_items == num_last_page
+
+
+def test_list_rows_scalars(bigquery_client: bigquery.Client, scalars_table: str):
+ rows = sorted(
+ bigquery_client.list_rows(scalars_table), key=lambda row: row["rowindex"]
+ )
+ row = rows[0]
+ assert row["bool_col"] # True
+ assert row["bytes_col"] == b"Hello, World!"
+ assert row["date_col"] == datetime.date(2021, 7, 21)
+ assert row["datetime_col"] == datetime.datetime(2021, 7, 21, 11, 39, 45)
+ assert row["geography_col"] == "POINT(-122.0838511 37.3860517)"
+ assert row["int64_col"] == 123456789
+ assert row["interval_col"] == relativedelta.relativedelta(
+ years=7, months=11, days=9, hours=4, minutes=15, seconds=37, microseconds=123456
+ )
+ assert row["numeric_col"] == decimal.Decimal("1.23456789")
+ assert row["bignumeric_col"] == decimal.Decimal("10.111213141516171819")
+ assert row["float64_col"] == 1.25
+ assert row["string_col"] == "Hello, World!"
+ assert row["time_col"] == datetime.time(11, 41, 43, 76160)
+ assert row["timestamp_col"] == datetime.datetime(
+ 2021, 7, 21, 17, 43, 43, 945289, tzinfo=datetime.timezone.utc
+ )
+
+ nullrow = rows[1]
+ for column, value in nullrow.items():
+ if column == "rowindex":
+ assert value == 1
+ else:
+ assert value is None
+
+
+def test_list_rows_scalars_extreme(
+ bigquery_client: bigquery.Client, scalars_extreme_table: str
+):
+ rows = sorted(
+ bigquery_client.list_rows(scalars_extreme_table),
+ key=lambda row: row["rowindex"],
+ )
+ row = rows[0]
+ assert row["bool_col"] # True
+ assert row["bytes_col"] == b"\r\n"
+ assert row["date_col"] == datetime.date(9999, 12, 31)
+ assert row["datetime_col"] == datetime.datetime(9999, 12, 31, 23, 59, 59, 999999)
+ assert row["geography_col"] == "POINT(-135 90)"
+ assert row["int64_col"] == 9223372036854775807
+ assert row["interval_col"] == relativedelta.relativedelta(
+ years=-10000, days=-3660000, hours=-87840000
+ )
+ assert row["numeric_col"] == decimal.Decimal(f"9.{'9' * 37}E+28")
+ assert row["bignumeric_col"] == decimal.Decimal(f"9.{'9' * 75}E+37")
+ assert row["float64_col"] == float("Inf")
+ assert row["string_col"] == "Hello, World"
+ assert row["time_col"] == datetime.time(23, 59, 59, 999999)
+ assert row["timestamp_col"] == datetime.datetime(
+ 9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc
+ )
+
+ nullrow = rows[4]
+ for column, value in nullrow.items():
+ if column == "rowindex":
+ assert value == 4
+ else:
+ assert value is None
+
+
+def test_list_rows_range(bigquery_client: bigquery.Client, scalars_table_csv: str):
+ rows = bigquery_client.list_rows(scalars_table_csv)
+ rows = list(rows)
+ row = rows[0]
+ expected_range = {
+ "start": datetime.date(2020, 1, 1),
+ "end": datetime.date(2020, 2, 1),
+ }
+ assert row["range_date"] == expected_range
+
+ row_null = rows[1]
+ assert row_null["range_date"] is None
diff --git a/testbed/googleapis__python-bigquery/tests/system/test_magics.py b/testbed/googleapis__python-bigquery/tests/system/test_magics.py
new file mode 100644
index 0000000000000000000000000000000000000000..72d358a7422196d8deffad24f0f98ea62dc7a1bd
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/test_magics.py
@@ -0,0 +1,85 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""System tests for Jupyter/IPython connector."""
+
+import re
+
+import pytest
+import psutil
+
+
+IPython = pytest.importorskip("IPython")
+io = pytest.importorskip("IPython.utils.io")
+pandas = pytest.importorskip("pandas")
+tools = pytest.importorskip("IPython.testing.tools")
+interactiveshell = pytest.importorskip("IPython.terminal.interactiveshell")
+
+
+@pytest.fixture(scope="session")
+def ipython():
+ config = tools.default_config()
+ config.TerminalInteractiveShell.simple_prompt = True
+ shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
+ return shell
+
+
+@pytest.fixture()
+def ipython_interactive(ipython):
+ """Activate IPython's builtin hooks
+
+ for the duration of the test scope.
+ """
+ with ipython.builtin_trap:
+ yield ipython
+
+
+def test_bigquery_magic(ipython_interactive):
+ ip = IPython.get_ipython()
+ current_process = psutil.Process()
+ conn_count_start = len(current_process.connections())
+
+ # Deprecated, but should still work in google-cloud-bigquery 3.x.
+ with pytest.warns(FutureWarning, match="bigquery_magics"):
+ ip.extension_manager.load_extension("google.cloud.bigquery")
+
+ sql = """
+ SELECT
+ CONCAT(
+ 'https://stackoverflow.com/questions/',
+ CAST(id as STRING)) as url,
+ view_count
+ FROM `bigquery-public-data.stackoverflow.posts_questions`
+ WHERE tags like '%google-bigquery%'
+ ORDER BY view_count DESC
+ LIMIT 10
+ """
+ with io.capture_output() as captured:
+ result = ip.run_cell_magic("bigquery", "--use_rest_api", sql)
+
+ conn_count_end = len(current_process.connections())
+
+ lines = re.split("\n|\r", captured.stdout)
+ # Removes blanks & terminal code (result of display clearing)
+ updates = list(filter(lambda x: bool(x) and x != "\x1b[2K", lines))
+ assert re.match("Executing query with job ID: .*", updates[0])
+ assert (re.match("Query executing: .*s", line) for line in updates[1:-1])
+ assert isinstance(result, pandas.DataFrame)
+ assert len(result) == 10 # verify row count
+ assert list(result) == ["url", "view_count"] # verify column names
+
+ # NOTE: For some reason, the number of open sockets is sometimes one *less*
+ # than expected when running system tests on Kokoro, thus using the <= assertion.
+ # That's still fine, however, since the sockets are apparently not leaked.
+ assert conn_count_end <= conn_count_start # system resources are released
diff --git a/testbed/googleapis__python-bigquery/tests/system/test_pandas.py b/testbed/googleapis__python-bigquery/tests/system/test_pandas.py
new file mode 100644
index 0000000000000000000000000000000000000000..85c7b79e64425e63457be4d1c08ef06ce0193727
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/test_pandas.py
@@ -0,0 +1,1450 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""System tests for pandas connector."""
+
+import collections
+import datetime
+import decimal
+import json
+import io
+import operator
+import warnings
+
+import google.api_core.retry
+import pytest
+
+try:
+ import importlib.metadata as metadata
+except ImportError:
+ import importlib_metadata as metadata
+
+from google.cloud import bigquery
+
+from google.cloud.bigquery import enums
+
+from . import helpers
+
+
+pandas = pytest.importorskip("pandas", minversion="0.23.0")
+pyarrow = pytest.importorskip("pyarrow")
+numpy = pytest.importorskip("numpy")
+
+bigquery_storage = pytest.importorskip(
+ "google.cloud.bigquery_storage", minversion="2.0.0"
+)
+
+if pandas is not None:
+ PANDAS_INSTALLED_VERSION = metadata.version("pandas")
+else:
+ PANDAS_INSTALLED_VERSION = "0.0.0"
+
+
+class MissingDataError(Exception):
+ pass
+
+
+def test_load_table_from_dataframe_w_automatic_schema(bigquery_client, dataset_id):
+ """Test that a DataFrame with dtypes that map well to BigQuery types
+ can be uploaded without specifying a schema.
+
+ https://github.com/googleapis/google-cloud-python/issues/9044
+ """
+ df_data = collections.OrderedDict(
+ [
+ ("bool_col", pandas.Series([True, False, True], dtype="bool")),
+ (
+ "ts_col",
+ pandas.Series(
+ [
+ datetime.datetime(2010, 1, 2, 3, 44, 50),
+ datetime.datetime(2011, 2, 3, 14, 50, 59),
+ datetime.datetime(2012, 3, 14, 15, 16),
+ ],
+ dtype="datetime64[ns]",
+ ).dt.tz_localize(datetime.timezone.utc),
+ ),
+ (
+ "dt_col_no_tz",
+ pandas.Series(
+ [
+ datetime.datetime(2010, 1, 2, 3, 44, 50),
+ datetime.datetime(2011, 2, 3, 14, 50, 59),
+ datetime.datetime(2012, 3, 14, 15, 16),
+ ],
+ dtype="datetime64[ns]",
+ ),
+ ),
+ ("float32_col", pandas.Series([1.0, 2.0, 3.0], dtype="float32")),
+ ("float64_col", pandas.Series([4.0, 5.0, 6.0], dtype="float64")),
+ ("int8_col", pandas.Series([-12, -11, -10], dtype="int8")),
+ ("int16_col", pandas.Series([-9, -8, -7], dtype="int16")),
+ ("int32_col", pandas.Series([-6, -5, -4], dtype="int32")),
+ ("int64_col", pandas.Series([-3, -2, -1], dtype="int64")),
+ ("uint8_col", pandas.Series([0, 1, 2], dtype="uint8")),
+ ("uint16_col", pandas.Series([3, 4, 5], dtype="uint16")),
+ ("uint32_col", pandas.Series([6, 7, 8], dtype="uint32")),
+ (
+ "date_col",
+ pandas.Series(
+ [
+ datetime.date(2010, 1, 2),
+ datetime.date(2011, 2, 3),
+ datetime.date(2012, 3, 14),
+ ],
+ dtype="dbdate",
+ ),
+ ),
+ (
+ "time_col",
+ pandas.Series(
+ [
+ datetime.time(3, 44, 50),
+ datetime.time(14, 50, 59),
+ datetime.time(15, 16),
+ ],
+ dtype="dbtime",
+ ),
+ ),
+ ("array_bool_col", pandas.Series([[True], [False], [True]])),
+ (
+ "array_ts_col",
+ pandas.Series(
+ [
+ [
+ datetime.datetime(
+ 2010, 1, 2, 3, 44, 50, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ [
+ datetime.datetime(
+ 2011, 2, 3, 14, 50, 59, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ [
+ datetime.datetime(
+ 2012, 3, 14, 15, 16, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ ],
+ ),
+ ),
+ (
+ "array_dt_col_no_tz",
+ pandas.Series(
+ [
+ [datetime.datetime(2010, 1, 2, 3, 44, 50)],
+ [datetime.datetime(2011, 2, 3, 14, 50, 59)],
+ [datetime.datetime(2012, 3, 14, 15, 16)],
+ ],
+ ),
+ ),
+ (
+ "array_float32_col",
+ pandas.Series(
+ [numpy.array([_], dtype="float32") for _ in [1.0, 2.0, 3.0]]
+ ),
+ ),
+ (
+ "array_float64_col",
+ pandas.Series(
+ [numpy.array([_], dtype="float64") for _ in [4.0, 5.0, 6.0]]
+ ),
+ ),
+ (
+ "array_int8_col",
+ pandas.Series(
+ [numpy.array([_], dtype="int8") for _ in [-12, -11, -10]]
+ ),
+ ),
+ (
+ "array_int16_col",
+ pandas.Series([numpy.array([_], dtype="int16") for _ in [-9, -8, -7]]),
+ ),
+ (
+ "array_int32_col",
+ pandas.Series([numpy.array([_], dtype="int32") for _ in [-6, -5, -4]]),
+ ),
+ (
+ "array_int64_col",
+ pandas.Series([numpy.array([_], dtype="int64") for _ in [-3, -2, -1]]),
+ ),
+ (
+ "array_uint8_col",
+ pandas.Series([numpy.array([_], dtype="uint8") for _ in [0, 1, 2]]),
+ ),
+ (
+ "array_uint16_col",
+ pandas.Series([numpy.array([_], dtype="uint16") for _ in [3, 4, 5]]),
+ ),
+ (
+ "array_uint32_col",
+ pandas.Series([numpy.array([_], dtype="uint32") for _ in [6, 7, 8]]),
+ ),
+ ]
+ )
+ dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
+
+ table_id = "{}.{}.load_table_from_dataframe_w_automatic_schema".format(
+ bigquery_client.project, dataset_id
+ )
+
+ load_job = bigquery_client.load_table_from_dataframe(dataframe, table_id)
+ load_job.result()
+
+ table = bigquery_client.get_table(table_id)
+ assert tuple(table.schema) == (
+ bigquery.SchemaField("bool_col", "BOOLEAN"),
+ bigquery.SchemaField("ts_col", "TIMESTAMP"),
+ bigquery.SchemaField("dt_col_no_tz", "DATETIME"),
+ bigquery.SchemaField("float32_col", "FLOAT"),
+ bigquery.SchemaField("float64_col", "FLOAT"),
+ bigquery.SchemaField("int8_col", "INTEGER"),
+ bigquery.SchemaField("int16_col", "INTEGER"),
+ bigquery.SchemaField("int32_col", "INTEGER"),
+ bigquery.SchemaField("int64_col", "INTEGER"),
+ bigquery.SchemaField("uint8_col", "INTEGER"),
+ bigquery.SchemaField("uint16_col", "INTEGER"),
+ bigquery.SchemaField("uint32_col", "INTEGER"),
+ bigquery.SchemaField("date_col", "DATE"),
+ bigquery.SchemaField("time_col", "TIME"),
+ bigquery.SchemaField("array_bool_col", "BOOLEAN", mode="REPEATED"),
+ bigquery.SchemaField("array_ts_col", "TIMESTAMP", mode="REPEATED"),
+ bigquery.SchemaField("array_dt_col_no_tz", "DATETIME", mode="REPEATED"),
+ bigquery.SchemaField("array_float32_col", "FLOAT", mode="REPEATED"),
+ bigquery.SchemaField("array_float64_col", "FLOAT", mode="REPEATED"),
+ bigquery.SchemaField("array_int8_col", "INTEGER", mode="REPEATED"),
+ bigquery.SchemaField("array_int16_col", "INTEGER", mode="REPEATED"),
+ bigquery.SchemaField("array_int32_col", "INTEGER", mode="REPEATED"),
+ bigquery.SchemaField("array_int64_col", "INTEGER", mode="REPEATED"),
+ bigquery.SchemaField("array_uint8_col", "INTEGER", mode="REPEATED"),
+ bigquery.SchemaField("array_uint16_col", "INTEGER", mode="REPEATED"),
+ bigquery.SchemaField("array_uint32_col", "INTEGER", mode="REPEATED"),
+ )
+
+ assert numpy.array(
+ sorted(map(list, bigquery_client.list_rows(table)), key=lambda r: r[5]),
+ dtype="object",
+ ).transpose().tolist() == [
+ # bool_col
+ [True, False, True],
+ # ts_col
+ [
+ datetime.datetime(2010, 1, 2, 3, 44, 50, tzinfo=datetime.timezone.utc),
+ datetime.datetime(2011, 2, 3, 14, 50, 59, tzinfo=datetime.timezone.utc),
+ datetime.datetime(2012, 3, 14, 15, 16, tzinfo=datetime.timezone.utc),
+ ],
+ # dt_col_no_tz
+ [
+ datetime.datetime(2010, 1, 2, 3, 44, 50),
+ datetime.datetime(2011, 2, 3, 14, 50, 59),
+ datetime.datetime(2012, 3, 14, 15, 16),
+ ],
+ # float32_col
+ [1.0, 2.0, 3.0],
+ # float64_col
+ [4.0, 5.0, 6.0],
+ # int8_col
+ [-12, -11, -10],
+ # int16_col
+ [-9, -8, -7],
+ # int32_col
+ [-6, -5, -4],
+ # int64_col
+ [-3, -2, -1],
+ # uint8_col
+ [0, 1, 2],
+ # uint16_col
+ [3, 4, 5],
+ # uint32_col
+ [6, 7, 8],
+ # date_col
+ [
+ datetime.date(2010, 1, 2),
+ datetime.date(2011, 2, 3),
+ datetime.date(2012, 3, 14),
+ ],
+ # time_col
+ [datetime.time(3, 44, 50), datetime.time(14, 50, 59), datetime.time(15, 16)],
+ # array_bool_col
+ [[True], [False], [True]],
+ # array_ts_col
+ [
+ [datetime.datetime(2010, 1, 2, 3, 44, 50, tzinfo=datetime.timezone.utc)],
+ [datetime.datetime(2011, 2, 3, 14, 50, 59, tzinfo=datetime.timezone.utc)],
+ [datetime.datetime(2012, 3, 14, 15, 16, tzinfo=datetime.timezone.utc)],
+ ],
+ # array_dt_col
+ [
+ [datetime.datetime(2010, 1, 2, 3, 44, 50)],
+ [datetime.datetime(2011, 2, 3, 14, 50, 59)],
+ [datetime.datetime(2012, 3, 14, 15, 16)],
+ ],
+ # array_float32_col
+ [[1.0], [2.0], [3.0]],
+ # array_float64_col
+ [[4.0], [5.0], [6.0]],
+ # array_int8_col
+ [[-12], [-11], [-10]],
+ # array_int16_col
+ [[-9], [-8], [-7]],
+ # array_int32_col
+ [[-6], [-5], [-4]],
+ # array_int64_col
+ [[-3], [-2], [-1]],
+ # array_uint8_col
+ [[0], [1], [2]],
+ # array_uint16_col
+ [[3], [4], [5]],
+ # array_uint32_col
+ [[6], [7], [8]],
+ ]
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_load_table_from_dataframe_w_nullable_int64_datatype(
+ bigquery_client, dataset_id
+):
+ """Test that a DataFrame containing column with None-type values and int64 datatype
+ can be uploaded if a BigQuery schema is specified.
+
+ https://github.com/googleapis/python-bigquery/issues/22
+ """
+ table_id = "{}.{}.load_table_from_dataframe_w_nullable_int64_datatype".format(
+ bigquery_client.project, dataset_id
+ )
+ table_schema = (bigquery.SchemaField("x", "INTEGER", mode="NULLABLE"),)
+ table = helpers.retry_403(bigquery_client.create_table)(
+ bigquery.Table(table_id, schema=table_schema)
+ )
+
+ df_data = collections.OrderedDict(
+ [("x", pandas.Series([1, 2, None, 4], dtype="Int64"))]
+ )
+ dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
+ load_job = bigquery_client.load_table_from_dataframe(dataframe, table_id)
+ load_job.result()
+ table = bigquery_client.get_table(table_id)
+ assert tuple(table.schema) == (bigquery.SchemaField("x", "INTEGER"),)
+ assert table.num_rows == 4
+
+
+@pytest.mark.skipif(
+ PANDAS_INSTALLED_VERSION[0:2].startswith("0."),
+ reason="Only `pandas version >=1.0.0` is supported",
+)
+def test_load_table_from_dataframe_w_nullable_int64_datatype_automatic_schema(
+ bigquery_client, dataset_id, table_id
+):
+ """Test that a DataFrame containing column with None-type values and int64 datatype
+ can be uploaded without specifying a schema.
+
+ https://github.com/googleapis/python-bigquery/issues/22
+ """
+
+ df_data = collections.OrderedDict(
+ [("x", pandas.Series([1, 2, None, 4], dtype="Int64"))]
+ )
+ dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
+ load_job = bigquery_client.load_table_from_dataframe(dataframe, table_id)
+ load_job.result()
+ table = bigquery_client.get_table(table_id)
+ assert tuple(table.schema) == (bigquery.SchemaField("x", "INTEGER"),)
+ assert table.num_rows == 4
+
+
+def test_load_table_from_dataframe_w_nulls(bigquery_client, dataset_id):
+ """Test that a DataFrame with null columns can be uploaded if a
+ BigQuery schema is specified.
+
+ See: https://github.com/googleapis/google-cloud-python/issues/7370
+ """
+ # Schema with all scalar types.
+ table_schema = (
+ bigquery.SchemaField("bool_col", "BOOLEAN"),
+ bigquery.SchemaField("bytes_col", "BYTES"),
+ bigquery.SchemaField("date_col", "DATE"),
+ bigquery.SchemaField("dt_col", "DATETIME"),
+ bigquery.SchemaField("float_col", "FLOAT"),
+ bigquery.SchemaField("geo_col", "GEOGRAPHY"),
+ bigquery.SchemaField("int_col", "INTEGER"),
+ bigquery.SchemaField("num_col", "NUMERIC"),
+ bigquery.SchemaField("str_col", "STRING"),
+ bigquery.SchemaField("time_col", "TIME"),
+ bigquery.SchemaField("ts_col", "TIMESTAMP"),
+ bigquery.SchemaField("bignum_col", "BIGNUMERIC"),
+ )
+
+ num_rows = 100
+ nulls = [None] * num_rows
+ df_data = [
+ ("bool_col", nulls),
+ ("bytes_col", nulls),
+ ("date_col", nulls),
+ ("dt_col", nulls),
+ ("float_col", nulls),
+ ("geo_col", nulls),
+ ("int_col", nulls),
+ ("num_col", nulls),
+ ("str_col", nulls),
+ ("time_col", nulls),
+ ("ts_col", nulls),
+ ("bignum_col", nulls),
+ ]
+ df_data = collections.OrderedDict(df_data)
+ dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
+
+ table_id = "{}.{}.load_table_from_dataframe_w_nulls".format(
+ bigquery_client.project, dataset_id
+ )
+
+ # Create the table before loading so that schema mismatch errors are
+ # identified.
+ table = helpers.retry_403(bigquery_client.create_table)(
+ bigquery.Table(table_id, schema=table_schema)
+ )
+
+ job_config = bigquery.LoadJobConfig(schema=table_schema)
+ load_job = bigquery_client.load_table_from_dataframe(
+ dataframe, table_id, job_config=job_config
+ )
+ load_job.result()
+
+ table = bigquery_client.get_table(table)
+ assert tuple(table.schema) == table_schema
+ assert table.num_rows == num_rows
+
+
+def test_load_table_from_dataframe_w_required(bigquery_client, dataset_id):
+ """Test that a DataFrame can be uploaded to a table with required columns.
+
+ See: https://github.com/googleapis/google-cloud-python/issues/8093
+ """
+ table_schema = (
+ bigquery.SchemaField("name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ )
+
+ records = [{"name": "Chip", "age": 2}, {"name": "Dale", "age": 3}]
+ dataframe = pandas.DataFrame(records, columns=["name", "age"])
+ table_id = "{}.{}.load_table_from_dataframe_w_required".format(
+ bigquery_client.project, dataset_id
+ )
+
+ # Create the table before loading so that schema mismatch errors are
+ # identified.
+ table = helpers.retry_403(bigquery_client.create_table)(
+ bigquery.Table(table_id, schema=table_schema)
+ )
+
+ load_job = bigquery_client.load_table_from_dataframe(dataframe, table_id)
+ load_job.result()
+
+ table = bigquery_client.get_table(table)
+ assert tuple(table.schema) == table_schema
+ assert table.num_rows == 2
+ for field in table.schema:
+ assert field.mode == "REQUIRED"
+
+
+def test_load_table_from_dataframe_w_required_but_local_nulls_fails(
+ bigquery_client, dataset_id
+):
+ """Test that a DataFrame with nulls can't be uploaded to a table with
+ required columns.
+
+ See: https://github.com/googleapis/python-bigquery/issues/1692
+ """
+ table_schema = (
+ bigquery.SchemaField("name", "STRING", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ )
+
+ records = [
+ {"name": "Chip", "age": 2},
+ {"name": "Dale", "age": 3},
+ {"name": None, "age": None},
+ {"name": "Alvin", "age": 4},
+ ]
+ dataframe = pandas.DataFrame(records, columns=["name", "age"])
+ table_id = (
+ "{}.{}.load_table_from_dataframe_w_required_but_local_nulls_fails".format(
+ bigquery_client.project, dataset_id
+ )
+ )
+
+ # Create the table before loading so that schema mismatch errors are
+ # identified.
+ helpers.retry_403(bigquery_client.create_table)(
+ bigquery.Table(table_id, schema=table_schema)
+ )
+
+ with pytest.raises(google.api_core.exceptions.BadRequest, match="null"):
+ bigquery_client.load_table_from_dataframe(dataframe, table_id).result()
+
+
+def test_load_table_from_dataframe_w_explicit_schema(bigquery_client, dataset_id):
+ # Schema with all scalar types.
+ # See:
+ # https://github.com/googleapis/python-bigquery/issues/61
+ # https://issuetracker.google.com/issues/151765076
+ table_schema = (
+ bigquery.SchemaField("row_num", "INTEGER"),
+ bigquery.SchemaField("bool_col", "BOOLEAN"),
+ bigquery.SchemaField("bytes_col", "BYTES"),
+ bigquery.SchemaField("date_col", "DATE"),
+ bigquery.SchemaField("dt_col", "DATETIME"),
+ bigquery.SchemaField("float_col", "FLOAT"),
+ bigquery.SchemaField("geo_col", "GEOGRAPHY"),
+ bigquery.SchemaField("int_col", "INTEGER"),
+ bigquery.SchemaField("num_col", "NUMERIC"),
+ bigquery.SchemaField("str_col", "STRING"),
+ bigquery.SchemaField("time_col", "TIME"),
+ bigquery.SchemaField("ts_col", "TIMESTAMP"),
+ bigquery.SchemaField("bignum_col", "BIGNUMERIC"),
+ )
+
+ df_data = [
+ ("row_num", [1, 2, 3]),
+ ("bool_col", [True, None, False]),
+ ("bytes_col", [b"abc", None, b"def"]),
+ ("date_col", [datetime.date(1, 1, 1), None, datetime.date(9999, 12, 31)]),
+ (
+ "dt_col",
+ [
+ datetime.datetime(1, 1, 1, 0, 0, 0),
+ None,
+ datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),
+ ],
+ ),
+ ("float_col", [float("-inf"), float("nan"), float("inf")]),
+ (
+ "geo_col",
+ ["POINT(30 10)", None, "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))"],
+ ),
+ ("int_col", [-9223372036854775808, None, 9223372036854775807]),
+ (
+ "num_col",
+ [
+ decimal.Decimal("-99999999999999999999999999999.999999999"),
+ None,
+ decimal.Decimal("99999999999999999999999999999.999999999"),
+ ],
+ ),
+ ("str_col", ["abc", None, "def"]),
+ (
+ "time_col",
+ [datetime.time(0, 0, 0), None, datetime.time(23, 59, 59, 999999)],
+ ),
+ (
+ "ts_col",
+ [
+ datetime.datetime(1, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
+ None,
+ datetime.datetime(
+ 9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ ),
+ (
+ "bignum_col",
+ [
+ decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
+ None,
+ decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
+ ],
+ ),
+ ]
+ df_data = collections.OrderedDict(df_data)
+ dataframe = pandas.DataFrame(df_data, dtype="object", columns=df_data.keys())
+
+ table_id = "{}.{}.load_table_from_dataframe_w_explicit_schema".format(
+ bigquery_client.project, dataset_id
+ )
+
+ job_config = bigquery.LoadJobConfig(schema=table_schema)
+ load_job = bigquery_client.load_table_from_dataframe(
+ dataframe, table_id, job_config=job_config
+ )
+ load_job.result()
+
+ table = bigquery_client.get_table(table_id)
+ assert tuple(table.schema) == table_schema
+ assert table.num_rows == 3
+
+ result = bigquery_client.list_rows(table).to_dataframe()
+ result.sort_values("row_num", inplace=True)
+
+ # Check that extreme DATE/DATETIME values are loaded correctly.
+ # https://github.com/googleapis/python-bigquery/issues/1076
+ assert result["date_col"][0] == datetime.date(1, 1, 1)
+ assert result["date_col"][2] == datetime.date(9999, 12, 31)
+ assert result["dt_col"][0] == datetime.datetime(1, 1, 1, 0, 0, 0)
+ assert result["dt_col"][2] == datetime.datetime(9999, 12, 31, 23, 59, 59, 999999)
+ assert result["ts_col"][0] == datetime.datetime(
+ 1, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc
+ )
+ assert result["ts_col"][2] == datetime.datetime(
+ 9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc
+ )
+
+
+def test_load_table_from_dataframe_w_struct_datatype(bigquery_client, dataset_id):
+ """Test that a DataFrame with struct datatype can be uploaded if a
+ BigQuery schema is specified.
+
+ https://github.com/googleapis/python-bigquery/issues/21
+ """
+ table_id = "{}.{}.load_table_from_dataframe_w_struct_datatype".format(
+ bigquery_client.project, dataset_id
+ )
+ table_schema = [
+ bigquery.SchemaField(
+ "bar",
+ "RECORD",
+ fields=[
+ bigquery.SchemaField("id", "INTEGER", mode="REQUIRED"),
+ bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ],
+ mode="REQUIRED",
+ ),
+ ]
+ table = helpers.retry_403(bigquery_client.create_table)(
+ bigquery.Table(table_id, schema=table_schema)
+ )
+
+ df_data = [{"id": 1, "age": 21}, {"id": 2, "age": 22}, {"id": 2, "age": 23}]
+ dataframe = pandas.DataFrame(data={"bar": df_data}, columns=["bar"])
+
+ load_job = bigquery_client.load_table_from_dataframe(dataframe, table_id)
+ load_job.result()
+
+ table = bigquery_client.get_table(table_id)
+ assert table.schema == table_schema
+ assert table.num_rows == 3
+
+
+def test_load_table_from_dataframe_w_explicit_schema_source_format_csv(
+ bigquery_client, dataset_id
+):
+ from google.cloud.bigquery.job import SourceFormat
+
+ table_schema = (
+ bigquery.SchemaField("bool_col", "BOOLEAN"),
+ bigquery.SchemaField("bytes_col", "BYTES"),
+ bigquery.SchemaField("date_col", "DATE"),
+ bigquery.SchemaField("dt_col", "DATETIME"),
+ bigquery.SchemaField("float_col", "FLOAT"),
+ bigquery.SchemaField("geo_col", "GEOGRAPHY"),
+ bigquery.SchemaField("int_col", "INTEGER"),
+ bigquery.SchemaField("num_col", "NUMERIC"),
+ bigquery.SchemaField("bignum_col", "BIGNUMERIC"),
+ bigquery.SchemaField("str_col", "STRING"),
+ bigquery.SchemaField("time_col", "TIME"),
+ bigquery.SchemaField("ts_col", "TIMESTAMP"),
+ )
+ df_data = collections.OrderedDict(
+ [
+ ("bool_col", [True, None, False]),
+ ("bytes_col", ["abc", None, "def"]),
+ (
+ "date_col",
+ [datetime.date(1, 1, 1), None, datetime.date(9999, 12, 31)],
+ ),
+ (
+ "dt_col",
+ [
+ datetime.datetime(1, 1, 1, 0, 0, 0),
+ None,
+ datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),
+ ],
+ ),
+ ("float_col", [float("-inf"), float("nan"), float("inf")]),
+ (
+ "geo_col",
+ [
+ "POINT(30 10)",
+ None,
+ "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))",
+ ],
+ ),
+ ("int_col", [-9223372036854775808, None, 9223372036854775807]),
+ (
+ "num_col",
+ [
+ decimal.Decimal("-99999999999999999999999999999.999999999"),
+ None,
+ decimal.Decimal("99999999999999999999999999999.999999999"),
+ ],
+ ),
+ (
+ "bignum_col",
+ [
+ decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
+ None,
+ decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
+ ],
+ ),
+ ("str_col", ["abc", None, "def"]),
+ (
+ "time_col",
+ [datetime.time(0, 0, 0), None, datetime.time(23, 59, 59, 999999)],
+ ),
+ (
+ "ts_col",
+ [
+ datetime.datetime(1, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
+ None,
+ datetime.datetime(
+ 9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ ),
+ ]
+ )
+ dataframe = pandas.DataFrame(df_data, dtype="object", columns=df_data.keys())
+
+ table_id = "{}.{}.load_table_from_dataframe_w_explicit_schema_csv".format(
+ bigquery_client.project, dataset_id
+ )
+
+ job_config = bigquery.LoadJobConfig(
+ schema=table_schema, source_format=SourceFormat.CSV
+ )
+ load_job = bigquery_client.load_table_from_dataframe(
+ dataframe, table_id, job_config=job_config
+ )
+ load_job.result()
+
+ table = bigquery_client.get_table(table_id)
+ assert tuple(table.schema) == table_schema
+ assert table.num_rows == 3
+
+
+def test_load_table_from_dataframe_w_explicit_schema_source_format_csv_floats(
+ bigquery_client, dataset_id, table_id
+):
+ from google.cloud.bigquery.job import SourceFormat
+
+ table_schema = (bigquery.SchemaField("float_col", "FLOAT"),)
+ df_data = collections.OrderedDict(
+ [
+ (
+ "float_col",
+ [
+ 0.14285714285714285,
+ 0.51428571485748,
+ 0.87128748,
+ 1.807960649,
+ 2.0679610649,
+ 2.4406779661016949,
+ 3.7148514257,
+ 3.8571428571428572,
+ 1.51251252e40,
+ ],
+ ),
+ ]
+ )
+ dataframe = pandas.DataFrame(df_data, dtype="object", columns=df_data.keys())
+
+ job_config = bigquery.LoadJobConfig(
+ schema=table_schema, source_format=SourceFormat.CSV
+ )
+ load_job = bigquery_client.load_table_from_dataframe(
+ dataframe, table_id, job_config=job_config
+ )
+ load_job.result()
+
+ table = bigquery_client.get_table(table_id)
+ rows = bigquery_client.list_rows(table_id)
+ floats = [r.values()[0] for r in rows]
+ assert tuple(table.schema) == table_schema
+ assert table.num_rows == 9
+ assert floats == df_data["float_col"]
+
+
+def test_query_results_to_dataframe(bigquery_client):
+ QUERY = """
+ SELECT id, `by`, timestamp, dead
+ FROM `bigquery-public-data.hacker_news.full`
+ LIMIT 10
+ """
+
+ df = bigquery_client.query(QUERY).result().to_dataframe()
+
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 10 # verify the number of rows
+ column_names = ["id", "by", "timestamp", "dead"]
+ assert list(df) == column_names # verify the column names
+ exp_datatypes = {
+ "id": int,
+ "by": str,
+ "timestamp": pandas.Timestamp,
+ "dead": bool,
+ }
+ for _, row in df.iterrows():
+ for col in column_names:
+ # all the schema fields are nullable, so None is acceptable
+ if not pandas.isna(row[col]):
+ assert isinstance(row[col], exp_datatypes[col])
+
+
+def test_query_results_to_dataframe_w_bqstorage(bigquery_client):
+ query = """
+ SELECT id, `by`, timestamp, dead
+ FROM `bigquery-public-data.hacker_news.full`
+ LIMIT 10
+ """
+
+ bqstorage_client = bigquery_storage.BigQueryReadClient(
+ credentials=bigquery_client._credentials
+ )
+
+ df = bigquery_client.query(query).result().to_dataframe(bqstorage_client)
+
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 10 # verify the number of rows
+ column_names = ["id", "by", "timestamp", "dead"]
+ assert list(df) == column_names
+ exp_datatypes = {
+ "id": int,
+ "by": str,
+ "timestamp": pandas.Timestamp,
+ "dead": bool,
+ }
+ for index, row in df.iterrows():
+ for col in column_names:
+ # all the schema fields are nullable, so None is acceptable
+ if not pandas.isna(row[col]):
+ assert isinstance(row[col], exp_datatypes[col])
+
+
+def test_insert_rows_from_dataframe(bigquery_client, dataset_id):
+ SF = bigquery.SchemaField
+ schema = [
+ SF("float_col", "FLOAT", mode="REQUIRED"),
+ SF("int_col", "INTEGER", mode="REQUIRED"),
+ SF("int64_col", "INTEGER", mode="NULLABLE"),
+ SF("bool_col", "BOOLEAN", mode="REQUIRED"),
+ SF("boolean_col", "BOOLEAN", mode="NULLABLE"),
+ SF("string_col", "STRING", mode="NULLABLE"),
+ SF("date_col", "DATE", mode="NULLABLE"),
+ SF("time_col", "TIME", mode="NULLABLE"),
+ ]
+
+ dataframe = pandas.DataFrame(
+ [
+ {
+ "float_col": 1.11,
+ "bool_col": True,
+ "string_col": "my string",
+ "int_col": 10,
+ "date_col": datetime.date(2021, 1, 1),
+ "time_col": datetime.time(21, 1, 1),
+ },
+ {
+ "float_col": 2.22,
+ "bool_col": False,
+ "string_col": "another string",
+ "int_col": 20,
+ "date_col": datetime.date(2021, 1, 2),
+ "time_col": datetime.time(21, 1, 2),
+ },
+ {
+ "float_col": 3.33,
+ "bool_col": False,
+ "string_col": "another string",
+ "int_col": 30,
+ "date_col": datetime.date(2021, 1, 3),
+ "time_col": datetime.time(21, 1, 3),
+ },
+ {
+ "float_col": 4.44,
+ "bool_col": True,
+ "string_col": "another string",
+ "int_col": 40,
+ "date_col": datetime.date(2021, 1, 4),
+ "time_col": datetime.time(21, 1, 4),
+ },
+ {
+ "float_col": 5.55,
+ "bool_col": False,
+ "string_col": "another string",
+ "int_col": 50,
+ "date_col": datetime.date(2021, 1, 5),
+ "time_col": datetime.time(21, 1, 5),
+ },
+ {
+ "float_col": 6.66,
+ "bool_col": True,
+ # Include a NaN value, because pandas often uses NaN as a
+ # NULL value indicator.
+ "string_col": float("NaN"),
+ "int_col": 60,
+ "date_col": datetime.date(2021, 1, 6),
+ "time_col": datetime.time(21, 1, 6),
+ },
+ ]
+ )
+ dataframe["date_col"] = dataframe["date_col"].astype("dbdate")
+ dataframe["time_col"] = dataframe["time_col"].astype("dbtime")
+
+ # Support nullable integer and boolean dtypes.
+ # https://github.com/googleapis/python-bigquery/issues/1815
+ dataframe["int64_col"] = pandas.Series(
+ [-11, -22, pandas.NA, -44, -55, -66], dtype="Int64"
+ )
+ dataframe["boolean_col"] = pandas.Series(
+ [True, False, True, pandas.NA, True, False], dtype="boolean"
+ )
+
+ table_id = f"{bigquery_client.project}.{dataset_id}.test_insert_rows_from_dataframe"
+ table_arg = bigquery.Table(table_id, schema=schema)
+ table = helpers.retry_403(bigquery_client.create_table)(table_arg)
+
+ chunk_errors = bigquery_client.insert_rows_from_dataframe(
+ table, dataframe, chunk_size=3
+ )
+ for errors in chunk_errors:
+ assert not errors
+ expected = [
+ # Pandas often represents NULL values as NaN. Convert to None for
+ # easier comparison.
+ tuple(None if pandas.isna(col) else col for col in data_row)
+ for data_row in dataframe.itertuples(index=False)
+ ]
+
+ # Use query to fetch rows instead of listing directly from the table so
+ # that we get values from the streaming buffer "within a few seconds".
+ # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
+ @google.api_core.retry.Retry(
+ predicate=google.api_core.retry.if_exception_type(MissingDataError)
+ )
+ def get_rows():
+ rows = list(
+ bigquery_client.query(
+ "SELECT * FROM `{}.{}.{}`".format(
+ table.project, table.dataset_id, table.table_id
+ )
+ )
+ )
+ if len(rows) != len(expected):
+ raise MissingDataError()
+ return rows
+
+ rows = get_rows()
+ sorted_rows = sorted(rows, key=operator.attrgetter("int_col"))
+ row_tuples = [r.values() for r in sorted_rows]
+
+ for row, expected_row in zip(row_tuples, expected):
+ assert (
+ # Use Counter to verify the same number of values in each, because
+ # column order does not matter.
+ collections.Counter(row)
+ == collections.Counter(expected_row)
+ )
+
+
+def test_nested_table_to_dataframe(bigquery_client, dataset_id):
+ from google.cloud.bigquery.job import SourceFormat
+ from google.cloud.bigquery.job import WriteDisposition
+
+ SF = bigquery.SchemaField
+ schema = [
+ SF("string_col", "STRING", mode="NULLABLE"),
+ SF(
+ "record_col",
+ "RECORD",
+ mode="NULLABLE",
+ fields=[
+ SF("nested_string", "STRING", mode="NULLABLE"),
+ SF("nested_repeated", "INTEGER", mode="REPEATED"),
+ SF(
+ "nested_record",
+ "RECORD",
+ mode="NULLABLE",
+ fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
+ ),
+ ],
+ ),
+ SF("bigfloat_col", "FLOAT", mode="NULLABLE"),
+ SF("smallfloat_col", "FLOAT", mode="NULLABLE"),
+ ]
+ record = {
+ "nested_string": "another string value",
+ "nested_repeated": [0, 1, 2],
+ "nested_record": {"nested_nested_string": "some deep insight"},
+ }
+ to_insert = [
+ {
+ "string_col": "Some value",
+ "record_col": record,
+ "bigfloat_col": 3.14,
+ "smallfloat_col": 2.72,
+ }
+ ]
+ rows = [json.dumps(row) for row in to_insert]
+ body = io.BytesIO("{}\n".format("\n".join(rows)).encode("ascii"))
+ table_id = f"{bigquery_client.project}.{dataset_id}.test_nested_table_to_dataframe"
+ job_config = bigquery.LoadJobConfig()
+ job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
+ job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
+ job_config.schema = schema
+ # Load a table using a local JSON file from memory.
+ bigquery_client.load_table_from_file(body, table_id, job_config=job_config).result()
+
+ df = bigquery_client.list_rows(table_id, selected_fields=schema).to_dataframe(
+ dtypes={"smallfloat_col": "float16"}
+ )
+
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 1 # verify the number of rows
+ exp_columns = ["string_col", "record_col", "bigfloat_col", "smallfloat_col"]
+ assert list(df) == exp_columns # verify the column names
+ row = df.iloc[0]
+ # verify the row content
+ assert row["string_col"] == "Some value"
+ expected_keys = tuple(sorted(record.keys()))
+ row_keys = tuple(sorted(row["record_col"].keys()))
+ assert row_keys == expected_keys
+ # Can't compare numpy arrays, which pyarrow encodes the embedded
+ # repeated column to, so convert to list.
+ assert list(row["record_col"]["nested_repeated"]) == [0, 1, 2]
+ # verify that nested data can be accessed with indices/keys
+ assert row["record_col"]["nested_repeated"][0] == 0
+ assert (
+ row["record_col"]["nested_record"]["nested_nested_string"]
+ == "some deep insight"
+ )
+ # verify dtypes
+ assert df.dtypes["bigfloat_col"].name == "float64"
+ assert df.dtypes["smallfloat_col"].name == "float16"
+
+
+def test_list_rows_max_results_w_bqstorage(bigquery_client):
+ table_ref = bigquery.DatasetReference("bigquery-public-data", "utility_us").table(
+ "country_code_iso"
+ )
+ bqstorage_client = bigquery_storage.BigQueryReadClient(
+ credentials=bigquery_client._credentials
+ )
+
+ row_iterator = bigquery_client.list_rows(
+ table_ref,
+ selected_fields=[bigquery.SchemaField("country_name", "STRING")],
+ max_results=100,
+ )
+ with pytest.warns(
+ UserWarning, match="Cannot use bqstorage_client if max_results is set"
+ ):
+ dataframe = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
+
+ assert len(dataframe.index) == 100
+
+
+@pytest.mark.skipif(PANDAS_INSTALLED_VERSION[0:2] not in ["0.", "1."], reason="")
+@pytest.mark.parametrize(
+ ("max_results",),
+ (
+ (None,),
+ (10,),
+ ), # Use BQ Storage API. # Use REST API.
+)
+def test_list_rows_nullable_scalars_dtypes(bigquery_client, scalars_table, max_results):
+ # TODO(GH#836): Avoid INTERVAL columns until they are supported by the
+ # BigQuery Storage API and pyarrow.
+ schema = [
+ bigquery.SchemaField("bool_col", enums.SqlTypeNames.BOOLEAN),
+ bigquery.SchemaField("bignumeric_col", enums.SqlTypeNames.BIGNUMERIC),
+ bigquery.SchemaField("bytes_col", enums.SqlTypeNames.BYTES),
+ bigquery.SchemaField("date_col", enums.SqlTypeNames.DATE),
+ bigquery.SchemaField("datetime_col", enums.SqlTypeNames.DATETIME),
+ bigquery.SchemaField("float64_col", enums.SqlTypeNames.FLOAT64),
+ bigquery.SchemaField("geography_col", enums.SqlTypeNames.GEOGRAPHY),
+ bigquery.SchemaField("int64_col", enums.SqlTypeNames.INT64),
+ bigquery.SchemaField("numeric_col", enums.SqlTypeNames.NUMERIC),
+ bigquery.SchemaField("string_col", enums.SqlTypeNames.STRING),
+ bigquery.SchemaField("time_col", enums.SqlTypeNames.TIME),
+ bigquery.SchemaField("timestamp_col", enums.SqlTypeNames.TIMESTAMP),
+ ]
+
+ df = bigquery_client.list_rows(
+ scalars_table,
+ max_results=max_results,
+ selected_fields=schema,
+ ).to_dataframe()
+
+ assert df.dtypes["bool_col"].name == "boolean"
+ assert df.dtypes["datetime_col"].name == "datetime64[ns]"
+ assert df.dtypes["float64_col"].name == "float64"
+ assert df.dtypes["int64_col"].name == "Int64"
+ assert df.dtypes["timestamp_col"].name == "datetime64[ns, UTC]"
+ assert df.dtypes["date_col"].name == "dbdate"
+ assert df.dtypes["time_col"].name == "dbtime"
+
+ # decimal.Decimal is used to avoid loss of precision.
+ assert df.dtypes["bignumeric_col"].name == "object"
+ assert df.dtypes["numeric_col"].name == "object"
+
+ # pandas uses Python string and bytes objects.
+ assert df.dtypes["bytes_col"].name == "object"
+ assert df.dtypes["string_col"].name == "object"
+
+
+@pytest.mark.parametrize(
+ ("max_results",),
+ (
+ (None,),
+ (10,),
+ ), # Use BQ Storage API. # Use REST API.
+)
+def test_list_rows_nullable_scalars_extreme_dtypes(
+ bigquery_client, scalars_extreme_table, max_results
+):
+ # TODO(GH#836): Avoid INTERVAL columns until they are supported by the
+ # BigQuery Storage API and pyarrow.
+ schema = [
+ bigquery.SchemaField("bool_col", enums.SqlTypeNames.BOOLEAN),
+ bigquery.SchemaField("bignumeric_col", enums.SqlTypeNames.BIGNUMERIC),
+ bigquery.SchemaField("bytes_col", enums.SqlTypeNames.BYTES),
+ bigquery.SchemaField("date_col", enums.SqlTypeNames.DATE),
+ bigquery.SchemaField("datetime_col", enums.SqlTypeNames.DATETIME),
+ bigquery.SchemaField("float64_col", enums.SqlTypeNames.FLOAT64),
+ bigquery.SchemaField("geography_col", enums.SqlTypeNames.GEOGRAPHY),
+ bigquery.SchemaField("int64_col", enums.SqlTypeNames.INT64),
+ bigquery.SchemaField("numeric_col", enums.SqlTypeNames.NUMERIC),
+ bigquery.SchemaField("string_col", enums.SqlTypeNames.STRING),
+ bigquery.SchemaField("time_col", enums.SqlTypeNames.TIME),
+ bigquery.SchemaField("timestamp_col", enums.SqlTypeNames.TIMESTAMP),
+ ]
+
+ df = bigquery_client.list_rows(
+ scalars_extreme_table,
+ max_results=max_results,
+ selected_fields=schema,
+ ).to_dataframe()
+
+ # Extreme values are out-of-bounds for pandas datetime64 values, which use
+ # nanosecond precision. Values before 1677-09-21 and after 2262-04-11 must
+ # be represented with object.
+ # https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timestamp-limitations
+ assert df.dtypes["date_col"].name == "object"
+ assert df.dtypes["datetime_col"].name == "object"
+ assert df.dtypes["timestamp_col"].name == "object"
+
+ # These pandas dtypes can handle the same ranges as BigQuery.
+ assert df.dtypes["bool_col"].name == "boolean"
+ assert df.dtypes["float64_col"].name == "float64"
+ assert df.dtypes["int64_col"].name == "Int64"
+ assert df.dtypes["time_col"].name == "dbtime"
+
+ # decimal.Decimal is used to avoid loss of precision.
+ assert df.dtypes["numeric_col"].name == "object"
+ assert df.dtypes["bignumeric_col"].name == "object"
+
+ # pandas uses Python string and bytes objects.
+ assert df.dtypes["bytes_col"].name == "object"
+ assert df.dtypes["string_col"].name == "object"
+
+
+@pytest.mark.parametrize(
+ ("max_results",),
+ (
+ (None,),
+ (10,),
+ ), # Use BQ Storage API. # Use REST API.
+)
+def test_list_rows_nullable_scalars_extreme_dtypes_w_custom_dtype(
+ bigquery_client, scalars_extreme_table, max_results
+):
+ # TODO(GH#836): Avoid INTERVAL columns until they are supported by the
+ # BigQuery Storage API and pyarrow.
+ schema = [
+ bigquery.SchemaField("bool_col", enums.SqlTypeNames.BOOLEAN),
+ bigquery.SchemaField("bignumeric_col", enums.SqlTypeNames.BIGNUMERIC),
+ bigquery.SchemaField("bytes_col", enums.SqlTypeNames.BYTES),
+ bigquery.SchemaField("date_col", enums.SqlTypeNames.DATE),
+ bigquery.SchemaField("datetime_col", enums.SqlTypeNames.DATETIME),
+ bigquery.SchemaField("float64_col", enums.SqlTypeNames.FLOAT64),
+ bigquery.SchemaField("geography_col", enums.SqlTypeNames.GEOGRAPHY),
+ bigquery.SchemaField("int64_col", enums.SqlTypeNames.INT64),
+ bigquery.SchemaField("numeric_col", enums.SqlTypeNames.NUMERIC),
+ bigquery.SchemaField("string_col", enums.SqlTypeNames.STRING),
+ bigquery.SchemaField("time_col", enums.SqlTypeNames.TIME),
+ bigquery.SchemaField("timestamp_col", enums.SqlTypeNames.TIMESTAMP),
+ ]
+
+ df = bigquery_client.list_rows(
+ scalars_extreme_table,
+ max_results=max_results,
+ selected_fields=schema,
+ ).to_dataframe(
+ bool_dtype=pandas.BooleanDtype(),
+ int_dtype=pandas.Int64Dtype(),
+ float_dtype=(
+ pandas.Float64Dtype()
+ if hasattr(pandas, "Float64Dtype")
+ else pandas.StringDtype()
+ ),
+ string_dtype=pandas.StringDtype(),
+ date_dtype=(
+ pandas.ArrowDtype(pyarrow.date32())
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ datetime_dtype=(
+ pandas.ArrowDtype(pyarrow.timestamp("us"))
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ time_dtype=(
+ pandas.ArrowDtype(pyarrow.time64("us"))
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ timestamp_dtype=(
+ pandas.ArrowDtype(pyarrow.timestamp("us", tz="UTC"))
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ )
+
+ # These pandas dtypes are handled by the custom dtypes.
+ assert df.dtypes["bool_col"].name == "boolean"
+ assert df.dtypes["float64_col"].name == "Float64"
+ assert df.dtypes["int64_col"].name == "Int64"
+ assert df.dtypes["string_col"].name == "string"
+
+ assert (
+ df.dtypes["date_col"].name == "date32[day][pyarrow]"
+ if hasattr(pandas, "ArrowDtype")
+ else "datetime64[ns]"
+ )
+ assert (
+ df.dtypes["datetime_col"].name == "timestamp[us][pyarrow]"
+ if hasattr(pandas, "ArrowDtype")
+ else "object"
+ )
+ assert (
+ df.dtypes["timestamp_col"].name == "timestamp[us, tz=UTC][pyarrow]"
+ if hasattr(pandas, "ArrowDtype")
+ else "object"
+ )
+ assert (
+ df.dtypes["time_col"].name == "time64[us][pyarrow]"
+ if hasattr(pandas, "ArrowDtype")
+ else "object"
+ )
+
+ # decimal.Decimal is used to avoid loss of precision.
+ assert df.dtypes["numeric_col"].name == "object"
+ assert df.dtypes["bignumeric_col"].name == "object"
+
+ # pandas uses Python bytes objects.
+ assert df.dtypes["bytes_col"].name == "object"
+
+
+def test_upload_time_and_datetime_56(bigquery_client, dataset_id):
+ df = pandas.DataFrame(
+ dict(
+ dt=[
+ datetime.datetime(2020, 1, 8, 8, 0, 0),
+ datetime.datetime(
+ 2020,
+ 1,
+ 8,
+ 8,
+ 0,
+ 0,
+ tzinfo=datetime.timezone(datetime.timedelta(hours=-7)),
+ ),
+ ],
+ t=[datetime.time(0, 0, 10, 100001), None],
+ )
+ )
+ table = f"{dataset_id}.test_upload_time_and_datetime"
+ bigquery_client.load_table_from_dataframe(df, table).result()
+ data = list(map(list, bigquery_client.list_rows(table)))
+ assert data == [
+ [
+ datetime.datetime(2020, 1, 8, 8, 0, tzinfo=datetime.timezone.utc),
+ datetime.time(0, 0, 10, 100001),
+ ],
+ [datetime.datetime(2020, 1, 8, 15, 0, tzinfo=datetime.timezone.utc), None],
+ ]
+
+ from google.cloud.bigquery import job, schema
+
+ table = f"{dataset_id}.test_upload_time_and_datetime_dt"
+ config = job.LoadJobConfig(
+ schema=[schema.SchemaField("dt", "DATETIME"), schema.SchemaField("t", "TIME")]
+ )
+
+ bigquery_client.load_table_from_dataframe(df, table, job_config=config).result()
+ data = list(map(list, bigquery_client.list_rows(table)))
+ assert data == [
+ [datetime.datetime(2020, 1, 8, 8, 0), datetime.time(0, 0, 10, 100001)],
+ [datetime.datetime(2020, 1, 8, 15, 0), None],
+ ]
+
+
+def test_to_dataframe_geography_as_objects(bigquery_client, dataset_id):
+ wkt = pytest.importorskip("shapely.wkt")
+ bigquery_client.query(
+ f"create table {dataset_id}.lake (name string, geog geography)"
+ ).result()
+ bigquery_client.query(
+ f"""
+ insert into {dataset_id}.lake (name, geog) values
+ ('foo', st_geogfromtext('point(0 0)')),
+ ('bar', st_geogfromtext('point(0 1)')),
+ ('baz', null)
+ """
+ ).result()
+ df = bigquery_client.query(
+ f"select * from {dataset_id}.lake order by name"
+ ).to_dataframe(geography_as_object=True)
+ assert list(df["name"]) == ["bar", "baz", "foo"]
+ assert df["geog"][0] == wkt.loads("point(0 1)")
+ assert pandas.isna(df["geog"][1])
+ assert df["geog"][2] == wkt.loads("point(0 0)")
+
+
+def test_to_geodataframe(bigquery_client, dataset_id):
+ geopandas = pytest.importorskip("geopandas")
+ from shapely import wkt
+
+ bigquery_client.query(
+ f"create table {dataset_id}.geolake (name string, geog geography)"
+ ).result()
+ bigquery_client.query(
+ f"""
+ insert into {dataset_id}.geolake (name, geog) values
+ ('foo', st_geogfromtext('point(0 0)')),
+ ('bar', st_geogfromtext('polygon((0 0, 1 0, 1 1, 0 0))')),
+ ('baz', null)
+ """
+ ).result()
+ df = bigquery_client.query(
+ f"select * from {dataset_id}.geolake order by name"
+ ).to_geodataframe()
+ assert df["geog"][0] == wkt.loads("polygon((0 0, 1 0, 1 1, 0 0))")
+ assert pandas.isna(df["geog"][1])
+ assert df["geog"][2] == wkt.loads("point(0 0)")
+ assert isinstance(df, geopandas.GeoDataFrame)
+ assert isinstance(df["geog"], geopandas.GeoSeries)
+
+ with warnings.catch_warnings():
+ # Computing the area on a GeoDataFrame that uses a geographic Coordinate
+ # Reference System (CRS) produces a warning that we are not interested in.
+ # We do not mind if the computed area is incorrect with respect to the
+ # GeoDataFrame data, as long as it matches the expected "incorrect" value.
+ warnings.filterwarnings("ignore", category=UserWarning)
+ assert df.area[0] == 0.5
+ assert pandas.isna(df.area[1])
+ assert df.area[2] == 0.0
+
+ assert df.crs.srs == "EPSG:4326"
+ assert df.crs.name == "WGS 84"
+ assert df.geog.crs.srs == "EPSG:4326"
+ assert df.geog.crs.name == "WGS 84"
+
+
+def test_load_geodataframe(bigquery_client, dataset_id):
+ geopandas = pytest.importorskip("geopandas")
+ import pandas
+ from shapely import wkt
+ from google.cloud.bigquery.schema import SchemaField
+
+ df = geopandas.GeoDataFrame(
+ pandas.DataFrame(
+ dict(
+ name=["foo", "bar"],
+ geo1=[None, None],
+ geo2=[None, wkt.loads("Point(1 1)")],
+ )
+ ),
+ geometry="geo1",
+ )
+
+ table_id = f"{dataset_id}.lake_from_gp"
+ bigquery_client.load_table_from_dataframe(df, table_id).result()
+
+ table = bigquery_client.get_table(table_id)
+ assert table.schema == [
+ SchemaField("name", "STRING", "NULLABLE"),
+ SchemaField("geo1", "GEOGRAPHY", "NULLABLE"),
+ SchemaField("geo2", "GEOGRAPHY", "NULLABLE"),
+ ]
+ assert sorted(map(list, bigquery_client.list_rows(table_id))) == [
+ ["bar", None, "POINT(1 1)"],
+ ["foo", None, None],
+ ]
+
+
+def test_load_dataframe_w_shapely(bigquery_client, dataset_id):
+ wkt = pytest.importorskip("shapely.wkt")
+ from google.cloud.bigquery.schema import SchemaField
+
+ df = pandas.DataFrame(
+ dict(name=["foo", "bar"], geo=[None, wkt.loads("Point(1 1)")])
+ )
+
+ table_id = f"{dataset_id}.lake_from_shapes"
+ bigquery_client.load_table_from_dataframe(df, table_id).result()
+
+ table = bigquery_client.get_table(table_id)
+ assert table.schema == [
+ SchemaField("name", "STRING", "NULLABLE"),
+ SchemaField("geo", "GEOGRAPHY", "NULLABLE"),
+ ]
+ assert sorted(map(list, bigquery_client.list_rows(table_id))) == [
+ ["bar", "POINT(1 1)"],
+ ["foo", None],
+ ]
+
+ bigquery_client.load_table_from_dataframe(df, table_id).result()
+ assert sorted(map(list, bigquery_client.list_rows(table_id))) == [
+ ["bar", "POINT(1 1)"],
+ ["bar", "POINT(1 1)"],
+ ["foo", None],
+ ["foo", None],
+ ]
+
+
+def test_load_dataframe_w_wkb(bigquery_client, dataset_id):
+ wkt = pytest.importorskip("shapely.wkt")
+ from shapely import wkb
+ from google.cloud.bigquery.schema import SchemaField
+
+ df = pandas.DataFrame(
+ dict(name=["foo", "bar"], geo=[None, wkb.dumps(wkt.loads("Point(1 1)"))])
+ )
+
+ table_id = f"{dataset_id}.lake_from_wkb"
+ # We create the table first, to inform the interpretation of the wkb data
+ bigquery_client.query(
+ f"create table {table_id} (name string, geo GEOGRAPHY)"
+ ).result()
+ bigquery_client.load_table_from_dataframe(df, table_id).result()
+
+ table = bigquery_client.get_table(table_id)
+ assert table.schema == [
+ SchemaField("name", "STRING", "NULLABLE"),
+ SchemaField("geo", "GEOGRAPHY", "NULLABLE"),
+ ]
+ assert sorted(map(list, bigquery_client.list_rows(table_id))) == [
+ ["bar", "POINT(1 1)"],
+ ["foo", None],
+ ]
diff --git a/testbed/googleapis__python-bigquery/tests/system/test_query.py b/testbed/googleapis__python-bigquery/tests/system/test_query.py
new file mode 100644
index 0000000000000000000000000000000000000000..d94a117e362cb750488cc1bc3fcea42d0801c366
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/test_query.py
@@ -0,0 +1,548 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import concurrent.futures
+import datetime
+import decimal
+from typing import Tuple
+
+from google.api_core import exceptions
+import pytest
+
+from google.cloud import bigquery
+from google.cloud.bigquery.query import ArrayQueryParameter
+from google.cloud.bigquery.query import ScalarQueryParameter
+from google.cloud.bigquery.query import ScalarQueryParameterType
+from google.cloud.bigquery.query import StructQueryParameter
+from google.cloud.bigquery.query import StructQueryParameterType
+from google.cloud.bigquery.query import RangeQueryParameter
+
+
+@pytest.fixture(params=["INSERT", "QUERY"])
+def query_api_method(request):
+ return request.param
+
+
+@pytest.fixture(scope="session")
+def table_with_9999_columns_10_rows(bigquery_client, project_id, dataset_id):
+ """Generate a table of maximum width via CREATE TABLE AS SELECT.
+
+ The first column is named 'rowval', and has a value from 1..rowcount
+ Subsequent columns are named col_ and contain the value N*rowval, where
+ N is between 1 and 9999 inclusive.
+ """
+ table_id = "many_columns"
+ row_count = 10
+ col_projections = ",".join(f"r * {n} as col_{n}" for n in range(1, 10000))
+ sql = f"""
+ CREATE TABLE `{project_id}.{dataset_id}.{table_id}`
+ AS
+ SELECT
+ r as rowval,
+ {col_projections}
+ FROM
+ UNNEST(GENERATE_ARRAY(1,{row_count},1)) as r
+ """
+ query_job = bigquery_client.query(sql)
+ query_job.result()
+
+ return f"{project_id}.{dataset_id}.{table_id}"
+
+
+def test_query_many_columns(
+ bigquery_client, table_with_9999_columns_10_rows, query_api_method
+):
+ # Test working with the widest schema BigQuery supports, 10k columns.
+ query_job = bigquery_client.query(
+ f"SELECT * FROM `{table_with_9999_columns_10_rows}`",
+ api_method=query_api_method,
+ )
+ rows = list(query_job)
+ assert len(rows) == 10
+
+ # check field representations adhere to expected values.
+ for row in rows:
+ rowval = row["rowval"]
+ for column in range(1, 10000):
+ assert row[f"col_{column}"] == rowval * column
+
+
+def test_query_w_timeout(bigquery_client, query_api_method):
+ job_config = bigquery.QueryJobConfig()
+ job_config.use_query_cache = False
+
+ query_job = bigquery_client.query(
+ "SELECT * FROM `bigquery-public-data.github_repos.commits`;",
+ location="US",
+ job_config=job_config,
+ api_method=query_api_method,
+ )
+
+ with pytest.raises(concurrent.futures.TimeoutError):
+ query_job.result(timeout=1)
+
+ # Even though the query takes >1 second, the call to getQueryResults
+ # should succeed.
+ assert not query_job.done(timeout=1)
+ assert bigquery_client.cancel_job(query_job) is not None
+
+
+def test_query_statistics(bigquery_client, query_api_method):
+ """
+ A system test to exercise some of the extended query statistics.
+
+ Note: We construct a query that should need at least three stages by
+ specifying a JOIN query. Exact plan and stats are effectively
+ non-deterministic, so we're largely interested in confirming values
+ are present.
+ """
+
+ job_config = bigquery.QueryJobConfig()
+ job_config.use_query_cache = False
+
+ query_job = bigquery_client.query(
+ """
+ SELECT
+ COUNT(1)
+ FROM
+ (
+ SELECT
+ year,
+ wban_number
+ FROM `bigquery-public-data.samples.gsod`
+ LIMIT 1000
+ ) lside
+ INNER JOIN
+ (
+ SELECT
+ year,
+ state
+ FROM `bigquery-public-data.samples.natality`
+ LIMIT 1000
+ ) rside
+ ON
+ lside.year = rside.year
+ """,
+ location="US",
+ job_config=job_config,
+ api_method=query_api_method,
+ )
+
+ # run the job to completion
+ query_job.result()
+
+ # Must reload job to get stats if jobs.query was used.
+ if query_api_method == "QUERY":
+ query_job.reload()
+
+ # Assert top-level stats
+ assert not query_job.cache_hit
+ assert query_job.destination is not None
+ assert query_job.done
+ assert not query_job.dry_run
+ assert query_job.num_dml_affected_rows is None
+ assert query_job.priority == "INTERACTIVE"
+ assert query_job.total_bytes_billed > 1
+ assert query_job.total_bytes_processed > 1
+ assert query_job.statement_type == "SELECT"
+ assert query_job.slot_millis > 1
+
+ # Make assertions on the shape of the query plan.
+ plan = query_job.query_plan
+ assert len(plan) >= 3
+ first_stage = plan[0]
+ assert first_stage.start is not None
+ assert first_stage.end is not None
+ assert first_stage.entry_id is not None
+ assert first_stage.name is not None
+ assert first_stage.parallel_inputs > 0
+ assert first_stage.completed_parallel_inputs > 0
+ assert first_stage.shuffle_output_bytes > 0
+ assert first_stage.status == "COMPLETE"
+
+ # Query plan is a digraph. Ensure it has inter-stage links,
+ # but not every stage has inputs.
+ stages_with_inputs = 0
+ for entry in plan:
+ if len(entry.input_stages) > 0:
+ stages_with_inputs = stages_with_inputs + 1
+ assert stages_with_inputs > 0
+ assert len(plan) > stages_with_inputs
+
+
+@pytest.mark.parametrize(
+ ("sql", "expected", "query_parameters"),
+ (
+ (
+ "SELECT @question",
+ "What is the answer to life, the universe, and everything?",
+ [
+ ScalarQueryParameter(
+ name="question",
+ type_="STRING",
+ value="What is the answer to life, the universe, and everything?",
+ )
+ ],
+ ),
+ (
+ "SELECT @answer",
+ 42,
+ [ScalarQueryParameter(name="answer", type_="INT64", value=42)],
+ ),
+ (
+ "SELECT @pi",
+ 3.1415926,
+ [ScalarQueryParameter(name="pi", type_="FLOAT64", value=3.1415926)],
+ ),
+ (
+ "SELECT @pi_numeric_param",
+ decimal.Decimal("3.141592654"),
+ [
+ ScalarQueryParameter(
+ name="pi_numeric_param",
+ type_="NUMERIC",
+ value=decimal.Decimal("3.141592654"),
+ )
+ ],
+ ),
+ (
+ "SELECT @bignum_param",
+ decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
+ [
+ ScalarQueryParameter(
+ name="bignum_param",
+ type_="BIGNUMERIC",
+ value=decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
+ )
+ ],
+ ),
+ (
+ "SELECT @truthy",
+ True,
+ [ScalarQueryParameter(name="truthy", type_="BOOL", value=True)],
+ ),
+ (
+ "SELECT @beef",
+ b"DEADBEEF",
+ [ScalarQueryParameter(name="beef", type_="BYTES", value=b"DEADBEEF")],
+ ),
+ (
+ "SELECT @naive",
+ datetime.datetime(2016, 12, 5, 12, 41, 9),
+ [
+ ScalarQueryParameter(
+ name="naive",
+ type_="DATETIME",
+ value=datetime.datetime(2016, 12, 5, 12, 41, 9),
+ )
+ ],
+ ),
+ (
+ "SELECT @naive_date",
+ datetime.date(2016, 12, 5),
+ [
+ ScalarQueryParameter(
+ name="naive_date", type_="DATE", value=datetime.date(2016, 12, 5)
+ )
+ ],
+ ),
+ pytest.param(
+ "SELECT @json",
+ {"alpha": "abc", "num": [1, 2, 3]},
+ [
+ ScalarQueryParameter(
+ name="json",
+ type_="JSON",
+ value={"alpha": "abc", "num": [1, 2, 3]},
+ )
+ ],
+ id="scalar-json",
+ ),
+ (
+ "SELECT @naive_time",
+ datetime.time(12, 41, 9, 62500),
+ [
+ ScalarQueryParameter(
+ name="naive_time",
+ type_="TIME",
+ value=datetime.time(12, 41, 9, 62500),
+ )
+ ],
+ ),
+ (
+ "SELECT @zoned",
+ datetime.datetime(2016, 12, 5, 12, 41, 9, tzinfo=datetime.timezone.utc),
+ [
+ ScalarQueryParameter(
+ name="zoned",
+ type_="TIMESTAMP",
+ value=datetime.datetime(
+ 2016, 12, 5, 12, 41, 9, tzinfo=datetime.timezone.utc
+ ),
+ )
+ ],
+ ),
+ (
+ "SELECT @array_param",
+ [1, 2],
+ [
+ ArrayQueryParameter(
+ name="array_param", array_type="INT64", values=[1, 2]
+ )
+ ],
+ ),
+ (
+ "SELECT (@hitchhiker.question, @hitchhiker.answer)",
+ ({"_field_1": "What is the answer?", "_field_2": 42}),
+ [
+ StructQueryParameter(
+ "hitchhiker",
+ ScalarQueryParameter(
+ name="question",
+ type_="STRING",
+ value="What is the answer?",
+ ),
+ ScalarQueryParameter(
+ name="answer",
+ type_="INT64",
+ value=42,
+ ),
+ ),
+ ],
+ ),
+ (
+ "SELECT "
+ "((@rectangle.bottom_right.x - @rectangle.top_left.x) "
+ "* (@rectangle.top_left.y - @rectangle.bottom_right.y))",
+ 100,
+ [
+ StructQueryParameter(
+ "rectangle",
+ StructQueryParameter(
+ "top_left",
+ ScalarQueryParameter("x", "INT64", 12),
+ ScalarQueryParameter("y", "INT64", 102),
+ ),
+ StructQueryParameter(
+ "bottom_right",
+ ScalarQueryParameter("x", "INT64", 22),
+ ScalarQueryParameter("y", "INT64", 92),
+ ),
+ )
+ ],
+ ),
+ (
+ "SELECT ?",
+ [
+ {"name": "Phred Phlyntstone", "age": 32},
+ {"name": "Bharney Rhubbyl", "age": 31},
+ ],
+ [
+ ArrayQueryParameter(
+ name=None,
+ array_type="RECORD",
+ values=[
+ StructQueryParameter(
+ None,
+ ScalarQueryParameter(
+ name="name", type_="STRING", value="Phred Phlyntstone"
+ ),
+ ScalarQueryParameter(name="age", type_="INT64", value=32),
+ ),
+ StructQueryParameter(
+ None,
+ ScalarQueryParameter(
+ name="name", type_="STRING", value="Bharney Rhubbyl"
+ ),
+ ScalarQueryParameter(name="age", type_="INT64", value=31),
+ ),
+ ],
+ )
+ ],
+ ),
+ (
+ "SELECT @empty_array_param",
+ [],
+ [
+ ArrayQueryParameter(
+ name="empty_array_param",
+ values=[],
+ array_type=StructQueryParameterType(
+ ScalarQueryParameterType(name="foo", type_="INT64"),
+ ScalarQueryParameterType(name="bar", type_="STRING"),
+ ),
+ )
+ ],
+ ),
+ (
+ "SELECT @roles",
+ {
+ "hero": {"name": "Phred Phlyntstone", "age": 32},
+ "sidekick": {"name": "Bharney Rhubbyl", "age": 31},
+ },
+ [
+ StructQueryParameter(
+ "roles",
+ StructQueryParameter(
+ "hero",
+ ScalarQueryParameter(
+ name="name", type_="STRING", value="Phred Phlyntstone"
+ ),
+ ScalarQueryParameter(name="age", type_="INT64", value=32),
+ ),
+ StructQueryParameter(
+ "sidekick",
+ ScalarQueryParameter(
+ name="name", type_="STRING", value="Bharney Rhubbyl"
+ ),
+ ScalarQueryParameter(name="age", type_="INT64", value=31),
+ ),
+ ),
+ ],
+ ),
+ (
+ "SELECT ?",
+ {"friends": ["Jack", "Jill"]},
+ [
+ StructQueryParameter(
+ None,
+ ArrayQueryParameter(
+ name="friends", array_type="STRING", values=["Jack", "Jill"]
+ ),
+ )
+ ],
+ ),
+ (
+ "SELECT @range_date",
+ {"end": None, "start": datetime.date(2016, 12, 5)},
+ [
+ RangeQueryParameter(
+ name="range_date",
+ range_element_type="DATE",
+ start=datetime.date(2016, 12, 5),
+ )
+ ],
+ ),
+ (
+ "SELECT @range_datetime",
+ {"end": None, "start": datetime.datetime(2016, 12, 5, 0, 0)},
+ [
+ RangeQueryParameter(
+ name="range_datetime",
+ range_element_type="DATETIME",
+ start=datetime.datetime(2016, 12, 5),
+ )
+ ],
+ ),
+ (
+ "SELECT @range_unbounded",
+ {"end": None, "start": None},
+ [
+ RangeQueryParameter(
+ name="range_unbounded",
+ range_element_type="DATETIME",
+ )
+ ],
+ ),
+ ),
+)
+def test_query_parameters(
+ bigquery_client, query_api_method, sql, expected, query_parameters
+):
+ jconfig = bigquery.QueryJobConfig()
+ jconfig.query_parameters = query_parameters
+ query_job = bigquery_client.query(
+ sql,
+ job_config=jconfig,
+ api_method=query_api_method,
+ )
+ rows = list(query_job.result())
+ assert len(rows) == 1
+ assert len(rows[0]) == 1
+ assert rows[0][0] == expected
+
+
+def test_dry_run(
+ bigquery_client: bigquery.Client,
+ query_api_method: str,
+ scalars_table_multi_location: Tuple[str, str],
+):
+ location, full_table_id = scalars_table_multi_location
+ query_config = bigquery.QueryJobConfig()
+ query_config.dry_run = True
+
+ query_string = f"SELECT * FROM {full_table_id}"
+ query_job = bigquery_client.query(
+ query_string,
+ location=location,
+ job_config=query_config,
+ api_method=query_api_method,
+ )
+
+ # Note: `query_job.result()` is not necessary on a dry run query. All
+ # necessary information is returned in the initial response.
+ assert query_job.dry_run is True
+ assert query_job.total_bytes_processed > 0
+ assert len(query_job.schema) > 0
+
+
+def test_query_error_w_api_method_query(bigquery_client: bigquery.Client):
+ """No job is returned from jobs.query if the query fails."""
+
+ with pytest.raises(exceptions.NotFound, match="not_a_real_dataset"):
+ bigquery_client.query(
+ "SELECT * FROM not_a_real_dataset.doesnt_exist", api_method="QUERY"
+ )
+
+
+def test_query_error_w_api_method_default(bigquery_client: bigquery.Client):
+ """Test that an exception is not thrown until fetching the results.
+
+ For backwards compatibility, jobs.insert is the default API method. With
+ jobs.insert, a failed query job is "successfully" created. An exception is
+ thrown when fetching the results.
+ """
+
+ query_job = bigquery_client.query("SELECT * FROM not_a_real_dataset.doesnt_exist")
+
+ with pytest.raises(exceptions.NotFound, match="not_a_real_dataset"):
+ query_job.result()
+
+
+def test_session(bigquery_client: bigquery.Client, query_api_method: str):
+ initial_config = bigquery.QueryJobConfig()
+ initial_config.create_session = True
+ initial_query = """
+ CREATE TEMPORARY TABLE numbers(id INT64)
+ AS
+ SELECT * FROM UNNEST([1, 2, 3, 4, 5]) AS id;
+ """
+ initial_job = bigquery_client.query(
+ initial_query, job_config=initial_config, api_method=query_api_method
+ )
+ initial_job.result()
+ session_id = initial_job.session_info.session_id
+ assert session_id is not None
+
+ second_config = bigquery.QueryJobConfig()
+ second_config.connection_properties = [
+ bigquery.ConnectionProperty("session_id", session_id),
+ ]
+ second_job = bigquery_client.query(
+ "SELECT COUNT(*) FROM numbers;", job_config=second_config
+ )
+ rows = list(second_job.result())
+
+ assert len(rows) == 1
+ assert rows[0][0] == 5
diff --git a/testbed/googleapis__python-bigquery/tests/system/test_structs.py b/testbed/googleapis__python-bigquery/tests/system/test_structs.py
new file mode 100644
index 0000000000000000000000000000000000000000..1812b6fdee272dd155a3654a1e083c0f03e69892
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/system/test_structs.py
@@ -0,0 +1,32 @@
+import datetime
+
+import pytest
+
+from google.cloud.bigquery.dbapi import connect
+
+person_type = "struct>>"
+person_type_sized = (
+ "struct>>"
+)
+
+
+@pytest.mark.parametrize("person_type_decl", [person_type, person_type_sized])
+def test_structs(bigquery_client, dataset_id, person_type_decl, table_id):
+ conn = connect(bigquery_client)
+ cursor = conn.cursor()
+ cursor.execute(f"create table {table_id} (person {person_type_decl})")
+ data = dict(
+ name="par",
+ children=[
+ dict(name="ch1", bdate=datetime.date(2021, 1, 1)),
+ dict(name="ch2", bdate=datetime.date(2021, 1, 2)),
+ ],
+ )
+ cursor.execute(
+ f"insert into {table_id} (person) values (%(v:{person_type})s)",
+ dict(v=data),
+ )
+
+ cursor.execute(f"select * from {table_id}")
+ [[result]] = list(cursor)
+ assert result == data
diff --git a/testbed/googleapis__python-bigquery/tests/unit/__init__.py b/testbed/googleapis__python-bigquery/tests/unit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8e1c3845db5b44e0d5727e3354929c81d631f15
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/testbed/googleapis__python-bigquery/tests/unit/_helpers/__init__.py b/testbed/googleapis__python-bigquery/tests/unit/_helpers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/googleapis__python-bigquery/tests/unit/_helpers/test_from_json.py b/testbed/googleapis__python-bigquery/tests/unit/_helpers/test_from_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..65b054f446bbf720d3f88df5b5bdcfe631da88d4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/_helpers/test_from_json.py
@@ -0,0 +1,157 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dateutil.relativedelta import relativedelta
+import pytest
+
+from google.cloud.bigquery.schema import SchemaField
+
+
+def create_field(mode="NULLABLE", type_="IGNORED"):
+ return SchemaField("test_field", type_, mode=mode)
+
+
+@pytest.fixture
+def mut():
+ from google.cloud.bigquery import _helpers
+
+ return _helpers
+
+
+def test_interval_from_json_w_none_nullable(mut):
+ got = mut._interval_from_json(None, create_field())
+ assert got is None
+
+
+def test_interval_from_json_w_none_required(mut):
+ with pytest.raises(TypeError):
+ mut._interval_from_json(None, create_field(mode="REQUIRED"))
+
+
+def test_interval_from_json_w_invalid_format(mut):
+ with pytest.raises(ValueError, match="NOT_AN_INTERVAL"):
+ mut._interval_from_json("NOT_AN_INTERVAL", create_field())
+
+
+@pytest.mark.parametrize(
+ ("value", "expected"),
+ (
+ ("0-0 0 0:0:0", relativedelta()),
+ # SELECT INTERVAL X YEAR
+ ("-10000-0 0 0:0:0", relativedelta(years=-10000)),
+ ("-1-0 0 0:0:0", relativedelta(years=-1)),
+ ("1-0 0 0:0:0", relativedelta(years=1)),
+ ("10000-0 0 0:0:0", relativedelta(years=10000)),
+ # SELECT INTERVAL X MONTH
+ ("-0-11 0 0:0:0", relativedelta(months=-11)),
+ ("-0-1 0 0:0:0", relativedelta(months=-1)),
+ ("0-1 0 0:0:0", relativedelta(months=1)),
+ ("0-11 0 0:0:0", relativedelta(months=11)),
+ # SELECT INTERVAL X DAY
+ ("0-0 -3660000 0:0:0", relativedelta(days=-3660000)),
+ ("0-0 -1 0:0:0", relativedelta(days=-1)),
+ ("0-0 1 0:0:0", relativedelta(days=1)),
+ ("0-0 3660000 0:0:0", relativedelta(days=3660000)),
+ # SELECT INTERVAL X HOUR
+ ("0-0 0 -87840000:0:0", relativedelta(hours=-87840000)),
+ ("0-0 0 -1:0:0", relativedelta(hours=-1)),
+ ("0-0 0 1:0:0", relativedelta(hours=1)),
+ ("0-0 0 87840000:0:0", relativedelta(hours=87840000)),
+ # SELECT INTERVAL X MINUTE
+ ("0-0 0 -0:59:0", relativedelta(minutes=-59)),
+ ("0-0 0 -0:1:0", relativedelta(minutes=-1)),
+ ("0-0 0 0:1:0", relativedelta(minutes=1)),
+ ("0-0 0 0:59:0", relativedelta(minutes=59)),
+ # SELECT INTERVAL X SECOND
+ ("0-0 0 -0:0:59", relativedelta(seconds=-59)),
+ ("0-0 0 -0:0:1", relativedelta(seconds=-1)),
+ ("0-0 0 0:0:1", relativedelta(seconds=1)),
+ ("0-0 0 0:0:59", relativedelta(seconds=59)),
+ # SELECT (INTERVAL -1 SECOND) / 1000000
+ ("0-0 0 -0:0:0.000001", relativedelta(microseconds=-1)),
+ ("0-0 0 -0:0:59.999999", relativedelta(seconds=-59, microseconds=-999999)),
+ ("0-0 0 -0:0:59.999", relativedelta(seconds=-59, microseconds=-999000)),
+ ("0-0 0 0:0:59.999", relativedelta(seconds=59, microseconds=999000)),
+ ("0-0 0 0:0:59.999999", relativedelta(seconds=59, microseconds=999999)),
+ # Test with multiple digits in each section.
+ (
+ "32-11 45 67:16:23.987654",
+ relativedelta(
+ years=32,
+ months=11,
+ days=45,
+ hours=67,
+ minutes=16,
+ seconds=23,
+ microseconds=987654,
+ ),
+ ),
+ (
+ "-32-11 -45 -67:16:23.987654",
+ relativedelta(
+ years=-32,
+ months=-11,
+ days=-45,
+ hours=-67,
+ minutes=-16,
+ seconds=-23,
+ microseconds=-987654,
+ ),
+ ),
+ # Test with mixed +/- sections.
+ (
+ "9999-9 -999999 9999999:59:59.999999",
+ relativedelta(
+ years=9999,
+ months=9,
+ days=-999999,
+ hours=9999999,
+ minutes=59,
+ seconds=59,
+ microseconds=999999,
+ ),
+ ),
+ # Test with fraction that is not microseconds.
+ ("0-0 0 0:0:42.", relativedelta(seconds=42)),
+ ("0-0 0 0:0:59.1", relativedelta(seconds=59, microseconds=100000)),
+ ("0-0 0 0:0:0.12", relativedelta(microseconds=120000)),
+ ("0-0 0 0:0:0.123", relativedelta(microseconds=123000)),
+ ("0-0 0 0:0:0.1234", relativedelta(microseconds=123400)),
+ # Fractional seconds can cause rounding problems if cast to float. See:
+ # https://github.com/googleapis/python-db-dtypes-pandas/issues/18
+ ("0-0 0 0:0:59.876543", relativedelta(seconds=59, microseconds=876543)),
+ (
+ "0-0 0 01:01:01.010101",
+ relativedelta(hours=1, minutes=1, seconds=1, microseconds=10101),
+ ),
+ (
+ "0-0 0 09:09:09.090909",
+ relativedelta(hours=9, minutes=9, seconds=9, microseconds=90909),
+ ),
+ (
+ "0-0 0 11:11:11.111111",
+ relativedelta(hours=11, minutes=11, seconds=11, microseconds=111111),
+ ),
+ (
+ "0-0 0 19:16:23.987654",
+ relativedelta(hours=19, minutes=16, seconds=23, microseconds=987654),
+ ),
+ # Nanoseconds are not expected, but should not cause error.
+ ("0-0 0 0:0:00.123456789", relativedelta(microseconds=123456)),
+ ("0-0 0 0:0:59.87654321", relativedelta(seconds=59, microseconds=876543)),
+ ),
+)
+def test_w_string_values(mut, value, expected):
+ got = mut._interval_from_json(value, create_field())
+ assert got == expected
diff --git a/testbed/googleapis__python-bigquery/tests/unit/conftest.py b/testbed/googleapis__python-bigquery/tests/unit/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebe2d2a7a659af29bb3a2b87cae764158fe1f83f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/conftest.py
@@ -0,0 +1,62 @@
+# Copyright 2021 Google LLC
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# https://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+import pytest
+
+from .helpers import make_client
+
+
+@pytest.fixture
+def client():
+ yield make_client()
+
+
+@pytest.fixture
+def PROJECT():
+ yield "PROJECT"
+
+
+@pytest.fixture
+def DS_ID():
+ yield "DATASET_ID"
+
+
+@pytest.fixture
+def LOCATION():
+ yield "us-central"
+
+
+def noop_add_server_timeout_header(headers, kwargs):
+ if headers:
+ kwargs["headers"] = headers
+ return kwargs
+
+
+@pytest.fixture(autouse=True)
+def disable_add_server_timeout_header(request):
+ if "enable_add_server_timeout_header" in request.keywords:
+ yield
+ else:
+ with mock.patch(
+ "google.cloud.bigquery.client._add_server_timeout_header",
+ noop_add_server_timeout_header,
+ ):
+ yield
+
+
+def pytest_configure(config):
+ # Explicitly register custom test markers to avoid warnings.
+ config.addinivalue_line("markers", "enable_add_server_timeout_header")
diff --git a/testbed/googleapis__python-bigquery/tests/unit/helpers.py b/testbed/googleapis__python-bigquery/tests/unit/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5414138e8f5139fe12cfc433f71e52494873d9f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/helpers.py
@@ -0,0 +1,97 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+import pytest
+
+import google.cloud.bigquery.client
+import google.cloud.bigquery.dataset
+
+
+def make_connection(*responses):
+ import google.cloud.bigquery._http
+ from google.cloud.exceptions import NotFound
+
+ mock_conn = mock.create_autospec(google.cloud.bigquery._http.Connection)
+ mock_conn.user_agent = "testing 1.2.3"
+ mock_conn.api_request.side_effect = list(responses) + [NotFound("miss")]
+ mock_conn.API_BASE_URL = "https://bigquery.googleapis.com"
+ mock_conn.get_api_base_url_for_mtls = mock.Mock(return_value=mock_conn.API_BASE_URL)
+ return mock_conn
+
+
+def _to_pyarrow(value):
+ """Convert Python value to pyarrow value."""
+ import pyarrow
+
+ return pyarrow.array([value])[0]
+
+
+def make_client(project="PROJECT", **kw):
+ credentials = mock.Mock(spec=google.auth.credentials.Credentials)
+ return google.cloud.bigquery.client.Client(project, credentials, **kw)
+
+
+def make_creds(creds_universe: None):
+ from google.auth import credentials
+
+ class TestingCreds(credentials.Credentials):
+ def refresh(self, request): # pragma: NO COVER
+ raise NotImplementedError
+
+ @property
+ def universe_domain(self):
+ return creds_universe
+
+ return TestingCreds()
+
+
+def make_dataset_reference_string(project, ds_id):
+ return f"{project}.{ds_id}"
+
+
+def make_dataset(project, ds_id):
+ return google.cloud.bigquery.dataset.Dataset(
+ google.cloud.bigquery.dataset.DatasetReference(project, ds_id)
+ )
+
+
+def make_dataset_list_item(project, ds_id):
+ return google.cloud.bigquery.dataset.DatasetListItem(
+ dict(datasetReference=dict(projectId=project, datasetId=ds_id))
+ )
+
+
+def identity(x):
+ return x
+
+
+def get_reference(x):
+ return x.reference
+
+
+dataset_like = [
+ (google.cloud.bigquery.dataset.DatasetReference, identity),
+ (make_dataset, identity),
+ (make_dataset_list_item, get_reference),
+ (
+ make_dataset_reference_string,
+ google.cloud.bigquery.dataset.DatasetReference.from_string,
+ ),
+]
+
+dataset_polymorphic = pytest.mark.parametrize(
+ "make_dataset,get_reference", dataset_like
+)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/__init__.py b/testbed/googleapis__python-bigquery/tests/unit/job/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6334245aea5aa2deb2f00ec6bc3de455e9cc132
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/helpers.py b/testbed/googleapis__python-bigquery/tests/unit/job/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..3642c7229647211e753942c64f4f5ed306703e08
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/helpers.py
@@ -0,0 +1,189 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from google.api_core import exceptions
+
+from ..helpers import make_connection, make_client as __make_client
+
+
+def _make_client(project="test-project", connection=None):
+ client = __make_client(project)
+ if connection is None:
+ connection = make_connection()
+
+ client._connection = connection
+ return client
+
+
+def _make_retriable_exception():
+ return exceptions.TooManyRequests(
+ "retriable exception", errors=[{"reason": "rateLimitExceeded"}]
+ )
+
+
+def _make_job_resource(
+ creation_time_ms=1437767599006,
+ started_time_ms=1437767600007,
+ ended_time_ms=1437767601008,
+ started=False,
+ ended=False,
+ etag="abc-def-hjk",
+ endpoint="https://bigquery.googleapis.com",
+ job_type="load",
+ job_id="a-random-id",
+ location="US",
+ project_id="some-project",
+ user_email="bq-user@example.com",
+):
+ resource = {
+ "status": {"state": "PENDING"},
+ "configuration": {job_type: {}},
+ "statistics": {"creationTime": creation_time_ms, job_type: {}},
+ "etag": etag,
+ "id": "{}:{}".format(project_id, job_id),
+ "jobReference": {
+ "projectId": project_id,
+ "jobId": job_id,
+ "location": location,
+ },
+ "selfLink": "{}/bigquery/v2/projects/{}/jobs/{}".format(
+ endpoint, project_id, job_id
+ ),
+ "user_email": user_email,
+ }
+
+ if started or ended:
+ resource["statistics"]["startTime"] = started_time_ms
+ resource["status"]["state"] = "RUNNING"
+
+ if ended:
+ resource["statistics"]["endTime"] = ended_time_ms
+ resource["status"]["state"] = "DONE"
+
+ if job_type == "query":
+ resource["configuration"]["query"]["destinationTable"] = {
+ "projectId": project_id,
+ "datasetId": "_temp_dataset",
+ "tableId": "_temp_table",
+ }
+
+ return resource
+
+
+class _Base(unittest.TestCase):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.table import TableReference
+
+ ENDPOINT = "https://bigquery.googleapis.com"
+ PROJECT = "project"
+ SOURCE1 = "http://example.com/source1.csv"
+ DS_ID = "dataset_id"
+ DS_REF = DatasetReference(PROJECT, DS_ID)
+ TABLE_ID = "table_id"
+ TABLE_REF = TableReference(DS_REF, TABLE_ID)
+ JOB_ID = "JOB_ID"
+ JOB_TYPE = "unknown"
+ KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def _setUpConstants(self):
+ import datetime
+ from google.cloud._helpers import UTC
+
+ self.WHEN_TS = 1437767599.006
+ self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(tzinfo=UTC)
+ self.ETAG = "ETAG"
+ self.FULL_JOB_ID = "%s:%s" % (self.PROJECT, self.JOB_ID)
+ self.RESOURCE_URL = "{}/bigquery/v2/projects/{}/jobs/{}".format(
+ self.ENDPOINT, self.PROJECT, self.JOB_ID
+ )
+ self.USER_EMAIL = "phred@example.com"
+
+ def _table_ref(self, table_id):
+ from google.cloud.bigquery.table import TableReference
+
+ return TableReference(self.DS_REF, table_id)
+
+ def _make_resource(self, started=False, ended=False, location="US"):
+ self._setUpConstants()
+ return _make_job_resource(
+ creation_time_ms=int(self.WHEN_TS * 1000),
+ started_time_ms=int(self.WHEN_TS * 1000),
+ ended_time_ms=int(self.WHEN_TS * 1000) + 1000000,
+ started=started,
+ ended=ended,
+ etag=self.ETAG,
+ endpoint=self.ENDPOINT,
+ job_type=self.JOB_TYPE,
+ job_id=self.JOB_ID,
+ project_id=self.PROJECT,
+ user_email=self.USER_EMAIL,
+ location=location,
+ )
+
+ def _verifyInitialReadonlyProperties(self, job):
+ # root elements of resource
+ self.assertIsNone(job.etag)
+ self.assertIsNone(job.self_link)
+ self.assertIsNone(job.user_email)
+
+ # derived from resource['statistics']
+ self.assertIsNone(job.created)
+ self.assertIsNone(job.started)
+ self.assertIsNone(job.ended)
+ self.assertIsNone(job.transaction_info)
+
+ # derived from resource['status']
+ self.assertIsNone(job.error_result)
+ self.assertIsNone(job.errors)
+ self.assertIsNone(job.state)
+
+ def _verifyReadonlyResourceProperties(self, job, resource):
+ from datetime import timedelta
+
+ statistics = resource.get("statistics", {})
+
+ if "creationTime" in statistics:
+ self.assertEqual(job.created, self.WHEN)
+ else:
+ self.assertIsNone(job.created)
+
+ if "startTime" in statistics:
+ self.assertEqual(job.started, self.WHEN)
+ else:
+ self.assertIsNone(job.started)
+
+ if "endTime" in statistics:
+ self.assertEqual(job.ended, self.WHEN + timedelta(seconds=1000))
+ else:
+ self.assertIsNone(job.ended)
+
+ if "etag" in resource:
+ self.assertEqual(job.etag, self.ETAG)
+ else:
+ self.assertIsNone(job.etag)
+
+ if "selfLink" in resource:
+ self.assertEqual(job.self_link, self.RESOURCE_URL)
+ else:
+ self.assertIsNone(job.self_link)
+
+ if "user_email" in resource:
+ self.assertEqual(job.user_email, self.USER_EMAIL)
+ else:
+ self.assertIsNone(job.user_email)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/test_base.py b/testbed/googleapis__python-bigquery/tests/unit/job/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d2f0c13c1ecd45ba4e900796d6663d8e3b98573
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/test_base.py
@@ -0,0 +1,1340 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import http
+import unittest
+from unittest import mock
+
+from google.api_core import exceptions
+import google.api_core.retry
+from google.api_core.future import polling
+import pytest
+
+from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+from ..helpers import make_connection
+
+from .helpers import _make_client
+from .helpers import _make_retriable_exception
+from .helpers import _make_job_resource
+
+
+class Test__error_result_to_exception(unittest.TestCase):
+ def _call_fut(self, *args, **kwargs):
+ from google.cloud.bigquery import job
+
+ return job._error_result_to_exception(*args, **kwargs)
+
+ def test_simple(self):
+ error_result = {"reason": "invalid", "message": "bad request"}
+ exception = self._call_fut(error_result)
+ self.assertEqual(exception.code, http.client.BAD_REQUEST)
+ self.assertTrue(exception.message.startswith("bad request"))
+ self.assertIn(error_result, exception.errors)
+
+ def test_missing_reason(self):
+ error_result = {}
+ exception = self._call_fut(error_result)
+ self.assertEqual(exception.code, http.client.INTERNAL_SERVER_ERROR)
+
+ def test_contatenate_errors(self):
+ # Added test for b/310544564 and b/318889899.
+ # Ensures that error messages from both error_result and errors are
+ # present in the exception raised.
+
+ error_result = {
+ "reason": "invalid1",
+ "message": "error message 1",
+ }
+ errors = [
+ {"reason": "invalid2", "message": "error message 2"},
+ {"reason": "invalid3", "message": "error message 3"},
+ ]
+
+ exception = self._call_fut(error_result, errors)
+ self.assertEqual(
+ exception.message,
+ "error message 1; reason: invalid2, message: error message 2; "
+ "reason: invalid3, message: error message 3",
+ )
+
+
+class Test_JobReference(unittest.TestCase):
+ JOB_ID = "job-id"
+ PROJECT = "test-project-123"
+ LOCATION = "us-central"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery import job
+
+ return job._JobReference
+
+ def _make_one(self, job_id, project, location):
+ return self._get_target_class()(job_id, project, location)
+
+ def test_ctor(self):
+ job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION)
+
+ self.assertEqual(job_ref.job_id, self.JOB_ID)
+ self.assertEqual(job_ref.project, self.PROJECT)
+ self.assertEqual(job_ref.location, self.LOCATION)
+
+ def test__to_api_repr(self):
+ job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION)
+
+ self.assertEqual(
+ job_ref._to_api_repr(),
+ {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": self.LOCATION,
+ },
+ )
+
+ def test_from_api_repr(self):
+ api_repr = {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": self.LOCATION,
+ }
+
+ job_ref = self._get_target_class()._from_api_repr(api_repr)
+
+ self.assertEqual(job_ref.job_id, self.JOB_ID)
+ self.assertEqual(job_ref.project, self.PROJECT)
+ self.assertEqual(job_ref.location, self.LOCATION)
+
+
+class Test_AsyncJob(unittest.TestCase):
+ JOB_ID = "job-id"
+ PROJECT = "test-project-123"
+ LOCATION = "us-central"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery import job
+
+ return job._AsyncJob
+
+ def _make_one(self, job_id, client):
+ return self._get_target_class()(job_id, client)
+
+ def _make_derived_class(self):
+ class Derived(self._get_target_class()):
+ _JOB_TYPE = "derived"
+
+ return Derived
+
+ def _make_derived(self, job_id, client):
+ return self._make_derived_class()(job_id, client)
+
+ @staticmethod
+ def _job_reference(job_id, project, location):
+ from google.cloud.bigquery import job
+
+ return job._JobReference(job_id, project, location)
+
+ def test_ctor_w_bare_job_id(self):
+ import threading
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+
+ self.assertEqual(job.job_id, self.JOB_ID)
+ self.assertEqual(job.project, self.PROJECT)
+ self.assertIsNone(job.location)
+ self.assertIs(job._client, client)
+ self.assertEqual(
+ job._properties,
+ {"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}},
+ )
+ self.assertIsInstance(job._completion_lock, type(threading.Lock()))
+ self.assertEqual(
+ job.path, "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)
+ )
+
+ def test_ctor_w_job_ref(self):
+ import threading
+
+ other_project = "other-project-234"
+ client = _make_client(project=other_project)
+ job_ref = self._job_reference(self.JOB_ID, self.PROJECT, self.LOCATION)
+ job = self._make_one(job_ref, client)
+
+ self.assertEqual(job.job_id, self.JOB_ID)
+ self.assertEqual(job.project, self.PROJECT)
+ self.assertEqual(job.location, self.LOCATION)
+ self.assertIs(job._client, client)
+ self.assertEqual(
+ job._properties,
+ {
+ "jobReference": {
+ "projectId": self.PROJECT,
+ "location": self.LOCATION,
+ "jobId": self.JOB_ID,
+ }
+ },
+ )
+ self.assertFalse(job._result_set)
+ self.assertIsInstance(job._completion_lock, type(threading.Lock()))
+ self.assertEqual(
+ job.path, "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)
+ )
+
+ def test__require_client_w_none(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+
+ self.assertIs(job._require_client(None), client)
+
+ def test__require_client_w_other(self):
+ client = _make_client(project=self.PROJECT)
+ other = object()
+ job = self._make_one(self.JOB_ID, client)
+
+ self.assertIs(job._require_client(other), other)
+
+ def test_job_type(self):
+ client = _make_client(project=self.PROJECT)
+ derived = self._make_derived(self.JOB_ID, client)
+
+ self.assertEqual(derived.job_type, "derived")
+
+ def test_parent_job_id(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+
+ self.assertIsNone(job.parent_job_id)
+ job._properties["statistics"] = {"parentJobId": "parent-job-123"}
+ self.assertEqual(job.parent_job_id, "parent-job-123")
+
+ def test_script_statistics(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+
+ self.assertIsNone(job.script_statistics)
+ job._properties["statistics"] = {
+ "scriptStatistics": {
+ "evaluationKind": "EXPRESSION",
+ "stackFrames": [
+ {
+ "startLine": 5,
+ "startColumn": 29,
+ "endLine": 9,
+ "endColumn": 14,
+ "text": "QUERY TEXT",
+ }
+ ],
+ }
+ }
+ script_stats = job.script_statistics
+ self.assertEqual(script_stats.evaluation_kind, "EXPRESSION")
+ stack_frames = script_stats.stack_frames
+ self.assertEqual(len(stack_frames), 1)
+ stack_frame = stack_frames[0]
+ self.assertIsNone(stack_frame.procedure_id)
+ self.assertEqual(stack_frame.start_line, 5)
+ self.assertEqual(stack_frame.start_column, 29)
+ self.assertEqual(stack_frame.end_line, 9)
+ self.assertEqual(stack_frame.end_column, 14)
+ self.assertEqual(stack_frame.text, "QUERY TEXT")
+
+ def test_session_info(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+
+ self.assertIsNone(job.session_info)
+ job._properties["statistics"] = {"sessionInfo": {"sessionId": "abcdefg"}}
+ self.assertIsNotNone(job.session_info)
+ self.assertEqual(job.session_info.session_id, "abcdefg")
+
+ def test_transaction_info(self):
+ from google.cloud.bigquery.job.base import TransactionInfo
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ assert job.transaction_info is None
+
+ statistics = job._properties["statistics"] = {}
+ assert job.transaction_info is None
+
+ statistics["transactionInfo"] = {"transactionId": "123-abc-xyz"}
+ assert isinstance(job.transaction_info, TransactionInfo)
+ assert job.transaction_info.transaction_id == "123-abc-xyz"
+
+ def test_num_child_jobs(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+
+ self.assertEqual(job.num_child_jobs, 0)
+ job._properties["statistics"] = {"numChildJobs": "17"}
+ self.assertEqual(job.num_child_jobs, 17)
+
+ def test_labels_miss(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertEqual(job.labels, {})
+
+ def test_labels_update_in_place(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ labels = job.labels
+ labels["foo"] = "bar" # update in place
+ self.assertEqual(job.labels, {"foo": "bar"})
+
+ def test_labels_hit(self):
+ labels = {"foo": "bar"}
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._properties.setdefault("configuration", {})["labels"] = labels
+ self.assertEqual(job.labels, labels)
+
+ def test_etag(self):
+ etag = "ETAG-123"
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertIsNone(job.etag)
+ job._properties["etag"] = etag
+ self.assertEqual(job.etag, etag)
+
+ def test_self_link(self):
+ self_link = "https://api.example.com/123"
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertIsNone(job.self_link)
+ job._properties["selfLink"] = self_link
+ self.assertEqual(job.self_link, self_link)
+
+ def test_user_email(self):
+ user_email = "user@example.com"
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertIsNone(job.user_email)
+ job._properties["user_email"] = user_email
+ self.assertEqual(job.user_email, user_email)
+
+ @staticmethod
+ def _datetime_and_millis():
+ import datetime
+ from google.cloud._helpers import _millis
+
+ now = datetime.datetime.utcnow().replace(
+ microsecond=123000,
+ tzinfo=datetime.timezone.utc, # stats timestamps have ms precision
+ )
+ return now, _millis(now)
+
+ def test_created(self):
+ now, millis = self._datetime_and_millis()
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertIsNone(job.created)
+ stats = job._properties["statistics"] = {}
+ self.assertIsNone(job.created)
+ stats["creationTime"] = millis
+ self.assertEqual(job.created, now)
+
+ def test_started(self):
+ now, millis = self._datetime_and_millis()
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertIsNone(job.started)
+ stats = job._properties["statistics"] = {}
+ self.assertIsNone(job.started)
+ stats["startTime"] = millis
+ self.assertEqual(job.started, now)
+
+ def test_ended(self):
+ now, millis = self._datetime_and_millis()
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertIsNone(job.ended)
+ stats = job._properties["statistics"] = {}
+ self.assertIsNone(job.ended)
+ stats["endTime"] = millis
+ self.assertEqual(job.ended, now)
+
+ def test_reservation_usage_no_stats(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._properties["statistics"] = {}
+ self.assertEqual(job.reservation_usage, [])
+
+ def test_reservation_usage_stats_exist(self):
+ from google.cloud.bigquery.job import ReservationUsage
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._properties["statistics"] = {
+ "reservationUsage": [
+ {"name": "slot_foo", "slotMs": "42"},
+ {"name": "slot_bar", "slotMs": "123"},
+ ],
+ }
+
+ expected = [
+ ReservationUsage(name="slot_foo", slot_ms=42),
+ ReservationUsage(name="slot_bar", slot_ms=123),
+ ]
+ self.assertEqual(job.reservation_usage, expected)
+
+ def test__job_statistics(self):
+ statistics = {"foo": "bar"}
+ client = _make_client(project=self.PROJECT)
+ derived = self._make_derived(self.JOB_ID, client)
+ self.assertEqual(derived._job_statistics(), {})
+ stats = derived._properties["statistics"] = {}
+ self.assertEqual(derived._job_statistics(), {})
+ stats["derived"] = statistics
+ self.assertEqual(derived._job_statistics(), statistics)
+
+ def test_error_result(self):
+ error_result = {
+ "debugInfo": "DEBUG INFO",
+ "location": "LOCATION",
+ "message": "MESSAGE",
+ "reason": "REASON",
+ }
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertIsNone(job.error_result)
+ status = job._properties["status"] = {}
+ self.assertIsNone(job.error_result)
+ status["errorResult"] = error_result
+ self.assertEqual(job.error_result, error_result)
+
+ def test_errors(self):
+ errors = [
+ {
+ "debugInfo": "DEBUG INFO",
+ "location": "LOCATION",
+ "message": "MESSAGE",
+ "reason": "REASON",
+ }
+ ]
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertIsNone(job.errors)
+ status = job._properties["status"] = {}
+ self.assertIsNone(job.errors)
+ status["errors"] = errors
+ self.assertEqual(job.errors, errors)
+
+ def test_state(self):
+ state = "STATE"
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ self.assertIsNone(job.state)
+ status = job._properties["status"] = {}
+ self.assertIsNone(job.state)
+ status["state"] = state
+ self.assertEqual(job.state, state)
+
+ def _set_properties_job(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._set_future_result = mock.Mock()
+ job._properties = {
+ "jobReference": job._properties["jobReference"],
+ "foo": "bar",
+ }
+ return job
+
+ def test__set_properties_no_stats(self):
+ config = {"test": True}
+ resource = {"configuration": config}
+ expected = resource.copy()
+ expected["statistics"] = {}
+ job = self._set_properties_job()
+ original_resource = job._properties
+
+ job._set_properties(resource)
+
+ self.assertEqual(job._properties, expected)
+
+ # Make sure we don't mutate the object used in the request, as that
+ # makes debugging more difficult and leads to false positives in unit
+ # tests.
+ self.assertIsNot(job._properties, original_resource)
+
+ def test__set_properties_w_creation_time(self):
+ now, millis = self._datetime_and_millis()
+ config = {"test": True}
+ stats = {"creationTime": str(millis)}
+ resource = {"configuration": config, "statistics": stats}
+ job = self._set_properties_job()
+
+ job._set_properties(resource)
+
+ cleaned = copy.deepcopy(resource)
+ cleaned["statistics"]["creationTime"] = float(millis)
+ self.assertEqual(job._properties, cleaned)
+
+ def test__set_properties_w_start_time(self):
+ now, millis = self._datetime_and_millis()
+ config = {"test": True}
+ stats = {"startTime": str(millis)}
+ resource = {"configuration": config, "statistics": stats}
+ job = self._set_properties_job()
+
+ job._set_properties(resource)
+
+ cleaned = copy.deepcopy(resource)
+ cleaned["statistics"]["startTime"] = float(millis)
+ self.assertEqual(job._properties, cleaned)
+
+ def test__set_properties_w_end_time(self):
+ now, millis = self._datetime_and_millis()
+ config = {"test": True}
+ stats = {"endTime": str(millis)}
+ resource = {"configuration": config, "statistics": stats}
+ job = self._set_properties_job()
+
+ job._set_properties(resource)
+
+ cleaned = copy.deepcopy(resource)
+ cleaned["statistics"]["endTime"] = float(millis)
+ self.assertEqual(job._properties, cleaned)
+
+ def test__check_resource_config_missing_job_ref(self):
+ resource = {}
+ klass = self._make_derived_class()
+
+ with self.assertRaises(KeyError):
+ klass._check_resource_config(resource)
+
+ def test__check_resource_config_missing_job_id(self):
+ resource = {"jobReference": {}}
+ klass = self._make_derived_class()
+
+ with self.assertRaises(KeyError):
+ klass._check_resource_config(resource)
+
+ def test__check_resource_config_missing_configuration(self):
+ resource = {"jobReference": {"jobId": self.JOB_ID}}
+ klass = self._make_derived_class()
+
+ with self.assertRaises(KeyError):
+ klass._check_resource_config(resource)
+
+ def test__check_resource_config_missing_config_type(self):
+ resource = {"jobReference": {"jobId": self.JOB_ID}, "configuration": {}}
+ klass = self._make_derived_class()
+
+ with self.assertRaises(KeyError):
+ klass._check_resource_config(resource)
+
+ def test__check_resource_config_ok(self):
+ derived_config = {"foo": "bar"}
+ resource = {
+ "jobReference": {"jobId": self.JOB_ID},
+ "configuration": {"derived": derived_config},
+ }
+ klass = self._make_derived_class()
+
+ # Should not throw.
+ klass._check_resource_config(resource)
+
+ def test__build_resource(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ resource = job._build_resource()
+ assert resource["jobReference"]["jobId"] == self.JOB_ID
+
+ def test_to_api_repr(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ resource = job.to_api_repr()
+ assert resource["jobReference"]["jobId"] == self.JOB_ID
+
+ def test__begin_already(self):
+ job = self._set_properties_job()
+ job._properties["status"] = {"state": "WHATEVER"}
+
+ with self.assertRaises(ValueError):
+ job._begin()
+
+ def test__begin_defaults(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ resource = {
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": None,
+ },
+ "configuration": {"test": True},
+ }
+ expected = resource.copy()
+ expected["statistics"] = {}
+ job = self._set_properties_job()
+ builder = job.to_api_repr = mock.Mock()
+ builder.return_value = resource
+ call_api = job._client._call_api = mock.Mock()
+ call_api.return_value = resource
+ path = "/projects/{}/jobs".format(self.PROJECT)
+ job._begin()
+
+ call_api.assert_called_once_with(
+ DEFAULT_RETRY,
+ span_name="BigQuery.job.begin",
+ span_attributes={"path": path},
+ job_ref=job,
+ method="POST",
+ path=path,
+ data=resource,
+ timeout=None,
+ )
+ self.assertEqual(job._properties, expected)
+
+ def test__begin_explicit(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ other_project = "other-project-234"
+ resource = {
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": None,
+ },
+ "configuration": {"test": True},
+ }
+ expected = resource.copy()
+ expected["statistics"] = {}
+ job = self._set_properties_job()
+ builder = job.to_api_repr = mock.Mock()
+ builder.return_value = resource
+ client = _make_client(project=other_project)
+ call_api = client._call_api = mock.Mock()
+ call_api.return_value = resource
+ retry = DEFAULT_RETRY.with_deadline(1)
+ path = "/projects/{}/jobs".format(self.PROJECT)
+ job._begin(client=client, retry=retry, timeout=7.5)
+
+ call_api.assert_called_once_with(
+ retry,
+ span_name="BigQuery.job.begin",
+ span_attributes={"path": path},
+ job_ref=job,
+ method="POST",
+ path=path,
+ data=resource,
+ timeout=7.5,
+ )
+ self.assertEqual(job._properties, expected)
+
+ def test_exists_defaults_miss(self):
+ from google.cloud.exceptions import NotFound
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ job = self._set_properties_job()
+ job._properties["jobReference"]["location"] = self.LOCATION
+ call_api = job._client._call_api = mock.Mock()
+ call_api.side_effect = NotFound("testing")
+ self.assertFalse(job.exists())
+
+ call_api.assert_called_once_with(
+ DEFAULT_RETRY,
+ span_name="BigQuery.job.exists",
+ span_attributes={
+ "path": "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)
+ },
+ job_ref=job,
+ method="GET",
+ path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
+ query_params={"fields": "id", "location": self.LOCATION},
+ timeout=None,
+ )
+
+ def test_exists_explicit_hit(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ other_project = "other-project-234"
+ resource = {
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": None,
+ },
+ "configuration": {"test": True},
+ }
+ job = self._set_properties_job()
+ client = _make_client(project=other_project)
+ call_api = client._call_api = mock.Mock()
+ call_api.return_value = resource
+ retry = DEFAULT_RETRY.with_deadline(1)
+ self.assertTrue(job.exists(client=client, retry=retry))
+
+ call_api.assert_called_once_with(
+ retry,
+ span_name="BigQuery.job.exists",
+ span_attributes={
+ "path": "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)
+ },
+ job_ref=job,
+ method="GET",
+ path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
+ query_params={"fields": "id"},
+ timeout=None,
+ )
+
+ def test_exists_w_timeout(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ PATH = "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)
+ job = self._set_properties_job()
+ call_api = job._client._call_api = mock.Mock()
+ job.exists(timeout=7.5)
+
+ call_api.assert_called_once_with(
+ DEFAULT_RETRY,
+ span_name="BigQuery.job.exists",
+ span_attributes={"path": PATH},
+ job_ref=job,
+ method="GET",
+ path=PATH,
+ query_params={"fields": "id"},
+ timeout=7.5,
+ )
+
+ def test_reload_defaults(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY, DEFAULT_GET_JOB_TIMEOUT
+
+ resource = {
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": None,
+ },
+ "configuration": {"test": True},
+ }
+ expected = resource.copy()
+ expected["statistics"] = {}
+ job = self._set_properties_job()
+ job._properties["jobReference"]["location"] = self.LOCATION
+ call_api = job._client._call_api = mock.Mock()
+ call_api.return_value = resource
+ job.reload()
+
+ call_api.assert_called_once_with(
+ DEFAULT_RETRY,
+ span_name="BigQuery.getJob",
+ span_attributes={
+ "path": "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
+ "job_id": "job-id",
+ "location": "us-central",
+ },
+ method="GET",
+ path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
+ query_params={
+ "projection": "full",
+ "location": "us-central",
+ },
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ self.assertEqual(job._properties, expected)
+
+ def test_reload_explicit(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ other_project = "other-project-234"
+ resource = {
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": None,
+ },
+ "configuration": {"test": True},
+ }
+ expected = resource.copy()
+ expected["statistics"] = {}
+ job = self._set_properties_job()
+ client = _make_client(project=other_project)
+ call_api = client._call_api = mock.Mock()
+ call_api.return_value = resource
+ retry = DEFAULT_RETRY.with_deadline(1)
+ job.reload(client=client, retry=retry, timeout=4.2)
+
+ call_api.assert_called_once_with(
+ retry,
+ span_name="BigQuery.getJob",
+ span_attributes={
+ "path": "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
+ "job_id": "job-id",
+ "location": None,
+ },
+ method="GET",
+ path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
+ query_params={"projection": "full"},
+ timeout=4.2,
+ )
+ self.assertEqual(job._properties, expected)
+
+ def test_reload_none_timeout(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ resource = {
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": None,
+ },
+ "configuration": {"test": True},
+ }
+ client = _make_client(project=self.PROJECT)
+ conn = client._connection = make_connection(resource)
+ job = self._set_properties_job()
+ retry = DEFAULT_RETRY.with_deadline(1)
+ job.reload(client=client, retry=retry, timeout=None)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
+ query_params={"projection": "full"},
+ timeout=None,
+ )
+
+ def test_cancel_defaults(self):
+ resource = {
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": None,
+ },
+ "configuration": {"test": True},
+ }
+ expected = resource.copy()
+ expected["statistics"] = {}
+ response = {"job": resource}
+ job = self._set_properties_job()
+ job._properties["jobReference"]["location"] = self.LOCATION
+ connection = job._client._connection = make_connection(response)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertTrue(job.cancel())
+
+ final_attributes.assert_called()
+
+ connection.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID),
+ query_params={"location": self.LOCATION},
+ timeout=None,
+ )
+ self.assertEqual(job._properties, expected)
+
+ def test_cancel_explicit(self):
+ other_project = "other-project-234"
+ resource = {
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": None,
+ },
+ "configuration": {"test": True},
+ }
+ expected = resource.copy()
+ expected["statistics"] = {}
+ response = {"job": resource}
+ job = self._set_properties_job()
+ client = _make_client(project=other_project)
+ connection = client._connection = make_connection(response)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertTrue(job.cancel(client=client, timeout=7.5))
+
+ final_attributes.assert_called_with(
+ {"path": "/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID)},
+ client,
+ job,
+ )
+
+ connection.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID),
+ query_params={},
+ timeout=7.5,
+ )
+ self.assertEqual(job._properties, expected)
+
+ def test_cancel_w_custom_retry(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ api_path = "/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID)
+ resource = {
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": None,
+ },
+ "configuration": {"test": True},
+ }
+ expected = resource.copy()
+ expected["statistics"] = {}
+ response = {"job": resource}
+ job = self._set_properties_job()
+
+ api_request_patcher = mock.patch.object(
+ job._client._connection, "api_request", side_effect=[ValueError, response]
+ )
+ retry = DEFAULT_RETRY.with_deadline(1).with_predicate(
+ lambda exc: isinstance(exc, ValueError)
+ )
+
+ with api_request_patcher as fake_api_request:
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ result = job.cancel(retry=retry, timeout=7.5)
+
+ final_attributes.assert_called()
+
+ self.assertTrue(result)
+ self.assertEqual(job._properties, expected)
+ self.assertEqual(
+ fake_api_request.call_args_list,
+ [
+ mock.call(method="POST", path=api_path, query_params={}, timeout=7.5),
+ mock.call(
+ method="POST", path=api_path, query_params={}, timeout=7.5
+ ), # was retried once
+ ],
+ )
+
+ def test__set_future_result_wo_done(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ set_exception = job.set_exception = mock.Mock()
+ set_result = job.set_result = mock.Mock()
+
+ job._set_future_result()
+
+ set_exception.assert_not_called()
+ set_result.assert_not_called()
+
+ def test__set_future_result_w_result_set(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._properties["status"] = {"state": "DONE"}
+ job._result_set = True
+ set_exception = job.set_exception = mock.Mock()
+ set_result = job.set_result = mock.Mock()
+
+ job._set_future_result()
+
+ set_exception.assert_not_called()
+ set_result.assert_not_called()
+
+ def test__set_future_result_w_done_wo_result_set_w_error(self):
+ from google.cloud.exceptions import NotFound
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._properties["status"] = {
+ "state": "DONE",
+ "errorResult": {"reason": "notFound", "message": "testing"},
+ }
+ set_exception = job.set_exception = mock.Mock()
+ set_result = job.set_result = mock.Mock()
+
+ job._set_future_result()
+
+ set_exception.assert_called_once()
+ args, kw = set_exception.call_args
+ (exception,) = args
+ self.assertIsInstance(exception, NotFound)
+ self.assertEqual(exception.message, "testing")
+ self.assertEqual(kw, {})
+ set_result.assert_not_called()
+
+ def test__set_future_result_w_done_wo_result_set_wo_error(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._properties["status"] = {"state": "DONE"}
+ set_exception = job.set_exception = mock.Mock()
+ set_result = job.set_result = mock.Mock()
+
+ job._set_future_result()
+
+ set_exception.assert_not_called()
+ set_result.assert_called_once_with(job)
+
+ def test_done_defaults_wo_state(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ reload_ = job.reload = mock.Mock()
+
+ self.assertFalse(job.done())
+
+ reload_.assert_called_once_with(
+ retry=DEFAULT_RETRY,
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+
+ def test_done_explicit_wo_state(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ reload_ = job.reload = mock.Mock()
+ retry = DEFAULT_RETRY.with_deadline(1)
+
+ self.assertFalse(job.done(retry=retry, timeout=7.5))
+
+ reload_.assert_called_once_with(retry=retry, timeout=7.5)
+
+ def test_done_with_none_timeout(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ reload_ = job.reload = mock.Mock()
+ retry = DEFAULT_RETRY.with_deadline(1)
+
+ self.assertFalse(job.done(retry=retry, timeout=None))
+
+ reload_.assert_called_once_with(retry=retry, timeout=None)
+
+ def test_done_already(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._properties["status"] = {"state": "DONE"}
+
+ self.assertTrue(job.done())
+
+ def test_result_default_wo_state(self):
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ begun_job_resource = _make_job_resource(
+ job_id=self.JOB_ID, project_id=self.PROJECT, location="US", started=True
+ )
+ done_job_resource = _make_job_resource(
+ job_id=self.JOB_ID,
+ project_id=self.PROJECT,
+ location="US",
+ started=True,
+ ended=True,
+ )
+ conn = make_connection(
+ _make_retriable_exception(),
+ begun_job_resource,
+ done_job_resource,
+ )
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, client)
+
+ self.assertIs(job.result(retry=polling.DEFAULT_RETRY), job)
+
+ begin_call = mock.call(
+ method="POST",
+ path=f"/projects/{self.PROJECT}/jobs",
+ data={"jobReference": {"jobId": self.JOB_ID, "projectId": self.PROJECT}},
+ timeout=None,
+ )
+ reload_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}",
+ query_params={
+ "projection": "full",
+ "location": "US",
+ },
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ conn.api_request.assert_has_calls([begin_call, begin_call, reload_call])
+
+ def test_result_w_retry_wo_state(self):
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ begun_job_resource = _make_job_resource(
+ job_id=self.JOB_ID, project_id=self.PROJECT, location="EU", started=True
+ )
+ done_job_resource = _make_job_resource(
+ job_id=self.JOB_ID,
+ project_id=self.PROJECT,
+ location="EU",
+ started=True,
+ ended=True,
+ )
+ conn = make_connection(
+ exceptions.NotFound("not normally retriable"),
+ begun_job_resource,
+ exceptions.NotFound("not normally retriable"),
+ done_job_resource,
+ )
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(
+ self._job_reference(self.JOB_ID, self.PROJECT, "EU"), client
+ )
+ custom_predicate = mock.Mock()
+ custom_predicate.return_value = True
+ custom_retry = google.api_core.retry.Retry(
+ predicate=custom_predicate,
+ initial=0.001,
+ maximum=0.001,
+ deadline=0.1,
+ )
+ self.assertIs(job.result(retry=custom_retry), job)
+
+ begin_call = mock.call(
+ method="POST",
+ path=f"/projects/{self.PROJECT}/jobs",
+ data={
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": "EU",
+ }
+ },
+ timeout=None,
+ )
+ reload_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}",
+ query_params={
+ "projection": "full",
+ "location": "EU",
+ },
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ conn.api_request.assert_has_calls(
+ [begin_call, begin_call, reload_call, reload_call]
+ )
+
+ def test_result_explicit_w_state(self):
+ conn = make_connection()
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, client)
+ # Use _set_properties() instead of directly modifying _properties so
+ # that the result state is set properly.
+ job_resource = job._properties
+ job_resource["status"] = {"state": "DONE"}
+ job._set_properties(job_resource)
+ timeout = 1
+
+ self.assertIs(job.result(timeout=timeout), job)
+
+ conn.api_request.assert_not_called()
+
+ def test_cancelled_wo_error_result(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+
+ self.assertFalse(job.cancelled())
+
+ def test_cancelled_w_error_result_not_stopped(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._properties["status"] = {"errorResult": {"reason": "other"}}
+
+ self.assertFalse(job.cancelled())
+
+ def test_cancelled_w_error_result_w_stopped(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, client)
+ job._properties["status"] = {"errorResult": {"reason": "stopped"}}
+
+ self.assertTrue(job.cancelled())
+
+ def test_repr(self):
+ client = _make_client(project="project-foo")
+ job = self._make_one("job-99", client)
+ job._properties.setdefault("jobReference", {})["location"] = "ABC"
+ assert repr(job) == "_AsyncJob"
+
+
+class Test_JobConfig(unittest.TestCase):
+ JOB_TYPE = "testing"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery import job
+
+ return job._JobConfig
+
+ def _make_one(self, job_type=JOB_TYPE):
+ return self._get_target_class()(job_type)
+
+ def test_ctor(self):
+ job_config = self._make_one()
+ self.assertEqual(job_config._job_type, self.JOB_TYPE)
+ self.assertEqual(job_config._properties, {self.JOB_TYPE: {}})
+
+ def test_ctor_with_unknown_property_raises_error(self):
+ error_text = "Property wrong_name is unknown for"
+ with pytest.raises(AttributeError, match=error_text):
+ config = self._make_one()
+ config.wrong_name = None
+
+ def test_fill_query_job_config_from_default(self):
+ from google.cloud.bigquery import QueryJobConfig
+
+ job_config = QueryJobConfig()
+ job_config.dry_run = True
+ job_config.maximum_bytes_billed = 1000
+
+ default_job_config = QueryJobConfig()
+ default_job_config.use_query_cache = True
+ default_job_config.maximum_bytes_billed = 2000
+
+ final_job_config = job_config._fill_from_default(default_job_config)
+ self.assertTrue(final_job_config.dry_run)
+ self.assertTrue(final_job_config.use_query_cache)
+ self.assertEqual(final_job_config.maximum_bytes_billed, 1000)
+
+ def test_fill_load_job_from_default(self):
+ from google.cloud.bigquery import LoadJobConfig
+
+ job_config = LoadJobConfig()
+ job_config.create_session = True
+ job_config.encoding = "UTF-8"
+
+ default_job_config = LoadJobConfig()
+ default_job_config.ignore_unknown_values = True
+ default_job_config.encoding = "ISO-8859-1"
+
+ final_job_config = job_config._fill_from_default(default_job_config)
+ self.assertTrue(final_job_config.create_session)
+ self.assertTrue(final_job_config.ignore_unknown_values)
+ self.assertEqual(final_job_config.encoding, "UTF-8")
+
+ def test_fill_from_default_conflict(self):
+ from google.cloud.bigquery import QueryJobConfig
+
+ basic_job_config = QueryJobConfig()
+ conflicting_job_config = self._make_one("conflicting_job_type")
+ self.assertNotEqual(
+ basic_job_config._job_type, conflicting_job_config._job_type
+ )
+
+ with self.assertRaises(TypeError):
+ basic_job_config._fill_from_default(conflicting_job_config)
+
+ def test_fill_from_empty_default_conflict(self):
+ from google.cloud.bigquery import QueryJobConfig
+
+ job_config = QueryJobConfig()
+ job_config.dry_run = True
+ job_config.maximum_bytes_billed = 1000
+
+ final_job_config = job_config._fill_from_default(default_job_config=None)
+ self.assertTrue(final_job_config.dry_run)
+ self.assertEqual(final_job_config.maximum_bytes_billed, 1000)
+
+ @mock.patch("google.cloud.bigquery._helpers._get_sub_prop")
+ def test__get_sub_prop_wo_default(self, _get_sub_prop):
+ job_config = self._make_one()
+ key = "key"
+ self.assertIs(job_config._get_sub_prop(key), _get_sub_prop.return_value)
+ _get_sub_prop.assert_called_once_with(
+ job_config._properties, [self.JOB_TYPE, key], default=None
+ )
+
+ @mock.patch("google.cloud.bigquery._helpers._get_sub_prop")
+ def test__get_sub_prop_w_default(self, _get_sub_prop):
+ job_config = self._make_one()
+ key = "key"
+ default = "default"
+ self.assertIs(
+ job_config._get_sub_prop(key, default=default), _get_sub_prop.return_value
+ )
+ _get_sub_prop.assert_called_once_with(
+ job_config._properties, [self.JOB_TYPE, key], default=default
+ )
+
+ @mock.patch("google.cloud.bigquery._helpers._set_sub_prop")
+ def test__set_sub_prop(self, _set_sub_prop):
+ job_config = self._make_one()
+ key = "key"
+ value = "value"
+ job_config._set_sub_prop(key, value)
+ _set_sub_prop.assert_called_once_with(
+ job_config._properties, [self.JOB_TYPE, key], value
+ )
+
+ def test_to_api_repr(self):
+ job_config = self._make_one()
+ expected = job_config._properties = {self.JOB_TYPE: {"foo": "bar"}}
+ found = job_config.to_api_repr()
+ self.assertEqual(found, expected)
+ self.assertIsNot(found, expected) # copied
+
+ # 'from_api_repr' cannot be tested on '_JobConfig', because it presumes
+ # the ctor can be called w/o arguments
+
+ def test_labels_miss(self):
+ job_config = self._make_one()
+ self.assertEqual(job_config.labels, {})
+
+ def test_labels_update_in_place(self):
+ job_config = self._make_one()
+ labels = job_config.labels
+ labels["foo"] = "bar" # update in place
+ self.assertEqual(job_config.labels, {"foo": "bar"})
+
+ def test_labels_hit(self):
+ labels = {"foo": "bar"}
+ job_config = self._make_one()
+ job_config._properties["labels"] = labels
+ self.assertEqual(job_config.labels, labels)
+
+ def test_labels_setter_invalid(self):
+ labels = object()
+ job_config = self._make_one()
+ with self.assertRaises(ValueError):
+ job_config.labels = labels
+
+ def test_labels_setter(self):
+ labels = {"foo": "bar"}
+ job_config = self._make_one()
+ job_config.labels = labels
+ self.assertEqual(job_config._properties["labels"], labels)
+
+ def test_job_timeout_ms_raises_valueerror(self):
+ # Confirm that attempting to set a non-integer values will raise an Error.
+ with pytest.raises(ValueError):
+ job_config = self._make_one()
+ job_config.job_timeout_ms = "WillRaiseError"
+
+ def test_job_timeout_ms(self):
+ # Confirm that default status is None.
+ job_config = self._make_one()
+ assert job_config.job_timeout_ms is None
+
+ # Confirm that integers get converted to strings.
+ job_config.job_timeout_ms = 5000
+ assert job_config.job_timeout_ms == "5000" # int is converted to string
+
+ def test_job_timeout_is_none_when_set_none(self):
+ job_config = self._make_one()
+ job_config.job_timeout_ms = None
+ # Confirm value is None and not literal string 'None'
+ assert job_config.job_timeout_ms is None
+
+ def test_job_timeout_properties(self):
+ # Make sure any value stored in properties is erased
+ # when setting job_timeout to None.
+ job_config = self._make_one()
+ job_config.job_timeout_ms = 4200
+ assert job_config.job_timeout_ms == "4200"
+ assert job_config._properties.get("jobTimeoutMs") == "4200"
+
+ job_config.job_timeout_ms = None
+ assert job_config.job_timeout_ms is None
+ assert "jobTimeoutMs" not in job_config._properties
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/test_copy.py b/testbed/googleapis__python-bigquery/tests/unit/job/test_copy.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b09453109705ea19a28b4f34aa169fd07085ac1
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/test_copy.py
@@ -0,0 +1,546 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+from ..helpers import make_connection
+
+from .helpers import _Base
+from .helpers import _make_client
+
+import datetime
+
+
+class TestCopyJobConfig(_Base):
+ JOB_TYPE = "copy"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import CopyJobConfig
+
+ return CopyJobConfig
+
+ def test_ctor_defaults(self):
+ from google.cloud.bigquery.job import OperationType
+
+ config = self._make_one()
+
+ assert config.create_disposition is None
+ assert config.write_disposition is None
+ assert config.destination_expiration_time is None
+ assert config.destination_encryption_configuration is None
+ assert config.operation_type == OperationType.OPERATION_TYPE_UNSPECIFIED
+
+ def test_ctor_w_properties(self):
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import OperationType
+ from google.cloud.bigquery.job import WriteDisposition
+
+ create_disposition = CreateDisposition.CREATE_NEVER
+ write_disposition = WriteDisposition.WRITE_TRUNCATE
+ snapshot_operation = OperationType.SNAPSHOT
+
+ today = datetime.date.today()
+ destination_expiration_time = f"{today.year + 1}-01-01T00:00:00Z"
+
+ config = self._get_target_class()(
+ create_disposition=create_disposition,
+ write_disposition=write_disposition,
+ operation_type=snapshot_operation,
+ destination_expiration_time=destination_expiration_time,
+ )
+
+ self.assertEqual(config.create_disposition, create_disposition)
+ self.assertEqual(config.write_disposition, write_disposition)
+ self.assertEqual(config.operation_type, snapshot_operation)
+ self.assertEqual(
+ config.destination_expiration_time, destination_expiration_time
+ )
+
+ def test_to_api_repr_with_encryption(self):
+ from google.cloud.bigquery.encryption_configuration import (
+ EncryptionConfiguration,
+ )
+
+ config = self._make_one()
+ config.destination_encryption_configuration = EncryptionConfiguration(
+ kms_key_name=self.KMS_KEY_NAME
+ )
+ resource = config.to_api_repr()
+ self.assertEqual(
+ resource,
+ {
+ "copy": {
+ "destinationEncryptionConfiguration": {
+ "kmsKeyName": self.KMS_KEY_NAME
+ }
+ }
+ },
+ )
+
+ def test_to_api_repr_with_encryption_none(self):
+ config = self._make_one()
+ config.destination_encryption_configuration = None
+ resource = config.to_api_repr()
+ self.assertEqual(
+ resource, {"copy": {"destinationEncryptionConfiguration": None}}
+ )
+
+ def test_operation_type_setting_none(self):
+ from google.cloud.bigquery.job import OperationType
+
+ config = self._make_one(operation_type=OperationType.SNAPSHOT)
+
+ # Setting it to None is the same as setting it to OPERATION_TYPE_UNSPECIFIED.
+ config.operation_type = None
+ assert config.operation_type == OperationType.OPERATION_TYPE_UNSPECIFIED
+
+ def test_operation_type_setting_non_none(self):
+ from google.cloud.bigquery.job import OperationType
+
+ config = self._make_one(operation_type=None)
+ config.operation_type = OperationType.RESTORE
+ assert config.operation_type == OperationType.RESTORE
+
+
+class TestCopyJob(_Base):
+ JOB_TYPE = "copy"
+ SOURCE_TABLE = "source_table"
+ DESTINATION_TABLE = "destination_table"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import CopyJob
+
+ return CopyJob
+
+ def _make_resource(self, started=False, ended=False):
+ resource = super(TestCopyJob, self)._make_resource(started, ended)
+ config = resource["configuration"]["copy"]
+ config["sourceTables"] = [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ }
+ ]
+ config["destinationTable"] = {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.DESTINATION_TABLE,
+ }
+
+ return resource
+
+ def _verifyResourceProperties(self, job, resource):
+ self._verifyReadonlyResourceProperties(job, resource)
+
+ config = resource.get("configuration", {}).get("copy")
+
+ table_ref = config["destinationTable"]
+ self.assertEqual(job.destination.project, table_ref["projectId"])
+ self.assertEqual(job.destination.dataset_id, table_ref["datasetId"])
+ self.assertEqual(job.destination.table_id, table_ref["tableId"])
+
+ sources = config.get("sourceTables")
+ if sources is None:
+ sources = [config["sourceTable"]]
+ self.assertEqual(len(sources), len(job.sources))
+ for table_ref, table in zip(sources, job.sources):
+ self.assertEqual(table.project, table_ref["projectId"])
+ self.assertEqual(table.dataset_id, table_ref["datasetId"])
+ self.assertEqual(table.table_id, table_ref["tableId"])
+
+ if "createDisposition" in config:
+ self.assertEqual(job.create_disposition, config["createDisposition"])
+ else:
+ self.assertIsNone(job.create_disposition)
+
+ if "writeDisposition" in config:
+ self.assertEqual(job.write_disposition, config["writeDisposition"])
+ else:
+ self.assertIsNone(job.write_disposition)
+
+ if "destinationEncryptionConfiguration" in config:
+ self.assertIsNotNone(job.destination_encryption_configuration)
+ self.assertEqual(
+ job.destination_encryption_configuration.kms_key_name,
+ config["destinationEncryptionConfiguration"]["kmsKeyName"],
+ )
+ else:
+ self.assertIsNone(job.destination_encryption_configuration)
+
+ def test_ctor(self):
+ client = _make_client(project=self.PROJECT)
+ source = self._table_ref(self.SOURCE_TABLE)
+ destination = self._table_ref(self.DESTINATION_TABLE)
+ job = self._make_one(self.JOB_ID, [source], destination, client)
+ self.assertEqual(job.destination, destination)
+ self.assertEqual(job.sources, [source])
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_type, self.JOB_TYPE)
+ self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID))
+
+ self._verifyInitialReadonlyProperties(job)
+
+ # set/read from resource['configuration']['copy']
+ self.assertIsNone(job.create_disposition)
+ self.assertIsNone(job.write_disposition)
+ self.assertIsNone(job.destination_encryption_configuration)
+
+ def test_from_api_repr_missing_identity(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {}
+ klass = self._get_target_class()
+ with self.assertRaises(KeyError):
+ klass.from_api_repr(RESOURCE, client=client)
+
+ def test_from_api_repr_missing_config(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": "%s:%s" % (self.PROJECT, self.DS_ID),
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ }
+ klass = self._get_target_class()
+ with self.assertRaises(KeyError):
+ klass.from_api_repr(RESOURCE, client=client)
+
+ def test_from_api_repr_bare(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "copy": {
+ "sourceTables": [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ }
+ ],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.DESTINATION_TABLE,
+ },
+ }
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_from_api_with_encryption(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "copy": {
+ "sourceTables": [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ }
+ ],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.DESTINATION_TABLE,
+ },
+ "destinationEncryptionConfiguration": {
+ "kmsKeyName": self.KMS_KEY_NAME
+ },
+ }
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_from_api_repr_w_sourcetable(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "copy": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ },
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.DESTINATION_TABLE,
+ },
+ }
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_from_api_repr_wo_sources(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "copy": {
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.DESTINATION_TABLE,
+ }
+ }
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ with self.assertRaises(KeyError):
+ _ = job.sources
+
+ def test_from_api_repr_w_properties(self):
+ from google.cloud.bigquery.job import CreateDisposition
+
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = self._make_resource()
+ copy_config = RESOURCE["configuration"]["copy"]
+ copy_config["createDisposition"] = CreateDisposition.CREATE_IF_NEEDED
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_bound_client(self):
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource()
+ # Ensure None for missing server-set props
+ del RESOURCE["statistics"]["creationTime"]
+ del RESOURCE["etag"]
+ del RESOURCE["selfLink"]
+ del RESOURCE["user_email"]
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ source = self._table_ref(self.SOURCE_TABLE)
+ destination = self._table_ref(self.DESTINATION_TABLE)
+ job = self._make_one(self.JOB_ID, [source], destination, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "copy": {
+ "sourceTables": [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ }
+ ],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.DESTINATION_TABLE,
+ },
+ }
+ },
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_alternate_client(self):
+ from google.cloud.bigquery.job import CopyJobConfig
+
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import WriteDisposition
+
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource(ended=True)
+ COPY_CONFIGURATION = {
+ "sourceTables": [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ }
+ ],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.DESTINATION_TABLE,
+ },
+ "createDisposition": CreateDisposition.CREATE_NEVER,
+ "writeDisposition": WriteDisposition.WRITE_TRUNCATE,
+ }
+ RESOURCE["configuration"]["copy"] = COPY_CONFIGURATION
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection(RESOURCE)
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ source = self._table_ref(self.SOURCE_TABLE)
+ destination = self._table_ref(self.DESTINATION_TABLE)
+ config = CopyJobConfig()
+ config.create_disposition = CreateDisposition.CREATE_NEVER
+ config.write_disposition = WriteDisposition.WRITE_TRUNCATE
+ job = self._make_one(self.JOB_ID, [source], destination, client1, config)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin(client=client2)
+
+ final_attributes.assert_called_with({"path": PATH}, client2, job)
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {"copy": COPY_CONFIGURATION},
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_exists_miss_w_bound_client(self):
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ conn = make_connection()
+ client = _make_client(project=self.PROJECT, connection=conn)
+
+ source = self._table_ref(self.SOURCE_TABLE)
+ destination = self._table_ref(self.DESTINATION_TABLE)
+ job = self._make_one(self.JOB_ID, [source], destination, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertFalse(job.exists())
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
+ )
+
+ def test_exists_hit_w_alternate_client(self):
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection({})
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ source = self._table_ref(self.SOURCE_TABLE)
+ destination = self._table_ref(self.DESTINATION_TABLE)
+ job = self._make_one(self.JOB_ID, [source], destination, client1)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertTrue(job.exists(client=client2))
+
+ final_attributes.assert_called_with({"path": PATH}, client2, job)
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
+ )
+
+ def test_reload_w_bound_client(self):
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ RESOURCE = self._make_resource()
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ source = self._table_ref(self.SOURCE_TABLE)
+ destination = self._table_ref(self.DESTINATION_TABLE)
+ job = self._make_one(self.JOB_ID, [source], destination, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.reload()
+
+ final_attributes.assert_called_with(
+ {
+ "path": PATH,
+ "job_id": self.JOB_ID,
+ "location": None,
+ },
+ client,
+ None,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=PATH,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_reload_w_alternate_client(self):
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ RESOURCE = self._make_resource()
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection(RESOURCE)
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ source = self._table_ref(self.SOURCE_TABLE)
+ destination = self._table_ref(self.DESTINATION_TABLE)
+ job = self._make_one(self.JOB_ID, [source], destination, client1)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.reload(client=client2)
+
+ final_attributes.assert_called_with(
+ {
+ "path": PATH,
+ "job_id": self.JOB_ID,
+ "location": None,
+ },
+ client2,
+ None,
+ )
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="GET",
+ path=PATH,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/test_extract.py b/testbed/googleapis__python-bigquery/tests/unit/job/test_extract.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebf9f09e6991ef5cb57ec22fd2c921f4f05cacaa
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/test_extract.py
@@ -0,0 +1,468 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from unittest import mock
+
+from ..helpers import make_connection
+
+from .helpers import _Base
+from .helpers import _make_client
+
+
+class TestExtractJobConfig(_Base):
+ JOB_TYPE = "extract"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import ExtractJobConfig
+
+ return ExtractJobConfig
+
+ def test_ctor_w_properties(self):
+ config = self._get_target_class()(field_delimiter="\t", print_header=True)
+
+ self.assertEqual(config.field_delimiter, "\t")
+ self.assertTrue(config.print_header)
+
+ def test_to_api_repr(self):
+ from google.cloud.bigquery import job
+
+ config = self._make_one()
+ config.compression = job.Compression.SNAPPY
+ config.destination_format = job.DestinationFormat.AVRO
+ config.field_delimiter = "ignored for avro"
+ config.print_header = False
+ config._properties["extract"]["someNewField"] = "some-value"
+ config.use_avro_logical_types = True
+ resource = json.dumps(config.to_api_repr(), sort_keys=True)
+ expected = json.dumps(
+ {
+ "extract": {
+ "compression": "SNAPPY",
+ "destinationFormat": "AVRO",
+ "fieldDelimiter": "ignored for avro",
+ "printHeader": False,
+ "someNewField": "some-value",
+ "useAvroLogicalTypes": True,
+ }
+ },
+ sort_keys=True,
+ )
+
+ self.assertEqual(
+ resource,
+ expected,
+ )
+
+ def test_from_api_repr(self):
+ cls = self._get_target_class()
+ config = cls.from_api_repr(
+ {
+ "extract": {
+ "compression": "NONE",
+ "destinationFormat": "CSV",
+ "fieldDelimiter": "\t",
+ "printHeader": True,
+ "someNewField": "some-value",
+ "useAvroLogicalTypes": False,
+ }
+ }
+ )
+ self.assertEqual(config.compression, "NONE")
+ self.assertEqual(config.destination_format, "CSV")
+ self.assertEqual(config.field_delimiter, "\t")
+ self.assertEqual(config.print_header, True)
+ self.assertEqual(config._properties["extract"]["someNewField"], "some-value")
+ self.assertEqual(config.use_avro_logical_types, False)
+
+
+class TestExtractJob(_Base):
+ JOB_TYPE = "extract"
+ SOURCE_TABLE = "source_table"
+ DESTINATION_URI = "gs://bucket_name/object_name"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import ExtractJob
+
+ return ExtractJob
+
+ def _make_resource(self, started=False, ended=False):
+ resource = super(TestExtractJob, self)._make_resource(started, ended)
+ config = resource["configuration"]["extract"]
+ config["sourceTable"] = {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ }
+ config["destinationUris"] = [self.DESTINATION_URI]
+ return resource
+
+ def _verifyResourceProperties(self, job, resource):
+ self._verifyReadonlyResourceProperties(job, resource)
+
+ config = resource.get("configuration", {}).get("extract")
+
+ self.assertEqual(job.destination_uris, config["destinationUris"])
+
+ if "sourceTable" in config:
+ table_ref = config["sourceTable"]
+ self.assertEqual(job.source.project, table_ref["projectId"])
+ self.assertEqual(job.source.dataset_id, table_ref["datasetId"])
+ self.assertEqual(job.source.table_id, table_ref["tableId"])
+ else:
+ model_ref = config["sourceModel"]
+ self.assertEqual(job.source.project, model_ref["projectId"])
+ self.assertEqual(job.source.dataset_id, model_ref["datasetId"])
+ self.assertEqual(job.source.model_id, model_ref["modelId"])
+
+ if "compression" in config:
+ self.assertEqual(job.compression, config["compression"])
+ else:
+ self.assertIsNone(job.compression)
+
+ if "destinationFormat" in config:
+ self.assertEqual(job.destination_format, config["destinationFormat"])
+ else:
+ self.assertIsNone(job.destination_format)
+
+ if "fieldDelimiter" in config:
+ self.assertEqual(job.field_delimiter, config["fieldDelimiter"])
+ else:
+ self.assertIsNone(job.field_delimiter)
+
+ if "printHeader" in config:
+ self.assertEqual(job.print_header, config["printHeader"])
+ else:
+ self.assertIsNone(job.print_header)
+
+ def test_ctor(self):
+ from google.cloud.bigquery.table import Table
+
+ client = _make_client(project=self.PROJECT)
+ source = Table(self.TABLE_REF)
+ job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client)
+ self.assertEqual(job.source.project, self.PROJECT)
+ self.assertEqual(job.source.dataset_id, self.DS_ID)
+ self.assertEqual(job.source.table_id, self.TABLE_ID)
+ self.assertEqual(job.destination_uris, [self.DESTINATION_URI])
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_type, self.JOB_TYPE)
+ self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID))
+
+ self._verifyInitialReadonlyProperties(job)
+
+ # set/read from resource['configuration']['extract']
+ self.assertIsNone(job.compression)
+ self.assertIsNone(job.destination_format)
+ self.assertIsNone(job.field_delimiter)
+ self.assertIsNone(job.print_header)
+
+ def test_destination_uri_file_counts(self):
+ file_counts = 23
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(
+ self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client
+ )
+ self.assertIsNone(job.destination_uri_file_counts)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.destination_uri_file_counts)
+
+ extract_stats = statistics["extract"] = {}
+ self.assertIsNone(job.destination_uri_file_counts)
+
+ extract_stats["destinationUriFileCounts"] = [str(file_counts)]
+ self.assertEqual(job.destination_uri_file_counts, [file_counts])
+
+ def test_from_api_repr_missing_identity(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {}
+ klass = self._get_target_class()
+ with self.assertRaises(KeyError):
+ klass.from_api_repr(RESOURCE, client=client)
+
+ def test_from_api_repr_missing_config(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": "%s:%s" % (self.PROJECT, self.DS_ID),
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ }
+ klass = self._get_target_class()
+ with self.assertRaises(KeyError):
+ klass.from_api_repr(RESOURCE, client=client)
+
+ def test_from_api_repr_bare(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "extract": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ },
+ "destinationUris": [self.DESTINATION_URI],
+ }
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_from_api_repr_for_model(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "extract": {
+ "sourceModel": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "modelId": "model_id",
+ },
+ "destinationUris": [self.DESTINATION_URI],
+ }
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_from_api_repr_w_properties(self):
+ from google.cloud.bigquery.job import Compression
+
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = self._make_resource()
+ extract_config = RESOURCE["configuration"]["extract"]
+ extract_config["compression"] = Compression.GZIP
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_bound_client(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource()
+ # Ensure None for missing server-set props
+ del RESOURCE["statistics"]["creationTime"]
+ del RESOURCE["etag"]
+ del RESOURCE["selfLink"]
+ del RESOURCE["user_email"]
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = source_dataset.table(self.SOURCE_TABLE)
+ job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "extract": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ },
+ "destinationUris": [self.DESTINATION_URI],
+ }
+ },
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_alternate_client(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.job import Compression
+ from google.cloud.bigquery.job import DestinationFormat
+ from google.cloud.bigquery.job import ExtractJobConfig
+
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource(ended=True)
+ EXTRACT_CONFIGURATION = {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.SOURCE_TABLE,
+ },
+ "destinationUris": [self.DESTINATION_URI],
+ "compression": Compression.GZIP,
+ "destinationFormat": DestinationFormat.NEWLINE_DELIMITED_JSON,
+ "fieldDelimiter": "|",
+ "printHeader": False,
+ }
+ RESOURCE["configuration"]["extract"] = EXTRACT_CONFIGURATION
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection(RESOURCE)
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = source_dataset.table(self.SOURCE_TABLE)
+ config = ExtractJobConfig()
+ config.compression = Compression.GZIP
+ config.destination_format = DestinationFormat.NEWLINE_DELIMITED_JSON
+ config.field_delimiter = "|"
+ config.print_header = False
+ job = self._make_one(
+ self.JOB_ID, source, [self.DESTINATION_URI], client1, config
+ )
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin(client=client2)
+
+ final_attributes.assert_called_with({"path": PATH}, client2, job)
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {"extract": EXTRACT_CONFIGURATION},
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_exists_miss_w_bound_client(self):
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ conn = make_connection()
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(
+ self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client
+ )
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertFalse(job.exists())
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
+ )
+
+ def test_exists_hit_w_alternate_client(self):
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection({})
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ job = self._make_one(
+ self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client1
+ )
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertTrue(job.exists(client=client2))
+
+ final_attributes.assert_called_with({"path": PATH}, client2, job)
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
+ )
+
+ def test_reload_w_bound_client(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ RESOURCE = self._make_resource()
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = source_dataset.table(self.SOURCE_TABLE)
+ job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.reload()
+
+ final_attributes.assert_called_with(
+ {
+ "path": PATH,
+ "job_id": self.JOB_ID,
+ "location": None,
+ },
+ client,
+ None,
+ )
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=PATH,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_reload_w_alternate_client(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ RESOURCE = self._make_resource()
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection(RESOURCE)
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = source_dataset.table(self.SOURCE_TABLE)
+ job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client1)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.reload(client=client2)
+
+ final_attributes.assert_called_with(
+ {
+ "path": PATH,
+ "job_id": self.JOB_ID,
+ "location": None,
+ },
+ client2,
+ None,
+ )
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="GET",
+ path=PATH,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/test_load.py b/testbed/googleapis__python-bigquery/tests/unit/job/test_load.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fb0446965063461b39acbae1899e974f2c38d02
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/test_load.py
@@ -0,0 +1,884 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from unittest import mock
+
+from ..helpers import make_connection
+
+from .helpers import _Base
+from .helpers import _make_client
+
+
+class TestLoadJob(_Base):
+ JOB_TYPE = "load"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import LoadJob
+
+ return LoadJob
+
+ def _setUpConstants(self):
+ super(TestLoadJob, self)._setUpConstants()
+ self.INPUT_FILES = 2
+ self.INPUT_BYTES = 12345
+ self.OUTPUT_BYTES = 23456
+ self.OUTPUT_ROWS = 345
+ self.REFERENCE_FILE_SCHEMA_URI = "gs://path/to/reference"
+
+ def _make_resource(self, started=False, ended=False):
+ resource = super(TestLoadJob, self)._make_resource(started, ended)
+ config = resource["configuration"]["load"]
+ config["sourceUris"] = [self.SOURCE1]
+ config["destinationTable"] = {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ }
+ config["referenceFileSchemaUri"] = self.REFERENCE_FILE_SCHEMA_URI
+
+ if ended:
+ resource["status"] = {"state": "DONE"}
+ resource["statistics"]["load"]["inputFiles"] = self.INPUT_FILES
+ resource["statistics"]["load"]["inputFileBytes"] = self.INPUT_BYTES
+ resource["statistics"]["load"]["outputBytes"] = self.OUTPUT_BYTES
+ resource["statistics"]["load"]["outputRows"] = self.OUTPUT_ROWS
+
+ return resource
+
+ def _verifyBooleanConfigProperties(self, job, config):
+ if "allowJaggedRows" in config:
+ self.assertEqual(job.allow_jagged_rows, config["allowJaggedRows"])
+ else:
+ self.assertIsNone(job.allow_jagged_rows)
+ if "allowQuotedNewlines" in config:
+ self.assertEqual(job.allow_quoted_newlines, config["allowQuotedNewlines"])
+ else:
+ self.assertIsNone(job.allow_quoted_newlines)
+ if "autodetect" in config:
+ self.assertEqual(job.autodetect, config["autodetect"])
+ else:
+ self.assertIsNone(job.autodetect)
+ if "ignoreUnknownValues" in config:
+ self.assertEqual(job.ignore_unknown_values, config["ignoreUnknownValues"])
+ else:
+ self.assertIsNone(job.ignore_unknown_values)
+ if "useAvroLogicalTypes" in config:
+ self.assertEqual(job.use_avro_logical_types, config["useAvroLogicalTypes"])
+ else:
+ self.assertIsNone(job.use_avro_logical_types)
+
+ def _verifyEnumConfigProperties(self, job, config):
+ if "createDisposition" in config:
+ self.assertEqual(job.create_disposition, config["createDisposition"])
+ else:
+ self.assertIsNone(job.create_disposition)
+ if "encoding" in config:
+ self.assertEqual(job.encoding, config["encoding"])
+ else:
+ self.assertIsNone(job.encoding)
+ if "sourceFormat" in config:
+ self.assertEqual(job.source_format, config["sourceFormat"])
+ else:
+ self.assertIsNone(job.source_format)
+ if "writeDisposition" in config:
+ self.assertEqual(job.write_disposition, config["writeDisposition"])
+ else:
+ self.assertIsNone(job.write_disposition)
+ if "schemaUpdateOptions" in config:
+ self.assertEqual(job.schema_update_options, config["schemaUpdateOptions"])
+ else:
+ self.assertIsNone(job.schema_update_options)
+
+ def _verifyResourceProperties(self, job, resource):
+ self._verifyReadonlyResourceProperties(job, resource)
+
+ config = resource.get("configuration", {}).get("load")
+
+ self._verifyBooleanConfigProperties(job, config)
+ self._verifyEnumConfigProperties(job, config)
+
+ self.assertEqual(job.source_uris, config["sourceUris"])
+
+ table_ref = config["destinationTable"]
+ self.assertEqual(job.destination.project, table_ref["projectId"])
+ self.assertEqual(job.destination.dataset_id, table_ref["datasetId"])
+ self.assertEqual(job.destination.table_id, table_ref["tableId"])
+
+ if "fieldDelimiter" in config:
+ self.assertEqual(job.field_delimiter, config["fieldDelimiter"])
+ else:
+ self.assertIsNone(job.field_delimiter)
+ if "maxBadRecords" in config:
+ self.assertEqual(job.max_bad_records, config["maxBadRecords"])
+ else:
+ self.assertIsNone(job.max_bad_records)
+ if "nullMarker" in config:
+ self.assertEqual(job.null_marker, config["nullMarker"])
+ else:
+ self.assertIsNone(job.null_marker)
+ if "quote" in config:
+ self.assertEqual(job.quote_character, config["quote"])
+ else:
+ self.assertIsNone(job.quote_character)
+ if "skipLeadingRows" in config:
+ self.assertEqual(str(job.skip_leading_rows), config["skipLeadingRows"])
+ else:
+ self.assertIsNone(job.skip_leading_rows)
+ if "referenceFileSchemaUri" in config:
+ self.assertEqual(
+ job.reference_file_schema_uri, config["referenceFileSchemaUri"]
+ )
+ else:
+ self.assertIsNone(job.reference_file_schema_uri)
+
+ if "destinationEncryptionConfiguration" in config:
+ self.assertIsNotNone(job.destination_encryption_configuration)
+ self.assertEqual(
+ job.destination_encryption_configuration.kms_key_name,
+ config["destinationEncryptionConfiguration"]["kmsKeyName"],
+ )
+ else:
+ self.assertIsNone(job.destination_encryption_configuration)
+
+ def test_ctor(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
+ self.assertEqual(job.destination, self.TABLE_REF)
+ self.assertEqual(list(job.source_uris), [self.SOURCE1])
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_type, self.JOB_TYPE)
+ self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID))
+
+ self._verifyInitialReadonlyProperties(job)
+
+ # derived from resource['statistics']['load']
+ self.assertIsNone(job.input_file_bytes)
+ self.assertIsNone(job.input_files)
+ self.assertIsNone(job.output_bytes)
+ self.assertIsNone(job.output_rows)
+
+ # set/read from resource['configuration']['load']
+ self.assertIsNone(job.schema)
+ self.assertIsNone(job.allow_jagged_rows)
+ self.assertIsNone(job.allow_quoted_newlines)
+ self.assertIsNone(job.autodetect)
+ self.assertIsNone(job.create_disposition)
+ self.assertIsNone(job.encoding)
+ self.assertIsNone(job.field_delimiter)
+ self.assertIsNone(job.ignore_unknown_values)
+ self.assertIsNone(job.max_bad_records)
+ self.assertIsNone(job.null_marker)
+ self.assertIsNone(job.quote_character)
+ self.assertIsNone(job.skip_leading_rows)
+ self.assertIsNone(job.source_format)
+ self.assertIsNone(job.write_disposition)
+ self.assertIsNone(job.destination_encryption_configuration)
+ self.assertIsNone(job.destination_table_description)
+ self.assertIsNone(job.destination_table_friendly_name)
+ self.assertIsNone(job.range_partitioning)
+ self.assertIsNone(job.time_partitioning)
+ self.assertIsNone(job.use_avro_logical_types)
+ self.assertIsNone(job.clustering_fields)
+ self.assertIsNone(job.schema_update_options)
+ self.assertIsNone(job.reference_file_schema_uri)
+
+ def test_ctor_w_config(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ client = _make_client(project=self.PROJECT)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="REQUIRED")
+ config = LoadJobConfig()
+ config.schema = [full_name, age]
+ job = self._make_one(
+ self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client, config
+ )
+ self.assertEqual(job.schema, [full_name, age])
+ config.destination_table_description = "Description"
+ expected = {"description": "Description"}
+ self.assertEqual(
+ config._properties["load"]["destinationTableProperties"], expected
+ )
+ friendly_name = "Friendly Name"
+ config._properties["load"]["destinationTableProperties"] = {
+ "friendlyName": friendly_name
+ }
+ self.assertEqual(config.destination_table_friendly_name, friendly_name)
+
+ def test_ctor_w_job_reference(self):
+ from google.cloud.bigquery import job
+
+ client = _make_client(project=self.PROJECT)
+ job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US")
+ load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
+ self.assertEqual(load_job.project, "alternative-project")
+ self.assertEqual(load_job.location, "US")
+
+ def test_done(self):
+ client = _make_client(project=self.PROJECT)
+ resource = self._make_resource(ended=True)
+ job = self._get_target_class().from_api_repr(resource, client)
+ self.assertTrue(job.done())
+
+ def test_result(self):
+ client = _make_client(project=self.PROJECT)
+ resource = self._make_resource(ended=True)
+ job = self._get_target_class().from_api_repr(resource, client)
+
+ result = job.result()
+
+ self.assertIs(result, job)
+
+ def test_result_invokes_begin(self):
+ begun_resource = self._make_resource()
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(begun_resource, done_resource)
+ client = _make_client(self.PROJECT)
+ client._connection = connection
+
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
+ job.result()
+
+ self.assertEqual(len(connection.api_request.call_args_list), 2)
+ begin_request, reload_request = connection.api_request.call_args_list
+ self.assertEqual(begin_request[1]["method"], "POST")
+ self.assertEqual(reload_request[1]["method"], "GET")
+
+ def test_schema_setter_non_list(self):
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ config = LoadJobConfig()
+ with self.assertRaises(TypeError):
+ config.schema = object()
+
+ def test_schema_setter_invalid_field(self):
+ from google.cloud.bigquery.job import LoadJobConfig
+ from google.cloud.bigquery.schema import SchemaField
+
+ config = LoadJobConfig()
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ with self.assertRaises(ValueError):
+ config.schema = [full_name, object()]
+
+ def test_schema_setter(self):
+ from google.cloud.bigquery.job import LoadJobConfig
+ from google.cloud.bigquery.schema import SchemaField
+
+ config = LoadJobConfig()
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="REQUIRED")
+ config.schema = [full_name, age]
+ self.assertEqual(config.schema, [full_name, age])
+
+ def test_props_set_by_server(self):
+ import datetime
+ from google.cloud._helpers import UTC
+ from google.cloud._helpers import _millis
+
+ CREATED = datetime.datetime(2015, 8, 11, 12, 13, 22, tzinfo=UTC)
+ STARTED = datetime.datetime(2015, 8, 11, 13, 47, 15, tzinfo=UTC)
+ ENDED = datetime.datetime(2015, 8, 11, 14, 47, 15, tzinfo=UTC)
+ FULL_JOB_ID = "%s:%s" % (self.PROJECT, self.JOB_ID)
+ URL = "http://example.com/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ EMAIL = "phred@example.com"
+ ERROR_RESULT = {
+ "debugInfo": "DEBUG",
+ "location": "LOCATION",
+ "message": "MESSAGE",
+ "reason": "REASON",
+ }
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
+ job._properties["etag"] = "ETAG"
+ job._properties["id"] = FULL_JOB_ID
+ job._properties["selfLink"] = URL
+ job._properties["user_email"] = EMAIL
+
+ statistics = job._properties["statistics"] = {}
+ statistics["creationTime"] = _millis(CREATED)
+ statistics["startTime"] = _millis(STARTED)
+ statistics["endTime"] = _millis(ENDED)
+
+ self.assertEqual(job.etag, "ETAG")
+ self.assertEqual(job.self_link, URL)
+ self.assertEqual(job.user_email, EMAIL)
+
+ self.assertEqual(job.created, CREATED)
+ self.assertEqual(job.started, STARTED)
+ self.assertEqual(job.ended, ENDED)
+
+ # running jobs have no load stats not yet set.
+ self.assertIsNone(job.output_bytes)
+
+ load_stats = statistics["load"] = {}
+ load_stats["inputFileBytes"] = 12345
+ load_stats["inputFiles"] = 1
+ load_stats["outputBytes"] = 23456
+ load_stats["outputRows"] = 345
+
+ self.assertEqual(job.input_file_bytes, 12345)
+ self.assertEqual(job.input_files, 1)
+ self.assertEqual(job.output_bytes, 23456)
+ self.assertEqual(job.output_rows, 345)
+
+ status = job._properties["status"] = {}
+
+ self.assertIsNone(job.error_result)
+ self.assertIsNone(job.errors)
+ self.assertIsNone(job.state)
+
+ status["errorResult"] = ERROR_RESULT
+ status["errors"] = [ERROR_RESULT]
+ status["state"] = "STATE"
+
+ self.assertEqual(job.error_result, ERROR_RESULT)
+ self.assertEqual(job.errors, [ERROR_RESULT])
+ self.assertEqual(job.state, "STATE")
+
+ def test_from_api_repr_missing_identity(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {}
+ klass = self._get_target_class()
+ with self.assertRaises(KeyError):
+ klass.from_api_repr(RESOURCE, client=client)
+
+ def test_from_api_repr_missing_config(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": "%s:%s" % (self.PROJECT, self.JOB_ID),
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ }
+ klass = self._get_target_class()
+ with self.assertRaises(KeyError):
+ klass.from_api_repr(RESOURCE, client=client)
+
+ def test_from_api_repr_bare(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.FULL_JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "load": {
+ "sourceUris": [self.SOURCE1],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ }
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+ self.assertEqual(len(job.connection_properties), 0)
+ self.assertIsNone(job.create_session)
+
+ def test_from_api_with_encryption(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.FULL_JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "load": {
+ "sourceUris": [self.SOURCE1],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "destinationEncryptionConfiguration": {
+ "kmsKeyName": self.KMS_KEY_NAME
+ },
+ }
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_from_api_repr_w_properties(self):
+ from google.cloud.bigquery.job import CreateDisposition
+
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = self._make_resource()
+ load_config = RESOURCE["configuration"]["load"]
+ load_config["createDisposition"] = CreateDisposition.CREATE_IF_NEEDED
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_already_running(self):
+ conn = make_connection()
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
+ job._properties["status"] = {"state": "RUNNING"}
+
+ with self.assertRaises(ValueError):
+ job._begin()
+
+ def test_begin_w_bound_client(self):
+ RESOURCE = self._make_resource()
+ # Ensure None for missing server-set props
+ del RESOURCE["statistics"]["creationTime"]
+ del RESOURCE["etag"]
+ del RESOURCE["selfLink"]
+ del RESOURCE["user_email"]
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
+ job.configuration.reference_file_schema_uri = self.REFERENCE_FILE_SCHEMA_URI
+ path = "/projects/{}/jobs".format(self.PROJECT)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": path}, client, job)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "load": {
+ "sourceUris": [self.SOURCE1],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "referenceFileSchemaUri": self.REFERENCE_FILE_SCHEMA_URI,
+ }
+ },
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_autodetect(self):
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ path = "/projects/{}/jobs".format(self.PROJECT)
+ resource = self._make_resource()
+ resource["configuration"]["load"]["autodetect"] = True
+ # Ensure None for missing server-set props
+ del resource["statistics"]["creationTime"]
+ del resource["etag"]
+ del resource["selfLink"]
+ del resource["user_email"]
+ conn = make_connection(resource)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ config = LoadJobConfig()
+ config.autodetect = True
+ job = self._make_one(
+ self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client, config
+ )
+ job.configuration.reference_file_schema_uri = self.REFERENCE_FILE_SCHEMA_URI
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": path}, client, job)
+
+ sent = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "load": {
+ "sourceUris": [self.SOURCE1],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "referenceFileSchemaUri": self.REFERENCE_FILE_SCHEMA_URI,
+ "autodetect": True,
+ }
+ },
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST", path=path, data=sent, timeout=None
+ )
+ self._verifyResourceProperties(job, resource)
+
+ def test_begin_w_alternate_client(self):
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import LoadJobConfig
+ from google.cloud.bigquery.job import SchemaUpdateOption
+ from google.cloud.bigquery.job import WriteDisposition
+ from google.cloud.bigquery.schema import SchemaField
+
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource(ended=True)
+ LOAD_CONFIGURATION = {
+ "sourceUris": [self.SOURCE1],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "allowJaggedRows": True,
+ "allowQuotedNewlines": True,
+ "createDisposition": CreateDisposition.CREATE_NEVER,
+ "encoding": "ISO-8559-1",
+ "fieldDelimiter": "|",
+ "ignoreUnknownValues": True,
+ "maxBadRecords": 100,
+ "nullMarker": r"\N",
+ "quote": "'",
+ "skipLeadingRows": "1",
+ "sourceFormat": "CSV",
+ "useAvroLogicalTypes": True,
+ "writeDisposition": WriteDisposition.WRITE_TRUNCATE,
+ "referenceFileSchemaUri": "gs://path/to/reference",
+ "schema": {
+ "fields": [
+ {
+ "name": "full_name",
+ "type": "STRING",
+ "mode": "REQUIRED",
+ },
+ {
+ "name": "age",
+ "type": "INTEGER",
+ "mode": "REQUIRED",
+ },
+ ]
+ },
+ "schemaUpdateOptions": [SchemaUpdateOption.ALLOW_FIELD_ADDITION],
+ }
+ RESOURCE["configuration"]["load"] = LOAD_CONFIGURATION
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection(RESOURCE)
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="REQUIRED")
+ config = LoadJobConfig()
+ config.schema = [full_name, age]
+ job = self._make_one(
+ self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1, config
+ )
+ config.allow_jagged_rows = True
+ config.allow_quoted_newlines = True
+ config.create_disposition = CreateDisposition.CREATE_NEVER
+ config.encoding = "ISO-8559-1"
+ config.field_delimiter = "|"
+ config.ignore_unknown_values = True
+ config.max_bad_records = 100
+ config.null_marker = r"\N"
+ config.quote_character = "'"
+ config.skip_leading_rows = 1
+ config.source_format = "CSV"
+ config.use_avro_logical_types = True
+ config.write_disposition = WriteDisposition.WRITE_TRUNCATE
+ config.schema_update_options = [SchemaUpdateOption.ALLOW_FIELD_ADDITION]
+ config.reference_file_schema_uri = "gs://path/to/reference"
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin(client=client2)
+
+ final_attributes.assert_called_with({"path": PATH}, client2, job)
+
+ conn1.api_request.assert_not_called()
+ self.assertEqual(len(conn2.api_request.call_args_list), 1)
+ req = conn2.api_request.call_args_list[0]
+ self.assertEqual(req[1]["method"], "POST")
+ self.assertEqual(req[1]["path"], PATH)
+ SENT = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {"load": LOAD_CONFIGURATION},
+ }
+ self.maxDiff = None
+ self.assertEqual(req[1]["data"], SENT)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_job_reference(self):
+ from google.cloud.bigquery import job
+
+ resource = self._make_resource()
+ resource["jobReference"]["projectId"] = "alternative-project"
+ resource["jobReference"]["location"] = "US"
+ job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US")
+ conn = make_connection(resource)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ load_job._begin()
+ final_attributes.assert_called_with(
+ {"path": "/projects/alternative-project/jobs"}, client, load_job
+ )
+
+ conn.api_request.assert_called_once()
+ _, request = conn.api_request.call_args
+ self.assertEqual(request["method"], "POST")
+ self.assertEqual(request["path"], "/projects/alternative-project/jobs")
+ self.assertEqual(
+ request["data"]["jobReference"]["projectId"], "alternative-project"
+ )
+ self.assertEqual(request["data"]["jobReference"]["location"], "US")
+ self.assertEqual(request["data"]["jobReference"]["jobId"], self.JOB_ID)
+
+ def test_exists_miss_w_bound_client(self):
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ conn = make_connection()
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertFalse(job.exists())
+
+ final_attributes.assert_called_with(
+ {"path": "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)},
+ client,
+ job,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
+ )
+
+ def test_exists_hit_w_alternate_client(self):
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection({})
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertTrue(job.exists(client=client2))
+
+ final_attributes.assert_called_with(
+ {"path": "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)},
+ client2,
+ job,
+ )
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
+ )
+
+ def test_exists_miss_w_job_reference(self):
+ from google.cloud.bigquery import job
+
+ job_ref = job._JobReference("my-job-id", "other-project", "US")
+ conn = make_connection()
+ client = _make_client(project=self.PROJECT, connection=conn)
+ load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertFalse(load_job.exists())
+
+ final_attributes.assert_called_with(
+ {"path": "/projects/other-project/jobs/my-job-id"}, client, load_job
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/other-project/jobs/my-job-id",
+ query_params={"fields": "id", "location": "US"},
+ timeout=None,
+ )
+
+ def test_reload_w_bound_client(self):
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ RESOURCE = self._make_resource()
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.reload()
+
+ final_attributes.assert_called_with(
+ {
+ "path": PATH,
+ "job_id": self.JOB_ID,
+ "location": None,
+ },
+ client,
+ None,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=PATH,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_reload_w_alternate_client(self):
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ RESOURCE = self._make_resource()
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection(RESOURCE)
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.reload(client=client2)
+
+ final_attributes.assert_called_with(
+ {
+ "path": PATH,
+ "job_id": self.JOB_ID,
+ "location": None,
+ },
+ client2,
+ None,
+ )
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="GET",
+ path=PATH,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_reload_w_job_reference(self):
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ resource = self._make_resource(ended=True)
+ resource["jobReference"]["projectId"] = "alternative-project"
+ resource["jobReference"]["location"] = "US"
+ job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US")
+ conn = make_connection(resource)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ load_job.reload()
+
+ final_attributes.assert_called_with(
+ {
+ "path": "/projects/alternative-project/jobs/{}".format(self.JOB_ID),
+ "job_id": self.JOB_ID,
+ "location": "US",
+ },
+ client,
+ None,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/alternative-project/jobs/{}".format(self.JOB_ID),
+ query_params={"projection": "full", "location": "US"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+
+ def test_cancel_w_bound_client(self):
+ PATH = "/projects/%s/jobs/%s/cancel" % (self.PROJECT, self.JOB_ID)
+ RESOURCE = self._make_resource(ended=True)
+ RESPONSE = {"job": RESOURCE}
+ conn = make_connection(RESPONSE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.cancel()
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ conn.api_request.assert_called_once_with(
+ method="POST", path=PATH, query_params={}, timeout=None
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_cancel_w_alternate_client(self):
+ PATH = "/projects/%s/jobs/%s/cancel" % (self.PROJECT, self.JOB_ID)
+ RESOURCE = self._make_resource(ended=True)
+ RESPONSE = {"job": RESOURCE}
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection(RESPONSE)
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.cancel(client=client2)
+
+ final_attributes.assert_called_with({"path": PATH}, client2, job)
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="POST", path=PATH, query_params={}, timeout=None
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_cancel_w_job_reference(self):
+ from google.cloud.bigquery import job
+
+ resource = self._make_resource(ended=True)
+ resource["jobReference"]["projectId"] = "alternative-project"
+ resource["jobReference"]["location"] = "US"
+ job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US")
+ conn = make_connection({"job": resource})
+ client = _make_client(project=self.PROJECT, connection=conn)
+ load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ load_job.cancel()
+
+ final_attributes.assert_called_with(
+ {
+ "path": "/projects/alternative-project/jobs/{}/cancel".format(
+ self.JOB_ID
+ )
+ },
+ client,
+ load_job,
+ )
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/alternative-project/jobs/{}/cancel".format(self.JOB_ID),
+ query_params={"location": "US"},
+ timeout=None,
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/test_load_config.py b/testbed/googleapis__python-bigquery/tests/unit/job/test_load_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..becf3e959b7991c0fd8fc82fbe87668959c5fc05
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/test_load_config.py
@@ -0,0 +1,884 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+
+import pytest
+
+from .helpers import _Base
+
+
+class TestLoadJobConfig(_Base):
+ JOB_TYPE = "load"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ return LoadJobConfig
+
+ def test_ctor_w_properties(self):
+ config = self._get_target_class()(
+ allow_jagged_rows=True, allow_quoted_newlines=True
+ )
+
+ self.assertTrue(config.allow_jagged_rows)
+ self.assertTrue(config.allow_quoted_newlines)
+
+ def test_allow_jagged_rows_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.allow_jagged_rows)
+
+ def test_allow_jagged_rows_hit(self):
+ config = self._get_target_class()()
+ config._properties["load"]["allowJaggedRows"] = True
+ self.assertTrue(config.allow_jagged_rows)
+
+ def test_allow_jagged_rows_setter(self):
+ config = self._get_target_class()()
+ config.allow_jagged_rows = True
+ self.assertTrue(config._properties["load"]["allowJaggedRows"])
+
+ def test_allow_quoted_newlines_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.allow_quoted_newlines)
+
+ def test_allow_quoted_newlines_hit(self):
+ config = self._get_target_class()()
+ config._properties["load"]["allowQuotedNewlines"] = True
+ self.assertTrue(config.allow_quoted_newlines)
+
+ def test_allow_quoted_newlines_setter(self):
+ config = self._get_target_class()()
+ config.allow_quoted_newlines = True
+ self.assertTrue(config._properties["load"]["allowQuotedNewlines"])
+
+ def test_autodetect_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.autodetect)
+
+ def test_autodetect_hit(self):
+ config = self._get_target_class()()
+ config._properties["load"]["autodetect"] = True
+ self.assertTrue(config.autodetect)
+
+ def test_autodetect_setter(self):
+ config = self._get_target_class()()
+ config.autodetect = True
+ self.assertTrue(config._properties["load"]["autodetect"])
+
+ def test_clustering_fields_miss(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.clustering_fields)
+
+ def test_clustering_fields_hit(self):
+ config = self._get_target_class()()
+ fields = ["email", "postal_code"]
+ config._properties["load"]["clustering"] = {"fields": fields}
+ self.assertEqual(config.clustering_fields, fields)
+
+ def test_clustering_fields_setter(self):
+ fields = ["email", "postal_code"]
+ config = self._get_target_class()()
+ config.clustering_fields = fields
+ self.assertEqual(config._properties["load"]["clustering"], {"fields": fields})
+
+ def test_clustering_fields_setter_w_none(self):
+ config = self._get_target_class()()
+ fields = ["email", "postal_code"]
+ config._properties["load"]["clustering"] = {"fields": fields}
+ config.clustering_fields = None
+ self.assertIsNone(config.clustering_fields)
+ self.assertNotIn("clustering", config._properties["load"])
+
+ def test_create_disposition_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.create_disposition)
+
+ def test_create_disposition_hit(self):
+ from google.cloud.bigquery.job import CreateDisposition
+
+ disposition = CreateDisposition.CREATE_IF_NEEDED
+ config = self._get_target_class()()
+ config._properties["load"]["createDisposition"] = disposition
+ self.assertEqual(config.create_disposition, disposition)
+
+ def test_create_disposition_setter(self):
+ from google.cloud.bigquery.job import CreateDisposition
+
+ disposition = CreateDisposition.CREATE_IF_NEEDED
+ config = self._get_target_class()()
+ config.create_disposition = disposition
+ self.assertEqual(config._properties["load"]["createDisposition"], disposition)
+
+ def test_connection_properties(self):
+ from google.cloud.bigquery.query import ConnectionProperty
+
+ config = self._get_target_class()()
+ self.assertEqual(len(config.connection_properties), 0)
+
+ session_id = ConnectionProperty("session_id", "abcd")
+ time_zone = ConnectionProperty("time_zone", "America/Chicago")
+ config.connection_properties = [session_id, time_zone]
+ self.assertEqual(len(config.connection_properties), 2)
+ self.assertEqual(config.connection_properties[0].key, "session_id")
+ self.assertEqual(config.connection_properties[0].value, "abcd")
+ self.assertEqual(config.connection_properties[1].key, "time_zone")
+ self.assertEqual(config.connection_properties[1].value, "America/Chicago")
+
+ def test_create_session(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.create_session)
+ config.create_session = True
+ self.assertTrue(config.create_session)
+
+ def test_decimal_target_types_miss(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.decimal_target_types)
+
+ def test_decimal_target_types_hit(self):
+ from google.cloud.bigquery.enums import DecimalTargetType
+
+ config = self._get_target_class()()
+ decimal_target_types = [DecimalTargetType.NUMERIC, DecimalTargetType.STRING]
+ config._properties["load"]["decimalTargetTypes"] = decimal_target_types
+
+ expected = frozenset(decimal_target_types)
+ self.assertEqual(config.decimal_target_types, expected)
+
+ def test_decimal_target_types_setter(self):
+ from google.cloud.bigquery.enums import DecimalTargetType
+
+ decimal_target_types = (DecimalTargetType.NUMERIC, DecimalTargetType.BIGNUMERIC)
+ config = self._get_target_class()()
+ config.decimal_target_types = decimal_target_types
+ self.assertEqual(
+ config._properties["load"]["decimalTargetTypes"],
+ list(decimal_target_types),
+ )
+
+ def test_decimal_target_types_setter_w_none(self):
+ from google.cloud.bigquery.enums import DecimalTargetType
+
+ config = self._get_target_class()()
+ decimal_target_types = [DecimalTargetType.BIGNUMERIC]
+ config._properties["load"]["decimalTargetTypes"] = decimal_target_types
+
+ config.decimal_target_types = None
+
+ self.assertIsNone(config.decimal_target_types)
+ self.assertNotIn("decimalTargetTypes", config._properties["load"])
+
+ config.decimal_target_types = None # No error if unsetting an unset property.
+
+ def test_destination_encryption_configuration_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.destination_encryption_configuration)
+
+ def test_destination_encryption_configuration_hit(self):
+ from google.cloud.bigquery.encryption_configuration import (
+ EncryptionConfiguration,
+ )
+
+ kms_key_name = "kms-key-name"
+ encryption_configuration = EncryptionConfiguration(kms_key_name)
+ config = self._get_target_class()()
+ config._properties["load"]["destinationEncryptionConfiguration"] = {
+ "kmsKeyName": kms_key_name
+ }
+ self.assertEqual(
+ config.destination_encryption_configuration, encryption_configuration
+ )
+
+ def test_destination_encryption_configuration_setter(self):
+ from google.cloud.bigquery.encryption_configuration import (
+ EncryptionConfiguration,
+ )
+
+ kms_key_name = "kms-key-name"
+ encryption_configuration = EncryptionConfiguration(kms_key_name)
+ config = self._get_target_class()()
+ config.destination_encryption_configuration = encryption_configuration
+ expected = {"kmsKeyName": kms_key_name}
+ self.assertEqual(
+ config._properties["load"]["destinationEncryptionConfiguration"], expected
+ )
+
+ def test_destination_encryption_configuration_setter_w_none(self):
+ kms_key_name = "kms-key-name"
+ config = self._get_target_class()()
+ config._properties["load"]["destinationEncryptionConfiguration"] = {
+ "kmsKeyName": kms_key_name
+ }
+ config.destination_encryption_configuration = None
+ self.assertIsNone(config.destination_encryption_configuration)
+ self.assertNotIn(
+ "destinationEncryptionConfiguration", config._properties["load"]
+ )
+
+ def test_destination_table_description_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.destination_table_description)
+
+ def test_destination_table_description_hit(self):
+ description = "Description"
+ config = self._get_target_class()()
+ config._properties["load"]["destinationTableProperties"] = {
+ "description": description
+ }
+ self.assertEqual(config.destination_table_description, description)
+
+ def test_destination_table_description_setter(self):
+ description = "Description"
+ config = self._get_target_class()()
+ config.destination_table_description = description
+ expected = {"description": description}
+ self.assertEqual(
+ config._properties["load"]["destinationTableProperties"], expected
+ )
+
+ def test_destination_table_description_setter_w_fn_already(self):
+ description = "Description"
+ friendly_name = "Friendly Name"
+ config = self._get_target_class()()
+ config._properties["load"]["destinationTableProperties"] = {
+ "friendlyName": friendly_name
+ }
+ config.destination_table_description = description
+ expected = {"friendlyName": friendly_name, "description": description}
+ self.assertEqual(
+ config._properties["load"]["destinationTableProperties"], expected
+ )
+
+ def test_destination_table_description_w_none(self):
+ description = "Description"
+ friendly_name = "Friendly Name"
+ config = self._get_target_class()()
+ config._properties["load"]["destinationTableProperties"] = {
+ "description": description,
+ "friendlyName": friendly_name,
+ }
+ config.destination_table_description = None
+ expected = {"friendlyName": friendly_name}
+ self.assertEqual(
+ config._properties["load"]["destinationTableProperties"], expected
+ )
+
+ def test_destination_table_friendly_name_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.destination_table_friendly_name)
+
+ def test_destination_table_friendly_name_hit(self):
+ friendly_name = "Friendly Name"
+ config = self._get_target_class()()
+ config._properties["load"]["destinationTableProperties"] = {
+ "friendlyName": friendly_name
+ }
+ self.assertEqual(config.destination_table_friendly_name, friendly_name)
+
+ def test_destination_table_friendly_name_setter(self):
+ friendly_name = "Friendly Name"
+ config = self._get_target_class()()
+ config.destination_table_friendly_name = friendly_name
+ expected = {"friendlyName": friendly_name}
+ self.assertEqual(
+ config._properties["load"]["destinationTableProperties"], expected
+ )
+
+ def test_destination_table_friendly_name_setter_w_descr_already(self):
+ friendly_name = "Friendly Name"
+ description = "Description"
+ config = self._get_target_class()()
+ config._properties["load"]["destinationTableProperties"] = {
+ "description": description
+ }
+ config.destination_table_friendly_name = friendly_name
+ expected = {"friendlyName": friendly_name, "description": description}
+ self.assertEqual(
+ config._properties["load"]["destinationTableProperties"], expected
+ )
+
+ def test_destination_table_friendly_name_w_none(self):
+ friendly_name = "Friendly Name"
+ description = "Description"
+ config = self._get_target_class()()
+ config._properties["load"]["destinationTableProperties"] = {
+ "description": description,
+ "friendlyName": friendly_name,
+ }
+ config.destination_table_friendly_name = None
+ expected = {"description": description}
+ self.assertEqual(
+ config._properties["load"]["destinationTableProperties"], expected
+ )
+
+ def test_encoding_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.encoding)
+
+ def test_encoding_hit(self):
+ from google.cloud.bigquery.job import Encoding
+
+ encoding = Encoding.UTF_8
+ config = self._get_target_class()()
+ config._properties["load"]["encoding"] = encoding
+ self.assertEqual(config.encoding, encoding)
+
+ def test_encoding_setter(self):
+ from google.cloud.bigquery.job import Encoding
+
+ encoding = Encoding.UTF_8
+ config = self._get_target_class()()
+ config.encoding = encoding
+ self.assertEqual(config._properties["load"]["encoding"], encoding)
+
+ def test_field_delimiter_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.field_delimiter)
+
+ def test_field_delimiter_hit(self):
+ field_delimiter = "|"
+ config = self._get_target_class()()
+ config._properties["load"]["fieldDelimiter"] = field_delimiter
+ self.assertEqual(config.field_delimiter, field_delimiter)
+
+ def test_field_delimiter_setter(self):
+ field_delimiter = "|"
+ config = self._get_target_class()()
+ config.field_delimiter = field_delimiter
+ self.assertEqual(config._properties["load"]["fieldDelimiter"], field_delimiter)
+
+ def test_hive_partitioning_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.hive_partitioning)
+
+ def test_hive_partitioning_hit(self):
+ from google.cloud.bigquery.external_config import HivePartitioningOptions
+
+ config = self._get_target_class()()
+ config._properties["load"]["hivePartitioningOptions"] = {
+ "sourceUriPrefix": "http://foo/bar",
+ "mode": "STRINGS",
+ }
+ result = config.hive_partitioning
+ self.assertIsInstance(result, HivePartitioningOptions)
+ self.assertEqual(result.source_uri_prefix, "http://foo/bar")
+ self.assertEqual(result.mode, "STRINGS")
+
+ def test_hive_partitioning_setter(self):
+ from google.cloud.bigquery.external_config import HivePartitioningOptions
+
+ hive_partitioning = HivePartitioningOptions()
+ hive_partitioning.source_uri_prefix = "http://foo/bar"
+ hive_partitioning.mode = "AUTO"
+
+ config = self._get_target_class()()
+ config.hive_partitioning = hive_partitioning
+ self.assertEqual(
+ config._properties["load"]["hivePartitioningOptions"],
+ {"sourceUriPrefix": "http://foo/bar", "mode": "AUTO"},
+ )
+
+ config.hive_partitioning = None
+ self.assertIsNone(config._properties["load"]["hivePartitioningOptions"])
+
+ def test_hive_partitioning_invalid_type(self):
+ config = self._get_target_class()()
+
+ with self.assertRaises(TypeError):
+ config.hive_partitioning = {"mode": "AUTO"}
+
+ def test_ignore_unknown_values_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.ignore_unknown_values)
+
+ def test_ignore_unknown_values_hit(self):
+ config = self._get_target_class()()
+ config._properties["load"]["ignoreUnknownValues"] = True
+ self.assertTrue(config.ignore_unknown_values)
+
+ def test_ignore_unknown_values_setter(self):
+ config = self._get_target_class()()
+ config.ignore_unknown_values = True
+ self.assertTrue(config._properties["load"]["ignoreUnknownValues"])
+
+ def test_json_extension_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.json_extension)
+
+ def test_json_extension_hit(self):
+ config = self._get_target_class()()
+ config._properties["load"]["jsonExtension"] = "GEOJSON"
+ self.assertEqual(config.json_extension, "GEOJSON")
+
+ def test_json_extension_setter(self):
+ config = self._get_target_class()()
+ self.assertFalse(config.json_extension)
+ config.json_extension = "GEOJSON"
+ self.assertTrue(config.json_extension)
+ self.assertEqual(config._properties["load"]["jsonExtension"], "GEOJSON")
+
+ def test_to_api_repr_includes_json_extension(self):
+ config = self._get_target_class()()
+ config._properties["load"]["jsonExtension"] = "GEOJSON"
+ api_repr = config.to_api_repr()
+ self.assertIn("jsonExtension", api_repr["load"])
+ self.assertEqual(api_repr["load"]["jsonExtension"], "GEOJSON")
+
+ def test_max_bad_records_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.max_bad_records)
+
+ def test_max_bad_records_hit(self):
+ max_bad_records = 13
+ config = self._get_target_class()()
+ config._properties["load"]["maxBadRecords"] = max_bad_records
+ self.assertEqual(config.max_bad_records, max_bad_records)
+
+ def test_max_bad_records_setter(self):
+ max_bad_records = 13
+ config = self._get_target_class()()
+ config.max_bad_records = max_bad_records
+ self.assertEqual(config._properties["load"]["maxBadRecords"], max_bad_records)
+
+ def test_null_marker_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.null_marker)
+
+ def test_null_marker_hit(self):
+ null_marker = "XXX"
+ config = self._get_target_class()()
+ config._properties["load"]["nullMarker"] = null_marker
+ self.assertEqual(config.null_marker, null_marker)
+
+ def test_null_marker_setter(self):
+ null_marker = "XXX"
+ config = self._get_target_class()()
+ config.null_marker = null_marker
+ self.assertEqual(config._properties["load"]["nullMarker"], null_marker)
+
+ def test_preserve_ascii_control_characters_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.preserve_ascii_control_characters)
+
+ def test_preserve_ascii_control_characters_hit(self):
+ config = self._get_target_class()()
+ config._properties["load"]["preserveAsciiControlCharacters"] = True
+ self.assertTrue(config.preserve_ascii_control_characters)
+
+ def test_preserve_ascii_control_characters_setter(self):
+ config = self._get_target_class()()
+ config.preserve_ascii_control_characters = True
+ self.assertTrue(config._properties["load"]["preserveAsciiControlCharacters"])
+
+ def test_projection_fields_miss(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.projection_fields)
+
+ def test_projection_fields_hit(self):
+ config = self._get_target_class()()
+ fields = ["email", "postal_code"]
+ config.projection_fields = fields
+ self.assertEqual(config._properties["load"]["projectionFields"], fields)
+ self.assertEqual(config.projection_fields, fields)
+
+ def test_quote_character_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.quote_character)
+
+ def test_quote_character_hit(self):
+ quote_character = "'"
+ config = self._get_target_class()()
+ config._properties["load"]["quote"] = quote_character
+ self.assertEqual(config.quote_character, quote_character)
+
+ def test_quote_character_setter(self):
+ quote_character = "'"
+ config = self._get_target_class()()
+ config.quote_character = quote_character
+ self.assertEqual(config._properties["load"]["quote"], quote_character)
+
+ def test_schema_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.schema)
+
+ def test_schema_hit(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ config = self._get_target_class()()
+ all_props_repr = {
+ "mode": "REQUIRED",
+ "name": "foo",
+ "type": "INTEGER",
+ "description": "Foo",
+ }
+ minimal_repr = {"name": "bar", "type": "STRING"}
+ config._properties["load"]["schema"] = {
+ "fields": [all_props_repr, minimal_repr]
+ }
+ all_props, minimal = config.schema
+ self.assertEqual(all_props, SchemaField.from_api_repr(all_props_repr))
+ self.assertEqual(minimal, SchemaField.from_api_repr(minimal_repr))
+
+ def test_schema_setter_fields(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ config = self._get_target_class()()
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="REQUIRED")
+ config.schema = [full_name, age]
+ full_name_repr = {
+ "name": "full_name",
+ "type": "STRING",
+ "mode": "REQUIRED",
+ }
+ age_repr = {
+ "name": "age",
+ "type": "INTEGER",
+ "mode": "REQUIRED",
+ }
+ self.assertEqual(
+ config._properties["load"]["schema"], {"fields": [full_name_repr, age_repr]}
+ )
+
+ def test_schema_setter_valid_mappings_list(self):
+ config = self._get_target_class()()
+
+ full_name_repr = {
+ "name": "full_name",
+ "type": "STRING",
+ "mode": "REQUIRED",
+ }
+ age_repr = {
+ "name": "age",
+ "type": "INTEGER",
+ "mode": "REQUIRED",
+ }
+ schema = [full_name_repr, age_repr]
+ config.schema = schema
+ self.assertEqual(
+ config._properties["load"]["schema"], {"fields": [full_name_repr, age_repr]}
+ )
+
+ def test_schema_setter_invalid_mappings_list(self):
+ config = self._get_target_class()()
+
+ schema = [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "typeoo": "INTEGER", "mode": "REQUIRED"},
+ ]
+
+ with self.assertRaises(Exception):
+ config.schema = schema
+
+ def test_schema_setter_unsetting_schema(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ config = self._get_target_class()()
+ config._properties["load"]["schema"] = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+
+ config.schema = None
+ self.assertNotIn("schema", config._properties["load"])
+ config.schema = None # no error, idempotent operation
+
+ def test_schema_update_options_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.schema_update_options)
+
+ def test_schema_update_options_hit(self):
+ from google.cloud.bigquery.job import SchemaUpdateOption
+
+ options = [
+ SchemaUpdateOption.ALLOW_FIELD_ADDITION,
+ SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
+ ]
+ config = self._get_target_class()()
+ config._properties["load"]["schemaUpdateOptions"] = options
+ self.assertEqual(config.schema_update_options, options)
+
+ def test_schema_update_options_setter(self):
+ from google.cloud.bigquery.job import SchemaUpdateOption
+
+ options = [
+ SchemaUpdateOption.ALLOW_FIELD_ADDITION,
+ SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
+ ]
+ config = self._get_target_class()()
+ config.schema_update_options = options
+ self.assertEqual(config._properties["load"]["schemaUpdateOptions"], options)
+
+ def test_skip_leading_rows_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.skip_leading_rows)
+
+ def test_skip_leading_rows_hit_w_str(self):
+ skip_leading_rows = 1
+ config = self._get_target_class()()
+ config._properties["load"]["skipLeadingRows"] = str(skip_leading_rows)
+ self.assertEqual(config.skip_leading_rows, skip_leading_rows)
+
+ def test_skip_leading_rows_hit_w_integer(self):
+ skip_leading_rows = 1
+ config = self._get_target_class()()
+ config._properties["load"]["skipLeadingRows"] = skip_leading_rows
+ self.assertEqual(config.skip_leading_rows, skip_leading_rows)
+
+ def test_skip_leading_rows_setter(self):
+ skip_leading_rows = 1
+ config = self._get_target_class()()
+ config.skip_leading_rows = skip_leading_rows
+ self.assertEqual(
+ config._properties["load"]["skipLeadingRows"], str(skip_leading_rows)
+ )
+
+ def test_source_format_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.source_format)
+
+ def test_source_format_hit(self):
+ from google.cloud.bigquery.job import SourceFormat
+
+ source_format = SourceFormat.CSV
+ config = self._get_target_class()()
+ config._properties["load"]["sourceFormat"] = source_format
+ self.assertEqual(config.source_format, source_format)
+
+ def test_source_format_setter(self):
+ from google.cloud.bigquery.job import SourceFormat
+
+ source_format = SourceFormat.CSV
+ config = self._get_target_class()()
+ config.source_format = source_format
+ self.assertEqual(config._properties["load"]["sourceFormat"], source_format)
+
+ def test_range_partitioning_w_none(self):
+ object_under_test = self._get_target_class()()
+ assert object_under_test.range_partitioning is None
+
+ def test_range_partitioning_w_value(self):
+ object_under_test = self._get_target_class()()
+ object_under_test._properties["load"]["rangePartitioning"] = {
+ "field": "column_one",
+ "range": {"start": 1, "end": 1000, "interval": 10},
+ }
+ object_under_test.range_partitioning.field == "column_one"
+ object_under_test.range_partitioning.range_.start == 1
+ object_under_test.range_partitioning.range_.end == 1000
+ object_under_test.range_partitioning.range_.interval == 10
+
+ def test_range_partitioning_setter(self):
+ from google.cloud.bigquery.table import PartitionRange
+ from google.cloud.bigquery.table import RangePartitioning
+
+ object_under_test = self._get_target_class()()
+ object_under_test.range_partitioning = RangePartitioning(
+ field="column_one", range_=PartitionRange(start=1, end=1000, interval=10)
+ )
+ object_under_test.range_partitioning.field == "column_one"
+ object_under_test.range_partitioning.range_.start == 1
+ object_under_test.range_partitioning.range_.end == 1000
+ object_under_test.range_partitioning.range_.interval == 10
+
+ def test_range_partitioning_setter_w_none(self):
+ object_under_test = self._get_target_class()()
+ object_under_test.range_partitioning = None
+ assert object_under_test.range_partitioning is None
+
+ def test_range_partitioning_setter_w_wrong_type(self):
+ object_under_test = self._get_target_class()()
+ with pytest.raises(ValueError, match="RangePartitioning"):
+ object_under_test.range_partitioning = object()
+
+ def test_time_partitioning_miss(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.time_partitioning)
+
+ def test_time_partitioning_hit(self):
+ from google.cloud.bigquery.table import TimePartitioning
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ field = "creation_date"
+ year_ms = 86400 * 1000 * 365
+ config = self._get_target_class()()
+ config._properties["load"]["timePartitioning"] = {
+ "type": TimePartitioningType.DAY,
+ "field": field,
+ "expirationMs": str(year_ms),
+ "requirePartitionFilter": False,
+ }
+ with warnings.catch_warnings(record=True) as warned:
+ expected = TimePartitioning(
+ type_=TimePartitioningType.DAY,
+ field=field,
+ expiration_ms=year_ms,
+ require_partition_filter=False,
+ )
+ self.assertEqual(config.time_partitioning, expected)
+
+ assert len(warned) == 1
+ warning = warned[0]
+ assert "TimePartitioning.require_partition_filter" in str(warning)
+
+ def test_time_partitioning_setter(self):
+ from google.cloud.bigquery.table import TimePartitioning
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ field = "creation_date"
+ year_ms = 86400 * 1000 * 365
+
+ with warnings.catch_warnings(record=True) as warned:
+ time_partitioning = TimePartitioning(
+ type_=TimePartitioningType.DAY,
+ field=field,
+ expiration_ms=year_ms,
+ require_partition_filter=False,
+ )
+
+ config = self._get_target_class()()
+ config.time_partitioning = time_partitioning
+ expected = {
+ "type": TimePartitioningType.DAY,
+ "field": field,
+ "expirationMs": str(year_ms),
+ "requirePartitionFilter": False,
+ }
+ self.assertEqual(config._properties["load"]["timePartitioning"], expected)
+
+ assert len(warned) == 1
+ warning = warned[0]
+ assert "TimePartitioning.require_partition_filter" in str(warning)
+
+ def test_time_partitioning_setter_w_none(self):
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ field = "creation_date"
+ year_ms = 86400 * 1000 * 365
+ config = self._get_target_class()()
+ config._properties["load"]["timePartitioning"] = {
+ "type": TimePartitioningType.DAY,
+ "field": field,
+ "expirationMs": str(year_ms),
+ "requirePartitionFilter": False,
+ }
+ config.time_partitioning = None
+ self.assertIsNone(config.time_partitioning)
+ self.assertNotIn("timePartitioning", config._properties["load"])
+
+ def test_use_avro_logical_types(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.use_avro_logical_types)
+
+ def test_use_avro_logical_types_setter(self):
+ config = self._get_target_class()()
+ config.use_avro_logical_types = True
+ self.assertTrue(config._properties["load"]["useAvroLogicalTypes"])
+
+ def test_write_disposition_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.write_disposition)
+
+ def test_write_disposition_hit(self):
+ from google.cloud.bigquery.job import WriteDisposition
+
+ write_disposition = WriteDisposition.WRITE_TRUNCATE
+ config = self._get_target_class()()
+ config._properties["load"]["writeDisposition"] = write_disposition
+ self.assertEqual(config.write_disposition, write_disposition)
+
+ def test_write_disposition_setter(self):
+ from google.cloud.bigquery.job import WriteDisposition
+
+ write_disposition = WriteDisposition.WRITE_TRUNCATE
+ config = self._get_target_class()()
+ config.write_disposition = write_disposition
+ self.assertEqual(
+ config._properties["load"]["writeDisposition"], write_disposition
+ )
+
+ def test_parquet_options_missing(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.parquet_options)
+
+ def test_parquet_options_hit(self):
+ config = self._get_target_class()()
+ config._properties["load"]["parquetOptions"] = dict(
+ enumAsString=True, enableListInference=False
+ )
+ self.assertTrue(config.parquet_options.enum_as_string)
+ self.assertFalse(config.parquet_options.enable_list_inference)
+
+ def test_parquet_options_setter(self):
+ from google.cloud.bigquery.format_options import ParquetOptions
+
+ parquet_options = ParquetOptions.from_api_repr(
+ dict(enumAsString=False, enableListInference=True)
+ )
+ config = self._get_target_class()()
+
+ config.parquet_options = parquet_options
+ self.assertEqual(
+ config._properties["load"]["parquetOptions"],
+ {"enumAsString": False, "enableListInference": True},
+ )
+
+ def test_parquet_options_setter_clearing(self):
+ config = self._get_target_class()()
+ config._properties["load"]["parquetOptions"] = dict(
+ enumAsString=False, enableListInference=True
+ )
+
+ config.parquet_options = None
+ self.assertNotIn("parquetOptions", config._properties["load"])
+
+ def test_column_name_character_map_missing(self):
+ from google.cloud.bigquery.job.load import ColumnNameCharacterMap
+
+ config = self._get_target_class()()
+ self.assertEqual(
+ config.column_name_character_map,
+ ColumnNameCharacterMap.COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED,
+ )
+
+ def test_column_name_character_map_hit(self):
+ from google.cloud.bigquery.job.load import ColumnNameCharacterMap
+
+ config = self._get_target_class()()
+ config._properties["load"]["columnNameCharacterMap"] = "STRICT"
+ self.assertEqual(
+ config.column_name_character_map,
+ ColumnNameCharacterMap.STRICT,
+ )
+
+ def test_column_name_character_map_setter(self):
+ from google.cloud.bigquery.job.load import ColumnNameCharacterMap
+
+ config = self._get_target_class()()
+ config.column_name_character_map = "V1"
+ self.assertEqual(
+ config._properties["load"]["columnNameCharacterMap"],
+ ColumnNameCharacterMap.V1,
+ )
+
+ def test_column_name_character_map_none(self):
+ from google.cloud.bigquery.job.load import ColumnNameCharacterMap
+
+ config = self._get_target_class()()
+ config.column_name_character_map = None
+ self.assertEqual(
+ config._properties["load"]["columnNameCharacterMap"],
+ ColumnNameCharacterMap.COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED,
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/test_query.py b/testbed/googleapis__python-bigquery/tests/unit/job/test_query.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bbd31c7307aef29daf8f513ffab84b4eaae770b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/test_query.py
@@ -0,0 +1,2419 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import concurrent
+import concurrent.futures
+import copy
+import http
+import textwrap
+import types
+from unittest import mock
+
+import freezegun
+from google.api_core import exceptions
+import google.api_core.retry
+import requests
+
+from google.cloud.bigquery.client import _LIST_ROWS_FROM_QUERY_RESULTS_FIELDS
+import google.cloud.bigquery._job_helpers
+import google.cloud.bigquery.query
+import google.cloud.bigquery.retry
+from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+from google.cloud.bigquery.table import _EmptyRowIterator
+
+from ..helpers import make_connection
+
+from .helpers import _Base
+from .helpers import _make_client
+
+
+class TestQueryJob(_Base):
+ JOB_TYPE = "query"
+ QUERY = "select count(*) from persons"
+ DESTINATION_TABLE = "destination_table"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import QueryJob
+
+ return QueryJob
+
+ def _make_resource(self, started=False, ended=False, location="US"):
+ resource = super(TestQueryJob, self)._make_resource(
+ started, ended, location=location
+ )
+ config = resource["configuration"]["query"]
+ config["query"] = self.QUERY
+ return resource
+
+ def _verifyBooleanResourceProperties(self, job, config):
+ if "allowLargeResults" in config:
+ self.assertEqual(job.allow_large_results, config["allowLargeResults"])
+ else:
+ self.assertIsNone(job.allow_large_results)
+ if "flattenResults" in config:
+ self.assertEqual(job.flatten_results, config["flattenResults"])
+ else:
+ self.assertIsNone(job.flatten_results)
+ if "useQueryCache" in config:
+ self.assertEqual(job.use_query_cache, config["useQueryCache"])
+ else:
+ self.assertIsNone(job.use_query_cache)
+ if "useLegacySql" in config:
+ self.assertEqual(job.use_legacy_sql, config["useLegacySql"])
+ else:
+ self.assertIsNone(job.use_legacy_sql)
+
+ def _verifyIntegerResourceProperties(self, job, config):
+ if "maximumBillingTier" in config:
+ self.assertEqual(job.maximum_billing_tier, config["maximumBillingTier"])
+ else:
+ self.assertIsNone(job.maximum_billing_tier)
+ if "maximumBytesBilled" in config:
+ self.assertEqual(
+ str(job.maximum_bytes_billed), config["maximumBytesBilled"]
+ )
+ self.assertIsInstance(job.maximum_bytes_billed, int)
+ else:
+ self.assertIsNone(job.maximum_bytes_billed)
+
+ def _verify_udf_resources(self, job, config):
+ udf_resources = config.get("userDefinedFunctionResources", ())
+ self.assertEqual(len(job.udf_resources), len(udf_resources))
+ for found, expected in zip(job.udf_resources, udf_resources):
+ if "resourceUri" in expected:
+ self.assertEqual(found.udf_type, "resourceUri")
+ self.assertEqual(found.value, expected["resourceUri"])
+ else:
+ self.assertEqual(found.udf_type, "inlineCode")
+ self.assertEqual(found.value, expected["inlineCode"])
+
+ def _verifyQueryParameters(self, job, config):
+ query_parameters = config.get("queryParameters", ())
+ self.assertEqual(len(job.query_parameters), len(query_parameters))
+ for found, expected in zip(job.query_parameters, query_parameters):
+ self.assertEqual(found.to_api_repr(), expected)
+
+ def _verify_table_definitions(self, job, config):
+ table_defs = config.get("tableDefinitions")
+ if job.table_definitions is None:
+ self.assertIsNone(table_defs)
+ else:
+ self.assertEqual(len(job.table_definitions), len(table_defs))
+ for found_key, found_ec in job.table_definitions.items():
+ expected_ec = table_defs.get(found_key)
+ self.assertIsNotNone(expected_ec)
+ self.assertEqual(found_ec.to_api_repr(), expected_ec)
+
+ def _verify_dml_stats_resource_properties(self, job, resource):
+ query_stats = resource.get("statistics", {}).get("query", {})
+
+ if "dmlStats" in query_stats:
+ resource_dml_stats = query_stats["dmlStats"]
+ job_dml_stats = job.dml_stats
+ assert str(job_dml_stats.inserted_row_count) == resource_dml_stats.get(
+ "insertedRowCount", "0"
+ )
+ assert str(job_dml_stats.updated_row_count) == resource_dml_stats.get(
+ "updatedRowCount", "0"
+ )
+ assert str(job_dml_stats.deleted_row_count) == resource_dml_stats.get(
+ "deletedRowCount", "0"
+ )
+ else:
+ assert job.dml_stats is None
+
+ def _verify_transaction_info_resource_properties(self, job, resource):
+ resource_stats = resource.get("statistics", {})
+
+ if "transactionInfo" in resource_stats:
+ resource_transaction_info = resource_stats["transactionInfo"]
+ job_transaction_info = job.transaction_info
+ assert job_transaction_info.transaction_id == resource_transaction_info.get(
+ "transactionId"
+ )
+ else:
+ assert job.transaction_info is None
+
+ def _verify_configuration_properties(self, job, configuration):
+ if "dryRun" in configuration:
+ self.assertEqual(job.dry_run, configuration["dryRun"])
+ else:
+ self.assertIsNone(job.dry_run)
+
+ def _verifyResourceProperties(self, job, resource):
+ self._verifyReadonlyResourceProperties(job, resource)
+ self._verify_dml_stats_resource_properties(job, resource)
+ self._verify_transaction_info_resource_properties(job, resource)
+
+ configuration = resource.get("configuration", {})
+ self._verify_configuration_properties(job, configuration)
+
+ query_config = resource.get("configuration", {}).get("query")
+ self._verifyBooleanResourceProperties(job, query_config)
+ self._verifyIntegerResourceProperties(job, query_config)
+ self._verify_udf_resources(job, query_config)
+ self._verifyQueryParameters(job, query_config)
+ self._verify_table_definitions(job, query_config)
+
+ self.assertEqual(job.query, query_config["query"])
+
+ if "createDisposition" in query_config:
+ self.assertEqual(job.create_disposition, query_config["createDisposition"])
+ else:
+ self.assertIsNone(job.create_disposition)
+
+ if "defaultDataset" in query_config:
+ ds_ref = job.default_dataset
+ ds_ref = {"projectId": ds_ref.project, "datasetId": ds_ref.dataset_id}
+ self.assertEqual(ds_ref, query_config["defaultDataset"])
+ else:
+ self.assertIsNone(job.default_dataset)
+
+ if "destinationTable" in query_config:
+ table = job.destination
+ tb_ref = {
+ "projectId": table.project,
+ "datasetId": table.dataset_id,
+ "tableId": table.table_id,
+ }
+ self.assertEqual(tb_ref, query_config["destinationTable"])
+ else:
+ self.assertIsNone(job.destination)
+
+ if "priority" in query_config:
+ self.assertEqual(job.priority, query_config["priority"])
+ else:
+ self.assertIsNone(job.priority)
+
+ if "writeDisposition" in query_config:
+ self.assertEqual(job.write_disposition, query_config["writeDisposition"])
+ else:
+ self.assertIsNone(job.write_disposition)
+
+ if "destinationEncryptionConfiguration" in query_config:
+ self.assertIsNotNone(job.destination_encryption_configuration)
+ self.assertEqual(
+ job.destination_encryption_configuration.kms_key_name,
+ query_config["destinationEncryptionConfiguration"]["kmsKeyName"],
+ )
+ else:
+ self.assertIsNone(job.destination_encryption_configuration)
+
+ if "schemaUpdateOptions" in query_config:
+ self.assertEqual(
+ job.schema_update_options, query_config["schemaUpdateOptions"]
+ )
+ else:
+ self.assertIsNone(job.schema_update_options)
+
+ def test_ctor_defaults(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertEqual(job.query, self.QUERY)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_type, self.JOB_TYPE)
+ self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID))
+
+ self._verifyInitialReadonlyProperties(job)
+
+ self.assertFalse(job.use_legacy_sql)
+
+ # set/read from resource['configuration']['query']
+ self.assertIsNone(job.allow_large_results)
+ self.assertIsNone(job.create_disposition)
+ self.assertIsNone(job.default_dataset)
+ self.assertIsNone(job.destination)
+ self.assertIsNone(job.dml_stats)
+ self.assertIsNone(job.flatten_results)
+ self.assertIsNone(job.priority)
+ self.assertIsNone(job.use_query_cache)
+ self.assertIsNone(job.dry_run)
+ self.assertIsNone(job.write_disposition)
+ self.assertIsNone(job.maximum_billing_tier)
+ self.assertIsNone(job.maximum_bytes_billed)
+ self.assertIsNone(job.table_definitions)
+ self.assertIsNone(job.destination_encryption_configuration)
+ self.assertIsNone(job.range_partitioning)
+ self.assertIsNone(job.time_partitioning)
+ self.assertIsNone(job.clustering_fields)
+ self.assertIsNone(job.schema_update_options)
+
+ def test_ctor_w_udf_resources(self):
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.query import UDFResource
+
+ RESOURCE_URI = "gs://some-bucket/js/lib.js"
+ udf_resources = [UDFResource("resourceUri", RESOURCE_URI)]
+ client = _make_client(project=self.PROJECT)
+ config = QueryJobConfig()
+ config.udf_resources = udf_resources
+ job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
+ self.assertEqual(job.udf_resources, udf_resources)
+
+ def test_ctor_w_query_parameters(self):
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.query import ScalarQueryParameter
+
+ query_parameters = [ScalarQueryParameter("foo", "INT64", 123)]
+ client = _make_client(project=self.PROJECT)
+ config = QueryJobConfig(query_parameters=query_parameters)
+ job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
+ self.assertEqual(job.query_parameters, query_parameters)
+
+ def test_from_api_repr_bare(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {"query": {"query": self.QUERY}},
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+ self.assertEqual(len(job.connection_properties), 0)
+ self.assertIsNone(job.create_session)
+
+ def test_from_api_repr_with_encryption(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "query": {
+ "query": self.QUERY,
+ "destinationEncryptionConfiguration": {
+ "kmsKeyName": self.KMS_KEY_NAME
+ },
+ }
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_from_api_repr_with_dml_stats(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {"query": {"query": self.QUERY}},
+ "statistics": {
+ "query": {
+ "dmlStats": {"insertedRowCount": "15", "updatedRowCount": "2"},
+ },
+ },
+ }
+ klass = self._get_target_class()
+
+ job = klass.from_api_repr(RESOURCE, client=client)
+
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_from_api_repr_with_transaction_info(self):
+ self._setUpConstants()
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = {
+ "id": self.JOB_ID,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {"query": {"query": self.QUERY}},
+ "statistics": {"transactionInfo": {"transactionId": "1a2b-3c4d"}},
+ }
+ klass = self._get_target_class()
+
+ job = klass.from_api_repr(RESOURCE, client=client)
+
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_from_api_repr_w_properties(self):
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import SchemaUpdateOption
+ from google.cloud.bigquery.job import WriteDisposition
+
+ client = _make_client(project=self.PROJECT)
+ RESOURCE = self._make_resource()
+ query_config = RESOURCE["configuration"]["query"]
+ query_config["createDisposition"] = CreateDisposition.CREATE_IF_NEEDED
+ query_config["writeDisposition"] = WriteDisposition.WRITE_TRUNCATE
+ query_config["destinationTable"] = {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.DESTINATION_TABLE,
+ }
+ query_config["schemaUpdateOptions"] = [SchemaUpdateOption.ALLOW_FIELD_ADDITION]
+ klass = self._get_target_class()
+ job = klass.from_api_repr(RESOURCE, client=client)
+ self.assertIs(job._client, client)
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_cancelled(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ job._properties["status"] = {
+ "state": "DONE",
+ "errorResult": {"reason": "stopped"},
+ }
+
+ self.assertTrue(job.cancelled())
+
+ def test_query_plan(self):
+ from google.cloud._helpers import _RFC3339_MICROS
+ from google.cloud.bigquery.job import QueryPlanEntry
+ from google.cloud.bigquery.job import QueryPlanEntryStep
+
+ plan_entries = [
+ {
+ "name": "NAME",
+ "id": "1234",
+ "inputStages": ["88", "101"],
+ "startMs": "1522540800000",
+ "endMs": "1522540804000",
+ "parallelInputs": "1000",
+ "completedParallelInputs": "5",
+ "waitMsAvg": "33",
+ "waitMsMax": "400",
+ "waitRatioAvg": 2.71828,
+ "waitRatioMax": 3.14159,
+ "readMsAvg": "45",
+ "readMsMax": "90",
+ "readRatioAvg": 1.41421,
+ "readRatioMax": 1.73205,
+ "computeMsAvg": "55",
+ "computeMsMax": "99",
+ "computeRatioAvg": 0.69315,
+ "computeRatioMax": 1.09861,
+ "writeMsAvg": "203",
+ "writeMsMax": "340",
+ "writeRatioAvg": 3.32193,
+ "writeRatioMax": 2.30258,
+ "recordsRead": "100",
+ "recordsWritten": "1",
+ "status": "STATUS",
+ "shuffleOutputBytes": "1024",
+ "shuffleOutputBytesSpilled": "1",
+ "steps": [{"kind": "KIND", "substeps": ["SUBSTEP1", "SUBSTEP2"]}],
+ }
+ ]
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertEqual(job.query_plan, [])
+
+ statistics = job._properties["statistics"] = {}
+ self.assertEqual(job.query_plan, [])
+
+ query_stats = statistics["query"] = {}
+ self.assertEqual(job.query_plan, [])
+
+ query_stats["queryPlan"] = plan_entries
+
+ self.assertEqual(len(job.query_plan), len(plan_entries))
+ for found, expected in zip(job.query_plan, plan_entries):
+ self.assertIsInstance(found, QueryPlanEntry)
+ self.assertEqual(found.name, expected["name"])
+ self.assertEqual(found.entry_id, expected["id"])
+ self.assertEqual(len(found.input_stages), len(expected["inputStages"]))
+ for f_id in found.input_stages:
+ self.assertIn(f_id, [int(e) for e in expected["inputStages"]])
+ self.assertEqual(
+ found.start.strftime(_RFC3339_MICROS), "2018-04-01T00:00:00.000000Z"
+ )
+ self.assertEqual(
+ found.end.strftime(_RFC3339_MICROS), "2018-04-01T00:00:04.000000Z"
+ )
+ self.assertEqual(found.parallel_inputs, int(expected["parallelInputs"]))
+ self.assertEqual(
+ found.completed_parallel_inputs,
+ int(expected["completedParallelInputs"]),
+ )
+ self.assertEqual(found.wait_ms_avg, int(expected["waitMsAvg"]))
+ self.assertEqual(found.wait_ms_max, int(expected["waitMsMax"]))
+ self.assertEqual(found.wait_ratio_avg, expected["waitRatioAvg"])
+ self.assertEqual(found.wait_ratio_max, expected["waitRatioMax"])
+ self.assertEqual(found.read_ms_avg, int(expected["readMsAvg"]))
+ self.assertEqual(found.read_ms_max, int(expected["readMsMax"]))
+ self.assertEqual(found.read_ratio_avg, expected["readRatioAvg"])
+ self.assertEqual(found.read_ratio_max, expected["readRatioMax"])
+ self.assertEqual(found.compute_ms_avg, int(expected["computeMsAvg"]))
+ self.assertEqual(found.compute_ms_max, int(expected["computeMsMax"]))
+ self.assertEqual(found.compute_ratio_avg, expected["computeRatioAvg"])
+ self.assertEqual(found.compute_ratio_max, expected["computeRatioMax"])
+ self.assertEqual(found.write_ms_avg, int(expected["writeMsAvg"]))
+ self.assertEqual(found.write_ms_max, int(expected["writeMsMax"]))
+ self.assertEqual(found.write_ratio_avg, expected["writeRatioAvg"])
+ self.assertEqual(found.write_ratio_max, expected["writeRatioMax"])
+ self.assertEqual(found.records_read, int(expected["recordsRead"]))
+ self.assertEqual(found.records_written, int(expected["recordsWritten"]))
+ self.assertEqual(found.status, expected["status"])
+ self.assertEqual(
+ found.shuffle_output_bytes, int(expected["shuffleOutputBytes"])
+ )
+ self.assertEqual(
+ found.shuffle_output_bytes_spilled,
+ int(expected["shuffleOutputBytesSpilled"]),
+ )
+
+ self.assertEqual(len(found.steps), len(expected["steps"]))
+ for f_step, e_step in zip(found.steps, expected["steps"]):
+ self.assertIsInstance(f_step, QueryPlanEntryStep)
+ self.assertEqual(f_step.kind, e_step["kind"])
+ self.assertEqual(f_step.substeps, e_step["substeps"])
+
+ def test_total_bytes_processed(self):
+ total_bytes = 1234
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.total_bytes_processed)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.total_bytes_processed)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.total_bytes_processed)
+
+ query_stats["totalBytesProcessed"] = str(total_bytes)
+ self.assertEqual(job.total_bytes_processed, total_bytes)
+
+ def test_total_bytes_billed(self):
+ total_bytes = 1234
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.total_bytes_billed)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.total_bytes_billed)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.total_bytes_billed)
+
+ query_stats["totalBytesBilled"] = str(total_bytes)
+ self.assertEqual(job.total_bytes_billed, total_bytes)
+
+ def test_billing_tier(self):
+ billing_tier = 1
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.billing_tier)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.billing_tier)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.billing_tier)
+
+ query_stats["billingTier"] = billing_tier
+ self.assertEqual(job.billing_tier, billing_tier)
+
+ def test_cache_hit(self):
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.cache_hit)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.cache_hit)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.cache_hit)
+
+ query_stats["cacheHit"] = True
+ self.assertTrue(job.cache_hit)
+
+ def test_ddl_operation_performed(self):
+ op = "SKIP"
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.ddl_operation_performed)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.ddl_operation_performed)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.ddl_operation_performed)
+
+ query_stats["ddlOperationPerformed"] = op
+ self.assertEqual(job.ddl_operation_performed, op)
+
+ def test_ddl_target_routine(self):
+ from google.cloud.bigquery.routine import RoutineReference
+
+ ref_routine = {
+ "projectId": self.PROJECT,
+ "datasetId": "ddl_ds",
+ "routineId": "targetroutine",
+ }
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.ddl_target_routine)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.ddl_target_routine)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.ddl_target_routine)
+
+ query_stats["ddlTargetRoutine"] = ref_routine
+ self.assertIsInstance(job.ddl_target_routine, RoutineReference)
+ self.assertEqual(job.ddl_target_routine.routine_id, "targetroutine")
+ self.assertEqual(job.ddl_target_routine.dataset_id, "ddl_ds")
+ self.assertEqual(job.ddl_target_routine.project, self.PROJECT)
+
+ def test_ddl_target_table(self):
+ from google.cloud.bigquery.table import TableReference
+
+ ref_table = {
+ "projectId": self.PROJECT,
+ "datasetId": "ddl_ds",
+ "tableId": "targettable",
+ }
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.ddl_target_table)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.ddl_target_table)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.ddl_target_table)
+
+ query_stats["ddlTargetTable"] = ref_table
+ self.assertIsInstance(job.ddl_target_table, TableReference)
+ self.assertEqual(job.ddl_target_table.table_id, "targettable")
+ self.assertEqual(job.ddl_target_table.dataset_id, "ddl_ds")
+ self.assertEqual(job.ddl_target_table.project, self.PROJECT)
+
+ def test_num_dml_affected_rows(self):
+ num_rows = 1234
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.num_dml_affected_rows)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.num_dml_affected_rows)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.num_dml_affected_rows)
+
+ query_stats["numDmlAffectedRows"] = str(num_rows)
+ self.assertEqual(job.num_dml_affected_rows, num_rows)
+
+ def test_slot_millis(self):
+ millis = 1234
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.slot_millis)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.slot_millis)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.slot_millis)
+
+ query_stats["totalSlotMs"] = millis
+ self.assertEqual(job.slot_millis, millis)
+
+ def test_statement_type(self):
+ statement_type = "SELECT"
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.statement_type)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.statement_type)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.statement_type)
+
+ query_stats["statementType"] = statement_type
+ self.assertEqual(job.statement_type, statement_type)
+
+ def test_referenced_tables(self):
+ from google.cloud.bigquery.table import TableReference
+
+ ref_tables_resource = [
+ {"projectId": self.PROJECT, "datasetId": "dataset", "tableId": "local1"},
+ {"projectId": self.PROJECT, "datasetId": "dataset", "tableId": "local2"},
+ {
+ "projectId": "other-project-123",
+ "datasetId": "other-dataset",
+ "tableId": "other-table",
+ },
+ ]
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertEqual(job.referenced_tables, [])
+
+ statistics = job._properties["statistics"] = {}
+ self.assertEqual(job.referenced_tables, [])
+
+ query_stats = statistics["query"] = {}
+ self.assertEqual(job.referenced_tables, [])
+
+ query_stats["referencedTables"] = ref_tables_resource
+
+ local1, local2, remote = job.referenced_tables
+
+ self.assertIsInstance(local1, TableReference)
+ self.assertEqual(local1.table_id, "local1")
+ self.assertEqual(local1.dataset_id, "dataset")
+ self.assertEqual(local1.project, self.PROJECT)
+
+ self.assertIsInstance(local2, TableReference)
+ self.assertEqual(local2.table_id, "local2")
+ self.assertEqual(local2.dataset_id, "dataset")
+ self.assertEqual(local2.project, self.PROJECT)
+
+ self.assertIsInstance(remote, TableReference)
+ self.assertEqual(remote.table_id, "other-table")
+ self.assertEqual(remote.dataset_id, "other-dataset")
+ self.assertEqual(remote.project, "other-project-123")
+
+ def test_timeline(self):
+ timeline_resource = [
+ {
+ "elapsedMs": 1,
+ "activeUnits": 22,
+ "pendingUnits": 33,
+ "completedUnits": 44,
+ "totalSlotMs": 101,
+ }
+ ]
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertEqual(job.timeline, [])
+
+ statistics = job._properties["statistics"] = {}
+ self.assertEqual(job.timeline, [])
+
+ query_stats = statistics["query"] = {}
+ self.assertEqual(job.timeline, [])
+
+ query_stats["timeline"] = timeline_resource
+
+ self.assertEqual(len(job.timeline), len(timeline_resource))
+ self.assertEqual(job.timeline[0].elapsed_ms, 1)
+ self.assertEqual(job.timeline[0].active_units, 22)
+ self.assertEqual(job.timeline[0].pending_units, 33)
+ self.assertEqual(job.timeline[0].completed_units, 44)
+ self.assertEqual(job.timeline[0].slot_millis, 101)
+
+ def test_undeclared_query_parameters(self):
+ from google.cloud.bigquery.query import ArrayQueryParameter
+ from google.cloud.bigquery.query import ScalarQueryParameter
+ from google.cloud.bigquery.query import StructQueryParameter
+
+ undeclared = [
+ {
+ "name": "my_scalar",
+ "parameterType": {"type": "STRING"},
+ "parameterValue": {"value": "value"},
+ },
+ {
+ "name": "my_array",
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ "parameterValue": {
+ "arrayValues": [{"value": "1066"}, {"value": "1745"}]
+ },
+ },
+ {
+ "name": "my_struct",
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [{"name": "count", "type": {"type": "INT64"}}],
+ },
+ "parameterValue": {"structValues": {"count": {"value": "123"}}},
+ },
+ ]
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertEqual(job.undeclared_query_parameters, [])
+
+ statistics = job._properties["statistics"] = {}
+ self.assertEqual(job.undeclared_query_parameters, [])
+
+ query_stats = statistics["query"] = {}
+ self.assertEqual(job.undeclared_query_parameters, [])
+
+ query_stats["undeclaredQueryParameters"] = undeclared
+
+ scalar, array, struct = job.undeclared_query_parameters
+
+ self.assertIsInstance(scalar, ScalarQueryParameter)
+ self.assertEqual(scalar.name, "my_scalar")
+ self.assertEqual(scalar.type_, "STRING")
+ self.assertEqual(scalar.value, "value")
+
+ self.assertIsInstance(array, ArrayQueryParameter)
+ self.assertEqual(array.name, "my_array")
+ self.assertEqual(array.array_type, "INT64")
+ self.assertEqual(array.values, [1066, 1745])
+
+ self.assertIsInstance(struct, StructQueryParameter)
+ self.assertEqual(struct.name, "my_struct")
+ self.assertEqual(struct.struct_types, {"count": "INT64"})
+ self.assertEqual(struct.struct_values, {"count": 123})
+
+ def test_estimated_bytes_processed(self):
+ est_bytes = 123456
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ self.assertIsNone(job.estimated_bytes_processed)
+
+ statistics = job._properties["statistics"] = {}
+ self.assertIsNone(job.estimated_bytes_processed)
+
+ query_stats = statistics["query"] = {}
+ self.assertIsNone(job.estimated_bytes_processed)
+
+ query_stats["estimatedBytesProcessed"] = str(est_bytes)
+ self.assertEqual(job.estimated_bytes_processed, est_bytes)
+
+ def test_bi_engine_stats(self):
+ from google.cloud.bigquery.job.query import BiEngineStats
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ assert job.bi_engine_stats is None
+
+ statistics = job._properties["statistics"] = {}
+ assert job.bi_engine_stats is None
+
+ query_stats = statistics["query"] = {}
+ assert job.bi_engine_stats is None
+
+ query_stats["biEngineStatistics"] = {"biEngineMode": "FULL"}
+ assert isinstance(job.bi_engine_stats, BiEngineStats)
+ assert job.bi_engine_stats.mode == "FULL"
+
+ def test_dml_stats(self):
+ from google.cloud.bigquery.job.query import DmlStats
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ assert job.dml_stats is None
+
+ statistics = job._properties["statistics"] = {}
+ assert job.dml_stats is None
+
+ query_stats = statistics["query"] = {}
+ assert job.dml_stats is None
+
+ query_stats["dmlStats"] = {"insertedRowCount": "35"}
+ assert isinstance(job.dml_stats, DmlStats)
+ assert job.dml_stats.inserted_row_count == 35
+
+ def test_search_stats(self):
+ from google.cloud.bigquery.job.query import SearchStats
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ assert job.search_stats is None
+
+ statistics = job._properties["statistics"] = {}
+ assert job.search_stats is None
+
+ query_stats = statistics["query"] = {}
+ assert job.search_stats is None
+
+ query_stats["searchStatistics"] = {
+ "indexUsageMode": "INDEX_USAGE_MODE_UNSPECIFIED",
+ "indexUnusedReasons": [],
+ }
+ # job.search_stats is a daisy-chain of calls and gets:
+ # job.search_stats << job._job_statistics << job._properties
+ assert isinstance(job.search_stats, SearchStats)
+ assert job.search_stats.mode == "INDEX_USAGE_MODE_UNSPECIFIED"
+
+ def test_reload_query_results_uses_transport_timeout(self):
+ conn = make_connection({})
+ client = _make_client(self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ job._transport_timeout = 123
+
+ job._reload_query_results()
+
+ query_results_path = f"/projects/{self.PROJECT}/queries/{self.JOB_ID}"
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=query_results_path,
+ query_params={"maxResults": 0},
+ timeout=123,
+ )
+
+ def test_result_reloads_job_state_until_done(self):
+ """Verify that result() doesn't return until state == 'DONE'.
+
+ This test verifies correctness for a possible sequence of API responses
+ that might cause internal customer issue b/332850329.
+ """
+ from google.cloud.bigquery.table import RowIterator
+
+ query_resource = {
+ "jobComplete": False,
+ "jobReference": {
+ "projectId": self.PROJECT,
+ "jobId": self.JOB_ID,
+ "location": "EU",
+ },
+ }
+ query_resource_done = {
+ "jobComplete": True,
+ "jobReference": {
+ "projectId": self.PROJECT,
+ "jobId": self.JOB_ID,
+ "location": "EU",
+ },
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "totalRows": "2",
+ "queryId": "abc-def",
+ }
+ job_resource = self._make_resource(started=True, location="EU")
+ job_resource_done = self._make_resource(started=True, ended=True, location="EU")
+ job_resource_done["configuration"]["query"]["destinationTable"] = {
+ "projectId": "dest-project",
+ "datasetId": "dest_dataset",
+ "tableId": "dest_table",
+ }
+ query_page_resource = {
+ # Explicitly set totalRows to be different from the initial
+ # response to test update during iteration.
+ "totalRows": "1",
+ "pageToken": None,
+ "rows": [{"f": [{"v": "abc"}]}],
+ }
+ conn = make_connection(
+ # QueryJob.result() makes a pair of jobs.get & jobs.getQueryResults
+ # REST API calls each iteration to determine if the job has finished
+ # or not.
+ #
+ # jobs.get (https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get)
+ # is necessary to make sure the job has really finished via
+ # `Job.status.state == "DONE"` and to get necessary properties for
+ # `RowIterator` like the destination table.
+ #
+ # jobs.getQueryResults
+ # (https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults)
+ # with maxResults == 0 is technically optional,
+ # but it hangs up to 10 seconds until the job has finished. This
+ # makes sure we can know when the query has finished as close as
+ # possible to when the query finishes. It also gets properties
+ # necessary for `RowIterator` that isn't available on the job
+ # resource such as the schema
+ # (https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults#body.GetQueryResultsResponse.FIELDS.schema)
+ # of the results.
+ job_resource,
+ query_resource,
+ # The query wasn't finished in the last call to jobs.get, so try
+ # again with a call to both jobs.get & jobs.getQueryResults.
+ job_resource,
+ query_resource_done,
+ # Even though, the previous jobs.getQueryResults response says
+ # the job is complete, we haven't downloaded the full job status
+ # yet.
+ #
+ # Important: per internal issue 332850329, this reponse has
+ # `Job.status.state = "RUNNING"`. This ensures we are protected
+ # against possible eventual consistency issues where
+ # `jobs.getQueryResults` says jobComplete == True, but our next
+ # call to `jobs.get` still doesn't have
+ # `Job.status.state == "DONE"`.
+ job_resource,
+ # Try again until `Job.status.state == "DONE"`.
+ #
+ # Note: the call to `jobs.getQueryResults` is missing here as
+ # an optimization. We already received a "completed" response, so
+ # we won't learn anything new by calling that API again.
+ job_resource,
+ job_resource_done,
+ # When we iterate over the `RowIterator` we return from
+ # `QueryJob.result()`, we make additional calls to
+ # `jobs.getQueryResults` but this time allowing the actual rows
+ # to be returned as well.
+ query_page_resource,
+ )
+ client = _make_client(self.PROJECT, connection=conn)
+ job = self._get_target_class().from_api_repr(job_resource, client)
+
+ result = job.result()
+
+ self.assertIsInstance(result, RowIterator)
+ self.assertEqual(result.total_rows, 2)
+ rows = list(result)
+ self.assertEqual(len(rows), 1)
+ self.assertEqual(rows[0].col1, "abc")
+ self.assertEqual(result.job_id, self.JOB_ID)
+ self.assertEqual(result.location, "EU")
+ self.assertEqual(result.project, self.PROJECT)
+ self.assertEqual(result.query_id, "abc-def")
+ # Test that the total_rows property has changed during iteration, based
+ # on the response from tabledata.list.
+ self.assertEqual(result.total_rows, 1)
+
+ query_results_path = f"/projects/{self.PROJECT}/queries/{self.JOB_ID}"
+ query_results_call = mock.call(
+ method="GET",
+ path=query_results_path,
+ query_params={"maxResults": 0, "location": "EU"},
+ timeout=None,
+ )
+ reload_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}",
+ query_params={"projection": "full", "location": "EU"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ query_page_call = mock.call(
+ method="GET",
+ path=query_results_path,
+ query_params={
+ "fields": _LIST_ROWS_FROM_QUERY_RESULTS_FIELDS,
+ "location": "EU",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ timeout=None,
+ )
+ # Ensure that we actually made the expected API calls in the sequence
+ # we thought above at the make_connection() call above.
+ #
+ # Note: The responses from jobs.get and jobs.getQueryResults can be
+ # deceptively similar, so this check ensures we actually made the
+ # requests we expected.
+ conn.api_request.assert_has_calls(
+ [
+ # jobs.get & jobs.getQueryResults because the job just started.
+ reload_call,
+ query_results_call,
+ # jobs.get & jobs.getQueryResults because the query is still
+ # running.
+ reload_call,
+ query_results_call,
+ # We got a jobComplete response from the most recent call to
+ # jobs.getQueryResults, so now call jobs.get until we get
+ # `Jobs.status.state == "DONE"`. This tests a fix for internal
+ # issue b/332850329.
+ reload_call,
+ reload_call,
+ reload_call,
+ # jobs.getQueryResults without `maxResults` set to download
+ # the rows as we iterate over the `RowIterator`.
+ query_page_call,
+ ]
+ )
+
+ def test_result_dry_run(self):
+ job_resource = self._make_resource(started=True, location="EU")
+ job_resource["configuration"]["dryRun"] = True
+ conn = make_connection()
+ client = _make_client(self.PROJECT, connection=conn)
+ job = self._get_target_class().from_api_repr(job_resource, client)
+
+ result = job.result()
+
+ calls = conn.api_request.mock_calls
+ self.assertIsInstance(result, _EmptyRowIterator)
+ self.assertEqual(calls, [])
+ self.assertEqual(result.location, "EU")
+ self.assertEqual(result.project, self.PROJECT)
+ # Intentionally omit job_id and query_id since this doesn't
+ # actually correspond to a finished query job.
+ self.assertIsNone(result.job_id)
+ self.assertIsNone(result.query_id)
+
+ # If the job doesn't exist, create the job first. Issue:
+ # https://github.com/googleapis/python-bigquery/issues/1940
+ def test_result_begin_job_if_not_exist(self):
+ begun_resource = self._make_resource()
+ query_running_resource = {
+ "jobComplete": True,
+ "jobReference": {
+ "projectId": self.PROJECT,
+ "jobId": self.JOB_ID,
+ "location": "US",
+ },
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "status": {"state": "RUNNING"},
+ }
+ query_done_resource = {
+ "jobComplete": True,
+ "jobReference": {
+ "projectId": self.PROJECT,
+ "jobId": self.JOB_ID,
+ "location": "US",
+ },
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "status": {"state": "DONE"},
+ }
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(
+ begun_resource,
+ query_running_resource,
+ query_done_resource,
+ done_resource,
+ )
+ client = _make_client(project=self.PROJECT, connection=connection)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ job._properties["jobReference"]["location"] = "US"
+
+ job.result()
+
+ create_job_call = mock.call(
+ method="POST",
+ path=f"/projects/{self.PROJECT}/jobs",
+ data={
+ "jobReference": {
+ "jobId": self.JOB_ID,
+ "projectId": self.PROJECT,
+ "location": "US",
+ },
+ "configuration": {
+ "query": {"useLegacySql": False, "query": self.QUERY},
+ },
+ },
+ timeout=None,
+ )
+ reload_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}",
+ query_params={"projection": "full", "location": "US"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ get_query_results_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/queries/{self.JOB_ID}",
+ query_params={
+ "maxResults": 0,
+ "location": "US",
+ },
+ timeout=None,
+ )
+
+ connection.api_request.assert_has_calls(
+ [
+ # Make sure we start a job that hasn't started yet. See:
+ # https://github.com/googleapis/python-bigquery/issues/1940
+ create_job_call,
+ reload_call,
+ get_query_results_call,
+ reload_call,
+ ]
+ )
+
+ def test_result_with_done_job_calls_get_query_results(self):
+ query_resource_done = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "totalRows": "1",
+ }
+ job_resource = self._make_resource(started=True, ended=True, location="EU")
+ job_resource["configuration"]["query"]["destinationTable"] = {
+ "projectId": "dest-project",
+ "datasetId": "dest_dataset",
+ "tableId": "dest_table",
+ }
+ results_page_resource = {
+ "totalRows": "1",
+ "pageToken": None,
+ "rows": [{"f": [{"v": "abc"}]}],
+ }
+ conn = make_connection(query_resource_done, results_page_resource)
+ client = _make_client(self.PROJECT, connection=conn)
+ job = self._get_target_class().from_api_repr(job_resource, client)
+
+ result = job.result()
+
+ rows = list(result)
+ self.assertEqual(len(rows), 1)
+ self.assertEqual(rows[0].col1, "abc")
+
+ query_results_path = f"/projects/{self.PROJECT}/queries/{self.JOB_ID}"
+ query_results_call = mock.call(
+ method="GET",
+ path=query_results_path,
+ query_params={"maxResults": 0, "location": "EU"},
+ timeout=None,
+ )
+ query_results_page_call = mock.call(
+ method="GET",
+ path=query_results_path,
+ query_params={
+ "fields": _LIST_ROWS_FROM_QUERY_RESULTS_FIELDS,
+ "location": "EU",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ timeout=None,
+ )
+ conn.api_request.assert_has_calls([query_results_call, query_results_page_call])
+ assert conn.api_request.call_count == 2
+
+ def test_result_with_done_jobs_query_response_doesnt_call_get_query_results(self):
+ """With a done result from jobs.query, we don't need to call
+ jobs.getQueryResults to wait for the query to finish.
+
+ jobs.get is still called because there is an assumption that after
+ QueryJob.result(), all job metadata is available locally.
+ """
+ job_resource = self._make_resource(started=True, ended=True, location="EU")
+ conn = make_connection(job_resource)
+ client = _make_client(self.PROJECT, connection=conn)
+ query_resource_done = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "rows": [{"f": [{"v": "abc"}]}],
+ "totalRows": "1",
+ }
+ job = google.cloud.bigquery._job_helpers._to_query_job(
+ client,
+ "SELECT 'abc' AS col1",
+ request_config=None,
+ query_response=query_resource_done,
+ )
+
+ # We want job.result() to refresh the job state, so the conversion is
+ # always "PENDING", even if the job is finished.
+ assert job.state == "PENDING"
+
+ result = job.result()
+
+ rows = list(result)
+ self.assertEqual(len(rows), 1)
+ self.assertEqual(rows[0].col1, "abc")
+ job_path = f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}"
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=job_path,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+
+ def test_result_with_none_timeout(self):
+ # Verifies that with an intentional None timeout, get job uses None
+ # instead of the default timeout.
+ job_resource = self._make_resource(started=True, ended=True, location="EU")
+ conn = make_connection(job_resource)
+ client = _make_client(self.PROJECT, connection=conn)
+ query_resource_done = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "rows": [{"f": [{"v": "abc"}]}],
+ "totalRows": "1",
+ }
+ job = google.cloud.bigquery._job_helpers._to_query_job(
+ client,
+ "SELECT 'abc' AS col1",
+ request_config=None,
+ query_response=query_resource_done,
+ )
+
+ job.result(timeout=None)
+
+ job_path = f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}"
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=job_path,
+ query_params={"projection": "full"},
+ timeout=None,
+ )
+
+ def test_result_with_max_results(self):
+ from google.cloud.bigquery.table import RowIterator
+
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "totalRows": "10",
+ "pageToken": "first-page-token",
+ "rows": [
+ {"f": [{"v": "abc"}]},
+ {"f": [{"v": "def"}]},
+ {"f": [{"v": "ghi"}]},
+ {"f": [{"v": "jkl"}]},
+ {"f": [{"v": "mno"}]},
+ {"f": [{"v": "pqr"}]},
+ # Pretend these are very large rows, so the API doesn't return
+ # all of the rows we asked for in the first response.
+ ],
+ }
+ query_page_resource = {
+ "totalRows": "10",
+ "pageToken": None,
+ "rows": [
+ {"f": [{"v": "stu"}]},
+ {"f": [{"v": "vwx"}]},
+ {"f": [{"v": "yz0"}]},
+ ],
+ }
+ job_resource_running = self._make_resource(
+ started=True, ended=False, location="US"
+ )
+ job_resource_done = self._make_resource(started=True, ended=True, location="US")
+ conn = make_connection(job_resource_done, query_resource, query_page_resource)
+ client = _make_client(self.PROJECT, connection=conn)
+ job = self._get_target_class().from_api_repr(job_resource_running, client)
+
+ max_results = 9
+ result = job.result(max_results=max_results)
+
+ self.assertIsInstance(result, RowIterator)
+ self.assertEqual(result.total_rows, 10)
+
+ rows = list(result)
+
+ self.assertEqual(len(rows), 9)
+ jobs_get_path = f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}"
+ jobs_get_call = mock.call(
+ method="GET",
+ path=jobs_get_path,
+ query_params={"projection": "full", "location": "US"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ query_results_path = f"/projects/{self.PROJECT}/queries/{self.JOB_ID}"
+ query_page_waiting_call = mock.call(
+ method="GET",
+ path=query_results_path,
+ query_params={
+ # Waiting for the results should set maxResults and cache the
+ # first page if page_size is set. This allows customers to
+ # more finely tune when we fallback to the BQ Storage API.
+ # See internal issue: 344008814.
+ "maxResults": max_results,
+ "formatOptions.useInt64Timestamp": True,
+ "location": "US",
+ },
+ timeout=None,
+ )
+ query_page_2_call = mock.call(
+ timeout=None,
+ method="GET",
+ path=query_results_path,
+ query_params={
+ "pageToken": "first-page-token",
+ "maxResults": 3,
+ "fields": _LIST_ROWS_FROM_QUERY_RESULTS_FIELDS,
+ "location": "US",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ )
+ # Waiting for the results should set maxResults and cache the
+ # first page if max_results is set. This allows customers to
+ # more finely tune when we fallback to the BQ Storage API.
+ # See internal issue: 344008814.
+ conn.api_request.assert_has_calls(
+ [jobs_get_call, query_page_waiting_call, query_page_2_call]
+ )
+
+ def test_result_w_custom_retry(self):
+ from google.cloud.bigquery.table import RowIterator
+
+ query_resource = {
+ "jobComplete": False,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ }
+ query_resource_done = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "totalRows": "2",
+ }
+ job_resource = self._make_resource(started=True, location="asia-northeast1")
+ job_resource_done = self._make_resource(
+ started=True, ended=True, location="asia-northeast1"
+ )
+ job_resource_done["configuration"]["query"]["destinationTable"] = {
+ "projectId": "dest-project",
+ "datasetId": "dest_dataset",
+ "tableId": "dest_table",
+ }
+
+ connection = make_connection(
+ # Also, for each API request, raise an exception that we know can
+ # be retried. Because of this, for each iteration we do:
+ # jobs.get (x2) & jobs.getQueryResults (x2)
+ exceptions.NotFound("not normally retriable"),
+ job_resource,
+ exceptions.NotFound("not normally retriable"),
+ query_resource,
+ # Query still not done, repeat both.
+ exceptions.NotFound("not normally retriable"),
+ job_resource,
+ exceptions.NotFound("not normally retriable"),
+ query_resource,
+ exceptions.NotFound("not normally retriable"),
+ # Query still not done, repeat both.
+ job_resource_done,
+ exceptions.NotFound("not normally retriable"),
+ query_resource_done,
+ # Query finished!
+ )
+ client = _make_client(self.PROJECT, connection=connection)
+ job = self._get_target_class().from_api_repr(job_resource, client)
+
+ custom_predicate = mock.Mock()
+ custom_predicate.return_value = True
+ custom_retry = google.api_core.retry.Retry(
+ initial=0.001,
+ maximum=0.001,
+ multiplier=1.0,
+ deadline=0.1,
+ predicate=custom_predicate,
+ )
+
+ self.assertIsInstance(job.result(retry=custom_retry), RowIterator)
+ query_results_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/queries/{self.JOB_ID}",
+ query_params={"maxResults": 0, "location": "asia-northeast1"},
+ # TODO(tswast): Why do we end up setting timeout to
+ # google.cloud.bigquery.client._MIN_GET_QUERY_RESULTS_TIMEOUT in
+ # some cases but not others?
+ timeout=mock.ANY,
+ )
+ reload_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}",
+ query_params={"projection": "full", "location": "asia-northeast1"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+
+ connection.api_request.assert_has_calls(
+ [
+ # See make_connection() call above for explanation of the
+ # expected API calls.
+ #
+ # Query not done.
+ reload_call,
+ reload_call,
+ query_results_call,
+ query_results_call,
+ # Query still not done.
+ reload_call,
+ reload_call,
+ query_results_call,
+ query_results_call,
+ # Query done!
+ reload_call,
+ reload_call,
+ query_results_call,
+ query_results_call,
+ ]
+ )
+
+ def test_result_w_empty_schema(self):
+ from google.cloud.bigquery.table import _EmptyRowIterator
+
+ # Destination table may have no schema for some DDL and DML queries.
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": []},
+ "queryId": "xyz-abc",
+ }
+ connection = make_connection(query_resource, query_resource)
+ client = _make_client(self.PROJECT, connection=connection)
+ resource = self._make_resource(ended=True, location="asia-northeast1")
+ job = self._get_target_class().from_api_repr(resource, client)
+
+ result = job.result()
+
+ self.assertIsInstance(result, _EmptyRowIterator)
+ self.assertEqual(list(result), [])
+ self.assertEqual(result.project, self.PROJECT)
+ self.assertEqual(result.job_id, self.JOB_ID)
+ self.assertEqual(result.location, "asia-northeast1")
+ self.assertEqual(result.query_id, "xyz-abc")
+
+ def test_result_w_timeout_doesnt_raise(self):
+ import google.cloud.bigquery.client
+
+ begun_resource = self._make_resource()
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ }
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(begun_resource, query_resource, done_resource)
+ client = _make_client(project=self.PROJECT, connection=connection)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ job._properties["jobReference"]["location"] = "US"
+ job._properties["status"] = {"state": "RUNNING"}
+
+ with freezegun.freeze_time("1970-01-01 00:00:00", tick=False):
+ job.result(
+ # Test that fractional seconds are supported, but use a timeout
+ # that is representable as a floating point without rounding
+ # errors since it can be represented exactly in base 2. In this
+ # case 1.125 is 9 / 8, which is a fraction with a power of 2 in
+ # the denominator.
+ timeout=1.125,
+ )
+
+ reload_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}",
+ query_params={"projection": "full", "location": "US"},
+ timeout=1.125,
+ )
+ get_query_results_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/queries/{self.JOB_ID}",
+ query_params={
+ "maxResults": 0,
+ "location": "US",
+ },
+ timeout=google.cloud.bigquery.client._MIN_GET_QUERY_RESULTS_TIMEOUT,
+ )
+ connection.api_request.assert_has_calls(
+ [
+ reload_call,
+ get_query_results_call,
+ reload_call,
+ ]
+ )
+
+ def test_result_w_timeout_raises_concurrent_futures_timeout(self):
+ import google.cloud.bigquery.client
+
+ begun_resource = self._make_resource()
+ begun_resource["jobReference"]["location"] = "US"
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ }
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(begun_resource, query_resource, done_resource)
+ client = _make_client(project=self.PROJECT, connection=connection)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ job._properties["jobReference"]["location"] = "US"
+ job._properties["status"] = {"state": "RUNNING"}
+
+ with freezegun.freeze_time(
+ "1970-01-01 00:00:00", auto_tick_seconds=1.0
+ ), self.assertRaises(concurrent.futures.TimeoutError):
+ job.result(timeout=1.125)
+
+ reload_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}",
+ query_params={"projection": "full", "location": "US"},
+ timeout=1.125,
+ )
+ get_query_results_call = mock.call(
+ method="GET",
+ path=f"/projects/{self.PROJECT}/queries/{self.JOB_ID}",
+ query_params={
+ "maxResults": 0,
+ "location": "US",
+ },
+ timeout=google.cloud.bigquery.client._MIN_GET_QUERY_RESULTS_TIMEOUT,
+ )
+ connection.api_request.assert_has_calls(
+ [
+ reload_call,
+ get_query_results_call,
+ # Timeout before we can reload with the final job state.
+ ]
+ )
+
+ def test_result_w_page_size(self):
+ # Arrange
+ query_results_resource = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "totalRows": "10",
+ "rows": [
+ {"f": [{"v": "row1"}]},
+ {"f": [{"v": "row2"}]},
+ {"f": [{"v": "row3"}]},
+ {"f": [{"v": "row4"}]},
+ {"f": [{"v": "row5"}]},
+ {"f": [{"v": "row6"}]},
+ {"f": [{"v": "row7"}]},
+ {"f": [{"v": "row8"}]},
+ {"f": [{"v": "row9"}]},
+ ],
+ "pageToken": "first-page-token",
+ }
+ job_resource_running = self._make_resource(
+ started=True, ended=False, location="US"
+ )
+ job_resource_done = self._make_resource(started=True, ended=True, location="US")
+ destination_table = {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ }
+ q_config = job_resource_done["configuration"]["query"]
+ q_config["destinationTable"] = destination_table
+ query_page_resource_2 = {"totalRows": 10, "rows": [{"f": [{"v": "row10"}]}]}
+ conn = make_connection(
+ job_resource_running,
+ query_results_resource,
+ job_resource_done,
+ query_page_resource_2,
+ )
+ client = _make_client(self.PROJECT, connection=conn)
+ job = self._get_target_class().from_api_repr(job_resource_running, client)
+
+ # Act
+ result = job.result(page_size=9)
+
+ # Assert
+ actual_rows = list(result)
+ self.assertEqual(len(actual_rows), 10)
+
+ jobs_get_path = f"/projects/{self.PROJECT}/jobs/{self.JOB_ID}"
+ jobs_get_call = mock.call(
+ method="GET",
+ path=jobs_get_path,
+ query_params={"projection": "full", "location": "US"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ query_results_path = f"/projects/{self.PROJECT}/queries/{self.JOB_ID}"
+ query_page_waiting_call = mock.call(
+ method="GET",
+ path=query_results_path,
+ query_params={
+ # Waiting for the results should set maxResults and cache the
+ # first page if page_size is set. This allows customers to
+ # more finely tune when we fallback to the BQ Storage API.
+ # See internal issue: 344008814.
+ "maxResults": 9,
+ "location": "US",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ timeout=None,
+ )
+ query_page_2_call = mock.call(
+ timeout=None,
+ method="GET",
+ path=query_results_path,
+ query_params={
+ "pageToken": "first-page-token",
+ "maxResults": 9,
+ "fields": _LIST_ROWS_FROM_QUERY_RESULTS_FIELDS,
+ "location": "US",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ )
+ conn.api_request.assert_has_calls(
+ [jobs_get_call, query_page_waiting_call, jobs_get_call, query_page_2_call]
+ )
+
+ def test_result_with_start_index(self):
+ from google.cloud.bigquery.table import RowIterator
+
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ "totalRows": "5",
+ }
+ tabledata_resource = {
+ "totalRows": "5",
+ "pageToken": None,
+ "rows": [
+ {"f": [{"v": "abc"}]},
+ {"f": [{"v": "def"}]},
+ {"f": [{"v": "ghi"}]},
+ {"f": [{"v": "jkl"}]},
+ ],
+ }
+ connection = make_connection(query_resource, tabledata_resource)
+ client = _make_client(self.PROJECT, connection=connection)
+ resource = self._make_resource(ended=True)
+ job = self._get_target_class().from_api_repr(resource, client)
+
+ start_index = 1
+
+ # Verifies that page_size isn't overwritten by max_results when
+ # start_index is not None. See
+ # https://github.com/googleapis/python-bigquery/issues/1950
+ page_size = 10
+ max_results = 100
+
+ result = job.result(
+ page_size=page_size,
+ max_results=max_results,
+ start_index=start_index,
+ )
+
+ self.assertIsInstance(result, RowIterator)
+ self.assertEqual(result.total_rows, 5)
+
+ rows = list(result)
+
+ self.assertEqual(len(rows), 4)
+ self.assertEqual(len(connection.api_request.call_args_list), 2)
+ tabledata_list_request = connection.api_request.call_args_list[1]
+ self.assertEqual(
+ tabledata_list_request[1]["query_params"]["startIndex"], start_index
+ )
+ self.assertEqual(
+ tabledata_list_request[1]["query_params"]["maxResults"], page_size
+ )
+
+ def test_result_error(self):
+ from google.cloud import exceptions
+
+ query = textwrap.dedent(
+ """
+ SELECT foo, bar
+ FROM table_baz
+ WHERE foo == bar"""
+ )
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, query, client)
+ error_result = {
+ "debugInfo": "DEBUG",
+ "location": "LOCATION",
+ "message": "MESSAGE",
+ "reason": "invalid",
+ }
+ job._properties["status"] = {
+ "errorResult": error_result,
+ "errors": [error_result],
+ "state": "DONE",
+ }
+ job._query_results = google.cloud.bigquery.query._QueryResults.from_api_repr(
+ {"jobComplete": True, "jobReference": job._properties["jobReference"]}
+ )
+ job._set_future_result()
+
+ with self.assertRaises(exceptions.GoogleCloudError) as exc_info:
+ job.result()
+
+ self.assertIsInstance(exc_info.exception, exceptions.GoogleCloudError)
+ self.assertEqual(exc_info.exception.code, http.client.BAD_REQUEST)
+
+ exc_job_instance = getattr(exc_info.exception, "query_job", None)
+ self.assertIs(exc_job_instance, job)
+
+ # Query text could contain sensitive information, so it must not be
+ # included in logs / exception representation.
+ full_text = str(exc_info.exception)
+ assert job.job_id in full_text
+ assert "Query Job SQL Follows" not in full_text
+
+ # It is useful to have query text available, so it is provided in a
+ # debug_message property.
+ debug_message = exc_info.exception.debug_message
+ assert "Query Job SQL Follows" in debug_message
+ for i, line in enumerate(query.splitlines(), start=1):
+ expected_line = "{}:{}".format(i, line)
+ assert expected_line in debug_message
+
+ def test_result_transport_timeout_error(self):
+ query = textwrap.dedent(
+ """
+ SELECT foo, bar
+ FROM table_baz
+ WHERE foo == bar"""
+ )
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, query, client)
+ call_api_patch = mock.patch(
+ "google.cloud.bigquery.client.Client._call_api",
+ autospec=True,
+ side_effect=requests.exceptions.Timeout("Server response took too long."),
+ )
+
+ # Make sure that timeout errors get rebranded to concurrent futures timeout.
+ with call_api_patch, self.assertRaises(concurrent.futures.TimeoutError):
+ job.result(timeout=1)
+
+ def test_no_schema(self):
+ client = _make_client(project=self.PROJECT)
+ resource = {}
+ klass = self._get_target_class()
+ job = klass.from_api_repr(resource, client=client)
+ assert job.schema is None
+
+ def test_schema(self):
+ client = _make_client(project=self.PROJECT)
+ resource = {
+ "statistics": {
+ "query": {
+ "schema": {
+ "fields": [
+ {"mode": "NULLABLE", "name": "bool_col", "type": "BOOLEAN"},
+ {
+ "mode": "NULLABLE",
+ "name": "string_col",
+ "type": "STRING",
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "timestamp_col",
+ "type": "TIMESTAMP",
+ },
+ ]
+ },
+ },
+ },
+ }
+ klass = self._get_target_class()
+ job = klass.from_api_repr(resource, client=client)
+ assert len(job.schema) == 3
+ assert job.schema[0].field_type == "BOOLEAN"
+ assert job.schema[1].field_type == "STRING"
+ assert job.schema[2].field_type == "TIMESTAMP"
+
+ def test__begin_error(self):
+ from google.cloud import exceptions
+
+ query = textwrap.dedent(
+ """
+ SELECT foo, bar
+ FROM table_baz
+ WHERE foo == bar"""
+ )
+
+ client = _make_client(project=self.PROJECT)
+ job = self._make_one(self.JOB_ID, query, client)
+ call_api_patch = mock.patch(
+ "google.cloud.bigquery.client.Client._call_api",
+ autospec=True,
+ side_effect=exceptions.BadRequest("Syntax error in SQL query"),
+ )
+
+ with call_api_patch, self.assertRaises(exceptions.GoogleCloudError) as exc_info:
+ job.result()
+
+ self.assertIsInstance(exc_info.exception, exceptions.GoogleCloudError)
+ self.assertEqual(exc_info.exception.code, http.client.BAD_REQUEST)
+
+ exc_job_instance = getattr(exc_info.exception, "query_job", None)
+ self.assertIs(exc_job_instance, job)
+
+ # Query text could contain sensitive information, so it must not be
+ # included in logs / exception representation.
+ full_text = str(exc_info.exception)
+ assert job.job_id in full_text
+ assert "Query Job SQL Follows" not in full_text
+
+ # It is useful to have query text available, so it is provided in a
+ # debug_message property.
+ debug_message = exc_info.exception.debug_message
+ assert "Query Job SQL Follows" in debug_message
+ for i, line in enumerate(query.splitlines(), start=1):
+ expected_line = "{}:{}".format(i, line)
+ assert expected_line in debug_message
+
+ def test__begin_w_timeout(self):
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource()
+
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin(timeout=7.5)
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "query": {"query": self.QUERY, "useLegacySql": False}
+ },
+ },
+ timeout=7.5,
+ )
+
+ def test_begin_w_bound_client(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.job import QueryJobConfig
+
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ DS_ID = "DATASET"
+ RESOURCE = self._make_resource()
+ # Ensure None for missing server-set props
+ del RESOURCE["statistics"]["creationTime"]
+ del RESOURCE["etag"]
+ del RESOURCE["selfLink"]
+ del RESOURCE["user_email"]
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+
+ config = QueryJobConfig()
+ config.default_dataset = DatasetReference(self.PROJECT, DS_ID)
+ job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ self.assertIsNone(job.default_dataset)
+ self.assertEqual(job.udf_resources, [])
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "query": {
+ "query": self.QUERY,
+ "useLegacySql": False,
+ "defaultDataset": {
+ "projectId": self.PROJECT,
+ "datasetId": DS_ID,
+ },
+ }
+ },
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_alternate_client(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.job import QueryPriority
+ from google.cloud.bigquery.job import SchemaUpdateOption
+ from google.cloud.bigquery.job import WriteDisposition
+
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ TABLE = "TABLE"
+ DS_ID = "DATASET"
+ RESOURCE = self._make_resource(ended=True)
+ QUERY_CONFIGURATION = {
+ "query": self.QUERY,
+ "allowLargeResults": True,
+ "createDisposition": CreateDisposition.CREATE_NEVER,
+ "defaultDataset": {"projectId": self.PROJECT, "datasetId": DS_ID},
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": DS_ID,
+ "tableId": TABLE,
+ },
+ "flattenResults": True,
+ "priority": QueryPriority.INTERACTIVE,
+ "useQueryCache": True,
+ "useLegacySql": True,
+ "writeDisposition": WriteDisposition.WRITE_TRUNCATE,
+ "maximumBillingTier": 4,
+ "maximumBytesBilled": "123456",
+ "schemaUpdateOptions": [SchemaUpdateOption.ALLOW_FIELD_RELAXATION],
+ }
+ RESOURCE["configuration"]["query"] = QUERY_CONFIGURATION
+ RESOURCE["configuration"]["dryRun"] = True
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection(RESOURCE)
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ dataset_ref = DatasetReference(self.PROJECT, DS_ID)
+ table_ref = dataset_ref.table(TABLE)
+
+ config = QueryJobConfig()
+ config.allow_large_results = True
+ config.create_disposition = CreateDisposition.CREATE_NEVER
+ config.default_dataset = dataset_ref
+ config.destination = table_ref
+ config.dry_run = True
+ config.flatten_results = True
+ config.maximum_billing_tier = 4
+ config.priority = QueryPriority.INTERACTIVE
+ config.use_legacy_sql = True
+ config.use_query_cache = True
+ config.write_disposition = WriteDisposition.WRITE_TRUNCATE
+ config.maximum_bytes_billed = 123456
+ config.schema_update_options = [SchemaUpdateOption.ALLOW_FIELD_RELAXATION]
+ job = self._make_one(self.JOB_ID, self.QUERY, client1, job_config=config)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin(client=client2)
+
+ final_attributes.assert_called_with({"path": PATH}, client2, job)
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {"dryRun": True, "query": QUERY_CONFIGURATION},
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_udf(self):
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.query import UDFResource
+
+ RESOURCE_URI = "gs://some-bucket/js/lib.js"
+ INLINE_UDF_CODE = 'var someCode = "here";'
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource()
+ # Ensure None for missing server-set props
+ del RESOURCE["statistics"]["creationTime"]
+ del RESOURCE["etag"]
+ del RESOURCE["selfLink"]
+ del RESOURCE["user_email"]
+ RESOURCE["configuration"]["query"]["userDefinedFunctionResources"] = [
+ {"resourceUri": RESOURCE_URI},
+ {"inlineCode": INLINE_UDF_CODE},
+ ]
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ udf_resources = [
+ UDFResource("resourceUri", RESOURCE_URI),
+ UDFResource("inlineCode", INLINE_UDF_CODE),
+ ]
+ config = QueryJobConfig()
+ config.udf_resources = udf_resources
+ config.use_legacy_sql = True
+ job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ self.assertEqual(job.udf_resources, udf_resources)
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "query": {
+ "query": self.QUERY,
+ "useLegacySql": True,
+ "userDefinedFunctionResources": [
+ {"resourceUri": RESOURCE_URI},
+ {"inlineCode": INLINE_UDF_CODE},
+ ],
+ }
+ },
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_named_query_parameter(self):
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.query import ScalarQueryParameter
+
+ query_parameters = [ScalarQueryParameter("foo", "INT64", 123)]
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource()
+ # Ensure None for missing server-set props
+ del RESOURCE["statistics"]["creationTime"]
+ del RESOURCE["etag"]
+ del RESOURCE["selfLink"]
+ del RESOURCE["user_email"]
+ config = RESOURCE["configuration"]["query"]
+ config["parameterMode"] = "NAMED"
+ config["queryParameters"] = [
+ {
+ "name": "foo",
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": "123"},
+ }
+ ]
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ jconfig = QueryJobConfig()
+ jconfig.query_parameters = query_parameters
+ job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=jconfig)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ self.assertEqual(job.query_parameters, query_parameters)
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "query": {
+ "query": self.QUERY,
+ "useLegacySql": False,
+ "parameterMode": "NAMED",
+ "queryParameters": config["queryParameters"],
+ }
+ },
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_positional_query_parameter(self):
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.query import ScalarQueryParameter
+
+ query_parameters = [ScalarQueryParameter.positional("INT64", 123)]
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource()
+ # Ensure None for missing server-set props
+ del RESOURCE["statistics"]["creationTime"]
+ del RESOURCE["etag"]
+ del RESOURCE["selfLink"]
+ del RESOURCE["user_email"]
+ config = RESOURCE["configuration"]["query"]
+ config["parameterMode"] = "POSITIONAL"
+ config["queryParameters"] = [
+ {"parameterType": {"type": "INT64"}, "parameterValue": {"value": "123"}}
+ ]
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ jconfig = QueryJobConfig()
+ jconfig.query_parameters = query_parameters
+ job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=jconfig)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ self.assertEqual(job.query_parameters, query_parameters)
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "query": {
+ "query": self.QUERY,
+ "useLegacySql": False,
+ "parameterMode": "POSITIONAL",
+ "queryParameters": config["queryParameters"],
+ }
+ },
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_begin_w_table_defs(self):
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.external_config import ExternalConfig
+ from google.cloud.bigquery.external_config import BigtableColumn
+ from google.cloud.bigquery.external_config import BigtableColumnFamily
+
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource()
+ # Ensure None for missing server-set props
+ del RESOURCE["statistics"]["creationTime"]
+ del RESOURCE["etag"]
+ del RESOURCE["selfLink"]
+ del RESOURCE["user_email"]
+
+ bt_config = ExternalConfig("BIGTABLE")
+ bt_config.ignore_unknown_values = True
+ bt_config.options.read_rowkey_as_string = True
+ cf = BigtableColumnFamily()
+ cf.family_id = "cf"
+ col = BigtableColumn()
+ col.field_name = "fn"
+ cf.columns = [col]
+ bt_config.options.column_families = [cf]
+ BT_CONFIG_RESOURCE = {
+ "sourceFormat": "BIGTABLE",
+ "ignoreUnknownValues": True,
+ "bigtableOptions": {
+ "readRowkeyAsString": True,
+ "columnFamilies": [
+ {"familyId": "cf", "columns": [{"fieldName": "fn"}]}
+ ],
+ },
+ }
+ CSV_CONFIG_RESOURCE = {
+ "sourceFormat": "CSV",
+ "maxBadRecords": 8,
+ "csvOptions": {"allowJaggedRows": True},
+ }
+ csv_config = ExternalConfig("CSV")
+ csv_config.max_bad_records = 8
+ csv_config.options.allow_jagged_rows = True
+ bt_table = "bigtable-table"
+ csv_table = "csv-table"
+ RESOURCE["configuration"]["query"]["tableDefinitions"] = {
+ bt_table: BT_CONFIG_RESOURCE,
+ csv_table: CSV_CONFIG_RESOURCE,
+ }
+ want_resource = copy.deepcopy(RESOURCE)
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ config = QueryJobConfig()
+ config.table_definitions = {bt_table: bt_config, csv_table: csv_config}
+ config.use_legacy_sql = True
+ job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "query": {
+ "query": self.QUERY,
+ "useLegacySql": True,
+ "tableDefinitions": {
+ bt_table: BT_CONFIG_RESOURCE,
+ csv_table: CSV_CONFIG_RESOURCE,
+ },
+ }
+ },
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, want_resource)
+
+ def test_dry_run_query(self):
+ from google.cloud.bigquery.job import QueryJobConfig
+
+ PATH = "/projects/%s/jobs" % (self.PROJECT,)
+ RESOURCE = self._make_resource()
+ # Ensure None for missing server-set props
+ del RESOURCE["statistics"]["creationTime"]
+ del RESOURCE["etag"]
+ del RESOURCE["selfLink"]
+ del RESOURCE["user_email"]
+ RESOURCE["configuration"]["dryRun"] = True
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ config = QueryJobConfig()
+ config.dry_run = True
+ job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job._begin()
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+ self.assertEqual(job.udf_resources, [])
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=PATH,
+ data={
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "configuration": {
+ "query": {"query": self.QUERY, "useLegacySql": False},
+ "dryRun": True,
+ },
+ },
+ timeout=None,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_exists_miss_w_bound_client(self):
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ conn = make_connection()
+ client = _make_client(project=self.PROJECT, connection=conn)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertFalse(job.exists())
+
+ final_attributes.assert_called_with({"path": PATH}, client, job)
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
+ )
+
+ def test_exists_hit_w_alternate_client(self):
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection({})
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ job = self._make_one(self.JOB_ID, self.QUERY, client1)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ self.assertTrue(job.exists(client=client2))
+
+ final_attributes.assert_called_with({"path": PATH}, client2, job)
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
+ )
+
+ def test_reload_w_bound_client(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.job import QueryJobConfig
+
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ DS_ID = "DATASET"
+ DEST_TABLE = "dest_table"
+ RESOURCE = self._make_resource()
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ dataset_ref = DatasetReference(self.PROJECT, DS_ID)
+ table_ref = dataset_ref.table(DEST_TABLE)
+ config = QueryJobConfig()
+ config.destination = table_ref
+ job = self._make_one(self.JOB_ID, None, client, job_config=config)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.reload()
+
+ final_attributes.assert_called_with(
+ {
+ "path": PATH,
+ "job_id": self.JOB_ID,
+ "location": None,
+ },
+ client,
+ None,
+ )
+
+ self.assertNotEqual(job.destination, table_ref)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=PATH,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_reload_w_alternate_client(self):
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ DS_ID = "DATASET"
+ DEST_TABLE = "dest_table"
+ RESOURCE = self._make_resource()
+ q_config = RESOURCE["configuration"]["query"]
+ q_config["destinationTable"] = {
+ "projectId": self.PROJECT,
+ "datasetId": DS_ID,
+ "tableId": DEST_TABLE,
+ }
+ conn1 = make_connection()
+ client1 = _make_client(project=self.PROJECT, connection=conn1)
+ conn2 = make_connection(RESOURCE)
+ client2 = _make_client(project=self.PROJECT, connection=conn2)
+ job = self._make_one(self.JOB_ID, self.QUERY, client1)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.reload(client=client2)
+
+ final_attributes.assert_called_with(
+ {
+ "path": PATH,
+ "job_id": self.JOB_ID,
+ "location": None,
+ },
+ client2,
+ None,
+ )
+
+ conn1.api_request.assert_not_called()
+ conn2.api_request.assert_called_once_with(
+ method="GET",
+ path=PATH,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+ self._verifyResourceProperties(job, RESOURCE)
+
+ def test_reload_w_timeout(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.job import QueryJobConfig
+
+ PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
+ DS_ID = "DATASET"
+ DEST_TABLE = "dest_table"
+ RESOURCE = self._make_resource()
+ conn = make_connection(RESOURCE)
+ client = _make_client(project=self.PROJECT, connection=conn)
+ dataset_ref = DatasetReference(self.PROJECT, DS_ID)
+ table_ref = dataset_ref.table(DEST_TABLE)
+ config = QueryJobConfig()
+ config.destination = table_ref
+ job = self._make_one(self.JOB_ID, None, client, job_config=config)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ job.reload(timeout=4.2)
+ final_attributes.assert_called_with(
+ {
+ "path": PATH,
+ "job_id": self.JOB_ID,
+ "location": None,
+ },
+ client,
+ None,
+ )
+
+ self.assertNotEqual(job.destination, table_ref)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=PATH,
+ query_params={"projection": "full"},
+ timeout=4.2,
+ )
+
+ def test_iter(self):
+ begun_resource = self._make_resource()
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
+ "totalRows": "0",
+ "schema": {"fields": [{"name": "col1", "type": "STRING"}]},
+ }
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(begun_resource, query_resource, done_resource)
+ client = _make_client(project=self.PROJECT, connection=connection)
+ job = self._make_one(self.JOB_ID, self.QUERY, client)
+ job._properties["status"] = {"state": "RUNNING"}
+
+ self.assertIsInstance(iter(job), types.GeneratorType)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/test_query_config.py b/testbed/googleapis__python-bigquery/tests/unit/job/test_query_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..7818236f4846b7f46943cde9a749df56c41687c5
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/test_query_config.py
@@ -0,0 +1,332 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from .helpers import _Base
+
+
+class TestQueryJobConfig(_Base):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import QueryJobConfig
+
+ return QueryJobConfig
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor(self):
+ config = self._make_one()
+ self.assertEqual(config._properties, {"query": {}})
+
+ def test_ctor_w_none(self):
+ config = self._make_one()
+ config.default_dataset = None
+ config.destination = None
+ self.assertIsNone(config.default_dataset)
+ self.assertIsNone(config.destination)
+
+ def test_ctor_w_properties(self):
+ config = self._get_target_class()(use_query_cache=False, use_legacy_sql=True)
+
+ self.assertFalse(config.use_query_cache)
+ self.assertTrue(config.use_legacy_sql)
+
+ def test_ctor_w_string_default_dataset(self):
+ from google.cloud.bigquery import dataset
+
+ default_dataset = "default-proj.default_dset"
+ config = self._get_target_class()(default_dataset=default_dataset)
+ expected = dataset.DatasetReference.from_string(default_dataset)
+ self.assertEqual(config.default_dataset, expected)
+
+ def test_ctor_w_string_destinaton(self):
+ from google.cloud.bigquery import table
+
+ destination = "dest-proj.dest_dset.dest_tbl"
+ config = self._get_target_class()(destination=destination)
+ expected = table.TableReference.from_string(destination)
+ self.assertEqual(config.destination, expected)
+
+ def test_default_dataset_w_string(self):
+ from google.cloud.bigquery import dataset
+
+ default_dataset = "default-proj.default_dset"
+ config = self._make_one()
+ config.default_dataset = default_dataset
+ expected = dataset.DatasetReference.from_string(default_dataset)
+ self.assertEqual(config.default_dataset, expected)
+
+ def test_default_dataset_w_dataset(self):
+ from google.cloud.bigquery import dataset
+
+ default_dataset = "default-proj.default_dset"
+ expected = dataset.DatasetReference.from_string(default_dataset)
+ config = self._make_one()
+ config.default_dataset = dataset.Dataset(expected)
+ self.assertEqual(config.default_dataset, expected)
+
+ def test_destinaton_w_string(self):
+ from google.cloud.bigquery import table
+
+ destination = "dest-proj.dest_dset.dest_tbl"
+ config = self._make_one()
+ config.destination = destination
+ expected = table.TableReference.from_string(destination)
+ self.assertEqual(config.destination, expected)
+
+ def test_range_partitioning_w_none(self):
+ object_under_test = self._get_target_class()()
+ assert object_under_test.range_partitioning is None
+
+ def test_range_partitioning_w_value(self):
+ object_under_test = self._get_target_class()()
+ object_under_test._properties["query"]["rangePartitioning"] = {
+ "field": "column_one",
+ "range": {"start": 1, "end": 1000, "interval": 10},
+ }
+ object_under_test.range_partitioning.field == "column_one"
+ object_under_test.range_partitioning.range_.start == 1
+ object_under_test.range_partitioning.range_.end == 1000
+ object_under_test.range_partitioning.range_.interval == 10
+
+ def test_range_partitioning_setter(self):
+ from google.cloud.bigquery.table import PartitionRange
+ from google.cloud.bigquery.table import RangePartitioning
+
+ object_under_test = self._get_target_class()()
+ object_under_test.range_partitioning = RangePartitioning(
+ field="column_one", range_=PartitionRange(start=1, end=1000, interval=10)
+ )
+ object_under_test.range_partitioning.field == "column_one"
+ object_under_test.range_partitioning.range_.start == 1
+ object_under_test.range_partitioning.range_.end == 1000
+ object_under_test.range_partitioning.range_.interval == 10
+
+ def test_range_partitioning_setter_w_none(self):
+ object_under_test = self._get_target_class()()
+ object_under_test.range_partitioning = None
+ assert object_under_test.range_partitioning is None
+
+ def test_range_partitioning_setter_w_wrong_type(self):
+ object_under_test = self._get_target_class()()
+ with pytest.raises(ValueError, match="RangePartitioning"):
+ object_under_test.range_partitioning = object()
+
+ def test_time_partitioning(self):
+ from google.cloud.bigquery import table
+
+ time_partitioning = table.TimePartitioning(
+ type_=table.TimePartitioningType.DAY, field="name"
+ )
+ config = self._make_one()
+ config.time_partitioning = time_partitioning
+ # TimePartitioning should be configurable after assigning
+ time_partitioning.expiration_ms = 10000
+
+ self.assertEqual(config.time_partitioning.type_, table.TimePartitioningType.DAY)
+ self.assertEqual(config.time_partitioning.field, "name")
+ self.assertEqual(config.time_partitioning.expiration_ms, 10000)
+
+ config.time_partitioning = None
+ self.assertIsNone(config.time_partitioning)
+
+ def test_clustering_fields(self):
+ fields = ["email", "postal_code"]
+ config = self._get_target_class()()
+ config.clustering_fields = fields
+ self.assertEqual(config.clustering_fields, fields)
+
+ config.clustering_fields = None
+ self.assertIsNone(config.clustering_fields)
+
+ def test_connection_properties(self):
+ from google.cloud.bigquery.job.query import ConnectionProperty
+
+ config = self._get_target_class()()
+ self.assertEqual(len(config.connection_properties), 0)
+
+ session_id = ConnectionProperty("session_id", "abcd")
+ time_zone = ConnectionProperty("time_zone", "America/Chicago")
+ config.connection_properties = [session_id, time_zone]
+ self.assertEqual(len(config.connection_properties), 2)
+ self.assertEqual(config.connection_properties[0].key, "session_id")
+ self.assertEqual(config.connection_properties[0].value, "abcd")
+ self.assertEqual(config.connection_properties[1].key, "time_zone")
+ self.assertEqual(config.connection_properties[1].value, "America/Chicago")
+
+ def test_create_session(self):
+ config = self._get_target_class()()
+ self.assertIsNone(config.create_session)
+ config.create_session = True
+ self.assertTrue(config.create_session)
+
+ def test_from_api_repr_empty(self):
+ klass = self._get_target_class()
+ config = klass.from_api_repr({})
+ self.assertIsNone(config.dry_run)
+ self.assertIsNone(config.use_legacy_sql)
+ self.assertIsNone(config.default_dataset)
+ self.assertIsNone(config.destination)
+ self.assertIsNone(config.destination_encryption_configuration)
+
+ def test_from_api_repr_normal(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ resource = {
+ "query": {
+ "useLegacySql": True,
+ "query": "no property for me",
+ "defaultDataset": {
+ "projectId": "someproject",
+ "datasetId": "somedataset",
+ },
+ "someNewProperty": "I should be saved, too.",
+ },
+ "dryRun": True,
+ }
+ klass = self._get_target_class()
+
+ config = klass.from_api_repr(resource)
+
+ self.assertTrue(config.use_legacy_sql)
+ self.assertEqual(
+ config.default_dataset, DatasetReference("someproject", "somedataset")
+ )
+ self.assertTrue(config.dry_run)
+ # Make sure unknown properties propagate.
+ self.assertEqual(config._properties["query"]["query"], "no property for me")
+ self.assertEqual(
+ config._properties["query"]["someNewProperty"], "I should be saved, too."
+ )
+
+ def test_to_api_repr_normal(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ config = self._make_one()
+ config.use_legacy_sql = True
+ config.default_dataset = DatasetReference("someproject", "somedataset")
+ config.dry_run = False
+ config._properties["someNewProperty"] = "Woohoo, alpha stuff."
+
+ resource = config.to_api_repr()
+
+ self.assertFalse(resource["dryRun"])
+ self.assertTrue(resource["query"]["useLegacySql"])
+ self.assertEqual(
+ resource["query"]["defaultDataset"]["projectId"], "someproject"
+ )
+ self.assertEqual(
+ resource["query"]["defaultDataset"]["datasetId"], "somedataset"
+ )
+ # Make sure unknown properties propagate.
+ self.assertEqual(resource["someNewProperty"], "Woohoo, alpha stuff.")
+
+ def test_to_api_repr_with_encryption(self):
+ from google.cloud.bigquery.encryption_configuration import (
+ EncryptionConfiguration,
+ )
+
+ config = self._make_one()
+ config.destination_encryption_configuration = EncryptionConfiguration(
+ kms_key_name=self.KMS_KEY_NAME
+ )
+ resource = config.to_api_repr()
+ self.assertEqual(
+ resource,
+ {
+ "query": {
+ "destinationEncryptionConfiguration": {
+ "kmsKeyName": self.KMS_KEY_NAME
+ }
+ }
+ },
+ )
+
+ def test_to_api_repr_with_encryption_none(self):
+ config = self._make_one()
+ config.destination_encryption_configuration = None
+ resource = config.to_api_repr()
+ self.assertEqual(
+ resource, {"query": {"destinationEncryptionConfiguration": None}}
+ )
+
+ def test_from_api_repr_with_encryption(self):
+ resource = {
+ "query": {
+ "destinationEncryptionConfiguration": {"kmsKeyName": self.KMS_KEY_NAME}
+ }
+ }
+ klass = self._get_target_class()
+ config = klass.from_api_repr(resource)
+ self.assertEqual(
+ config.destination_encryption_configuration.kms_key_name, self.KMS_KEY_NAME
+ )
+
+ def test_to_api_repr_with_script_options_none(self):
+ config = self._make_one()
+ config.script_options = None
+
+ resource = config.to_api_repr()
+
+ self.assertEqual(resource, {"query": {"scriptOptions": None}})
+ self.assertIsNone(config.script_options)
+
+ def test_to_api_repr_with_script_options(self):
+ from google.cloud.bigquery import KeyResultStatementKind
+ from google.cloud.bigquery import ScriptOptions
+
+ config = self._make_one()
+ config.script_options = ScriptOptions(
+ statement_timeout_ms=60,
+ statement_byte_budget=999,
+ key_result_statement=KeyResultStatementKind.FIRST_SELECT,
+ )
+
+ resource = config.to_api_repr()
+
+ expected_script_options_repr = {
+ "statementTimeoutMs": "60",
+ "statementByteBudget": "999",
+ "keyResultStatement": KeyResultStatementKind.FIRST_SELECT,
+ }
+ self.assertEqual(
+ resource, {"query": {"scriptOptions": expected_script_options_repr}}
+ )
+
+ def test_from_api_repr_with_script_options(self):
+ from google.cloud.bigquery import KeyResultStatementKind
+ from google.cloud.bigquery import ScriptOptions
+
+ resource = {
+ "query": {
+ "scriptOptions": {
+ "statementTimeoutMs": "42",
+ "statementByteBudget": "123",
+ "keyResultStatement": KeyResultStatementKind.LAST,
+ },
+ },
+ }
+ klass = self._get_target_class()
+
+ config = klass.from_api_repr(resource)
+
+ script_options = config.script_options
+ self.assertIsInstance(script_options, ScriptOptions)
+ self.assertEqual(script_options.statement_timeout_ms, 42)
+ self.assertEqual(script_options.statement_byte_budget, 123)
+ self.assertEqual(
+ script_options.key_result_statement, KeyResultStatementKind.LAST
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/test_query_pandas.py b/testbed/googleapis__python-bigquery/tests/unit/job/test_query_pandas.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a5d92dbd761878bb8602148bec8016c7a9ceb2c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/test_query_pandas.py
@@ -0,0 +1,1021 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import concurrent.futures
+import copy
+import json
+from unittest import mock
+
+import pytest
+
+from ..helpers import make_connection
+from .helpers import _make_client
+from .helpers import _make_job_resource
+
+try:
+ from google.cloud import bigquery_storage
+ import google.cloud.bigquery_storage_v1.reader
+ import google.cloud.bigquery_storage_v1.services.big_query_read.client
+except (ImportError, AttributeError):
+ bigquery_storage = None
+
+try:
+ import shapely
+except (ImportError, AttributeError):
+ shapely = None
+try:
+ import geopandas
+except (ImportError, AttributeError):
+ geopandas = None
+try:
+ import tqdm
+except (ImportError, AttributeError):
+ tqdm = None
+
+try:
+ import pyarrow
+ import pyarrow.types
+except ImportError:
+ pyarrow = None
+
+pandas = pytest.importorskip("pandas")
+
+
+@pytest.fixture
+def table_read_options_kwarg():
+ read_options = bigquery_storage.ReadSession.TableReadOptions(
+ arrow_serialization_options=bigquery_storage.ArrowSerializationOptions(
+ buffer_compression=bigquery_storage.ArrowSerializationOptions.CompressionCodec.LZ4_FRAME
+ )
+ )
+ return {"read_options": read_options}
+
+
+@pytest.mark.parametrize(
+ "query,expected",
+ (
+ (None, False),
+ ("", False),
+ ("select name, age from table", False),
+ ("select name, age from table LIMIT 10;", False),
+ ("select name, age from table order by other_column;", True),
+ ("Select name, age From table Order By other_column", True),
+ ("SELECT name, age FROM table ORDER BY other_column;", True),
+ ("select name, age from table order\nby other_column", True),
+ ("Select name, age From table Order\nBy other_column;", True),
+ ("SELECT name, age FROM table ORDER\nBY other_column", True),
+ ("SelecT name, age froM table OrdeR \n\t BY other_column;", True),
+ ),
+)
+def test__contains_order_by(query, expected):
+ from google.cloud.bigquery import job as mut
+
+ if expected:
+ assert mut._contains_order_by(query)
+ else:
+ assert not mut._contains_order_by(query)
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+@pytest.mark.parametrize(
+ "query",
+ (
+ "select name, age from table order by other_column;",
+ "Select name, age From table Order By other_column;",
+ "SELECT name, age FROM table ORDER BY other_column;",
+ "select name, age from table order\nby other_column;",
+ "Select name, age From table Order\nBy other_column;",
+ "SELECT name, age FROM table ORDER\nBY other_column;",
+ "SelecT name, age froM table OrdeR \n\t BY other_column;",
+ ),
+)
+def test_to_dataframe_bqstorage_preserve_order(query, table_read_options_kwarg):
+ from google.cloud.bigquery.job import QueryJob as target_class
+
+ job_resource = _make_job_resource(
+ project_id="test-project", job_type="query", ended=True
+ )
+ job_resource["configuration"]["query"]["query"] = query
+ job_resource["status"] = {"state": "DONE"}
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": {"projectId": "test-project", "jobId": "test-job"},
+ "schema": {
+ "fields": [
+ {"name": "name", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "age", "type": "INTEGER", "mode": "NULLABLE"},
+ ]
+ },
+ "totalRows": "4",
+ }
+ stream_id = "projects/1/locations/2/sessions/3/streams/4"
+ name_array = pyarrow.array(
+ ["John", "Paul", "George", "Ringo"], type=pyarrow.string()
+ )
+ age_array = pyarrow.array([17, 24, 21, 15], type=pyarrow.int64())
+ arrow_schema = pyarrow.schema(
+ [
+ pyarrow.field("name", pyarrow.string(), True),
+ pyarrow.field("age", pyarrow.int64(), True),
+ ]
+ )
+ record_batch = pyarrow.RecordBatch.from_arrays(
+ [name_array, age_array], schema=arrow_schema
+ )
+ connection = make_connection(query_resource)
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(job_resource, client)
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ session = bigquery_storage.types.ReadSession()
+ session.arrow_schema.serialized_schema = arrow_schema.serialize().to_pybytes()
+ session.streams = [bigquery_storage.types.ReadStream(name=stream_id)]
+ reader = mock.create_autospec(
+ google.cloud.bigquery_storage_v1.reader.ReadRowsStream, instance=True
+ )
+ row_iterable = mock.create_autospec(
+ google.cloud.bigquery_storage_v1.reader.ReadRowsIterable, instance=True
+ )
+ page = mock.create_autospec(
+ google.cloud.bigquery_storage_v1.reader.ReadRowsPage, instance=True
+ )
+ page.to_arrow.return_value = record_batch
+ type(row_iterable).pages = mock.PropertyMock(return_value=[page])
+ reader.rows.return_value = row_iterable
+ bqstorage_client = mock.create_autospec(
+ bigquery_storage.BigQueryReadClient, instance=True
+ )
+ bqstorage_client.create_read_session.return_value = session
+ bqstorage_client.read_rows.return_value = reader
+
+ dataframe = job.to_dataframe(bqstorage_client=bqstorage_client)
+
+ assert len(dataframe) == 4
+ destination_table = (
+ "projects/{projectId}/datasets/{datasetId}/tables/{tableId}".format(
+ **job_resource["configuration"]["query"]["destinationTable"]
+ )
+ )
+ expected_session = bigquery_storage.ReadSession(
+ table=destination_table,
+ data_format=bigquery_storage.DataFormat.ARROW,
+ **table_read_options_kwarg,
+ )
+ bqstorage_client.create_read_session.assert_called_once_with(
+ parent="projects/test-project",
+ read_session=expected_session,
+ max_stream_count=1, # Use a single stream to preserve row order.
+ )
+
+
+@pytest.mark.skipif(pyarrow is None, reason="Requires `pyarrow`")
+def test_to_arrow():
+ from google.cloud.bigquery.job import QueryJob as target_class
+
+ begun_resource = _make_job_resource(job_type="query")
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": begun_resource["jobReference"],
+ "totalRows": "4",
+ "schema": {
+ "fields": [
+ {
+ "name": "spouse_1",
+ "type": "RECORD",
+ "fields": [
+ {"name": "name", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "age", "type": "INTEGER", "mode": "NULLABLE"},
+ ],
+ },
+ {
+ "name": "spouse_2",
+ "type": "RECORD",
+ "fields": [
+ {"name": "name", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "age", "type": "INTEGER", "mode": "NULLABLE"},
+ ],
+ },
+ ]
+ },
+ }
+ tabledata_resource = {
+ "rows": [
+ {
+ "f": [
+ {"v": {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]}},
+ {"v": {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]}},
+ ]
+ },
+ {
+ "f": [
+ {"v": {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]}},
+ {"v": {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]}},
+ ]
+ },
+ ]
+ }
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(
+ begun_resource, query_resource, done_resource, tabledata_resource
+ )
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+
+ tbl = job.to_arrow(create_bqstorage_client=False)
+
+ assert isinstance(tbl, pyarrow.Table)
+ assert tbl.num_rows == 2
+
+ # Check the schema.
+ assert tbl.schema[0].name == "spouse_1"
+ assert tbl.schema[0].type[0].name == "name"
+ assert tbl.schema[0].type[1].name == "age"
+ assert pyarrow.types.is_struct(tbl.schema[0].type)
+ assert pyarrow.types.is_string(tbl.schema[0].type[0].type)
+ assert pyarrow.types.is_int64(tbl.schema[0].type[1].type)
+ assert tbl.schema[1].name == "spouse_2"
+ assert tbl.schema[1].type[0].name == "name"
+ assert tbl.schema[1].type[1].name == "age"
+ assert pyarrow.types.is_struct(tbl.schema[1].type)
+ assert pyarrow.types.is_string(tbl.schema[1].type[0].type)
+ assert pyarrow.types.is_int64(tbl.schema[1].type[1].type)
+
+ # Check the data.
+ tbl_data = tbl.to_pydict()
+ spouse_1 = tbl_data["spouse_1"]
+ assert spouse_1 == [
+ {"name": "Phred Phlyntstone", "age": 32},
+ {"name": "Bhettye Rhubble", "age": 27},
+ ]
+ spouse_2 = tbl_data["spouse_2"]
+ assert spouse_2 == [
+ {"name": "Wylma Phlyntstone", "age": 29},
+ {"name": "Bharney Rhubble", "age": 33},
+ ]
+
+
+@pytest.mark.skipif(pyarrow is None, reason="Requires `pyarrow`")
+def test_to_arrow_max_results_no_progress_bar():
+ from google.cloud.bigquery import table
+ from google.cloud.bigquery.job import QueryJob as target_class
+ from google.cloud.bigquery.schema import SchemaField
+
+ connection = make_connection({})
+ client = _make_client(connection=connection)
+ begun_resource = _make_job_resource(job_type="query")
+ job = target_class.from_api_repr(begun_resource, client)
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = table.RowIterator(client, api_request, path, schema)
+
+ result_patch = mock.patch(
+ "google.cloud.bigquery.job.QueryJob.result",
+ return_value=row_iterator,
+ )
+ with result_patch as result_patch_tqdm:
+ tbl = job.to_arrow(create_bqstorage_client=False, max_results=123)
+
+ result_patch_tqdm.assert_called_once_with(max_results=123)
+
+ assert isinstance(tbl, pyarrow.Table)
+ assert tbl.num_rows == 2
+
+
+@pytest.mark.skipif(pyarrow is None, reason="Requires `pyarrow`")
+@pytest.mark.skipif(tqdm is None, reason="Requires `tqdm`")
+@mock.patch("google.cloud.bigquery._tqdm_helpers.tqdm")
+def test_to_arrow_w_tqdm_w_query_plan(tqdm_mock):
+ from google.cloud.bigquery import table
+ from google.cloud.bigquery.job import QueryJob as target_class
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery._tqdm_helpers import _PROGRESS_BAR_UPDATE_INTERVAL
+
+ begun_resource = _make_job_resource(job_type="query")
+ rows = [
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ ]
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ connection = make_connection({})
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = table.RowIterator(client, api_request, path, schema)
+
+ job._properties["statistics"] = {
+ "query": {
+ "queryPlan": [
+ {"name": "S00: Input", "id": "0", "status": "COMPLETE"},
+ {"name": "S01: Output", "id": "1", "status": "COMPLETE"},
+ ]
+ },
+ }
+ reload_patch = mock.patch(
+ "google.cloud.bigquery.job._AsyncJob.reload", autospec=True
+ )
+ result_patch = mock.patch(
+ "google.cloud.bigquery.job.QueryJob.result",
+ side_effect=[
+ concurrent.futures.TimeoutError,
+ concurrent.futures.TimeoutError,
+ row_iterator,
+ ],
+ )
+ with result_patch as tqdm_mock, reload_patch:
+ tbl = job.to_arrow(progress_bar_type="tqdm", create_bqstorage_client=False)
+
+ assert tqdm_mock.call_count == 3
+ assert isinstance(tbl, pyarrow.Table)
+ assert tbl.num_rows == 2
+ tqdm_mock.assert_called_with(
+ timeout=_PROGRESS_BAR_UPDATE_INTERVAL, max_results=None
+ )
+
+
+@pytest.mark.skipif(pyarrow is None, reason="Requires `pyarrow`")
+@pytest.mark.skipif(tqdm is None, reason="Requires `tqdm`")
+@mock.patch("google.cloud.bigquery._tqdm_helpers.tqdm")
+def test_to_arrow_w_tqdm_w_pending_status(tqdm_mock):
+ from google.cloud.bigquery import table
+ from google.cloud.bigquery.job import QueryJob as target_class
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery._tqdm_helpers import _PROGRESS_BAR_UPDATE_INTERVAL
+
+ begun_resource = _make_job_resource(job_type="query")
+ rows = [
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ ]
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ connection = make_connection({})
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = table.RowIterator(client, api_request, path, schema)
+
+ job._properties["statistics"] = {
+ "query": {
+ "queryPlan": [
+ {"name": "S00: Input", "id": "0", "status": "PENDING"},
+ {"name": "S00: Input", "id": "1", "status": "COMPLETE"},
+ ]
+ },
+ }
+ reload_patch = mock.patch(
+ "google.cloud.bigquery.job._AsyncJob.reload", autospec=True
+ )
+ result_patch = mock.patch(
+ "google.cloud.bigquery.job.QueryJob.result",
+ side_effect=[concurrent.futures.TimeoutError, row_iterator],
+ )
+ with result_patch as tqdm_mock, reload_patch:
+ tbl = job.to_arrow(progress_bar_type="tqdm", create_bqstorage_client=False)
+
+ assert tqdm_mock.call_count == 2
+ assert isinstance(tbl, pyarrow.Table)
+ assert tbl.num_rows == 2
+ tqdm_mock.assert_called_with(
+ timeout=_PROGRESS_BAR_UPDATE_INTERVAL, max_results=None
+ )
+
+
+@pytest.mark.skipif(pyarrow is None, reason="Requires `pyarrow`")
+@pytest.mark.skipif(tqdm is None, reason="Requires `tqdm`")
+@mock.patch("google.cloud.bigquery._tqdm_helpers.tqdm")
+def test_to_arrow_w_tqdm_wo_query_plan(tqdm_mock):
+ from google.cloud.bigquery import table
+ from google.cloud.bigquery.job import QueryJob as target_class
+ from google.cloud.bigquery.schema import SchemaField
+
+ begun_resource = _make_job_resource(job_type="query")
+ rows = [
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ ]
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ connection = make_connection({})
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = table.RowIterator(client, api_request, path, schema)
+
+ reload_patch = mock.patch(
+ "google.cloud.bigquery.job._AsyncJob.reload", autospec=True
+ )
+ result_patch = mock.patch(
+ "google.cloud.bigquery.job.QueryJob.result",
+ side_effect=[concurrent.futures.TimeoutError, row_iterator],
+ )
+ with result_patch as tqdm_mock, reload_patch:
+ tbl = job.to_arrow(progress_bar_type="tqdm", create_bqstorage_client=False)
+
+ assert tqdm_mock.call_count == 2
+ assert isinstance(tbl, pyarrow.Table)
+ assert tbl.num_rows == 2
+ tqdm_mock.assert_called()
+
+
+def _make_job(schema=(), rows=()):
+ from google.cloud.bigquery.job import QueryJob as target_class
+
+ begun_resource = _make_job_resource(job_type="query")
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": begun_resource["jobReference"],
+ "totalRows": str(len(rows)),
+ "schema": {
+ "fields": [
+ dict(name=field[0], type=field[1], mode=field[2]) for field in schema
+ ]
+ },
+ }
+ tabledata_resource = {"rows": [{"f": [{"v": v} for v in row]} for row in rows]}
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(
+ begun_resource, query_resource, done_resource, tabledata_resource
+ )
+ client = _make_client(connection=connection)
+ return target_class.from_api_repr(begun_resource, client)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_to_dataframe():
+ job = _make_job(
+ (("name", "STRING", "NULLABLE"), ("age", "INTEGER", "NULLABLE")),
+ (
+ ("Phred Phlyntstone", "32"),
+ ("Bharney Rhubble", "33"),
+ ("Wylma Phlyntstone", "29"),
+ ("Bhettye Rhubble", "27"),
+ ),
+ )
+ df = job.to_dataframe(create_bqstorage_client=False)
+
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 4 # verify the number of rows
+ assert list(df) == ["name", "age"] # verify the column names
+
+
+def test_to_dataframe_ddl_query():
+ from google.cloud.bigquery.job import QueryJob as target_class
+
+ # Destination table may have no schema for some DDL and DML queries.
+ resource = _make_job_resource(job_type="query", ended=True)
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": resource["jobReference"],
+ "schema": {"fields": []},
+ }
+ connection = make_connection(query_resource)
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(resource, client)
+
+ df = job.to_dataframe()
+
+ assert len(df) == 0
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_to_dataframe_bqstorage(table_read_options_kwarg):
+ from google.cloud.bigquery.job import QueryJob as target_class
+
+ resource = _make_job_resource(job_type="query", ended=True)
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": resource["jobReference"],
+ "totalRows": "4",
+ "schema": {
+ "fields": [
+ {"name": "name", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "age", "type": "INTEGER", "mode": "NULLABLE"},
+ ]
+ },
+ }
+ stream_id = "projects/1/locations/2/sessions/3/streams/4"
+ name_array = pyarrow.array(
+ ["John", "Paul", "George", "Ringo"], type=pyarrow.string()
+ )
+ age_array = pyarrow.array([17, 24, 21, 15], type=pyarrow.int64())
+ arrow_schema = pyarrow.schema(
+ [
+ pyarrow.field("name", pyarrow.string(), True),
+ pyarrow.field("age", pyarrow.int64(), True),
+ ]
+ )
+ record_batch = pyarrow.RecordBatch.from_arrays(
+ [name_array, age_array], schema=arrow_schema
+ )
+ connection = make_connection(query_resource)
+ client = _make_client(connection=connection, project="bqstorage-billing-project")
+ job = target_class.from_api_repr(resource, client)
+ session = bigquery_storage.types.ReadSession()
+ session.arrow_schema.serialized_schema = arrow_schema.serialize().to_pybytes()
+ session.streams = [bigquery_storage.types.ReadStream(name=stream_id)]
+ reader = mock.create_autospec(
+ google.cloud.bigquery_storage_v1.reader.ReadRowsStream, instance=True
+ )
+ row_iterable = mock.create_autospec(
+ google.cloud.bigquery_storage_v1.reader.ReadRowsIterable, instance=True
+ )
+ page = mock.create_autospec(
+ google.cloud.bigquery_storage_v1.reader.ReadRowsPage, instance=True
+ )
+ page.to_arrow.return_value = record_batch
+ type(row_iterable).pages = mock.PropertyMock(return_value=[page])
+ reader.rows.return_value = row_iterable
+ bqstorage_client = mock.create_autospec(
+ bigquery_storage.BigQueryReadClient, instance=True
+ )
+ bqstorage_client.create_read_session.return_value = session
+ bqstorage_client.read_rows.return_value = reader
+
+ dataframe = job.to_dataframe(bqstorage_client=bqstorage_client)
+
+ assert len(dataframe) == 4
+ destination_table = (
+ "projects/{projectId}/datasets/{datasetId}/tables/{tableId}".format(
+ **resource["configuration"]["query"]["destinationTable"]
+ )
+ )
+ expected_session = bigquery_storage.ReadSession(
+ table=destination_table,
+ data_format=bigquery_storage.DataFormat.ARROW,
+ **table_read_options_kwarg,
+ )
+ bqstorage_client.create_read_session.assert_called_once_with(
+ # The billing project can differ from the data project. Make sure we
+ # are charging to the billing project, not the data project.
+ parent="projects/bqstorage-billing-project",
+ read_session=expected_session,
+ max_stream_count=0, # Use default number of streams for best performance.
+ )
+ bqstorage_client.read_rows.assert_called_once_with(stream_id)
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_to_dataframe_bqstorage_no_pyarrow_compression():
+ from google.cloud.bigquery.job import QueryJob as target_class
+
+ resource = _make_job_resource(job_type="query", ended=True)
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": resource["jobReference"],
+ "totalRows": "4",
+ "schema": {"fields": [{"name": "name", "type": "STRING", "mode": "NULLABLE"}]},
+ }
+ connection = make_connection(query_resource)
+ client = _make_client(connection=connection, project="bqstorage-billing-project")
+ job = target_class.from_api_repr(resource, client)
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ session = bigquery_storage.types.ReadSession()
+ session.avro_schema.schema = json.dumps(
+ {
+ "type": "record",
+ "name": "__root__",
+ "fields": [{"name": "name", "type": ["null", "string"]}],
+ }
+ )
+ bqstorage_client.create_read_session.return_value = session
+
+ with mock.patch(
+ "google.cloud.bigquery._pandas_helpers._ARROW_COMPRESSION_SUPPORT", new=False
+ ):
+ job.to_dataframe(bqstorage_client=bqstorage_client)
+
+ destination_table = (
+ "projects/{projectId}/datasets/{datasetId}/tables/{tableId}".format(
+ **resource["configuration"]["query"]["destinationTable"]
+ )
+ )
+ expected_session = bigquery_storage.ReadSession(
+ table=destination_table,
+ data_format=bigquery_storage.DataFormat.ARROW,
+ )
+ bqstorage_client.create_read_session.assert_called_once_with(
+ # The billing project can differ from the data project. Make sure we
+ # are charging to the billing project, not the data project.
+ parent="projects/bqstorage-billing-project",
+ read_session=expected_session,
+ max_stream_count=0,
+ )
+
+
+@pytest.mark.skipif(
+ pandas.__version__.startswith("2."),
+ reason="pandas 2.0 changes some default dtypes and we haven't update the test to account for those",
+)
+@pytest.mark.skipif(pyarrow is None, reason="Requires `pyarrow`")
+def test_to_dataframe_column_dtypes():
+ from google.cloud.bigquery.job import QueryJob as target_class
+
+ begun_resource = _make_job_resource(job_type="query")
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": begun_resource["jobReference"],
+ "totalRows": "4",
+ "schema": {
+ "fields": [
+ {"name": "start_timestamp", "type": "TIMESTAMP"},
+ {"name": "seconds", "type": "INT64"},
+ {"name": "miles", "type": "FLOAT64"},
+ {"name": "km", "type": "FLOAT64"},
+ {"name": "payment_type", "type": "STRING"},
+ {"name": "complete", "type": "BOOL"},
+ {"name": "date", "type": "DATE"},
+ ]
+ },
+ }
+ row_data = [
+ [
+ "1433836800000000",
+ "420",
+ "1.1",
+ "1.77",
+ "Cto_dataframeash",
+ "true",
+ "1999-12-01",
+ ],
+ ["1387811700000000", "2580", "17.7", "28.5", "Cash", "false", "1953-06-14"],
+ ["1385565300000000", "2280", "4.4", "7.1", "Credit", "true", "1981-11-04"],
+ ]
+ rows = [{"f": [{"v": field} for field in row]} for row in row_data]
+ query_resource["rows"] = rows
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(
+ begun_resource, query_resource, done_resource, query_resource
+ )
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+
+ df = job.to_dataframe(dtypes={"km": "float16"}, create_bqstorage_client=False)
+
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 3 # verify the number of rows
+ exp_columns = [field["name"] for field in query_resource["schema"]["fields"]]
+ assert list(df) == exp_columns # verify the column names
+
+ assert df.start_timestamp.dtype.name == "datetime64[ns, UTC]"
+ assert df.seconds.dtype.name == "Int64"
+ assert df.miles.dtype.name == "float64"
+ assert df.km.dtype.name == "float16"
+ assert df.payment_type.dtype.name == "object"
+ assert df.complete.dtype.name == "boolean"
+ assert df.date.dtype.name == "dbdate"
+
+
+def test_to_dataframe_column_date_dtypes():
+ from google.cloud.bigquery.job import QueryJob as target_class
+
+ begun_resource = _make_job_resource(job_type="query")
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": begun_resource["jobReference"],
+ "totalRows": "1",
+ "schema": {"fields": [{"name": "date", "type": "DATE"}]},
+ }
+ row_data = [
+ ["1999-12-01"],
+ ]
+ rows = [{"f": [{"v": field} for field in row]} for row in row_data]
+ query_resource["rows"] = rows
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(
+ begun_resource, query_resource, done_resource, query_resource
+ )
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+ df = job.to_dataframe(create_bqstorage_client=False)
+
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 1 # verify the number of rows
+ exp_columns = [field["name"] for field in query_resource["schema"]["fields"]]
+ assert list(df) == exp_columns # verify the column names
+ assert df.date.dtype.name == "dbdate"
+
+
+@pytest.mark.skipif(tqdm is None, reason="Requires `tqdm`")
+@mock.patch("google.cloud.bigquery._tqdm_helpers.tqdm")
+def test_to_dataframe_with_progress_bar(tqdm_mock):
+ from google.cloud.bigquery.job import QueryJob as target_class
+
+ begun_resource = _make_job_resource(job_type="query")
+ query_resource = {
+ "jobComplete": True,
+ "jobReference": begun_resource["jobReference"],
+ "totalRows": "4",
+ "schema": {"fields": [{"name": "name", "type": "STRING", "mode": "NULLABLE"}]},
+ }
+ done_resource = copy.deepcopy(begun_resource)
+ done_resource["status"] = {"state": "DONE"}
+ connection = make_connection(
+ begun_resource,
+ query_resource,
+ done_resource,
+ query_resource,
+ query_resource,
+ )
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+
+ job.to_dataframe(progress_bar_type=None, create_bqstorage_client=False)
+ tqdm_mock.tqdm.assert_not_called()
+
+ job.to_dataframe(progress_bar_type="tqdm", create_bqstorage_client=False)
+ tqdm_mock.tqdm.assert_called()
+
+
+@pytest.mark.skipif(tqdm is None, reason="Requires `tqdm`")
+@mock.patch("google.cloud.bigquery._tqdm_helpers.tqdm")
+def test_to_dataframe_w_tqdm_pending(tqdm_mock):
+ from google.cloud.bigquery import table
+ from google.cloud.bigquery.job import QueryJob as target_class
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery._tqdm_helpers import _PROGRESS_BAR_UPDATE_INTERVAL
+
+ begun_resource = _make_job_resource(job_type="query")
+ schema = [
+ SchemaField("name", "STRING", mode="NULLABLE"),
+ SchemaField("age", "INTEGER", mode="NULLABLE"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
+ ]
+
+ connection = make_connection({})
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = table.RowIterator(client, api_request, path, schema)
+
+ job._properties["statistics"] = {
+ "query": {
+ "queryPlan": [
+ {"name": "S00: Input", "id": "0", "status": "PENDING"},
+ {"name": "S01: Output", "id": "1", "status": "COMPLETE"},
+ ]
+ },
+ }
+ reload_patch = mock.patch(
+ "google.cloud.bigquery.job._AsyncJob.reload", autospec=True
+ )
+ result_patch = mock.patch(
+ "google.cloud.bigquery.job.QueryJob.result",
+ side_effect=[concurrent.futures.TimeoutError, row_iterator],
+ )
+ with result_patch as tqdm_mock, reload_patch:
+ df = job.to_dataframe(progress_bar_type="tqdm", create_bqstorage_client=False)
+
+ assert tqdm_mock.call_count == 2
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 4 # verify the number of rows
+ assert list(df) == ["name", "age"] # verify the column names
+ tqdm_mock.assert_called_with(
+ timeout=_PROGRESS_BAR_UPDATE_INTERVAL, max_results=None
+ )
+
+
+@pytest.mark.skipif(tqdm is None, reason="Requires `tqdm`")
+@mock.patch("google.cloud.bigquery._tqdm_helpers.tqdm")
+def test_to_dataframe_w_tqdm(tqdm_mock):
+ from google.cloud.bigquery import table
+ from google.cloud.bigquery.job import QueryJob as target_class
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery._tqdm_helpers import _PROGRESS_BAR_UPDATE_INTERVAL
+
+ begun_resource = _make_job_resource(job_type="query")
+ schema = [
+ SchemaField("name", "STRING", mode="NULLABLE"),
+ SchemaField("age", "INTEGER", mode="NULLABLE"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
+ ]
+
+ connection = make_connection({})
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = table.RowIterator(client, api_request, path, schema)
+
+ job._properties["statistics"] = {
+ "query": {
+ "queryPlan": [
+ {"name": "S00: Input", "id": "0", "status": "COMPLETE"},
+ {"name": "S01: Output", "id": "1", "status": "COMPLETE"},
+ ]
+ },
+ }
+ reload_patch = mock.patch(
+ "google.cloud.bigquery.job._AsyncJob.reload", autospec=True
+ )
+ result_patch = mock.patch(
+ "google.cloud.bigquery.job.QueryJob.result",
+ side_effect=[
+ concurrent.futures.TimeoutError,
+ concurrent.futures.TimeoutError,
+ row_iterator,
+ ],
+ )
+
+ with result_patch as tqdm_mock, reload_patch:
+ df = job.to_dataframe(progress_bar_type="tqdm", create_bqstorage_client=False)
+
+ assert tqdm_mock.call_count == 3
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 4 # verify the number of rows
+ assert list(df), ["name", "age"] # verify the column names
+ tqdm_mock.assert_called_with(
+ timeout=_PROGRESS_BAR_UPDATE_INTERVAL, max_results=None
+ )
+
+
+@pytest.mark.skipif(tqdm is None, reason="Requires `tqdm`")
+@mock.patch("google.cloud.bigquery._tqdm_helpers.tqdm")
+def test_to_dataframe_w_tqdm_max_results(tqdm_mock):
+ from google.cloud.bigquery import table
+ from google.cloud.bigquery.job import QueryJob as target_class
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery._tqdm_helpers import _PROGRESS_BAR_UPDATE_INTERVAL
+
+ begun_resource = _make_job_resource(job_type="query")
+ schema = [
+ SchemaField("name", "STRING", mode="NULLABLE"),
+ SchemaField("age", "INTEGER", mode="NULLABLE"),
+ ]
+ rows = [{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]}]
+
+ connection = make_connection({})
+ client = _make_client(connection=connection)
+ job = target_class.from_api_repr(begun_resource, client)
+
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = table.RowIterator(client, api_request, path, schema)
+
+ job._properties["statistics"] = {
+ "query": {
+ "queryPlan": [
+ {"name": "S00: Input", "id": "0", "status": "COMPLETE"},
+ {"name": "S01: Output", "id": "1", "status": "COMPLETE"},
+ ]
+ },
+ }
+ reload_patch = mock.patch(
+ "google.cloud.bigquery.job._AsyncJob.reload", autospec=True
+ )
+ result_patch = mock.patch(
+ "google.cloud.bigquery.job.QueryJob.result",
+ side_effect=[concurrent.futures.TimeoutError, row_iterator],
+ )
+ with result_patch as tqdm_mock, reload_patch:
+ job.to_dataframe(
+ progress_bar_type="tqdm", create_bqstorage_client=False, max_results=3
+ )
+
+ assert tqdm_mock.call_count == 2
+ tqdm_mock.assert_called_with(timeout=_PROGRESS_BAR_UPDATE_INTERVAL, max_results=3)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(shapely is None, reason="Requires `shapely`")
+def test_to_dataframe_geography_as_object():
+ job = _make_job(
+ (("name", "STRING", "NULLABLE"), ("geog", "GEOGRAPHY", "NULLABLE")),
+ (
+ ("Phred Phlyntstone", "Point(0 0)"),
+ ("Bharney Rhubble", "Point(0 1)"),
+ ("Wylma Phlyntstone", None),
+ ),
+ )
+ df = job.to_dataframe(create_bqstorage_client=False, geography_as_object=True)
+
+ assert isinstance(df, pandas.DataFrame)
+ assert len(df) == 3 # verify the number of rows
+ assert list(df) == ["name", "geog"] # verify the column names
+ assert [v.__class__.__name__ for v in df.geog] == [
+ "Point",
+ "Point",
+ "float",
+ ] # float because nan
+
+
+@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
+def test_to_geodataframe():
+ job = _make_job(
+ (("name", "STRING", "NULLABLE"), ("geog", "GEOGRAPHY", "NULLABLE")),
+ (
+ ("Phred Phlyntstone", "Point(0 0)"),
+ ("Bharney Rhubble", "Point(0 1)"),
+ ("Wylma Phlyntstone", None),
+ ),
+ )
+ df = job.to_geodataframe(create_bqstorage_client=False)
+
+ assert isinstance(df, geopandas.GeoDataFrame)
+ assert len(df) == 3 # verify the number of rows
+ assert list(df) == ["name", "geog"] # verify the column names
+ assert [v.__class__.__name__ for v in df.geog] == [
+ "Point",
+ "Point",
+ "NoneType",
+ ] # float because nan
+ assert isinstance(df.geog, geopandas.GeoSeries)
+
+
+@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
+@mock.patch("google.cloud.bigquery.job.query.wait_for_query")
+def test_query_job_to_geodataframe_delegation(wait_for_query):
+ """
+ QueryJob.to_geodataframe just delegates to RowIterator.to_geodataframe.
+
+ This test just demonstrates that. We don't need to test all the
+ variations, which are tested for RowIterator.
+ """
+ import numpy
+
+ job = _make_job()
+ bqstorage_client = object()
+ dtypes = dict(xxx=numpy.dtype("int64"))
+ progress_bar_type = "normal"
+ create_bqstorage_client = False
+ max_results = 42
+ geography_column = "g"
+
+ df = job.to_geodataframe(
+ bqstorage_client=bqstorage_client,
+ dtypes=dtypes,
+ progress_bar_type=progress_bar_type,
+ create_bqstorage_client=create_bqstorage_client,
+ max_results=max_results,
+ geography_column=geography_column,
+ )
+
+ wait_for_query.assert_called_once_with(
+ job, progress_bar_type, max_results=max_results
+ )
+ row_iterator = wait_for_query.return_value
+ row_iterator.to_geodataframe.assert_called_once_with(
+ bqstorage_client=bqstorage_client,
+ dtypes=dtypes,
+ progress_bar_type=progress_bar_type,
+ create_bqstorage_client=create_bqstorage_client,
+ geography_column=geography_column,
+ )
+ assert df is row_iterator.to_geodataframe.return_value
diff --git a/testbed/googleapis__python-bigquery/tests/unit/job/test_query_stats.py b/testbed/googleapis__python-bigquery/tests/unit/job/test_query_stats.py
new file mode 100644
index 0000000000000000000000000000000000000000..61b278d43f8813416ce76e363a8ad9ad587c316d
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/job/test_query_stats.py
@@ -0,0 +1,522 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .helpers import _Base
+
+
+class TestBiEngineStats:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job.query import BiEngineStats
+
+ return BiEngineStats
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_defaults(self):
+ bi_engine_stats = self._make_one()
+ assert bi_engine_stats.mode == "ACCELERATION_MODE_UNSPECIFIED"
+ assert bi_engine_stats.reasons == []
+
+ def test_from_api_repr_unspecified(self):
+ klass = self._get_target_class()
+ result = klass.from_api_repr({"biEngineMode": "ACCELERATION_MODE_UNSPECIFIED"})
+
+ assert isinstance(result, klass)
+ assert result.mode == "ACCELERATION_MODE_UNSPECIFIED"
+ assert result.reasons == []
+
+ def test_from_api_repr_full(self):
+ klass = self._get_target_class()
+ result = klass.from_api_repr({"biEngineMode": "FULL"})
+
+ assert isinstance(result, klass)
+ assert result.mode == "FULL"
+ assert result.reasons == []
+
+ def test_from_api_repr_disabled(self):
+ klass = self._get_target_class()
+ result = klass.from_api_repr(
+ {
+ "biEngineMode": "DISABLED",
+ "biEngineReasons": [
+ {
+ "code": "OTHER_REASON",
+ "message": "Unable to support input table xyz due to an internal error.",
+ }
+ ],
+ }
+ )
+
+ assert isinstance(result, klass)
+ assert result.mode == "DISABLED"
+
+ reason = result.reasons[0]
+ assert reason.code == "OTHER_REASON"
+ assert (
+ reason.reason
+ == "Unable to support input table xyz due to an internal error."
+ )
+
+
+class TestDmlStats:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import DmlStats
+
+ return DmlStats
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_defaults(self):
+ dml_stats = self._make_one()
+ assert dml_stats.inserted_row_count == 0
+ assert dml_stats.deleted_row_count == 0
+ assert dml_stats.updated_row_count == 0
+
+ def test_from_api_repr_partial_stats(self):
+ klass = self._get_target_class()
+ result = klass.from_api_repr({"deletedRowCount": "12"})
+
+ assert isinstance(result, klass)
+ assert result.inserted_row_count == 0
+ assert result.deleted_row_count == 12
+ assert result.updated_row_count == 0
+
+ def test_from_api_repr_full_stats(self):
+ klass = self._get_target_class()
+ result = klass.from_api_repr(
+ {"updatedRowCount": "4", "insertedRowCount": "7", "deletedRowCount": "25"}
+ )
+
+ assert isinstance(result, klass)
+ assert result.inserted_row_count == 7
+ assert result.deleted_row_count == 25
+ assert result.updated_row_count == 4
+
+
+class TestSearchStatistics:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job.query import SearchStats
+
+ return SearchStats
+
+ def _make_one(self, *args, **kwargs):
+ return self._get_target_class()(*args, **kwargs)
+
+ def test_ctor_defaults(self):
+ search_stats = self._make_one()
+ assert search_stats.mode is None
+ assert search_stats.reason == []
+
+ def test_from_api_repr_unspecified(self):
+ klass = self._get_target_class()
+ result = klass.from_api_repr(
+ {"indexUsageMode": "INDEX_USAGE_MODE_UNSPECIFIED", "indexUnusedReasons": []}
+ )
+
+ assert isinstance(result, klass)
+ assert result.mode == "INDEX_USAGE_MODE_UNSPECIFIED"
+ assert result.reason == []
+
+
+class TestIndexUnusedReason:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job.query import IndexUnusedReason
+
+ return IndexUnusedReason
+
+ def _make_one(self, *args, **kwargs):
+ return self._get_target_class()(*args, **kwargs)
+
+ def test_ctor_defaults(self):
+ search_reason = self._make_one()
+ assert search_reason.code is None
+ assert search_reason.message is None
+ assert search_reason.baseTable is None
+ assert search_reason.indexName is None
+
+ def test_from_api_repr_unspecified(self):
+ klass = self._get_target_class()
+ result = klass.from_api_repr(
+ {
+ "code": "INDEX_CONFIG_NOT_AVAILABLE",
+ "message": "There is no search index...",
+ "baseTable": {
+ "projectId": "bigquery-public-data",
+ "datasetId": "usa_names",
+ "tableId": "usa_1910_current",
+ },
+ "indexName": None,
+ }
+ )
+
+ assert isinstance(result, klass)
+ assert result.code == "INDEX_CONFIG_NOT_AVAILABLE"
+ assert result.message == "There is no search index..."
+ assert result.baseTable == {
+ "projectId": "bigquery-public-data",
+ "datasetId": "usa_names",
+ "tableId": "usa_1910_current",
+ }
+ assert result.indexName is None
+
+
+class TestQueryPlanEntryStep(_Base):
+ KIND = "KIND"
+ SUBSTEPS = ("SUB1", "SUB2")
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import QueryPlanEntryStep
+
+ return QueryPlanEntryStep
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor(self):
+ step = self._make_one(self.KIND, self.SUBSTEPS)
+ self.assertEqual(step.kind, self.KIND)
+ self.assertEqual(step.substeps, list(self.SUBSTEPS))
+
+ def test_from_api_repr_empty(self):
+ klass = self._get_target_class()
+ step = klass.from_api_repr({})
+ self.assertIsNone(step.kind)
+ self.assertEqual(step.substeps, [])
+
+ def test_from_api_repr_normal(self):
+ resource = {"kind": self.KIND, "substeps": self.SUBSTEPS}
+ klass = self._get_target_class()
+ step = klass.from_api_repr(resource)
+ self.assertEqual(step.kind, self.KIND)
+ self.assertEqual(step.substeps, list(self.SUBSTEPS))
+
+ def test___eq___mismatched_type(self):
+ step = self._make_one(self.KIND, self.SUBSTEPS)
+ self.assertNotEqual(step, object())
+
+ def test___eq___mismatch_kind(self):
+ step = self._make_one(self.KIND, self.SUBSTEPS)
+ other = self._make_one("OTHER", self.SUBSTEPS)
+ self.assertNotEqual(step, other)
+
+ def test___eq___mismatch_substeps(self):
+ step = self._make_one(self.KIND, self.SUBSTEPS)
+ other = self._make_one(self.KIND, ())
+ self.assertNotEqual(step, other)
+
+ def test___eq___hit(self):
+ step = self._make_one(self.KIND, self.SUBSTEPS)
+ other = self._make_one(self.KIND, self.SUBSTEPS)
+ self.assertEqual(step, other)
+
+ def test___eq___wrong_type(self):
+ step = self._make_one(self.KIND, self.SUBSTEPS)
+ self.assertFalse(step == "hello")
+
+
+class TestQueryPlanEntry(_Base):
+ NAME = "NAME"
+ ENTRY_ID = 1234
+ START_MS = 1522540800000
+ END_MS = 1522540804000
+ INPUT_STAGES = (88, 101)
+ PARALLEL_INPUTS = 1000
+ COMPLETED_PARALLEL_INPUTS = 5
+ WAIT_MS_AVG = 33
+ WAIT_MS_MAX = 400
+ WAIT_RATIO_AVG = 2.71828
+ WAIT_RATIO_MAX = 3.14159
+ READ_MS_AVG = 45
+ READ_MS_MAX = 90
+ READ_RATIO_AVG = 1.41421
+ READ_RATIO_MAX = 1.73205
+ COMPUTE_MS_AVG = 55
+ COMPUTE_MS_MAX = 99
+ COMPUTE_RATIO_AVG = 0.69315
+ COMPUTE_RATIO_MAX = 1.09861
+ WRITE_MS_AVG = 203
+ WRITE_MS_MAX = 340
+ WRITE_RATIO_AVG = 3.32193
+ WRITE_RATIO_MAX = 2.30258
+ RECORDS_READ = 100
+ RECORDS_WRITTEN = 1
+ STATUS = "STATUS"
+ SHUFFLE_OUTPUT_BYTES = 1024
+ SHUFFLE_OUTPUT_BYTES_SPILLED = 1
+ SLOT_MS = 25
+
+ START_RFC3339_MICROS = "2018-04-01T00:00:00.000000Z"
+ END_RFC3339_MICROS = "2018-04-01T00:00:04.000000Z"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import QueryPlanEntry
+
+ return QueryPlanEntry
+
+ def test_from_api_repr_empty(self):
+ klass = self._get_target_class()
+
+ entry = klass.from_api_repr({})
+
+ self.assertIsNone(entry.name)
+ self.assertIsNone(entry.entry_id)
+ self.assertEqual(entry.input_stages, [])
+ self.assertIsNone(entry.start)
+ self.assertIsNone(entry.end)
+ self.assertIsNone(entry.parallel_inputs)
+ self.assertIsNone(entry.completed_parallel_inputs)
+ self.assertIsNone(entry.wait_ms_avg)
+ self.assertIsNone(entry.wait_ms_max)
+ self.assertIsNone(entry.wait_ratio_avg)
+ self.assertIsNone(entry.wait_ratio_max)
+ self.assertIsNone(entry.read_ms_avg)
+ self.assertIsNone(entry.read_ms_max)
+ self.assertIsNone(entry.read_ratio_avg)
+ self.assertIsNone(entry.read_ratio_max)
+ self.assertIsNone(entry.compute_ms_avg)
+ self.assertIsNone(entry.compute_ms_max)
+ self.assertIsNone(entry.compute_ratio_avg)
+ self.assertIsNone(entry.compute_ratio_max)
+ self.assertIsNone(entry.write_ms_avg)
+ self.assertIsNone(entry.write_ms_max)
+ self.assertIsNone(entry.write_ratio_avg)
+ self.assertIsNone(entry.write_ratio_max)
+ self.assertIsNone(entry.records_read)
+ self.assertIsNone(entry.records_written)
+ self.assertIsNone(entry.status)
+ self.assertIsNone(entry.shuffle_output_bytes)
+ self.assertIsNone(entry.shuffle_output_bytes_spilled)
+ self.assertEqual(entry.steps, [])
+ self.assertIsNone(entry.slot_ms)
+
+ def test_from_api_repr_normal(self):
+ from google.cloud.bigquery.job import QueryPlanEntryStep
+
+ steps = [
+ QueryPlanEntryStep(
+ kind=TestQueryPlanEntryStep.KIND,
+ substeps=TestQueryPlanEntryStep.SUBSTEPS,
+ )
+ ]
+ resource = {
+ "name": self.NAME,
+ "id": self.ENTRY_ID,
+ "inputStages": self.INPUT_STAGES,
+ "startMs": self.START_MS,
+ "endMs": self.END_MS,
+ "waitMsAvg": self.WAIT_MS_AVG,
+ "waitMsMax": self.WAIT_MS_MAX,
+ "waitRatioAvg": self.WAIT_RATIO_AVG,
+ "waitRatioMax": self.WAIT_RATIO_MAX,
+ "readMsAvg": self.READ_MS_AVG,
+ "readMsMax": self.READ_MS_MAX,
+ "readRatioAvg": self.READ_RATIO_AVG,
+ "readRatioMax": self.READ_RATIO_MAX,
+ "computeMsAvg": self.COMPUTE_MS_AVG,
+ "computeMsMax": self.COMPUTE_MS_MAX,
+ "computeRatioAvg": self.COMPUTE_RATIO_AVG,
+ "computeRatioMax": self.COMPUTE_RATIO_MAX,
+ "writeMsAvg": self.WRITE_MS_AVG,
+ "writeMsMax": self.WRITE_MS_MAX,
+ "writeRatioAvg": self.WRITE_RATIO_AVG,
+ "writeRatioMax": self.WRITE_RATIO_MAX,
+ "recordsRead": self.RECORDS_READ,
+ "recordsWritten": self.RECORDS_WRITTEN,
+ "status": self.STATUS,
+ "shuffleOutputBytes": self.SHUFFLE_OUTPUT_BYTES,
+ "shuffleOutputBytesSpilled": self.SHUFFLE_OUTPUT_BYTES_SPILLED,
+ "steps": [
+ {
+ "kind": TestQueryPlanEntryStep.KIND,
+ "substeps": TestQueryPlanEntryStep.SUBSTEPS,
+ }
+ ],
+ "slotMs": self.SLOT_MS,
+ }
+ klass = self._get_target_class()
+
+ entry = klass.from_api_repr(resource)
+ self.assertEqual(entry.name, self.NAME)
+ self.assertEqual(entry.entry_id, self.ENTRY_ID)
+ self.assertEqual(entry.wait_ratio_avg, self.WAIT_RATIO_AVG)
+ self.assertEqual(entry.wait_ratio_max, self.WAIT_RATIO_MAX)
+ self.assertEqual(entry.read_ratio_avg, self.READ_RATIO_AVG)
+ self.assertEqual(entry.read_ratio_max, self.READ_RATIO_MAX)
+ self.assertEqual(entry.compute_ratio_avg, self.COMPUTE_RATIO_AVG)
+ self.assertEqual(entry.compute_ratio_max, self.COMPUTE_RATIO_MAX)
+ self.assertEqual(entry.write_ratio_avg, self.WRITE_RATIO_AVG)
+ self.assertEqual(entry.write_ratio_max, self.WRITE_RATIO_MAX)
+ self.assertEqual(entry.records_read, self.RECORDS_READ)
+ self.assertEqual(entry.records_written, self.RECORDS_WRITTEN)
+ self.assertEqual(entry.status, self.STATUS)
+ self.assertEqual(entry.steps, steps)
+ self.assertEqual(entry.slot_ms, self.SLOT_MS)
+
+ def test_start(self):
+ from google.cloud._helpers import _RFC3339_MICROS
+
+ klass = self._get_target_class()
+
+ entry = klass.from_api_repr({})
+ self.assertEqual(entry.start, None)
+
+ entry._properties["startMs"] = self.START_MS
+ self.assertEqual(
+ entry.start.strftime(_RFC3339_MICROS), self.START_RFC3339_MICROS
+ )
+
+ def test_end(self):
+ from google.cloud._helpers import _RFC3339_MICROS
+
+ klass = self._get_target_class()
+
+ entry = klass.from_api_repr({})
+ self.assertEqual(entry.end, None)
+
+ entry._properties["endMs"] = self.END_MS
+ self.assertEqual(entry.end.strftime(_RFC3339_MICROS), self.END_RFC3339_MICROS)
+
+
+class TestScriptStackFrame(_Base):
+ def _make_one(self, resource):
+ from google.cloud.bigquery.job import ScriptStackFrame
+
+ return ScriptStackFrame(resource)
+
+ def test_procedure_id(self):
+ frame = self._make_one({"procedureId": "some-procedure"})
+ self.assertEqual(frame.procedure_id, "some-procedure")
+ del frame._properties["procedureId"]
+ self.assertIsNone(frame.procedure_id)
+
+ def test_start_line(self):
+ frame = self._make_one({"startLine": 5})
+ self.assertEqual(frame.start_line, 5)
+ frame._properties["startLine"] = "5"
+ self.assertEqual(frame.start_line, 5)
+
+ def test_start_column(self):
+ frame = self._make_one({"startColumn": 29})
+ self.assertEqual(frame.start_column, 29)
+ frame._properties["startColumn"] = "29"
+ self.assertEqual(frame.start_column, 29)
+
+ def test_end_line(self):
+ frame = self._make_one({"endLine": 9})
+ self.assertEqual(frame.end_line, 9)
+ frame._properties["endLine"] = "9"
+ self.assertEqual(frame.end_line, 9)
+
+ def test_end_column(self):
+ frame = self._make_one({"endColumn": 14})
+ self.assertEqual(frame.end_column, 14)
+ frame._properties["endColumn"] = "14"
+ self.assertEqual(frame.end_column, 14)
+
+ def test_text(self):
+ frame = self._make_one({"text": "QUERY TEXT"})
+ self.assertEqual(frame.text, "QUERY TEXT")
+
+
+class TestScriptStatistics(_Base):
+ def _make_one(self, resource):
+ from google.cloud.bigquery.job import ScriptStatistics
+
+ return ScriptStatistics(resource)
+
+ def test_evalutation_kind(self):
+ stats = self._make_one({"evaluationKind": "EXPRESSION"})
+ self.assertEqual(stats.evaluation_kind, "EXPRESSION")
+ self.assertEqual(stats.stack_frames, [])
+
+ def test_stack_frames(self):
+ stats = self._make_one(
+ {
+ "stackFrames": [
+ {
+ "procedureId": "some-procedure",
+ "startLine": 5,
+ "startColumn": 29,
+ "endLine": 9,
+ "endColumn": 14,
+ "text": "QUERY TEXT",
+ },
+ {},
+ ]
+ }
+ )
+ stack_frames = stats.stack_frames
+ self.assertEqual(len(stack_frames), 2)
+ stack_frame = stack_frames[0]
+ self.assertEqual(stack_frame.procedure_id, "some-procedure")
+ self.assertEqual(stack_frame.start_line, 5)
+ self.assertEqual(stack_frame.start_column, 29)
+ self.assertEqual(stack_frame.end_line, 9)
+ self.assertEqual(stack_frame.end_column, 14)
+ self.assertEqual(stack_frame.text, "QUERY TEXT")
+ stack_frame = stack_frames[1]
+ self.assertIsNone(stack_frame.procedure_id)
+ self.assertIsNone(stack_frame.start_line)
+ self.assertIsNone(stack_frame.start_column)
+ self.assertIsNone(stack_frame.end_line)
+ self.assertIsNone(stack_frame.end_column)
+ self.assertIsNone(stack_frame.text)
+
+
+class TestTimelineEntry(_Base):
+ ELAPSED_MS = 101
+ ACTIVE_UNITS = 50
+ PENDING_UNITS = 98
+ COMPLETED_UNITS = 520
+ SLOT_MILLIS = 12029
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.job import TimelineEntry
+
+ return TimelineEntry
+
+ def test_from_api_repr_empty(self):
+ klass = self._get_target_class()
+ entry = klass.from_api_repr({})
+ self.assertIsNone(entry.elapsed_ms)
+ self.assertIsNone(entry.active_units)
+ self.assertIsNone(entry.pending_units)
+ self.assertIsNone(entry.completed_units)
+ self.assertIsNone(entry.slot_millis)
+
+ def test_from_api_repr_normal(self):
+ resource = {
+ "elapsedMs": self.ELAPSED_MS,
+ "activeUnits": self.ACTIVE_UNITS,
+ "pendingUnits": self.PENDING_UNITS,
+ "completedUnits": self.COMPLETED_UNITS,
+ "totalSlotMs": self.SLOT_MILLIS,
+ }
+ klass = self._get_target_class()
+
+ entry = klass.from_api_repr(resource)
+ self.assertEqual(entry.elapsed_ms, self.ELAPSED_MS)
+ self.assertEqual(entry.active_units, self.ACTIVE_UNITS)
+ self.assertEqual(entry.pending_units, self.PENDING_UNITS)
+ self.assertEqual(entry.completed_units, self.COMPLETED_UNITS)
+ self.assertEqual(entry.slot_millis, self.SLOT_MILLIS)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/__init__.py b/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6334245aea5aa2deb2f00ec6bc3de455e9cc132
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/test_lexer.py b/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/test_lexer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3624ed0f36e699947b30e5b26a52877499271c19
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/test_lexer.py
@@ -0,0 +1,34 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+IPython = pytest.importorskip("IPython")
+
+
+@pytest.fixture(scope="session")
+def lexer_class():
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Lexer
+
+ return Lexer
+
+
+def test_empy_input(lexer_class):
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ lexer = lexer_class("")
+ tokens = list(lexer)
+
+ assert tokens == [Token(TokenType.EOL, lexeme="", pos=0)]
diff --git a/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/test_parser.py b/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/test_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..b170d536aeeeef16a70c2e93d8cd9f1fb7688ace
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/test_parser.py
@@ -0,0 +1,206 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+IPython = pytest.importorskip("IPython")
+
+
+@pytest.fixture(scope="session")
+def parser_class():
+ from google.cloud.bigquery.magics.line_arg_parser.parser import Parser
+
+ return Parser
+
+
+def test_consume_expected_eol(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [Token(TokenType.EOL, lexeme="", pos=0)]
+ parser = parser_class(fake_lexer)
+
+ parser.consume(TokenType.EOL) # no error
+
+
+def test_consume_unexpected_eol(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import ParseError
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [Token(TokenType.EOL, lexeme="", pos=0)]
+ parser = parser_class(fake_lexer)
+
+ with pytest.raises(ParseError, match=r"Unexpected end of input.*expected.*COLON.*"):
+ parser.consume(TokenType.COLON)
+
+
+def test_input_line_unexpected_input(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import ParseError
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [
+ Token(TokenType.DEST_VAR, lexeme="results", pos=0),
+ Token(TokenType.UNKNOWN, lexeme="boo!", pos=8),
+ Token(TokenType.EOL, lexeme="", pos=12),
+ ]
+ parser = parser_class(fake_lexer)
+
+ with pytest.raises(ParseError, match=r"Unexpected input.*position 8.*boo!.*"):
+ parser.input_line()
+
+
+def test_destination_var_unexpected_input(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import ParseError
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [
+ Token(TokenType.UNKNOWN, lexeme="@!#", pos=2),
+ Token(TokenType.EOL, lexeme="", pos=5),
+ ]
+ parser = parser_class(fake_lexer)
+
+ with pytest.raises(ParseError, match=r"Unknown.*position 2.*@!#.*"):
+ parser.destination_var()
+
+
+def test_option_value_unexpected_input(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import ParseError
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [
+ Token(TokenType.UNKNOWN, lexeme="@!#", pos=8),
+ Token(TokenType.OPTION_SPEC, lexeme="--foo", pos=13),
+ ]
+ parser = parser_class(fake_lexer)
+
+ with pytest.raises(ParseError, match=r"Unknown input.*position 8.*@!#.*"):
+ parser.option_value()
+
+
+def test_dict_items_empty_dict(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [Token(TokenType.RCURL, lexeme="}", pos=22)]
+ parser = parser_class(fake_lexer)
+
+ result = parser.dict_items()
+
+ assert result == []
+
+
+def test_dict_items_trailing_comma(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [
+ Token(TokenType.PY_STRING, lexeme="'age'", pos=10),
+ Token(TokenType.COLON, lexeme=":", pos=17),
+ Token(TokenType.PY_NUMBER, lexeme="18", pos=19),
+ Token(TokenType.COMMA, lexeme=",", pos=21),
+ Token(TokenType.RCURL, lexeme="}", pos=22),
+ ]
+ parser = parser_class(fake_lexer)
+
+ result = parser.dict_items()
+
+ assert len(result) == 1
+ dict_item = result[0]
+ assert dict_item.key.key_value == "'age'"
+ assert dict_item.value.raw_value == "18"
+
+
+def test_dict_item_unknown_input(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import ParseError
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [Token(TokenType.UNKNOWN, lexeme="#/%", pos=35)]
+ parser = parser_class(fake_lexer)
+
+ with pytest.raises(ParseError, match=r"Unknown.*position 35.*#/%.*"):
+ parser.dict_item()
+
+
+def test_pyvalue_list_containing_dict(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+ from google.cloud.bigquery.magics.line_arg_parser.parser import PyDict
+ from google.cloud.bigquery.magics.line_arg_parser.parser import PyList
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [
+ Token(TokenType.LSQUARE, lexeme="[", pos=21),
+ Token(TokenType.LCURL, lexeme="{", pos=22),
+ Token(TokenType.PY_STRING, lexeme="'age'", pos=23),
+ Token(TokenType.COLON, lexeme=":", pos=28),
+ Token(TokenType.PY_NUMBER, lexeme="18", pos=30),
+ Token(TokenType.RCURL, lexeme="}", pos=32),
+ Token(TokenType.COMMA, lexeme=",", pos=33), # trailing comma
+ Token(TokenType.RSQUARE, lexeme="]", pos=34),
+ Token(TokenType.EOL, lexeme="", pos=40),
+ ]
+ parser = parser_class(fake_lexer)
+
+ result = parser.py_value()
+
+ assert isinstance(result, PyList)
+ assert len(result.items) == 1
+
+ element = result.items[0]
+ assert isinstance(element, PyDict)
+ assert len(element.items) == 1
+
+ dict_item = element.items[0]
+ assert dict_item.key.key_value == "'age'"
+ assert dict_item.value.raw_value == "18"
+
+
+def test_pyvalue_invalid_token(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import ParseError
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [Token(TokenType.OPTION_SPEC, lexeme="--verbose", pos=75)]
+ parser = parser_class(fake_lexer)
+
+ error_pattern = r"Unexpected token.*OPTION_SPEC.*position 75.*"
+ with pytest.raises(ParseError, match=error_pattern):
+ parser.py_value()
+
+
+def test_collection_items_empty(parser_class):
+ from google.cloud.bigquery.magics.line_arg_parser import TokenType
+ from google.cloud.bigquery.magics.line_arg_parser.lexer import Token
+
+ # A simple iterable of Tokens is sufficient.
+ fake_lexer = [Token(TokenType.RPAREN, lexeme=")", pos=30)]
+ parser = parser_class(fake_lexer)
+
+ result = parser.collection_items()
+
+ assert result == []
diff --git a/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/test_visitors.py b/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/test_visitors.py
new file mode 100644
index 0000000000000000000000000000000000000000..288ef5f7179e5e534c2af5e23fe3ff0f9e1145f9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/line_arg_parser/test_visitors.py
@@ -0,0 +1,36 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+IPython = pytest.importorskip("IPython")
+
+
+@pytest.fixture
+def base_visitor():
+ from google.cloud.bigquery.magics.line_arg_parser.visitors import NodeVisitor
+
+ return NodeVisitor()
+
+
+def test_unknown_node(base_visitor):
+ from google.cloud.bigquery.magics.line_arg_parser.parser import ParseNode
+
+ class UnknownNode(ParseNode):
+ pass
+
+ node = UnknownNode()
+
+ with pytest.raises(Exception, match=r"No visit_UnknownNode method"):
+ base_visitor.visit(node)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/model/__init__.py b/testbed/googleapis__python-bigquery/tests/unit/model/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/googleapis__python-bigquery/tests/unit/model/test_model.py b/testbed/googleapis__python-bigquery/tests/unit/model/test_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..279a954c78ea842600217801c03c9bec6b91a9a8
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/model/test_model.py
@@ -0,0 +1,488 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+import pytest
+
+
+import google.cloud._helpers
+import google.cloud.bigquery.model
+
+KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
+
+
+@pytest.fixture
+def target_class():
+ from google.cloud.bigquery import Model
+
+ return Model
+
+
+@pytest.fixture
+def object_under_test(target_class):
+ return target_class("project-id.dataset_id.model_id")
+
+
+def test_ctor(target_class):
+ from google.cloud.bigquery import ModelReference
+
+ ref = ModelReference.from_string("my-proj.my_dset.my_model")
+ got = target_class(ref)
+ assert got.reference == ref
+
+
+def test_ctor_string(target_class):
+ from google.cloud.bigquery import ModelReference
+
+ model_id = "my-proj.my_dset.my_model"
+ ref = ModelReference.from_string(model_id)
+ got = target_class(model_id)
+ assert got.reference == ref
+
+
+def test_from_api_repr(target_class):
+ from google.cloud.bigquery import ModelReference
+
+ creation_time = datetime.datetime(
+ 2010, 5, 19, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ modified_time = datetime.datetime(
+ 2011, 10, 1, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ expiration_time = datetime.datetime(
+ 2012, 12, 21, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ resource = {
+ "modelReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "modelId": "my_model",
+ },
+ "location": "US",
+ "etag": "abcdefg",
+ "creationTime": str(google.cloud._helpers._millis(creation_time)),
+ "lastModifiedTime": str(google.cloud._helpers._millis(modified_time)),
+ "expirationTime": str(google.cloud._helpers._millis(expiration_time)),
+ "description": "A friendly description.",
+ "friendlyName": "A friendly name.",
+ "modelType": "LOGISTIC_REGRESSION",
+ "labels": {"greeting": "こんにちは"},
+ "trainingRuns": [
+ {
+ "trainingOptions": {"initialLearnRate": 1.0},
+ "startTime": str(
+ google.cloud._helpers._datetime_to_rfc3339(creation_time)
+ ),
+ },
+ {
+ "trainingOptions": {"initialLearnRate": 0.5},
+ "startTime": str(
+ google.cloud._helpers._datetime_to_rfc3339(modified_time)
+ ),
+ },
+ {
+ "trainingOptions": {"initialLearnRate": 0.25},
+ "startTime": str(
+ google.cloud._helpers._datetime_to_rfc3339(expiration_time)
+ ),
+ },
+ ],
+ "bestTrialId": "123",
+ "featureColumns": [],
+ "encryptionConfiguration": {"kmsKeyName": KMS_KEY_NAME},
+ }
+ got = target_class.from_api_repr(resource)
+
+ assert got.project == "my-project"
+ assert got.dataset_id == "my_dataset"
+ assert got.model_id == "my_model"
+ assert got.reference == ModelReference.from_string("my-project.my_dataset.my_model")
+ assert got.path == "/projects/my-project/datasets/my_dataset/models/my_model"
+ assert got.location == "US"
+ assert got.etag == "abcdefg"
+ assert got.created == creation_time
+ assert got.modified == modified_time
+ assert got.expires == expiration_time
+ assert got.description == "A friendly description."
+ assert got.friendly_name == "A friendly name."
+ assert got.model_type == "LOGISTIC_REGRESSION"
+ assert got.labels == {"greeting": "こんにちは"}
+ assert got.encryption_configuration.kms_key_name == KMS_KEY_NAME
+ assert got.best_trial_id == 123
+ assert got.training_runs[0]["trainingOptions"]["initialLearnRate"] == 1.0
+ assert (
+ google.cloud._helpers._rfc3339_to_datetime(got.training_runs[0]["startTime"])
+ == creation_time
+ )
+ assert got.training_runs[1]["trainingOptions"]["initialLearnRate"] == 0.5
+ assert (
+ google.cloud._helpers._rfc3339_to_datetime(got.training_runs[1]["startTime"])
+ == modified_time
+ )
+ assert got.training_runs[2]["trainingOptions"]["initialLearnRate"] == 0.25
+ assert (
+ google.cloud._helpers._rfc3339_to_datetime(got.training_runs[2]["startTime"])
+ == expiration_time
+ )
+ assert got.transform_columns == []
+
+
+def test_from_api_repr_w_minimal_resource(target_class):
+ from google.cloud.bigquery import ModelReference
+
+ resource = {
+ "modelReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "modelId": "my_model",
+ }
+ }
+ got = target_class.from_api_repr(resource)
+ assert got.reference == ModelReference.from_string("my-project.my_dataset.my_model")
+ assert got.location is None
+ assert got.etag is None
+ assert got.created is None
+ assert got.modified is None
+ assert got.expires is None
+ assert got.description is None
+ assert got.friendly_name is None
+ assert got.model_type == "MODEL_TYPE_UNSPECIFIED"
+ assert got.labels == {}
+ assert got.encryption_configuration is None
+ assert len(got.training_runs) == 0
+ assert len(got.feature_columns) == 0
+ assert len(got.label_columns) == 0
+ assert got.best_trial_id is None
+
+
+def test_from_api_repr_w_unknown_fields(target_class):
+ from google.cloud.bigquery import ModelReference
+
+ resource = {
+ "modelReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "modelId": "my_model",
+ },
+ "thisFieldIsNotInTheProto": "just ignore me",
+ }
+ got = target_class.from_api_repr(resource)
+ assert got.reference == ModelReference.from_string("my-project.my_dataset.my_model")
+ assert got._properties == resource
+
+
+def test_from_api_repr_w_unknown_type(target_class):
+ from google.cloud.bigquery import ModelReference
+
+ resource = {
+ "modelReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "modelId": "my_model",
+ },
+ "modelType": "BE_A_GOOD_ROLE_MODEL", # This model type does not exist.
+ }
+ got = target_class.from_api_repr(resource)
+ assert got.reference == ModelReference.from_string("my-project.my_dataset.my_model")
+ assert got.model_type == "BE_A_GOOD_ROLE_MODEL" # No checks for invalid types.
+ assert got._properties == resource
+
+
+def test_from_api_repr_w_missing_reference(target_class):
+ resource = {}
+ got = target_class.from_api_repr(resource)
+ assert got.reference is None
+ assert got._properties == resource
+
+
+@pytest.mark.parametrize(
+ "resource,filter_fields,expected",
+ [
+ (
+ {
+ "friendlyName": "hello",
+ "description": "world",
+ "expirationTime": "12345",
+ "labels": {"a-label": "a-value"},
+ },
+ ["description"],
+ {"description": "world"},
+ ),
+ (
+ {"friendlyName": "hello", "description": "world"},
+ ["friendlyName"],
+ {"friendlyName": "hello"},
+ ),
+ (
+ {
+ "friendlyName": "hello",
+ "description": "world",
+ "expirationTime": "12345",
+ "labels": {"a-label": "a-value"},
+ },
+ ["expires"],
+ {"expirationTime": "12345"},
+ ),
+ (
+ {
+ "friendlyName": "hello",
+ "description": "world",
+ "expirationTime": None,
+ "labels": {"a-label": "a-value"},
+ },
+ ["expires"],
+ {"expirationTime": None},
+ ),
+ (
+ {
+ "friendlyName": "hello",
+ "description": "world",
+ "expirationTime": None,
+ "labels": {"a-label": "a-value"},
+ },
+ ["labels"],
+ {"labels": {"a-label": "a-value"}},
+ ),
+ (
+ {
+ "friendlyName": "hello",
+ "description": "world",
+ "expirationTime": None,
+ "labels": {"a-label": "a-value"},
+ "encryptionConfiguration": {"kmsKeyName": KMS_KEY_NAME},
+ },
+ ["encryptionConfiguration"],
+ {"encryptionConfiguration": {"kmsKeyName": KMS_KEY_NAME}},
+ ),
+ ],
+)
+def test_build_resource(object_under_test, resource, filter_fields, expected):
+ object_under_test._properties = resource
+ got = object_under_test._build_resource(filter_fields)
+ assert got == expected
+
+
+def test_feature_columns(object_under_test):
+ from google.cloud.bigquery import standard_sql
+
+ object_under_test._properties["featureColumns"] = [
+ {"name": "col_1", "type": {"typeKind": "STRING"}},
+ {"name": "col_2", "type": {"typeKind": "FLOAT64"}},
+ ]
+ expected = [
+ standard_sql.StandardSqlField(
+ "col_1",
+ standard_sql.StandardSqlDataType(standard_sql.StandardSqlTypeNames.STRING),
+ ),
+ standard_sql.StandardSqlField(
+ "col_2",
+ standard_sql.StandardSqlDataType(standard_sql.StandardSqlTypeNames.FLOAT64),
+ ),
+ ]
+ assert object_under_test.feature_columns == expected
+
+
+def test_from_api_repr_w_transform_columns(target_class):
+ resource = {
+ "modelReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "modelId": "my_model",
+ },
+ "transformColumns": [
+ {
+ "name": "transform_name",
+ "type": {"typeKind": "INT64"},
+ "transformSql": "transform_sql",
+ }
+ ],
+ }
+ got = target_class.from_api_repr(resource)
+ assert len(got.transform_columns) == 1
+ transform_column = got.transform_columns[0]
+ assert isinstance(transform_column, google.cloud.bigquery.model.TransformColumn)
+ assert transform_column.name == "transform_name"
+
+
+def test_transform_column_name():
+ transform_columns = google.cloud.bigquery.model.TransformColumn(
+ {"name": "is_female"}
+ )
+ assert transform_columns.name == "is_female"
+
+
+def test_transform_column_transform_sql():
+ transform_columns = google.cloud.bigquery.model.TransformColumn(
+ {"transformSql": "is_female"}
+ )
+ assert transform_columns.transform_sql == "is_female"
+
+
+def test_transform_column_type():
+ transform_columns = google.cloud.bigquery.model.TransformColumn(
+ {"type": {"typeKind": "BOOL"}}
+ )
+ assert transform_columns.type_.type_kind == "BOOL"
+
+
+def test_transform_column_type_none():
+ transform_columns = google.cloud.bigquery.model.TransformColumn({})
+ assert transform_columns.type_ is None
+
+
+def test_transform_column_from_api_repr_with_unknown_properties():
+ transform_column = google.cloud.bigquery.model.TransformColumn.from_api_repr(
+ {
+ "name": "is_female",
+ "type": {"typeKind": "BOOL"},
+ "transformSql": "is_female",
+ "test": "one",
+ }
+ )
+ assert transform_column._properties == {
+ "name": "is_female",
+ "type": {"typeKind": "BOOL"},
+ "transformSql": "is_female",
+ "test": "one",
+ }
+
+
+def test_label_columns(object_under_test):
+ from google.cloud.bigquery import standard_sql
+
+ object_under_test._properties["labelColumns"] = [
+ {"name": "col_1", "type": {"typeKind": "STRING"}},
+ {"name": "col_2", "type": {"typeKind": "FLOAT64"}},
+ ]
+ expected = [
+ standard_sql.StandardSqlField(
+ "col_1",
+ standard_sql.StandardSqlDataType(standard_sql.StandardSqlTypeNames.STRING),
+ ),
+ standard_sql.StandardSqlField(
+ "col_2",
+ standard_sql.StandardSqlDataType(standard_sql.StandardSqlTypeNames.FLOAT64),
+ ),
+ ]
+ assert object_under_test.label_columns == expected
+
+
+def test_set_description(object_under_test):
+ assert not object_under_test.description
+ object_under_test.description = "A model description."
+ assert object_under_test.description == "A model description."
+ object_under_test.description = None
+ assert not object_under_test.description
+
+
+def test_set_expires(object_under_test):
+ assert not object_under_test.expires
+ expiration_time = datetime.datetime(
+ 2012, 12, 21, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ object_under_test.expires = expiration_time
+ assert object_under_test.expires == expiration_time
+ object_under_test.expires = None
+ assert not object_under_test.expires
+
+
+def test_set_friendly_name(object_under_test):
+ assert not object_under_test.friendly_name
+ object_under_test.friendly_name = "A model name."
+ assert object_under_test.friendly_name == "A model name."
+ object_under_test.friendly_name = None
+ assert not object_under_test.friendly_name
+
+
+def test_set_labels(object_under_test):
+ assert object_under_test.labels == {}
+ object_under_test.labels["data_owner"] = "someteam"
+ assert object_under_test.labels == {"data_owner": "someteam"}
+ del object_under_test.labels["data_owner"]
+ assert object_under_test.labels == {}
+
+
+def test_replace_labels(object_under_test):
+ assert object_under_test.labels == {}
+ object_under_test.labels = {"data_owner": "someteam"}
+ assert object_under_test.labels == {"data_owner": "someteam"}
+ labels = {}
+ object_under_test.labels = labels
+ assert object_under_test.labels is labels
+ object_under_test.labels = None
+ assert object_under_test.labels == {}
+
+
+def test_set_encryption_configuration(object_under_test):
+ from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
+
+ assert not object_under_test.encryption_configuration
+ object_under_test.encryption_configuration = EncryptionConfiguration(
+ kms_key_name=KMS_KEY_NAME
+ )
+ assert object_under_test.encryption_configuration.kms_key_name == KMS_KEY_NAME
+ object_under_test.encryption_configuration = None
+ assert not object_under_test.encryption_configuration
+
+
+def test_repr(target_class):
+ model = target_class("my-proj.my_dset.my_model")
+ got = repr(model)
+ assert got == (
+ "Model(reference=ModelReference("
+ "project_id='my-proj', dataset_id='my_dset', model_id='my_model'))"
+ )
+
+
+def test_to_api_repr(target_class):
+ model = target_class("my-proj.my_dset.my_model")
+ resource = {
+ "etag": "abcdefg",
+ "modelReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "modelId": "my_model",
+ },
+ "creationTime": "1274284800000",
+ "lastModifiedTime": "1317484800000",
+ "modelType": "LOGISTIC_REGRESSION",
+ "trainingRuns": [
+ {
+ "trainingOptions": {"initialLearnRate": 1.0},
+ "startTime": "2010-05-19T16:00:00Z",
+ },
+ {
+ "trainingOptions": {"initialLearnRate": 0.5},
+ "startTime": "2011-10-01T16:00:00Z",
+ },
+ {
+ "trainingOptions": {"initialLearnRate": 0.25},
+ "startTime": "2012-12-21T16:00:00Z",
+ },
+ ],
+ "description": "A friendly description.",
+ "location": "US",
+ "friendlyName": "A friendly name.",
+ "labels": {"greeting": "こんにちは"},
+ "expirationTime": "1356105600000",
+ "encryptionConfiguration": {
+ "kmsKeyName": "projects/1/locations/us/keyRings/1/cryptoKeys/1"
+ },
+ }
+ model._properties = resource
+ got = model.to_api_repr()
+ assert got == resource
diff --git a/testbed/googleapis__python-bigquery/tests/unit/model/test_model_reference.py b/testbed/googleapis__python-bigquery/tests/unit/model/test_model_reference.py
new file mode 100644
index 0000000000000000000000000000000000000000..39dabb55db68215bfe6404b4a9a9a6ba7ab8d23e
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/model/test_model_reference.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.fixture
+def target_class():
+ from google.cloud.bigquery import ModelReference
+
+ return ModelReference
+
+
+def test_from_api_repr(target_class):
+ resource = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "modelId": "my_model",
+ }
+ got = target_class.from_api_repr(resource)
+ assert got.project == "my-project"
+ assert got.dataset_id == "my_dataset"
+ assert got.model_id == "my_model"
+ assert got.path == "/projects/my-project/datasets/my_dataset/models/my_model"
+
+
+def test_from_api_repr_w_unknown_fields(target_class):
+ resource = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "modelId": "my_model",
+ "thisFieldIsNotInTheProto": "just ignore me",
+ }
+ got = target_class.from_api_repr(resource)
+ assert got.project == "my-project"
+ assert got.dataset_id == "my_dataset"
+ assert got.model_id == "my_model"
+ assert got._properties is resource
+
+
+def test_to_api_repr(target_class):
+ ref = target_class.from_string("my-project.my_dataset.my_model")
+ got = ref.to_api_repr()
+ assert got == {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "modelId": "my_model",
+ }
+
+
+def test_from_string(target_class):
+ got = target_class.from_string("string-project.string_dataset.string_model")
+ assert got.project == "string-project"
+ assert got.dataset_id == "string_dataset"
+ assert got.model_id == "string_model"
+ assert got.path == (
+ "/projects/string-project/datasets/string_dataset/models/string_model"
+ )
+
+
+def test_from_string_legacy_string(target_class):
+ with pytest.raises(ValueError):
+ target_class.from_string("string-project:string_dataset.string_model")
+
+
+def test_from_string_not_fully_qualified(target_class):
+ with pytest.raises(ValueError):
+ target_class.from_string("string_model")
+
+ with pytest.raises(ValueError):
+ target_class.from_string("string_dataset.string_model")
+
+ with pytest.raises(ValueError):
+ target_class.from_string("a.b.c.d")
+
+
+def test_from_string_with_default_project(target_class):
+ got = target_class.from_string(
+ "string_dataset.string_model", default_project="default-project"
+ )
+ assert got.project == "default-project"
+ assert got.dataset_id == "string_dataset"
+ assert got.model_id == "string_model"
+
+
+def test_from_string_ignores_default_project(target_class):
+ got = target_class.from_string(
+ "string-project.string_dataset.string_model", default_project="default-project"
+ )
+ assert got.project == "string-project"
+ assert got.dataset_id == "string_dataset"
+ assert got.model_id == "string_model"
+
+
+def test_eq(target_class):
+ model = target_class.from_string("my-proj.my_dset.my_model")
+ model_too = target_class.from_string("my-proj.my_dset.my_model")
+ assert model == model_too
+ assert not (model != model_too)
+
+ other_model = target_class.from_string("my-proj.my_dset.my_model2")
+ assert not (model == other_model)
+ assert model != other_model
+
+ notamodel = object()
+ assert not (model == notamodel)
+ assert model != notamodel
+
+
+def test_hash(target_class):
+ model = target_class.from_string("my-proj.my_dset.my_model")
+ model2 = target_class.from_string("my-proj.my_dset.model2")
+ got = {model: "hello", model2: "world"}
+ assert got[model] == "hello"
+ assert got[model2] == "world"
+
+ model_too = target_class.from_string("my-proj.my_dset.my_model")
+ assert got[model_too] == "hello"
+
+
+def test_repr(target_class):
+ model = target_class.from_string("my-proj.my_dset.my_model")
+ got = repr(model)
+ assert (
+ got
+ == "ModelReference(project_id='my-proj', dataset_id='my_dset', model_id='my_model')"
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/routine/__init__.py b/testbed/googleapis__python-bigquery/tests/unit/routine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/googleapis__python-bigquery/tests/unit/routine/test_remote_function_options.py b/testbed/googleapis__python-bigquery/tests/unit/routine/test_remote_function_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffd57e8c1a3891d390c10ee5507569ca51bcbbda
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/routine/test_remote_function_options.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+ENDPOINT = "https://some.endpoint"
+CONNECTION = "connection_string"
+MAX_BATCHING_ROWS = 50
+USER_DEFINED_CONTEXT = {
+ "foo": "bar",
+}
+
+
+@pytest.fixture
+def target_class():
+ from google.cloud.bigquery.routine import RemoteFunctionOptions
+
+ return RemoteFunctionOptions
+
+
+def test_ctor(target_class):
+ options = target_class(
+ endpoint=ENDPOINT,
+ connection=CONNECTION,
+ max_batching_rows=MAX_BATCHING_ROWS,
+ user_defined_context=USER_DEFINED_CONTEXT,
+ )
+ assert options.endpoint == ENDPOINT
+ assert options.connection == CONNECTION
+ assert options.max_batching_rows == MAX_BATCHING_ROWS
+ assert options.user_defined_context == USER_DEFINED_CONTEXT
+
+
+def test_empty_ctor(target_class):
+ options = target_class()
+ assert options._properties == {}
+ options = target_class(_properties=None)
+ assert options._properties == {}
+ options = target_class(_properties={})
+ assert options._properties == {}
+
+
+def test_ctor_bad_context(target_class):
+ with pytest.raises(ValueError, match="value must be dictionary"):
+ target_class(user_defined_context=[1, 2, 3, 4])
+
+
+def test_from_api_repr(target_class):
+ resource = {
+ "endpoint": ENDPOINT,
+ "connection": CONNECTION,
+ "maxBatchingRows": MAX_BATCHING_ROWS,
+ "userDefinedContext": USER_DEFINED_CONTEXT,
+ "someRandomField": "someValue",
+ }
+ options = target_class.from_api_repr(resource)
+ assert options.endpoint == ENDPOINT
+ assert options.connection == CONNECTION
+ assert options.max_batching_rows == MAX_BATCHING_ROWS
+ assert options.user_defined_context == USER_DEFINED_CONTEXT
+ assert options._properties["someRandomField"] == "someValue"
+
+
+def test_from_api_repr_w_minimal_resource(target_class):
+ resource = {}
+ options = target_class.from_api_repr(resource)
+ assert options.endpoint is None
+ assert options.connection is None
+ assert options.max_batching_rows is None
+ assert options.user_defined_context is None
+
+
+def test_from_api_repr_w_unknown_fields(target_class):
+ resource = {"thisFieldIsNotInTheProto": "just ignore me"}
+ options = target_class.from_api_repr(resource)
+ assert options._properties is resource
+
+
+def test_eq(target_class):
+ options = target_class(
+ endpoint=ENDPOINT,
+ connection=CONNECTION,
+ max_batching_rows=MAX_BATCHING_ROWS,
+ user_defined_context=USER_DEFINED_CONTEXT,
+ )
+ other_options = target_class(
+ endpoint=ENDPOINT,
+ connection=CONNECTION,
+ max_batching_rows=MAX_BATCHING_ROWS,
+ user_defined_context=USER_DEFINED_CONTEXT,
+ )
+ assert options == other_options
+ assert not (options != other_options)
+
+ empty_options = target_class()
+ assert not (options == empty_options)
+ assert options != empty_options
+
+ notanarg = object()
+ assert not (options == notanarg)
+ assert options != notanarg
+
+
+def test_repr(target_class):
+ options = target_class(
+ endpoint=ENDPOINT,
+ connection=CONNECTION,
+ max_batching_rows=MAX_BATCHING_ROWS,
+ user_defined_context=USER_DEFINED_CONTEXT,
+ )
+ actual_repr = repr(options)
+ assert actual_repr == (
+ "RemoteFunctionOptions(connection='connection_string', endpoint='https://some.endpoint', max_batching_rows=50, user_defined_context={'foo': 'bar'})"
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/routine/test_routine.py b/testbed/googleapis__python-bigquery/tests/unit/routine/test_routine.py
new file mode 100644
index 0000000000000000000000000000000000000000..acd3bc40e2fff2b16479b89bc2cf6ecb7e70a79c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/routine/test_routine.py
@@ -0,0 +1,607 @@
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+import pytest
+
+import google.cloud._helpers
+from google.cloud import bigquery
+
+
+@pytest.fixture
+def target_class():
+ from google.cloud.bigquery.routine import Routine
+
+ return Routine
+
+
+@pytest.fixture
+def object_under_test(target_class):
+ return target_class("project-id.dataset_id.routine_id")
+
+
+def test_ctor(target_class):
+ from google.cloud.bigquery.routine import RoutineReference
+
+ ref = RoutineReference.from_string("my-proj.my_dset.my_routine")
+ actual_routine = target_class(ref)
+ assert actual_routine.reference == ref
+ assert (
+ actual_routine.path == "/projects/my-proj/datasets/my_dset/routines/my_routine"
+ )
+
+
+def test_ctor_w_string(target_class):
+ from google.cloud.bigquery.routine import RoutineReference
+
+ routine_id = "my-proj.my_dset.my_routine"
+ ref = RoutineReference.from_string(routine_id)
+ actual_routine = target_class(routine_id)
+ assert actual_routine.reference == ref
+
+
+def test_ctor_w_properties(target_class):
+ from google.cloud.bigquery.routine import RoutineArgument
+ from google.cloud.bigquery.routine import RoutineReference
+
+ routine_id = "my-proj.my_dset.my_routine"
+ arguments = [
+ RoutineArgument(
+ name="x",
+ data_type=bigquery.standard_sql.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ ),
+ )
+ ]
+ body = "x * 3"
+ language = "SQL"
+ return_type = bigquery.standard_sql.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ )
+ type_ = "SCALAR_FUNCTION"
+ description = "A routine description."
+ determinism_level = bigquery.DeterminismLevel.NOT_DETERMINISTIC
+
+ options = bigquery.RemoteFunctionOptions(
+ endpoint="https://some.endpoint",
+ connection="connection_string",
+ max_batching_rows=99,
+ user_defined_context={"foo": "bar"},
+ )
+
+ actual_routine = target_class(
+ routine_id,
+ arguments=arguments,
+ body=body,
+ language=language,
+ return_type=return_type,
+ type_=type_,
+ description=description,
+ determinism_level=determinism_level,
+ remote_function_options=options,
+ )
+
+ ref = RoutineReference.from_string(routine_id)
+ assert actual_routine.reference == ref
+ assert actual_routine.arguments == arguments
+ assert actual_routine.body == body
+ assert actual_routine.language == language
+ assert actual_routine.return_type == return_type
+ assert actual_routine.type_ == type_
+ assert actual_routine.description == description
+ assert (
+ actual_routine.determinism_level == bigquery.DeterminismLevel.NOT_DETERMINISTIC
+ )
+ assert actual_routine.remote_function_options == options
+
+
+def test_ctor_invalid_remote_function_options(target_class):
+ with pytest.raises(
+ ValueError,
+ match=".*must be google.cloud.bigquery.routine.RemoteFunctionOptions.*",
+ ):
+ target_class(
+ "my-proj.my_dset.my_routine",
+ remote_function_options=object(),
+ )
+
+
+def test_from_api_repr(target_class):
+ from google.cloud.bigquery.routine import RoutineArgument
+ from google.cloud.bigquery.routine import RoutineReference
+
+ creation_time = datetime.datetime(
+ 2010, 5, 19, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ modified_time = datetime.datetime(
+ 2011, 10, 1, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ resource = {
+ "routineReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ },
+ "etag": "abcdefg",
+ "creationTime": str(google.cloud._helpers._millis(creation_time)),
+ "lastModifiedTime": str(google.cloud._helpers._millis(modified_time)),
+ "definitionBody": "42",
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "language": "SQL",
+ "returnType": {"typeKind": "INT64"},
+ "routineType": "SCALAR_FUNCTION",
+ "someNewField": "someValue",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISTIC,
+ "remoteFunctionOptions": {
+ "endpoint": "https://some.endpoint",
+ "connection": "connection_string",
+ "maxBatchingRows": 50,
+ "userDefinedContext": {
+ "foo": "bar",
+ },
+ },
+ "dataGovernanceType": "DATA_MASKING",
+ }
+ actual_routine = target_class.from_api_repr(resource)
+
+ assert actual_routine.project == "my-project"
+ assert actual_routine.dataset_id == "my_dataset"
+ assert actual_routine.routine_id == "my_routine"
+ assert (
+ actual_routine.path
+ == "/projects/my-project/datasets/my_dataset/routines/my_routine"
+ )
+ assert actual_routine.reference == RoutineReference.from_string(
+ "my-project.my_dataset.my_routine"
+ )
+ assert actual_routine.etag == "abcdefg"
+ assert actual_routine.created == creation_time
+ assert actual_routine.modified == modified_time
+ assert actual_routine.arguments == [
+ RoutineArgument(
+ name="x",
+ data_type=bigquery.standard_sql.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ ),
+ )
+ ]
+ assert actual_routine.body == "42"
+ assert actual_routine.language == "SQL"
+ assert actual_routine.return_type == bigquery.standard_sql.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ )
+ assert actual_routine.return_table_type is None
+ assert actual_routine.type_ == "SCALAR_FUNCTION"
+ assert actual_routine._properties["someNewField"] == "someValue"
+ assert actual_routine.description == "A routine description."
+ assert actual_routine.determinism_level == "DETERMINISTIC"
+ assert actual_routine.remote_function_options.endpoint == "https://some.endpoint"
+ assert actual_routine.remote_function_options.connection == "connection_string"
+ assert actual_routine.remote_function_options.max_batching_rows == 50
+ assert actual_routine.remote_function_options.user_defined_context == {"foo": "bar"}
+ assert actual_routine.data_governance_type == "DATA_MASKING"
+
+
+def test_from_api_repr_tvf_function(target_class):
+ from google.cloud.bigquery.routine import RoutineArgument
+ from google.cloud.bigquery.routine import RoutineReference
+ from google.cloud.bigquery.routine import RoutineType
+
+ StandardSqlDataType = bigquery.standard_sql.StandardSqlDataType
+ StandardSqlField = bigquery.standard_sql.StandardSqlField
+ StandardSqlTableType = bigquery.standard_sql.StandardSqlTableType
+
+ creation_time = datetime.datetime(
+ 2010, 5, 19, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ modified_time = datetime.datetime(
+ 2011, 10, 1, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ resource = {
+ "routineReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ },
+ "etag": "abcdefg",
+ "creationTime": str(google.cloud._helpers._millis(creation_time)),
+ "lastModifiedTime": str(google.cloud._helpers._millis(modified_time)),
+ "definitionBody": "SELECT x FROM UNNEST([1,2,3]) x WHERE x > a",
+ "arguments": [{"name": "a", "dataType": {"typeKind": "INT64"}}],
+ "language": "SQL",
+ "returnTableType": {
+ "columns": [{"name": "int_col", "type": {"typeKind": "INT64"}}]
+ },
+ "routineType": "TABLE_VALUED_FUNCTION",
+ "someNewField": "someValue",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISTIC,
+ }
+ actual_routine = target_class.from_api_repr(resource)
+
+ assert actual_routine.project == "my-project"
+ assert actual_routine.dataset_id == "my_dataset"
+ assert actual_routine.routine_id == "my_routine"
+ assert (
+ actual_routine.path
+ == "/projects/my-project/datasets/my_dataset/routines/my_routine"
+ )
+ assert actual_routine.reference == RoutineReference.from_string(
+ "my-project.my_dataset.my_routine"
+ )
+ assert actual_routine.etag == "abcdefg"
+ assert actual_routine.created == creation_time
+ assert actual_routine.modified == modified_time
+ assert actual_routine.arguments == [
+ RoutineArgument(
+ name="a",
+ data_type=StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ ),
+ )
+ ]
+ assert actual_routine.body == "SELECT x FROM UNNEST([1,2,3]) x WHERE x > a"
+ assert actual_routine.language == "SQL"
+ assert actual_routine.return_type is None
+ assert actual_routine.return_table_type == StandardSqlTableType(
+ columns=[
+ StandardSqlField(
+ name="int_col",
+ type=StandardSqlDataType(type_kind=bigquery.StandardSqlTypeNames.INT64),
+ )
+ ]
+ )
+ assert actual_routine.type_ == RoutineType.TABLE_VALUED_FUNCTION
+ assert actual_routine._properties["someNewField"] == "someValue"
+ assert actual_routine.description == "A routine description."
+ assert actual_routine.determinism_level == "DETERMINISTIC"
+
+
+def test_from_api_repr_w_minimal_resource(target_class):
+ from google.cloud.bigquery.routine import RoutineReference
+
+ resource = {
+ "routineReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ }
+ }
+ actual_routine = target_class.from_api_repr(resource)
+ assert actual_routine.reference == RoutineReference.from_string(
+ "my-project.my_dataset.my_routine"
+ )
+ assert actual_routine.etag is None
+ assert actual_routine.created is None
+ assert actual_routine.modified is None
+ assert actual_routine.arguments == []
+ assert actual_routine.body is None
+ assert actual_routine.language is None
+ assert actual_routine.return_type is None
+ assert actual_routine.type_ is None
+ assert actual_routine.description is None
+ assert actual_routine.determinism_level is None
+ assert actual_routine.remote_function_options is None
+ assert actual_routine.data_governance_type is None
+
+
+def test_from_api_repr_w_unknown_fields(target_class):
+ from google.cloud.bigquery.routine import RoutineReference
+
+ resource = {
+ "routineReference": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ },
+ "thisFieldIsNotInTheProto": "just ignore me",
+ }
+ actual_routine = target_class.from_api_repr(resource)
+ assert actual_routine.reference == RoutineReference.from_string(
+ "my-project.my_dataset.my_routine"
+ )
+ assert actual_routine._properties is resource
+
+
+@pytest.mark.parametrize(
+ "resource,filter_fields,expected",
+ [
+ (
+ {
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "definitionBody": "x * 3",
+ "language": "SQL",
+ "returnType": {"typeKind": "INT64"},
+ "routineType": "SCALAR_FUNCTION",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED,
+ },
+ ["arguments"],
+ {"arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}]},
+ ),
+ (
+ {
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "definitionBody": "x * 3",
+ "language": "SQL",
+ "returnType": {"typeKind": "INT64"},
+ "routineType": "SCALAR_FUNCTION",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED,
+ },
+ ["body"],
+ {"definitionBody": "x * 3"},
+ ),
+ (
+ {
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "definitionBody": "x * 3",
+ "language": "SQL",
+ "returnType": {"typeKind": "INT64"},
+ "routineType": "SCALAR_FUNCTION",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED,
+ },
+ ["language"],
+ {"language": "SQL"},
+ ),
+ (
+ {
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "definitionBody": "x * 3",
+ "language": "SQL",
+ "returnType": {"typeKind": "INT64"},
+ "routineType": "SCALAR_FUNCTION",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED,
+ },
+ ["return_type"],
+ {"returnType": {"typeKind": "INT64"}},
+ ),
+ (
+ {
+ "definitionBody": "SELECT x FROM UNNEST([1,2,3]) x WHERE x > 1",
+ "language": "SQL",
+ "returnTableType": {
+ "columns": [{"name": "int_col", "type": {"typeKind": "INT64"}}]
+ },
+ "routineType": "TABLE_VALUED_FUNCTION",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED,
+ },
+ ["return_table_type"],
+ {
+ "returnTableType": {
+ "columns": [{"name": "int_col", "type": {"typeKind": "INT64"}}]
+ }
+ },
+ ),
+ (
+ {
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "definitionBody": "x * 3",
+ "language": "SQL",
+ "returnType": {"typeKind": "INT64"},
+ "routineType": "SCALAR_FUNCTION",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED,
+ },
+ ["type_"],
+ {"routineType": "SCALAR_FUNCTION"},
+ ),
+ (
+ {
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "definitionBody": "x * 3",
+ "language": "SQL",
+ "returnType": {"typeKind": "INT64"},
+ "routineType": "SCALAR_FUNCTION",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED,
+ },
+ ["description"],
+ {"description": "A routine description."},
+ ),
+ (
+ {
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "definitionBody": "x * 3",
+ "language": "SQL",
+ "returnType": {"typeKind": "INT64"},
+ "routineType": "SCALAR_FUNCTION",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED,
+ },
+ ["determinism_level"],
+ {
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED
+ },
+ ),
+ (
+ {
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "definitionBody": "x * 3",
+ "language": "SQL",
+ "returnType": {"typeKind": "INT64"},
+ "routineType": "SCALAR_FUNCTION",
+ "description": "A routine description.",
+ "determinismLevel": bigquery.DeterminismLevel.DETERMINISM_LEVEL_UNSPECIFIED,
+ "dataGovernanceType": "DATA_MASKING",
+ },
+ ["data_governance_type"],
+ {"dataGovernanceType": "DATA_MASKING"},
+ ),
+ (
+ {},
+ [
+ "arguments",
+ "language",
+ "body",
+ "type_",
+ "return_type",
+ "description",
+ "determinism_level",
+ ],
+ {
+ "arguments": None,
+ "definitionBody": None,
+ "language": None,
+ "returnType": None,
+ "routineType": None,
+ "description": None,
+ "determinismLevel": None,
+ },
+ ),
+ (
+ {"someNewField": "someValue"},
+ ["someNewField"],
+ {"someNewField": "someValue"},
+ ),
+ (
+ {
+ "routineType": "SCALAR_FUNCTION",
+ "remoteFunctionOptions": {
+ "endpoint": "https://some_endpoint",
+ "connection": "connection_string",
+ "max_batching_rows": 101,
+ },
+ },
+ ["remote_function_options"],
+ {
+ "remoteFunctionOptions": {
+ "endpoint": "https://some_endpoint",
+ "connection": "connection_string",
+ "max_batching_rows": 101,
+ },
+ },
+ ),
+ ],
+)
+def test_build_resource(object_under_test, resource, filter_fields, expected):
+ object_under_test._properties = resource
+ actual_routine = object_under_test._build_resource(filter_fields)
+ assert actual_routine == expected
+
+
+def test_set_arguments_w_none(object_under_test):
+ object_under_test.arguments = None
+ assert object_under_test.arguments == []
+ assert object_under_test._properties["arguments"] == []
+
+
+def test_set_imported_libraries(object_under_test):
+ imported_libraries = ["gs://cloud-samples-data/bigquery/udfs/max-value.js"]
+ object_under_test.imported_libraries = imported_libraries
+ assert object_under_test.imported_libraries == imported_libraries
+ assert object_under_test._properties["importedLibraries"] == imported_libraries
+
+
+def test_set_imported_libraries_w_none(object_under_test):
+ object_under_test.imported_libraries = None
+ assert object_under_test.imported_libraries == []
+ assert object_under_test._properties["importedLibraries"] == []
+
+
+def test_set_return_type_w_none(object_under_test):
+ object_under_test.return_type = None
+ assert object_under_test.return_type is None
+ assert object_under_test._properties["returnType"] is None
+
+
+def test_set_return_table_type_w_none(object_under_test):
+ object_under_test.return_table_type = None
+ assert object_under_test.return_table_type is None
+ assert object_under_test._properties["returnTableType"] is None
+
+
+def test_set_return_table_type_w_not_none(object_under_test):
+ StandardSqlDataType = bigquery.standard_sql.StandardSqlDataType
+ StandardSqlField = bigquery.standard_sql.StandardSqlField
+ StandardSqlTableType = bigquery.standard_sql.StandardSqlTableType
+
+ table_type = StandardSqlTableType(
+ columns=[
+ StandardSqlField(
+ name="int_col",
+ type=StandardSqlDataType(type_kind=bigquery.StandardSqlTypeNames.INT64),
+ ),
+ StandardSqlField(
+ name="str_col",
+ type=StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.STRING
+ ),
+ ),
+ ]
+ )
+
+ object_under_test.return_table_type = table_type
+
+ assert object_under_test.return_table_type == table_type
+ assert object_under_test._properties["returnTableType"] == {
+ "columns": [
+ {"name": "int_col", "type": {"typeKind": "INT64"}},
+ {"name": "str_col", "type": {"typeKind": "STRING"}},
+ ]
+ }
+
+
+def test_set_description_w_none(object_under_test):
+ object_under_test.description = None
+ assert object_under_test.description is None
+ assert object_under_test._properties["description"] is None
+
+
+def test_set_remote_function_options_w_none(object_under_test):
+ object_under_test.remote_function_options = None
+ assert object_under_test.remote_function_options is None
+ assert object_under_test._properties["remoteFunctionOptions"] is None
+
+
+def test_set_data_governance_type_w_none(object_under_test):
+ object_under_test.data_governance_type = None
+ assert object_under_test.data_governance_type is None
+ assert object_under_test._properties["dataGovernanceType"] is None
+
+
+def test_set_data_governance_type_valid(object_under_test):
+ object_under_test.data_governance_type = "DATA_MASKING"
+ assert object_under_test.data_governance_type == "DATA_MASKING"
+ assert object_under_test._properties["dataGovernanceType"] == "DATA_MASKING"
+
+
+def test_set_data_governance_type_wrong_type(object_under_test):
+ with pytest.raises(ValueError) as exp:
+ object_under_test.data_governance_type = 1
+ assert "invalid data_governance_type" in str(exp)
+ assert object_under_test.data_governance_type is None
+ assert object_under_test._properties.get("dataGovernanceType") is None
+
+
+def test_set_data_governance_type_wrong_str(object_under_test):
+ """Client does not verify the content of data_governance_type string to be
+ compatible with future upgrades. If the value is not supported, BigQuery
+ itself will report an error.
+ """
+ object_under_test.data_governance_type = "RANDOM_STRING"
+ assert object_under_test.data_governance_type == "RANDOM_STRING"
+ assert object_under_test._properties["dataGovernanceType"] == "RANDOM_STRING"
+
+
+def test_repr(target_class):
+ model = target_class("my-proj.my_dset.my_routine")
+ actual_routine = repr(model)
+ assert actual_routine == "Routine('my-proj.my_dset.my_routine')"
diff --git a/testbed/googleapis__python-bigquery/tests/unit/routine/test_routine_argument.py b/testbed/googleapis__python-bigquery/tests/unit/routine/test_routine_argument.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7f168a301da8f8f9ff6f45c4b1346165f25f830
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/routine/test_routine_argument.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from google.cloud import bigquery
+
+
+@pytest.fixture
+def target_class():
+ from google.cloud.bigquery.routine import RoutineArgument
+
+ return RoutineArgument
+
+
+def test_ctor(target_class):
+ data_type = bigquery.standard_sql.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ )
+ actual_arg = target_class(
+ name="field_name", kind="FIXED_TYPE", mode="IN", data_type=data_type
+ )
+ assert actual_arg.name == "field_name"
+ assert actual_arg.kind == "FIXED_TYPE"
+ assert actual_arg.mode == "IN"
+ assert actual_arg.data_type == data_type
+
+
+def test_from_api_repr(target_class):
+ resource = {
+ "argumentKind": "FIXED_TYPE",
+ "dataType": {"typeKind": "INT64"},
+ "mode": "IN",
+ "name": "field_name",
+ }
+ actual_arg = target_class.from_api_repr(resource)
+ assert actual_arg.name == "field_name"
+ assert actual_arg.kind == "FIXED_TYPE"
+ assert actual_arg.mode == "IN"
+ assert actual_arg.data_type == bigquery.standard_sql.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ )
+
+
+def test_from_api_repr_w_minimal_resource(target_class):
+ resource = {}
+ actual_arg = target_class.from_api_repr(resource)
+ assert actual_arg.name is None
+ assert actual_arg.kind is None
+ assert actual_arg.mode is None
+ assert actual_arg.data_type is None
+
+
+def test_from_api_repr_w_unknown_fields(target_class):
+ resource = {"thisFieldIsNotInTheProto": "just ignore me"}
+ actual_arg = target_class.from_api_repr(resource)
+ assert actual_arg._properties is resource
+
+
+def test_eq(target_class):
+ data_type = bigquery.standard_sql.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ )
+ arg = target_class(
+ name="field_name", kind="FIXED_TYPE", mode="IN", data_type=data_type
+ )
+ arg_too = target_class(
+ name="field_name", kind="FIXED_TYPE", mode="IN", data_type=data_type
+ )
+ assert arg == arg_too
+ assert not (arg != arg_too)
+
+ other_arg = target_class()
+ assert not (arg == other_arg)
+ assert arg != other_arg
+
+ notanarg = object()
+ assert not (arg == notanarg)
+ assert arg != notanarg
+
+
+def test_repr(target_class):
+ arg = target_class(name="field_name", kind="FIXED_TYPE", mode="IN", data_type=None)
+ actual_repr = repr(arg)
+ assert actual_repr == (
+ "RoutineArgument(data_type=None, kind='FIXED_TYPE', mode='IN', name='field_name')"
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/routine/test_routine_reference.py b/testbed/googleapis__python-bigquery/tests/unit/routine/test_routine_reference.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d3d551a6294e6bb0b181b6615018836cac058b2
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/routine/test_routine_reference.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.fixture
+def target_class():
+ from google.cloud.bigquery.routine import RoutineReference
+
+ return RoutineReference
+
+
+def test_from_api_repr(target_class):
+ resource = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ }
+ got = target_class.from_api_repr(resource)
+ assert got.project == "my-project"
+ assert got.dataset_id == "my_dataset"
+ assert got.routine_id == "my_routine"
+ assert got.path == "/projects/my-project/datasets/my_dataset/routines/my_routine"
+
+
+def test_from_api_repr_w_unknown_fields(target_class):
+ resource = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ "thisFieldIsNotInTheProto": "just ignore me",
+ }
+ got = target_class.from_api_repr(resource)
+ assert got.project == "my-project"
+ assert got.dataset_id == "my_dataset"
+ assert got.routine_id == "my_routine"
+ assert got._properties is resource
+
+
+def test_to_api_repr(target_class):
+ ref = target_class.from_string("my-project.my_dataset.my_routine")
+ got = ref.to_api_repr()
+ assert got == {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ }
+
+
+def test_from_string(target_class):
+ got = target_class.from_string("string-project.string_dataset.string_routine")
+ assert got.project == "string-project"
+ assert got.dataset_id == "string_dataset"
+ assert got.routine_id == "string_routine"
+ assert got.path == (
+ "/projects/string-project/datasets/string_dataset/routines/string_routine"
+ )
+
+
+def test_from_string_legacy_string(target_class):
+ with pytest.raises(ValueError):
+ target_class.from_string("string-project:string_dataset.string_routine")
+
+
+def test_from_string_not_fully_qualified(target_class):
+ with pytest.raises(ValueError):
+ target_class.from_string("string_routine")
+
+ with pytest.raises(ValueError):
+ target_class.from_string("string_dataset.string_routine")
+
+ with pytest.raises(ValueError):
+ target_class.from_string("a.b.c.d")
+
+
+def test_from_string_with_default_project(target_class):
+ got = target_class.from_string(
+ "string_dataset.string_routine", default_project="default-project"
+ )
+ assert got.project == "default-project"
+ assert got.dataset_id == "string_dataset"
+ assert got.routine_id == "string_routine"
+
+
+def test_from_string_ignores_default_project(target_class):
+ got = target_class.from_string(
+ "string-project.string_dataset.string_routine",
+ default_project="default-project",
+ )
+ assert got.project == "string-project"
+ assert got.dataset_id == "string_dataset"
+ assert got.routine_id == "string_routine"
+
+
+def test_eq(target_class):
+ routine = target_class.from_string("my-proj.my_dset.my_routine")
+ routine_too = target_class.from_string("my-proj.my_dset.my_routine")
+ assert routine == routine_too
+ assert not (routine != routine_too)
+
+ other_routine = target_class.from_string("my-proj.my_dset.my_routine2")
+ assert not (routine == other_routine)
+ assert routine != other_routine
+
+ notaroutine = object()
+ assert not (routine == notaroutine)
+ assert routine != notaroutine
+
+
+def test_hash(target_class):
+ routine = target_class.from_string("my-proj.my_dset.my_routine")
+ routine2 = target_class.from_string("my-proj.my_dset.routine2")
+ got = {routine: "hello", routine2: "world"}
+ assert got[routine] == "hello"
+ assert got[routine2] == "world"
+
+ routine_too = target_class.from_string("my-proj.my_dset.my_routine")
+ assert got[routine_too] == "hello"
+
+
+def test_repr(target_class):
+ routine = target_class.from_string("my-proj.my_dset.my_routine")
+ got = repr(routine)
+ assert got == "RoutineReference.from_string('my-proj.my_dset.my_routine')"
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test__helpers.py b/testbed/googleapis__python-bigquery/tests/unit/test__helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a307498f6742149802c3a9e86580bfbd2d4dd10
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test__helpers.py
@@ -0,0 +1,1663 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import datetime
+import decimal
+import json
+import os
+import warnings
+import pytest
+import packaging
+import unittest
+from unittest import mock
+
+import google.api_core
+
+
+@pytest.mark.skipif(
+ packaging.version.parse(getattr(google.api_core, "__version__", "0.0.0"))
+ < packaging.version.Version("2.15.0"),
+ reason="universe_domain not supported with google-api-core < 2.15.0",
+)
+class Test_get_client_universe(unittest.TestCase):
+ def test_with_none(self):
+ from google.cloud.bigquery._helpers import _get_client_universe
+
+ self.assertEqual("googleapis.com", _get_client_universe(None))
+
+ def test_with_dict(self):
+ from google.cloud.bigquery._helpers import _get_client_universe
+
+ options = {"universe_domain": "foo.com"}
+ self.assertEqual("foo.com", _get_client_universe(options))
+
+ def test_with_dict_empty(self):
+ from google.cloud.bigquery._helpers import _get_client_universe
+
+ options = {"universe_domain": ""}
+ self.assertEqual("googleapis.com", _get_client_universe(options))
+
+ def test_with_client_options(self):
+ from google.cloud.bigquery._helpers import _get_client_universe
+ from google.api_core import client_options
+
+ options = client_options.from_dict({"universe_domain": "foo.com"})
+ self.assertEqual("foo.com", _get_client_universe(options))
+
+ @mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"})
+ def test_with_environ(self):
+ from google.cloud.bigquery._helpers import _get_client_universe
+
+ self.assertEqual("foo.com", _get_client_universe(None))
+
+ @mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"})
+ def test_with_environ_and_dict(self):
+ from google.cloud.bigquery._helpers import _get_client_universe
+
+ options = ({"credentials_file": "file.json"},)
+ self.assertEqual("foo.com", _get_client_universe(options))
+
+ @mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"})
+ def test_with_environ_and_empty_options(self):
+ from google.cloud.bigquery._helpers import _get_client_universe
+ from google.api_core import client_options
+
+ options = client_options.from_dict({})
+ self.assertEqual("foo.com", _get_client_universe(options))
+
+ @mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": ""})
+ def test_with_environ_empty(self):
+ from google.cloud.bigquery._helpers import _get_client_universe
+
+ self.assertEqual("googleapis.com", _get_client_universe(None))
+
+
+class Test_validate_universe(unittest.TestCase):
+ def test_with_none(self):
+ from google.cloud.bigquery._helpers import _validate_universe
+
+ # should not raise
+ _validate_universe("googleapis.com", None)
+
+ def test_with_no_universe_creds(self):
+ from google.cloud.bigquery._helpers import _validate_universe
+ from .helpers import make_creds
+
+ creds = make_creds(None)
+ # should not raise
+ _validate_universe("googleapis.com", creds)
+
+ def test_with_matched_universe_creds(self):
+ from google.cloud.bigquery._helpers import _validate_universe
+ from .helpers import make_creds
+
+ creds = make_creds("googleapis.com")
+ # should not raise
+ _validate_universe("googleapis.com", creds)
+
+ def test_with_mismatched_universe_creds(self):
+ from google.cloud.bigquery._helpers import _validate_universe
+ from .helpers import make_creds
+
+ creds = make_creds("foo.com")
+ with self.assertRaises(ValueError):
+ _validate_universe("googleapis.com", creds)
+
+
+class Test_not_null(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _not_null
+
+ return _not_null(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertFalse(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ self.assertTrue(self._call_fut(None, _Field("REQUIRED")))
+
+ def test_w_value(self):
+ self.assertTrue(self._call_fut(object(), object()))
+
+
+class Test_int_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _int_from_json
+
+ return _int_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_string_value(self):
+ coerced = self._call_fut("42", object())
+ self.assertEqual(coerced, 42)
+
+ def test_w_float_value(self):
+ coerced = self._call_fut(42, object())
+ self.assertEqual(coerced, 42)
+
+
+class Test_json_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _json_from_json
+
+ return _json_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_json_field(self):
+ data_field = _Field("REQUIRED", "data", "JSON")
+
+ value = json.dumps(
+ {"v": {"key": "value"}},
+ )
+
+ expected_output = {"v": {"key": "value"}}
+ coerced_output = self._call_fut(value, data_field)
+ self.assertEqual(coerced_output, expected_output)
+
+ def test_w_string_value(self):
+ coerced = self._call_fut('"foo"', object())
+ self.assertEqual(coerced, "foo")
+
+
+class Test_float_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _float_from_json
+
+ return _float_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_string_value(self):
+ coerced = self._call_fut("3.1415", object())
+ self.assertEqual(coerced, 3.1415)
+
+ def test_w_float_value(self):
+ coerced = self._call_fut(3.1415, object())
+ self.assertEqual(coerced, 3.1415)
+
+
+class Test_decimal_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _decimal_from_json
+
+ return _decimal_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_string_value(self):
+ coerced = self._call_fut("3.1415", object())
+ self.assertEqual(coerced, decimal.Decimal("3.1415"))
+
+ def test_w_float_value(self):
+ coerced = self._call_fut(3.1415, object())
+ # There is no exact float representation of 3.1415.
+ self.assertEqual(coerced, decimal.Decimal(3.1415))
+
+
+class Test_bool_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _bool_from_json
+
+ return _bool_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(AttributeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_value_t(self):
+ coerced = self._call_fut("T", object())
+ self.assertTrue(coerced)
+
+ def test_w_value_true(self):
+ coerced = self._call_fut("True", object())
+ self.assertTrue(coerced)
+
+ def test_w_value_1(self):
+ coerced = self._call_fut("1", object())
+ self.assertTrue(coerced)
+
+ def test_w_value_other(self):
+ coerced = self._call_fut("f", object())
+ self.assertFalse(coerced)
+
+
+class Test_string_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _string_from_json
+
+ return _string_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ self.assertIsNone(self._call_fut(None, _Field("REQUIRED")))
+
+ def test_w_string_value(self):
+ coerced = self._call_fut("Wonderful!", object())
+ self.assertEqual(coerced, "Wonderful!")
+
+
+class Test_bytes_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _bytes_from_json
+
+ return _bytes_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_base64_encoded_bytes(self):
+ expected = b"Wonderful!"
+ encoded = base64.standard_b64encode(expected)
+ coerced = self._call_fut(encoded, object())
+ self.assertEqual(coerced, expected)
+
+ def test_w_base64_encoded_text(self):
+ expected = b"Wonderful!"
+ encoded = base64.standard_b64encode(expected).decode("ascii")
+ coerced = self._call_fut(encoded, object())
+ self.assertEqual(coerced, expected)
+
+
+class Test_timestamp_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _timestamp_from_json
+
+ return _timestamp_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_string_int_value(self):
+ from google.cloud._helpers import _EPOCH
+
+ coerced = self._call_fut("1234567", object())
+ self.assertEqual(
+ coerced, _EPOCH + datetime.timedelta(seconds=1, microseconds=234567)
+ )
+
+ def test_w_int_value(self):
+ from google.cloud._helpers import _EPOCH
+
+ coerced = self._call_fut(1234567, object())
+ self.assertEqual(
+ coerced, _EPOCH + datetime.timedelta(seconds=1, microseconds=234567)
+ )
+
+
+class Test_timestamp_query_param_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery import _helpers
+
+ return _helpers._timestamp_query_param_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_timestamp_valid(self):
+ from google.cloud._helpers import UTC
+
+ samples = [
+ (
+ "2016-12-20 15:58:27.339328+00:00",
+ datetime.datetime(2016, 12, 20, 15, 58, 27, 339328, tzinfo=UTC),
+ ),
+ (
+ "2016-12-20 15:58:27+00:00",
+ datetime.datetime(2016, 12, 20, 15, 58, 27, tzinfo=UTC),
+ ),
+ (
+ "2016-12-20T15:58:27.339328+00:00",
+ datetime.datetime(2016, 12, 20, 15, 58, 27, 339328, tzinfo=UTC),
+ ),
+ (
+ "2016-12-20T15:58:27+00:00",
+ datetime.datetime(2016, 12, 20, 15, 58, 27, tzinfo=UTC),
+ ),
+ (
+ "2016-12-20 15:58:27.339328Z",
+ datetime.datetime(2016, 12, 20, 15, 58, 27, 339328, tzinfo=UTC),
+ ),
+ (
+ "2016-12-20 15:58:27Z",
+ datetime.datetime(2016, 12, 20, 15, 58, 27, tzinfo=UTC),
+ ),
+ (
+ "2016-12-20T15:58:27.339328Z",
+ datetime.datetime(2016, 12, 20, 15, 58, 27, 339328, tzinfo=UTC),
+ ),
+ (
+ "2016-12-20T15:58:27Z",
+ datetime.datetime(2016, 12, 20, 15, 58, 27, tzinfo=UTC),
+ ),
+ ]
+ for timestamp_str, expected_result in samples:
+ self.assertEqual(
+ self._call_fut(timestamp_str, _Field("NULLABLE")), expected_result
+ )
+
+ def test_w_timestamp_invalid(self):
+ with self.assertRaises(ValueError):
+ self._call_fut("definitely-not-a-timestamp", _Field("NULLABLE"))
+
+
+class Test_datetime_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _datetime_from_json
+
+ return _datetime_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_string_value(self):
+ coerced = self._call_fut("2016-12-02T18:51:33", object())
+ self.assertEqual(coerced, datetime.datetime(2016, 12, 2, 18, 51, 33))
+
+ def test_w_microseconds(self):
+ coerced = self._call_fut("2015-05-22T10:11:12.987654", object())
+ self.assertEqual(coerced, datetime.datetime(2015, 5, 22, 10, 11, 12, 987654))
+
+
+class Test_date_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _date_from_json
+
+ return _date_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_string_value(self):
+ coerced = self._call_fut("1987-09-22", object())
+ self.assertEqual(coerced, datetime.date(1987, 9, 22))
+
+
+class Test_time_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _time_from_json
+
+ return _time_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_string_value(self):
+ coerced = self._call_fut("12:12:27", object())
+ self.assertEqual(coerced, datetime.time(12, 12, 27))
+
+ def test_w_subsecond_string_value(self):
+ coerced = self._call_fut("12:12:27.123456", object())
+ self.assertEqual(coerced, datetime.time(12, 12, 27, 123456))
+
+ def test_w_bogus_string_value(self):
+ with self.assertRaises(ValueError):
+ self._call_fut("12:12:27.123", object())
+
+
+class Test_range_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _range_from_json
+
+ return _range_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_wrong_format(self):
+ range_field = _Field(
+ "NULLABLE",
+ field_type="RANGE",
+ range_element_type=_Field("NULLABLE", element_type="DATE"),
+ )
+ with self.assertRaises(ValueError):
+ self._call_fut("[2009-06-172019-06-17)", range_field)
+
+ def test_w_wrong_element_type(self):
+ range_field = _Field(
+ "NULLABLE",
+ field_type="RANGE",
+ range_element_type=_Field("NULLABLE", element_type="TIME"),
+ )
+ with self.assertRaises(ValueError):
+ self._call_fut("[15:31:38, 15:50:38)", range_field)
+
+ def test_w_unbounded_value(self):
+ range_field = _Field(
+ "NULLABLE",
+ field_type="RANGE",
+ range_element_type=_Field("NULLABLE", element_type="DATE"),
+ )
+ coerced = self._call_fut("[UNBOUNDED, 2019-06-17)", range_field)
+ self.assertEqual(
+ coerced,
+ {"start": None, "end": datetime.date(2019, 6, 17)},
+ )
+
+ def test_w_date_value(self):
+ range_field = _Field(
+ "NULLABLE",
+ field_type="RANGE",
+ range_element_type=_Field("NULLABLE", element_type="DATE"),
+ )
+ coerced = self._call_fut("[2009-06-17, 2019-06-17)", range_field)
+ self.assertEqual(
+ coerced,
+ {
+ "start": datetime.date(2009, 6, 17),
+ "end": datetime.date(2019, 6, 17),
+ },
+ )
+
+ def test_w_datetime_value(self):
+ range_field = _Field(
+ "NULLABLE",
+ field_type="RANGE",
+ range_element_type=_Field("NULLABLE", element_type="DATETIME"),
+ )
+ coerced = self._call_fut(
+ "[2009-06-17T13:45:30, 2019-06-17T13:45:30)", range_field
+ )
+ self.assertEqual(
+ coerced,
+ {
+ "start": datetime.datetime(2009, 6, 17, 13, 45, 30),
+ "end": datetime.datetime(2019, 6, 17, 13, 45, 30),
+ },
+ )
+
+ def test_w_timestamp_value(self):
+ from google.cloud._helpers import _EPOCH
+
+ range_field = _Field(
+ "NULLABLE",
+ field_type="RANGE",
+ range_element_type=_Field("NULLABLE", element_type="TIMESTAMP"),
+ )
+ coerced = self._call_fut("[1234567, 1234789)", range_field)
+ self.assertEqual(
+ coerced,
+ {
+ "start": _EPOCH + datetime.timedelta(seconds=1, microseconds=234567),
+ "end": _EPOCH + datetime.timedelta(seconds=1, microseconds=234789),
+ },
+ )
+
+
+class Test_record_from_json(unittest.TestCase):
+ def _call_fut(self, value, field):
+ from google.cloud.bigquery._helpers import _record_from_json
+
+ return _record_from_json(value, field)
+
+ def test_w_none_nullable(self):
+ self.assertIsNone(self._call_fut(None, _Field("NULLABLE")))
+
+ def test_w_none_required(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(None, _Field("REQUIRED"))
+
+ def test_w_nullable_subfield_none(self):
+ subfield = _Field("NULLABLE", "age", "INTEGER")
+ field = _Field("REQUIRED", fields=[subfield])
+ value = {"f": [{"v": None}]}
+ coerced = self._call_fut(value, field)
+ self.assertEqual(coerced, {"age": None})
+
+ def test_w_scalar_subfield(self):
+ subfield = _Field("REQUIRED", "age", "INTEGER")
+ field = _Field("REQUIRED", fields=[subfield])
+ value = {"f": [{"v": 42}]}
+ coerced = self._call_fut(value, field)
+ self.assertEqual(coerced, {"age": 42})
+
+ def test_w_scalar_subfield_geography(self):
+ subfield = _Field("REQUIRED", "geo", "GEOGRAPHY")
+ field = _Field("REQUIRED", fields=[subfield])
+ value = {"f": [{"v": "POINT(1, 2)"}]}
+ coerced = self._call_fut(value, field)
+ self.assertEqual(coerced, {"geo": "POINT(1, 2)"})
+
+ def test_w_repeated_subfield(self):
+ subfield = _Field("REPEATED", "color", "STRING")
+ field = _Field("REQUIRED", fields=[subfield])
+ value = {"f": [{"v": [{"v": "red"}, {"v": "yellow"}, {"v": "blue"}]}]}
+ coerced = self._call_fut(value, field)
+ self.assertEqual(coerced, {"color": ["red", "yellow", "blue"]})
+
+ def test_w_record_subfield(self):
+ full_name = _Field("REQUIRED", "full_name", "STRING")
+ area_code = _Field("REQUIRED", "area_code", "STRING")
+ local_number = _Field("REQUIRED", "local_number", "STRING")
+ rank = _Field("REQUIRED", "rank", "INTEGER")
+ phone = _Field(
+ "NULLABLE", "phone", "RECORD", fields=[area_code, local_number, rank]
+ )
+ person = _Field("REQUIRED", "person", "RECORD", fields=[full_name, phone])
+ value = {
+ "f": [
+ {"v": "Phred Phlyntstone"},
+ {"v": {"f": [{"v": "800"}, {"v": "555-1212"}, {"v": 1}]}},
+ ]
+ }
+ expected = {
+ "full_name": "Phred Phlyntstone",
+ "phone": {"area_code": "800", "local_number": "555-1212", "rank": 1},
+ }
+ coerced = self._call_fut(value, person)
+ self.assertEqual(coerced, expected)
+
+
+class Test_field_to_index_mapping(unittest.TestCase):
+ def _call_fut(self, schema):
+ from google.cloud.bigquery._helpers import _field_to_index_mapping
+
+ return _field_to_index_mapping(schema)
+
+ def test_w_empty_schema(self):
+ self.assertEqual(self._call_fut([]), {})
+
+ def test_w_non_empty_schema(self):
+ schema = [
+ _Field("REPEATED", "first", "INTEGER"),
+ _Field("REQUIRED", "second", "INTEGER"),
+ _Field("REPEATED", "third", "INTEGER"),
+ ]
+ self.assertEqual(self._call_fut(schema), {"first": 0, "second": 1, "third": 2})
+
+
+class Test_row_tuple_from_json(unittest.TestCase):
+ def _call_fut(self, row, schema):
+ from google.cloud.bigquery._helpers import _row_tuple_from_json
+
+ with _field_isinstance_patcher():
+ return _row_tuple_from_json(row, schema)
+
+ def test_w_single_scalar_column(self):
+ # SELECT 1 AS col
+ col = _Field("REQUIRED", "col", "INTEGER")
+ row = {"f": [{"v": "1"}]}
+ self.assertEqual(self._call_fut(row, schema=[col]), (1,))
+
+ def test_w_unknown_type(self):
+ # SELECT 1 AS col
+ col = _Field("REQUIRED", "col", "UNKNOWN")
+ row = {"f": [{"v": "1"}]}
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertEqual(self._call_fut(row, schema=[col]), ("1",))
+ self.assertEqual(len(warned), 1)
+ warning = warned[0]
+ self.assertTrue("UNKNOWN" in str(warning))
+ self.assertTrue("col" in str(warning))
+
+ def test_w_single_scalar_geography_column(self):
+ # SELECT 1 AS col
+ col = _Field("REQUIRED", "geo", "GEOGRAPHY")
+ row = {"f": [{"v": "POINT(1, 2)"}]}
+ self.assertEqual(self._call_fut(row, schema=[col]), ("POINT(1, 2)",))
+
+ def test_w_single_struct_column(self):
+ # SELECT (1, 2) AS col
+ sub_1 = _Field("REQUIRED", "sub_1", "INTEGER")
+ sub_2 = _Field("REQUIRED", "sub_2", "INTEGER")
+ col = _Field("REQUIRED", "col", "RECORD", fields=[sub_1, sub_2])
+ row = {"f": [{"v": {"f": [{"v": "1"}, {"v": "2"}]}}]}
+ self.assertEqual(self._call_fut(row, schema=[col]), ({"sub_1": 1, "sub_2": 2},))
+
+ def test_w_single_array_column(self):
+ # SELECT [1, 2, 3] as col
+ col = _Field("REPEATED", "col", "INTEGER")
+ row = {"f": [{"v": [{"v": "1"}, {"v": "2"}, {"v": "3"}]}]}
+ self.assertEqual(self._call_fut(row, schema=[col]), ([1, 2, 3],))
+
+ def test_w_unknown_type_repeated(self):
+ # SELECT 1 AS col
+ col = _Field("REPEATED", "col", "UNKNOWN")
+ row = {"f": [{"v": [{"v": "1"}, {"v": "2"}, {"v": "3"}]}]}
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertEqual(self._call_fut(row, schema=[col]), (["1", "2", "3"],))
+ self.assertEqual(len(warned), 1)
+ warning = warned[0]
+ self.assertTrue("UNKNOWN" in str(warning))
+ self.assertTrue("col" in str(warning))
+
+ def test_w_struct_w_nested_array_column(self):
+ # SELECT ([1, 2], 3, [4, 5]) as col
+ first = _Field("REPEATED", "first", "INTEGER")
+ second = _Field("REQUIRED", "second", "INTEGER")
+ third = _Field("REPEATED", "third", "INTEGER")
+ col = _Field("REQUIRED", "col", "RECORD", fields=[first, second, third])
+ row = {
+ "f": [
+ {
+ "v": {
+ "f": [
+ {"v": [{"v": "1"}, {"v": "2"}]},
+ {"v": "3"},
+ {"v": [{"v": "4"}, {"v": "5"}]},
+ ]
+ }
+ }
+ ]
+ }
+ self.assertEqual(
+ self._call_fut(row, schema=[col]),
+ ({"first": [1, 2], "second": 3, "third": [4, 5]},),
+ )
+
+ def test_w_unknown_type_subfield(self):
+ # SELECT [(1, 2, 3), (4, 5, 6)] as col
+ first = _Field("REPEATED", "first", "UNKNOWN1")
+ second = _Field("REQUIRED", "second", "UNKNOWN2")
+ third = _Field("REPEATED", "third", "INTEGER")
+ col = _Field("REQUIRED", "col", "RECORD", fields=[first, second, third])
+ row = {
+ "f": [
+ {
+ "v": {
+ "f": [
+ {"v": [{"v": "1"}, {"v": "2"}]},
+ {"v": "3"},
+ {"v": [{"v": "4"}, {"v": "5"}]},
+ ]
+ }
+ }
+ ]
+ }
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertEqual(
+ self._call_fut(row, schema=[col]),
+ ({"first": ["1", "2"], "second": "3", "third": [4, 5]},),
+ )
+ self.assertEqual(len(warned), 2) # 1 warning per unknown field.
+ warned = [str(warning) for warning in warned]
+ self.assertTrue(
+ any(["first" in warning and "UNKNOWN1" in warning for warning in warned])
+ )
+ self.assertTrue(
+ any(["second" in warning and "UNKNOWN2" in warning for warning in warned])
+ )
+
+ def test_w_array_of_struct(self):
+ # SELECT [(1, 2, 3), (4, 5, 6)] as col
+ first = _Field("REQUIRED", "first", "INTEGER")
+ second = _Field("REQUIRED", "second", "INTEGER")
+ third = _Field("REQUIRED", "third", "INTEGER")
+ col = _Field("REPEATED", "col", "RECORD", fields=[first, second, third])
+ row = {
+ "f": [
+ {
+ "v": [
+ {"v": {"f": [{"v": "1"}, {"v": "2"}, {"v": "3"}]}},
+ {"v": {"f": [{"v": "4"}, {"v": "5"}, {"v": "6"}]}},
+ ]
+ }
+ ]
+ }
+ self.assertEqual(
+ self._call_fut(row, schema=[col]),
+ (
+ [
+ {"first": 1, "second": 2, "third": 3},
+ {"first": 4, "second": 5, "third": 6},
+ ],
+ ),
+ )
+
+ def test_w_array_of_struct_w_array(self):
+ # SELECT [([1, 2, 3], 4), ([5, 6], 7)]
+ first = _Field("REPEATED", "first", "INTEGER")
+ second = _Field("REQUIRED", "second", "INTEGER")
+ col = _Field("REPEATED", "col", "RECORD", fields=[first, second])
+ row = {
+ "f": [
+ {
+ "v": [
+ {
+ "v": {
+ "f": [
+ {"v": [{"v": "1"}, {"v": "2"}, {"v": "3"}]},
+ {"v": "4"},
+ ]
+ }
+ },
+ {"v": {"f": [{"v": [{"v": "5"}, {"v": "6"}]}, {"v": "7"}]}},
+ ]
+ }
+ ]
+ }
+ self.assertEqual(
+ self._call_fut(row, schema=[col]),
+ ([{"first": [1, 2, 3], "second": 4}, {"first": [5, 6], "second": 7}],),
+ )
+
+
+class Test_rows_from_json(unittest.TestCase):
+ def _call_fut(self, rows, schema):
+ from google.cloud.bigquery._helpers import _rows_from_json
+
+ with _field_isinstance_patcher():
+ return _rows_from_json(rows, schema)
+
+ def test_w_record_subfield(self):
+ from google.cloud.bigquery.table import Row
+
+ full_name = _Field("REQUIRED", "full_name", "STRING")
+ area_code = _Field("REQUIRED", "area_code", "STRING")
+ local_number = _Field("REQUIRED", "local_number", "STRING")
+ rank = _Field("REQUIRED", "rank", "INTEGER")
+ phone = _Field(
+ "NULLABLE", "phone", "RECORD", fields=[area_code, local_number, rank]
+ )
+ color = _Field("REPEATED", "color", "STRING")
+ schema = [full_name, phone, color]
+ rows = [
+ {
+ "f": [
+ {"v": "Phred Phlyntstone"},
+ {"v": {"f": [{"v": "800"}, {"v": "555-1212"}, {"v": 1}]}},
+ {"v": [{"v": "orange"}, {"v": "black"}]},
+ ]
+ },
+ {
+ "f": [
+ {"v": "Bharney Rhubble"},
+ {"v": {"f": [{"v": "877"}, {"v": "768-5309"}, {"v": 2}]}},
+ {"v": [{"v": "brown"}]},
+ ]
+ },
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": None}, {"v": []}]},
+ ]
+ phred_phone = {"area_code": "800", "local_number": "555-1212", "rank": 1}
+ bharney_phone = {"area_code": "877", "local_number": "768-5309", "rank": 2}
+ f2i = {"full_name": 0, "phone": 1, "color": 2}
+ expected = [
+ Row(("Phred Phlyntstone", phred_phone, ["orange", "black"]), f2i),
+ Row(("Bharney Rhubble", bharney_phone, ["brown"]), f2i),
+ Row(("Wylma Phlyntstone", None, []), f2i),
+ ]
+ coerced = self._call_fut(rows, schema)
+ self.assertEqual(coerced, expected)
+
+ def test_w_int64_float64_bool(self):
+ from google.cloud.bigquery.table import Row
+
+ # "Standard" SQL dialect uses 'INT64', 'FLOAT64', 'BOOL'.
+ candidate = _Field("REQUIRED", "candidate", "STRING")
+ votes = _Field("REQUIRED", "votes", "INT64")
+ percentage = _Field("REQUIRED", "percentage", "FLOAT64")
+ incumbent = _Field("REQUIRED", "incumbent", "BOOL")
+ schema = [candidate, votes, percentage, incumbent]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": 8}, {"v": 0.25}, {"v": "true"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": 4}, {"v": 0.125}, {"v": "false"}]},
+ {
+ "f": [
+ {"v": "Wylma Phlyntstone"},
+ {"v": 20},
+ {"v": 0.625},
+ {"v": "false"},
+ ]
+ },
+ ]
+ f2i = {"candidate": 0, "votes": 1, "percentage": 2, "incumbent": 3}
+ expected = [
+ Row(("Phred Phlyntstone", 8, 0.25, True), f2i),
+ Row(("Bharney Rhubble", 4, 0.125, False), f2i),
+ Row(("Wylma Phlyntstone", 20, 0.625, False), f2i),
+ ]
+ coerced = self._call_fut(rows, schema)
+ self.assertEqual(coerced, expected)
+
+
+class Test_int_to_json(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _int_to_json
+
+ return _int_to_json(value)
+
+ def test_w_int(self):
+ self.assertEqual(self._call_fut(123), "123")
+
+ def test_w_string(self):
+ self.assertEqual(self._call_fut("123"), "123")
+
+
+class Test_float_to_json(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _float_to_json
+
+ return _float_to_json(value)
+
+ def test_w_none(self):
+ self.assertEqual(self._call_fut(None), None)
+
+ def test_w_non_numeric(self):
+ with self.assertRaises(TypeError):
+ self._call_fut(object())
+
+ def test_w_integer(self):
+ result = self._call_fut(123)
+ self.assertIsInstance(result, float)
+ self.assertEqual(result, 123.0)
+
+ def test_w_float(self):
+ self.assertEqual(self._call_fut(1.23), 1.23)
+
+ def test_w_float_as_string(self):
+ self.assertEqual(self._call_fut("1.23"), 1.23)
+
+ def test_w_nan(self):
+ result = self._call_fut(float("nan"))
+ self.assertEqual(result.lower(), "nan")
+
+ def test_w_nan_as_string(self):
+ result = self._call_fut("NaN")
+ self.assertEqual(result.lower(), "nan")
+
+ def test_w_infinity(self):
+ result = self._call_fut(float("inf"))
+ self.assertEqual(result.lower(), "inf")
+
+ def test_w_infinity_as_string(self):
+ result = self._call_fut("inf")
+ self.assertEqual(result.lower(), "inf")
+
+ def test_w_negative_infinity(self):
+ result = self._call_fut(float("-inf"))
+ self.assertEqual(result.lower(), "-inf")
+
+ def test_w_negative_infinity_as_string(self):
+ result = self._call_fut("-inf")
+ self.assertEqual(result.lower(), "-inf")
+
+
+class Test_decimal_to_json(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _decimal_to_json
+
+ return _decimal_to_json(value)
+
+ def test_w_float(self):
+ self.assertEqual(self._call_fut(1.23), 1.23)
+
+ def test_w_string(self):
+ self.assertEqual(self._call_fut("1.23"), "1.23")
+
+ def test_w_decimal(self):
+ self.assertEqual(self._call_fut(decimal.Decimal("1.23")), "1.23")
+
+
+class Test_bool_to_json(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _bool_to_json
+
+ return _bool_to_json(value)
+
+ def test_w_true(self):
+ self.assertEqual(self._call_fut(True), "true")
+
+ def test_w_false(self):
+ self.assertEqual(self._call_fut(False), "false")
+
+ def test_w_string(self):
+ self.assertEqual(self._call_fut("false"), "false")
+
+
+class Test_bytes_to_json(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _bytes_to_json
+
+ return _bytes_to_json(value)
+
+ def test_w_non_bytes(self):
+ non_bytes = object()
+ self.assertIs(self._call_fut(non_bytes), non_bytes)
+
+ def test_w_bytes(self):
+ source = b"source"
+ expected = "c291cmNl"
+ converted = self._call_fut(source)
+ self.assertEqual(converted, expected)
+
+
+class Test_timestamp_to_json_parameter(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _timestamp_to_json_parameter
+
+ return _timestamp_to_json_parameter(value)
+
+ def test_w_float(self):
+ self.assertEqual(self._call_fut(1.234567), 1.234567)
+
+ def test_w_string(self):
+ ZULU = "2016-12-20 15:58:27.339328+00:00"
+ self.assertEqual(self._call_fut(ZULU), ZULU)
+
+ def test_w_datetime_wo_zone(self):
+ ZULU = "2016-12-20 15:58:27.339328+00:00"
+ when = datetime.datetime(2016, 12, 20, 15, 58, 27, 339328)
+ self.assertEqual(self._call_fut(when), ZULU)
+
+ def test_w_datetime_w_non_utc_zone(self):
+ class _Zone(datetime.tzinfo):
+ def utcoffset(self, _):
+ return datetime.timedelta(minutes=-240)
+
+ ZULU = "2016-12-20 19:58:27.339328+00:00"
+ when = datetime.datetime(2016, 12, 20, 15, 58, 27, 339328, tzinfo=_Zone())
+ self.assertEqual(self._call_fut(when), ZULU)
+
+ def test_w_datetime_w_utc_zone(self):
+ from google.cloud._helpers import UTC
+
+ ZULU = "2016-12-20 15:58:27.339328+00:00"
+ when = datetime.datetime(2016, 12, 20, 15, 58, 27, 339328, tzinfo=UTC)
+ self.assertEqual(self._call_fut(when), ZULU)
+
+
+class Test_timestamp_to_json_row(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _timestamp_to_json_row
+
+ return _timestamp_to_json_row(value)
+
+ def test_w_float(self):
+ self.assertEqual(self._call_fut(1.234567), 1.234567)
+
+ def test_w_string(self):
+ ZULU = "2016-12-20 15:58:27.339328+00:00"
+ self.assertEqual(self._call_fut(ZULU), ZULU)
+
+ def test_w_datetime_no_zone(self):
+ when = datetime.datetime(2016, 12, 20, 15, 58, 27, 339328)
+ self.assertEqual(self._call_fut(when), "2016-12-20T15:58:27.339328Z")
+
+ def test_w_datetime_w_utc_zone(self):
+ from google.cloud._helpers import UTC
+
+ when = datetime.datetime(2020, 11, 17, 1, 6, 52, 353795, tzinfo=UTC)
+ self.assertEqual(self._call_fut(when), "2020-11-17T01:06:52.353795Z")
+
+ def test_w_datetime_w_non_utc_zone(self):
+ class EstZone(datetime.tzinfo):
+ def utcoffset(self, _):
+ return datetime.timedelta(minutes=-300)
+
+ when = datetime.datetime(2020, 11, 17, 1, 6, 52, 353795, tzinfo=EstZone())
+ self.assertEqual(self._call_fut(when), "2020-11-17T06:06:52.353795Z")
+
+
+class Test_datetime_to_json(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _datetime_to_json
+
+ return _datetime_to_json(value)
+
+ def test_w_string(self):
+ RFC3339 = "2016-12-03T14:14:51Z"
+ self.assertEqual(self._call_fut(RFC3339), RFC3339)
+
+ def test_w_datetime(self):
+ from google.cloud._helpers import UTC
+
+ when = datetime.datetime(2016, 12, 3, 14, 11, 27, 123456, tzinfo=UTC)
+ self.assertEqual(self._call_fut(when), "2016-12-03T14:11:27.123456")
+
+ def test_w_datetime_w_non_utc_zone(self):
+ class EstZone(datetime.tzinfo):
+ def utcoffset(self, _):
+ return datetime.timedelta(minutes=-300)
+
+ when = datetime.datetime(2016, 12, 3, 14, 11, 27, 123456, tzinfo=EstZone())
+ self.assertEqual(self._call_fut(when), "2016-12-03T19:11:27.123456")
+
+
+class Test_date_to_json(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _date_to_json
+
+ return _date_to_json(value)
+
+ def test_w_string(self):
+ RFC3339 = "2016-12-03"
+ self.assertEqual(self._call_fut(RFC3339), RFC3339)
+
+ def test_w_datetime(self):
+ when = datetime.date(2016, 12, 3)
+ self.assertEqual(self._call_fut(when), "2016-12-03")
+
+
+class Test_time_to_json(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _time_to_json
+
+ return _time_to_json(value)
+
+ def test_w_string(self):
+ RFC3339 = "12:13:41"
+ self.assertEqual(self._call_fut(RFC3339), RFC3339)
+
+ def test_w_datetime(self):
+ when = datetime.time(12, 13, 41)
+ self.assertEqual(self._call_fut(when), "12:13:41")
+
+
+def _make_field(
+ field_type,
+ mode="NULLABLE",
+ name="testing",
+ fields=(),
+ range_element_type=None,
+):
+ from google.cloud.bigquery.schema import SchemaField
+
+ return SchemaField(
+ name=name,
+ field_type=field_type,
+ mode=mode,
+ fields=fields,
+ range_element_type=range_element_type,
+ )
+
+
+class Test_scalar_field_to_json(unittest.TestCase):
+ def _call_fut(self, field, value):
+ from google.cloud.bigquery._helpers import _scalar_field_to_json
+
+ return _scalar_field_to_json(field, value)
+
+ def test_w_unknown_field_type(self):
+ field = _make_field("UNKNOWN")
+ original = object()
+ with warnings.catch_warnings(record=True) as warned:
+ converted = self._call_fut(field, original)
+ self.assertIs(converted, original)
+ self.assertEqual(len(warned), 1)
+ warning = warned[0]
+ self.assertTrue("UNKNOWN" in str(warning))
+
+ def test_w_known_field_type(self):
+ field = _make_field("INT64")
+ original = 42
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, str(original))
+
+ def test_w_scalar_none(self):
+ import google.cloud.bigquery._helpers as module_under_test
+
+ scalar_types = module_under_test._SCALAR_VALUE_TO_JSON_ROW.keys()
+ for type_ in scalar_types:
+ field = _make_field(type_)
+ original = None
+ converted = self._call_fut(field, original)
+ self.assertIsNone(converted, msg=f"{type_} did not return None")
+
+
+class Test_single_field_to_json(unittest.TestCase):
+ def _call_fut(self, field, value):
+ from google.cloud.bigquery._helpers import _single_field_to_json
+
+ return _single_field_to_json(field, value)
+
+ def test_w_none(self):
+ field = _make_field("INT64")
+ original = None
+ converted = self._call_fut(field, original)
+ self.assertIsNone(converted)
+
+ def test_w_record(self):
+ subfields = [
+ _make_field("INT64", name="one"),
+ _make_field("STRING", name="two"),
+ ]
+ field = _make_field("RECORD", fields=subfields)
+ original = {"one": 42, "two": "two"}
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, {"one": "42", "two": "two"})
+
+ def test_w_scalar(self):
+ field = _make_field("INT64")
+ original = 42
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, str(original))
+
+ def test_w_scalar_ignores_mode(self):
+ field = _make_field("STRING", mode="REPEATED")
+ original = "hello world"
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, original)
+
+ def test_w_scalar_json(self):
+ field = _make_field("JSON")
+ original = {"alpha": "abc", "num": [1, 2, 3]}
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, json.dumps(original))
+
+
+class Test_repeated_field_to_json(unittest.TestCase):
+ def _call_fut(self, field, value):
+ from google.cloud.bigquery._helpers import _repeated_field_to_json
+
+ return _repeated_field_to_json(field, value)
+
+ def test_w_empty(self):
+ field = _make_field("INT64", mode="REPEATED")
+ original = []
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, original)
+ self.assertEqual(field.mode, "REPEATED")
+
+ def test_w_non_empty(self):
+ field = _make_field("INT64", mode="REPEATED")
+ original = [42]
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, [str(value) for value in original])
+ self.assertEqual(field.mode, "REPEATED")
+
+
+class Test_record_field_to_json(unittest.TestCase):
+ def _call_fut(self, field, value):
+ from google.cloud.bigquery._helpers import _record_field_to_json
+
+ return _record_field_to_json(field, value)
+
+ def test_w_empty(self):
+ fields = []
+ original = []
+ converted = self._call_fut(fields, original)
+ self.assertEqual(converted, {})
+
+ def test_w_non_empty_list(self):
+ fields = [
+ _make_field("INT64", name="one", mode="NULLABLE"),
+ _make_field("STRING", name="two", mode="NULLABLE"),
+ ]
+ original = [42, "two"]
+ converted = self._call_fut(fields, original)
+ self.assertEqual(converted, {"one": "42", "two": "two"})
+
+ def test_w_list_missing_fields(self):
+ fields = [
+ _make_field("INT64", name="one", mode="NULLABLE"),
+ _make_field("STRING", name="two", mode="NULLABLE"),
+ ]
+ original = [42]
+
+ with self.assertRaisesRegex(ValueError, r".*not match schema length.*"):
+ self._call_fut(fields, original)
+
+ def test_w_list_too_many_fields(self):
+ fields = [
+ _make_field("INT64", name="one", mode="NULLABLE"),
+ _make_field("STRING", name="two", mode="NULLABLE"),
+ ]
+ original = [42, "two", "three"]
+
+ with self.assertRaisesRegex(ValueError, r".*not match schema length.*"):
+ self._call_fut(fields, original)
+
+ def test_w_non_empty_dict(self):
+ fields = [
+ _make_field("INT64", name="one", mode="NULLABLE"),
+ _make_field("STRING", name="two", mode="NULLABLE"),
+ ]
+ original = {"one": 42, "two": "two"}
+ converted = self._call_fut(fields, original)
+ self.assertEqual(converted, {"one": "42", "two": "two"})
+
+ def test_w_some_missing_nullables(self):
+ fields = [
+ _make_field("INT64", name="one", mode="NULLABLE"),
+ _make_field("STRING", name="two", mode="NULLABLE"),
+ ]
+ original = {"one": 42}
+ converted = self._call_fut(fields, original)
+
+ # missing fields should not be converted to an explicit None
+ self.assertEqual(converted, {"one": "42"})
+
+ def test_w_all_missing_nullables(self):
+ fields = [
+ _make_field("INT64", name="one", mode="NULLABLE"),
+ _make_field("STRING", name="two", mode="NULLABLE"),
+ ]
+ original = {}
+ converted = self._call_fut(fields, original)
+
+ # we should get an empty dict, not None
+ self.assertEqual(converted, {})
+
+ def test_w_explicit_none_value(self):
+ fields = [
+ _make_field("INT64", name="one", mode="NULLABLE"),
+ _make_field("STRING", name="two", mode="NULLABLE"),
+ _make_field("BOOL", name="three", mode="REPEATED"),
+ ]
+ original = {"three": None, "one": 42, "two": None}
+ converted = self._call_fut(fields, original)
+
+ # None values should be dropped regardless of the field type
+ self.assertEqual(converted, {"one": "42"})
+
+ def test_w_dict_unknown_fields(self):
+ fields = [
+ _make_field("INT64", name="one", mode="NULLABLE"),
+ _make_field("STRING", name="two", mode="NULLABLE"),
+ ]
+ original = {
+ "whoami": datetime.date(2020, 7, 20),
+ "one": 111,
+ "two": "222",
+ "void": None,
+ }
+
+ converted = self._call_fut(fields, original)
+
+ # Unknown fields should be included (if not None), but converted as strings.
+ self.assertEqual(
+ converted,
+ {"whoami": "2020-07-20", "one": "111", "two": "222"},
+ )
+
+
+class Test_range_field_to_json(unittest.TestCase):
+ def _call_fut(self, field, value):
+ from google.cloud.bigquery._helpers import _range_field_to_json
+
+ return _range_field_to_json(field, value)
+
+ def test_w_date(self):
+ field = _make_field("RANGE", range_element_type="DATE")
+ start = datetime.date(2016, 12, 3)
+ original = {"start": start}
+ converted = self._call_fut(field.range_element_type, original)
+ expected = {"start": "2016-12-03", "end": None}
+ self.assertEqual(converted, expected)
+
+ def test_w_date_string(self):
+ field = _make_field("RANGE", range_element_type="DATE")
+ original = {"start": "2016-12-03"}
+ converted = self._call_fut(field.range_element_type, original)
+ expected = {"start": "2016-12-03", "end": None}
+ self.assertEqual(converted, expected)
+
+ def test_w_datetime(self):
+ field = _make_field("RANGE", range_element_type="DATETIME")
+ start = datetime.datetime(2016, 12, 3, 14, 11, 27, 123456)
+ original = {"start": start}
+ converted = self._call_fut(field.range_element_type, original)
+ expected = {"start": "2016-12-03T14:11:27.123456", "end": None}
+ self.assertEqual(converted, expected)
+
+ def test_w_datetime_string(self):
+ field = _make_field("RANGE", range_element_type="DATETIME")
+ original = {"start": "2016-12-03T14:11:27.123456"}
+ converted = self._call_fut(field.range_element_type, original)
+ expected = {"start": "2016-12-03T14:11:27.123456", "end": None}
+ self.assertEqual(converted, expected)
+
+ def test_w_timestamp(self):
+ from google.cloud._helpers import UTC
+
+ field = _make_field("RANGE", range_element_type="TIMESTAMP")
+ start = datetime.datetime(2016, 12, 3, 14, 11, 27, 123456, tzinfo=UTC)
+ original = {"start": start}
+ converted = self._call_fut(field.range_element_type, original)
+ expected = {"start": "2016-12-03T14:11:27.123456Z", "end": None}
+ self.assertEqual(converted, expected)
+
+ def test_w_timestamp_string(self):
+ field = _make_field("RANGE", range_element_type="TIMESTAMP")
+ original = {"start": "2016-12-03T14:11:27.123456Z"}
+ converted = self._call_fut(field.range_element_type, original)
+ expected = {"start": "2016-12-03T14:11:27.123456Z", "end": None}
+ self.assertEqual(converted, expected)
+
+ def test_w_timestamp_float(self):
+ field = _make_field("RANGE", range_element_type="TIMESTAMP")
+ original = {"start": 12.34567}
+ converted = self._call_fut(field.range_element_type, original)
+ expected = {"start": 12.34567, "end": None}
+ self.assertEqual(converted, expected)
+
+ def test_w_string_literal(self):
+ field = _make_field("RANGE", range_element_type="DATE")
+ original = "[2016-12-03, UNBOUNDED)"
+ converted = self._call_fut(field.range_element_type, original)
+ expected = {"start": "2016-12-03", "end": None}
+ self.assertEqual(converted, expected)
+
+ def test_w_unsupported_range_element_type(self):
+ field = _make_field("RANGE", range_element_type="TIME")
+ with self.assertRaises(ValueError):
+ self._call_fut(
+ field.range_element_type,
+ {"start": datetime.time(12, 13, 41)},
+ )
+
+ def test_w_no_range_element_type(self):
+ field = _make_field("RANGE")
+ with self.assertRaises(ValueError):
+ self._call_fut(field.range_element_type, "2016-12-03")
+
+ def test_w_incorrect_literal_format(self):
+ field = _make_field("RANGE", range_element_type="DATE")
+ original = "[2016-12-03, UNBOUNDED]"
+ with self.assertRaises(ValueError):
+ self._call_fut(field.range_element_type, original)
+
+ def test_w_unsupported_representation(self):
+ field = _make_field("RANGE", range_element_type="DATE")
+ with self.assertRaises(ValueError):
+ self._call_fut(field.range_element_type, object())
+
+
+class Test_field_to_json(unittest.TestCase):
+ def _call_fut(self, field, value):
+ from google.cloud.bigquery._helpers import _field_to_json
+
+ return _field_to_json(field, value)
+
+ def test_w_none(self):
+ field = _make_field("INT64")
+ original = None
+ converted = self._call_fut(field, original)
+ self.assertIsNone(converted)
+
+ def test_w_repeated(self):
+ field = _make_field("INT64", mode="REPEATED")
+ original = [42, 17]
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, [str(value) for value in original])
+
+ def test_w_record(self):
+ subfields = [
+ _make_field("INT64", name="one"),
+ _make_field("STRING", name="two"),
+ ]
+ field = _make_field("RECORD", fields=subfields)
+ original = {"one": 42, "two": "two"}
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, {"one": "42", "two": "two"})
+
+ def test_w_scalar(self):
+ field = _make_field("INT64")
+ original = 42
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, str(original))
+
+ def test_w_range(self):
+ field = _make_field("RANGE", range_element_type="DATE")
+ original = {"start": "2016-12-03", "end": "2024-12-03"}
+ converted = self._call_fut(field, original)
+ self.assertEqual(converted, original)
+
+
+class Test_snake_to_camel_case(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _snake_to_camel_case
+
+ return _snake_to_camel_case(value)
+
+ def test_w_snake_case_string(self):
+ self.assertEqual(self._call_fut("friendly_name"), "friendlyName")
+
+ def test_w_camel_case_string(self):
+ self.assertEqual(self._call_fut("friendlyName"), "friendlyName")
+
+
+class Test__get_sub_prop(unittest.TestCase):
+ def _call_fut(self, container, keys, **kw):
+ from google.cloud.bigquery._helpers import _get_sub_prop
+
+ return _get_sub_prop(container, keys, **kw)
+
+ def test_w_empty_container_default_default(self):
+ self.assertIsNone(self._call_fut({}, ["key1"]))
+
+ def test_w_missing_key_explicit_default(self):
+ self.assertEqual(self._call_fut({"key2": 2}, ["key1"], default=1), 1)
+
+ def test_w_matching_single_key_in_sequence(self):
+ self.assertEqual(self._call_fut({"key1": 1}, ["key1"]), 1)
+
+ def test_w_matching_single_string_key(self):
+ data = {"k": {"e": {"y": "foo"}}, "key": "bar"}
+ self.assertEqual(self._call_fut(data, "key"), "bar")
+
+ def test_w_matching_first_key_missing_second_key(self):
+ self.assertIsNone(self._call_fut({"key1": {"key3": 3}}, ["key1", "key2"]))
+
+ def test_w_matching_first_key_matching_second_key(self):
+ self.assertEqual(self._call_fut({"key1": {"key2": 2}}, ["key1", "key2"]), 2)
+
+
+class Test__set_sub_prop(unittest.TestCase):
+ def _call_fut(self, container, keys, value):
+ from google.cloud.bigquery._helpers import _set_sub_prop
+
+ return _set_sub_prop(container, keys, value)
+
+ def test_w_empty_container_single_key_in_sequence(self):
+ container = {}
+ self._call_fut(container, ["key1"], "value")
+ self.assertEqual(container, {"key1": "value"})
+
+ def test_w_empty_container_single_string_key(self):
+ container = {}
+ self._call_fut(container, "key", "value")
+ self.assertEqual(container, {"key": "value"})
+
+ def test_w_empty_container_nested_keys(self):
+ container = {}
+ self._call_fut(container, ["key1", "key2", "key3"], "value")
+ self.assertEqual(container, {"key1": {"key2": {"key3": "value"}}})
+
+ def test_w_existing_value(self):
+ container = {"key1": "before"}
+ self._call_fut(container, ["key1"], "after")
+ self.assertEqual(container, {"key1": "after"})
+
+ def test_w_nested_keys_existing_value(self):
+ container = {"key1": {"key2": {"key3": "before"}}}
+ self._call_fut(container, ["key1", "key2", "key3"], "after")
+ self.assertEqual(container, {"key1": {"key2": {"key3": "after"}}})
+
+
+class Test__del_sub_prop(unittest.TestCase):
+ def _call_fut(self, container, keys):
+ from google.cloud.bigquery._helpers import _del_sub_prop
+
+ return _del_sub_prop(container, keys)
+
+ def test_w_single_key(self):
+ container = {"key1": "value"}
+ self._call_fut(container, ["key1"])
+ self.assertEqual(container, {})
+
+ def test_w_empty_container_nested_keys(self):
+ container = {}
+ self._call_fut(container, ["key1", "key2", "key3"])
+ self.assertEqual(container, {"key1": {"key2": {}}})
+
+ def test_w_existing_value_nested_keys(self):
+ container = {"key1": {"key2": {"key3": "value"}}}
+ self._call_fut(container, ["key1", "key2", "key3"])
+ self.assertEqual(container, {"key1": {"key2": {}}})
+
+
+class Test__int_or_none(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _int_or_none
+
+ return _int_or_none(value)
+
+ def test_w_num_string(self):
+ self.assertEqual(self._call_fut("123"), 123)
+
+ def test_w_none(self):
+ self.assertIsNone(self._call_fut(None))
+
+ def test_w_int(self):
+ self.assertEqual(self._call_fut(123), 123)
+
+ def test_w_non_num_string(self):
+ with self.assertRaises(ValueError):
+ self._call_fut("ham")
+
+
+class Test__str_or_none(unittest.TestCase):
+ def _call_fut(self, value):
+ from google.cloud.bigquery._helpers import _str_or_none
+
+ return _str_or_none(value)
+
+ def test_w_int(self):
+ self.assertEqual(self._call_fut(123), "123")
+
+ def test_w_none(self):
+ self.assertIsNone(self._call_fut(None))
+
+ def test_w_str(self):
+ self.assertEqual(self._call_fut("ham"), "ham")
+
+
+class _Field(object):
+ def __init__(
+ self,
+ mode,
+ name="unknown",
+ field_type="UNKNOWN",
+ fields=(),
+ range_element_type=None,
+ element_type=None,
+ ):
+ self.mode = mode
+ self.name = name
+ self.field_type = field_type
+ self.fields = fields
+ self.range_element_type = range_element_type
+ self.element_type = element_type
+
+
+def _field_isinstance_patcher():
+ """A patcher thank makes _Field instances seem like SchemaField instances."""
+ from google.cloud.bigquery.schema import SchemaField
+
+ def fake_isinstance(instance, target_class):
+ if instance.__class__.__name__ != "_Field":
+ return isinstance(instance, target_class) # pragma: NO COVER
+
+ # pretend that _Field() instances are actually instances of SchemaField
+ return target_class is SchemaField or (
+ isinstance(target_class, tuple) and SchemaField in target_class
+ )
+
+ patcher = mock.patch(
+ "google.cloud.bigquery.schema.isinstance", side_effect=fake_isinstance
+ )
+ return patcher
+
+
+def test_decimal_as_float_api_repr():
+ """Make sure decimals get converted to float."""
+ import google.cloud.bigquery.query
+ from decimal import Decimal
+
+ param = google.cloud.bigquery.query.ScalarQueryParameter(
+ "x", "FLOAT64", Decimal(42)
+ )
+ assert param.to_api_repr() == {
+ "parameterType": {"type": "FLOAT64"},
+ "parameterValue": {"value": 42.0},
+ "name": "x",
+ }
+
+
+class Test__get_bigquery_host(unittest.TestCase):
+ @staticmethod
+ def _call_fut():
+ from google.cloud.bigquery._helpers import _get_bigquery_host
+
+ return _get_bigquery_host()
+
+ def test_wo_env_var(self):
+ from google.cloud.bigquery._helpers import _DEFAULT_HOST
+
+ with mock.patch("os.environ", {}):
+ host = self._call_fut()
+
+ self.assertEqual(host, _DEFAULT_HOST)
+
+ def test_w_env_var(self):
+ from google.cloud.bigquery._helpers import BIGQUERY_EMULATOR_HOST
+
+ HOST = "https://api.example.com"
+
+ with mock.patch("os.environ", {BIGQUERY_EMULATOR_HOST: HOST}):
+ host = self._call_fut()
+
+ self.assertEqual(host, HOST)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test__http.py b/testbed/googleapis__python-bigquery/tests/unit/test__http.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd7ecdc428a25e105d12cc979271683976bbd2cf
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test__http.py
@@ -0,0 +1,154 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from unittest import mock
+
+import requests
+
+
+class TestConnection(unittest.TestCase):
+ @staticmethod
+ def _get_default_timeout():
+ from google.cloud.bigquery._http import _http
+
+ return _http._DEFAULT_TIMEOUT
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery._http import Connection
+
+ return Connection
+
+ def _make_one(self, *args, **kw):
+ if "api_endpoint" not in kw:
+ kw["api_endpoint"] = "https://bigquery.googleapis.com"
+
+ return self._get_target_class()(*args, **kw)
+
+ def test_build_api_url_no_extra_query_params(self):
+ from urllib.parse import parse_qsl
+ from urllib.parse import urlsplit
+
+ conn = self._make_one(object())
+ uri = conn.build_api_url("/foo")
+ scheme, netloc, path, qs, _ = urlsplit(uri)
+ self.assertEqual("%s://%s" % (scheme, netloc), conn.API_BASE_URL)
+ self.assertEqual(path, "/".join(["", "bigquery", conn.API_VERSION, "foo"]))
+ parms = dict(parse_qsl(qs))
+ pretty_print = parms.pop("prettyPrint", "false")
+ self.assertEqual(pretty_print, "false")
+ self.assertEqual(parms, {})
+
+ def test_build_api_url_w_custom_endpoint(self):
+ from urllib.parse import parse_qsl
+ from urllib.parse import urlsplit
+
+ custom_endpoint = "https://foo-bigquery.googleapis.com"
+ conn = self._make_one(object(), api_endpoint=custom_endpoint)
+ uri = conn.build_api_url("/foo")
+ scheme, netloc, path, qs, _ = urlsplit(uri)
+ self.assertEqual("%s://%s" % (scheme, netloc), custom_endpoint)
+ self.assertEqual(path, "/".join(["", "bigquery", conn.API_VERSION, "foo"]))
+ parms = dict(parse_qsl(qs))
+ pretty_print = parms.pop("prettyPrint", "false")
+ self.assertEqual(pretty_print, "false")
+ self.assertEqual(parms, {})
+
+ def test_build_api_url_w_extra_query_params(self):
+ from urllib.parse import parse_qsl
+ from urllib.parse import urlsplit
+
+ conn = self._make_one(object())
+ uri = conn.build_api_url("/foo", {"bar": "baz"})
+ scheme, netloc, path, qs, _ = urlsplit(uri)
+ self.assertEqual("%s://%s" % (scheme, netloc), conn.API_BASE_URL)
+ self.assertEqual(path, "/".join(["", "bigquery", conn.API_VERSION, "foo"]))
+ parms = dict(parse_qsl(qs))
+ self.assertEqual(parms["bar"], "baz")
+
+ def test_user_agent(self):
+ from google.cloud import _http as base_http
+
+ http = mock.create_autospec(requests.Session, instance=True)
+ response = requests.Response()
+ response.status_code = 200
+ data = b"brent-spiner"
+ response._content = data
+ http.request.return_value = response
+ client = mock.Mock(_http=http, spec=["_http"])
+
+ conn = self._make_one(client)
+ conn.user_agent = "my-application/1.2.3"
+ req_data = "req-data-boring"
+ result = conn.api_request("GET", "/rainbow", data=req_data, expect_json=False)
+ self.assertEqual(result, data)
+
+ expected_headers = {
+ "Accept-Encoding": "gzip",
+ base_http.CLIENT_INFO_HEADER: conn.user_agent,
+ "User-Agent": conn.user_agent,
+ }
+ expected_uri = conn.build_api_url("/rainbow")
+ http.request.assert_called_once_with(
+ data=req_data,
+ headers=expected_headers,
+ method="GET",
+ url=expected_uri,
+ timeout=self._get_default_timeout(),
+ )
+ self.assertIn("my-application/1.2.3", conn.user_agent)
+
+ def test_extra_headers_replace(self):
+ from google.cloud import _http as base_http
+
+ http = mock.create_autospec(requests.Session, instance=True)
+ response = requests.Response()
+ response.status_code = 200
+ data = b"brent-spiner"
+ response._content = data
+ http.request.return_value = response
+ client = mock.Mock(_http=http, spec=["_http"])
+
+ conn = self._make_one(client)
+ conn.extra_headers = {"x-test-header": "a test value"}
+ req_data = "req-data-boring"
+ result = conn.api_request("GET", "/rainbow", data=req_data, expect_json=False)
+ self.assertEqual(result, data)
+
+ expected_headers = {
+ "Accept-Encoding": "gzip",
+ base_http.CLIENT_INFO_HEADER: conn.user_agent,
+ "User-Agent": conn.user_agent,
+ "x-test-header": "a test value",
+ }
+ expected_uri = conn.build_api_url("/rainbow")
+ http.request.assert_called_once_with(
+ data=req_data,
+ headers=expected_headers,
+ method="GET",
+ url=expected_uri,
+ timeout=self._get_default_timeout(),
+ )
+
+ def test_ctor_mtls(self):
+ conn = self._make_one(object(), api_endpoint=None)
+ self.assertEqual(conn.ALLOW_AUTO_SWITCH_TO_MTLS_URL, True)
+ self.assertEqual(conn.API_BASE_URL, "https://bigquery.googleapis.com")
+ self.assertEqual(conn.API_BASE_MTLS_URL, "https://bigquery.mtls.googleapis.com")
+
+ conn = self._make_one(object(), api_endpoint="http://foo")
+ self.assertEqual(conn.ALLOW_AUTO_SWITCH_TO_MTLS_URL, False)
+ self.assertEqual(conn.API_BASE_URL, "http://foo")
+ self.assertEqual(conn.API_BASE_MTLS_URL, "https://bigquery.mtls.googleapis.com")
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test__job_helpers.py b/testbed/googleapis__python-bigquery/tests/unit/test__job_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..96914d9f968024f1b6676709188725a28f52b549
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test__job_helpers.py
@@ -0,0 +1,1229 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, Optional
+from unittest import mock
+
+import freezegun
+import google.api_core.exceptions
+from google.api_core import retry as retries
+import pytest
+
+from google.cloud.bigquery import _job_helpers
+from google.cloud.bigquery import enums
+from google.cloud.bigquery import retry
+from google.cloud.bigquery.client import Client
+from google.cloud.bigquery.job import copy_ as job_copy
+from google.cloud.bigquery.job import extract as job_extract
+from google.cloud.bigquery.job import load as job_load
+from google.cloud.bigquery.job import query as job_query
+from google.cloud.bigquery.query import ConnectionProperty, ScalarQueryParameter
+
+from .helpers import make_client, make_connection
+
+
+def make_query_request(additional_properties: Optional[Dict[str, Any]] = None):
+ request = {"useLegacySql": False, "formatOptions": {"useInt64Timestamp": True}}
+ if additional_properties is not None:
+ request.update(additional_properties)
+ return request
+
+
+def make_query_response(
+ completed: bool = False,
+ job_id: str = "abcd-efg-hijk-lmnop",
+ location="US",
+ project_id="test-project",
+ errors=None,
+) -> Dict[str, Any]:
+ response = {
+ "jobReference": {
+ "projectId": project_id,
+ "jobId": job_id,
+ "location": location,
+ },
+ "jobComplete": completed,
+ }
+ if errors is not None:
+ response["errors"] = errors
+ return response
+
+
+@pytest.mark.parametrize(
+ ("job_config", "expected"),
+ (
+ pytest.param(
+ None,
+ make_query_request(),
+ id="job_config=None-default-request",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(),
+ make_query_request(),
+ id="job_config=QueryJobConfig()-default-request",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig.from_api_repr(
+ {
+ "unknownTopLevelProperty": "some-test-value",
+ "query": {
+ "unknownQueryProperty": "some-other-value",
+ },
+ },
+ ),
+ make_query_request(
+ {
+ "unknownTopLevelProperty": "some-test-value",
+ "unknownQueryProperty": "some-other-value",
+ }
+ ),
+ id="job_config-with-unknown-properties-includes-all-properties-in-request",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(default_dataset="my-project.my_dataset"),
+ make_query_request(
+ {
+ "defaultDataset": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ }
+ }
+ ),
+ id="job_config-with-default_dataset",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(dry_run=True),
+ make_query_request({"dryRun": True}),
+ id="job_config-with-dry_run",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(use_query_cache=False),
+ make_query_request({"useQueryCache": False}),
+ id="job_config-with-use_query_cache",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(use_legacy_sql=True),
+ make_query_request({"useLegacySql": True}),
+ id="job_config-with-use_legacy_sql",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(
+ query_parameters=[
+ ScalarQueryParameter("named_param1", "STRING", "param-value"),
+ ScalarQueryParameter("named_param2", "INT64", 123),
+ ]
+ ),
+ make_query_request(
+ {
+ "parameterMode": "NAMED",
+ "queryParameters": [
+ {
+ "name": "named_param1",
+ "parameterType": {"type": "STRING"},
+ "parameterValue": {"value": "param-value"},
+ },
+ {
+ "name": "named_param2",
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": "123"},
+ },
+ ],
+ }
+ ),
+ id="job_config-with-query_parameters-named",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(
+ query_parameters=[
+ ScalarQueryParameter(None, "STRING", "param-value"),
+ ScalarQueryParameter(None, "INT64", 123),
+ ]
+ ),
+ make_query_request(
+ {
+ "parameterMode": "POSITIONAL",
+ "queryParameters": [
+ {
+ "parameterType": {"type": "STRING"},
+ "parameterValue": {"value": "param-value"},
+ },
+ {
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": "123"},
+ },
+ ],
+ }
+ ),
+ id="job_config-with-query_parameters-positional",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(
+ connection_properties=[
+ ConnectionProperty(key="time_zone", value="America/Chicago"),
+ ConnectionProperty(key="session_id", value="abcd-efgh-ijkl-mnop"),
+ ]
+ ),
+ make_query_request(
+ {
+ "connectionProperties": [
+ {"key": "time_zone", "value": "America/Chicago"},
+ {"key": "session_id", "value": "abcd-efgh-ijkl-mnop"},
+ ]
+ }
+ ),
+ id="job_config-with-connection_properties",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(labels={"abc": "def"}),
+ make_query_request({"labels": {"abc": "def"}}),
+ id="job_config-with-labels",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(maximum_bytes_billed=987654),
+ make_query_request({"maximumBytesBilled": "987654"}),
+ id="job_config-with-maximum_bytes_billed",
+ ),
+ ),
+)
+def test__to_query_request(job_config, expected):
+ result = _job_helpers._to_query_request(job_config, query="SELECT 1")
+ expected["query"] = "SELECT 1"
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ ("job_config", "invalid_key"),
+ (
+ pytest.param(job_copy.CopyJobConfig(), "copy", id="copy"),
+ pytest.param(job_extract.ExtractJobConfig(), "extract", id="extract"),
+ pytest.param(job_load.LoadJobConfig(), "load", id="load"),
+ ),
+)
+def test__to_query_request_raises_for_invalid_config(job_config, invalid_key):
+ with pytest.raises(ValueError, match=f"{repr(invalid_key)} in job_config"):
+ _job_helpers._to_query_request(job_config, query="SELECT 1")
+
+
+def test__to_query_job_defaults():
+ mock_client = mock.create_autospec(Client)
+ response = make_query_response(
+ job_id="test-job", project_id="some-project", location="asia-northeast1"
+ )
+ job: job_query.QueryJob = _job_helpers._to_query_job(
+ mock_client, "query-str", None, response
+ )
+ assert job.query == "query-str"
+ assert job._client is mock_client
+ assert job.job_id == "test-job"
+ assert job.project == "some-project"
+ assert job.location == "asia-northeast1"
+ assert job.error_result is None
+ assert job.errors is None
+
+
+def test__to_query_job_dry_run():
+ mock_client = mock.create_autospec(Client)
+ response = make_query_response(
+ job_id="test-job", project_id="some-project", location="asia-northeast1"
+ )
+ job_config: job_query.QueryJobConfig = job_query.QueryJobConfig()
+ job_config.dry_run = True
+ job: job_query.QueryJob = _job_helpers._to_query_job(
+ mock_client, "query-str", job_config, response
+ )
+ assert job.dry_run is True
+
+
+@pytest.mark.parametrize(
+ ("completed", "expected_state"),
+ (
+ # Always pending so that we refresh the job state to get the
+ # destination table or job stats in case it's needed.
+ (True, "PENDING"),
+ (False, "PENDING"),
+ ),
+)
+def test__to_query_job_sets_state(completed, expected_state):
+ mock_client = mock.create_autospec(Client)
+ response = make_query_response(completed=completed)
+ job: job_query.QueryJob = _job_helpers._to_query_job(
+ mock_client, "query-str", None, response
+ )
+ assert job.state == expected_state
+
+
+def test__to_query_job_sets_errors():
+ mock_client = mock.create_autospec(Client)
+ response = make_query_response(
+ errors=[
+ # https://cloud.google.com/bigquery/docs/reference/rest/v2/ErrorProto
+ {"reason": "backendError", "message": "something went wrong"},
+ {"message": "something else went wrong"},
+ ]
+ )
+ job: job_query.QueryJob = _job_helpers._to_query_job(
+ mock_client, "query-str", None, response
+ )
+ assert len(job.errors) == 2
+ # If we got back a response instead of an HTTP error status code, most
+ # likely the job didn't completely fail.
+ assert job.error_result is None
+
+
+def test_query_jobs_query_defaults():
+ mock_client = mock.create_autospec(Client)
+ mock_retry = mock.create_autospec(retries.Retry)
+ mock_job_retry = mock.create_autospec(retries.Retry)
+ mock_client._call_api.return_value = {
+ "jobReference": {
+ "projectId": "test-project",
+ "jobId": "abc",
+ "location": "asia-northeast1",
+ }
+ }
+ _job_helpers.query_jobs_query(
+ mock_client,
+ "SELECT * FROM test",
+ None,
+ "asia-northeast1",
+ "test-project",
+ mock_retry,
+ None,
+ mock_job_retry,
+ )
+
+ assert mock_client._call_api.call_count == 1
+ call_args, call_kwargs = mock_client._call_api.call_args
+ assert call_args[0] is mock_retry
+ # See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query
+ assert call_kwargs["path"] == "/projects/test-project/queries"
+ assert call_kwargs["method"] == "POST"
+ # See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#QueryRequest
+ request = call_kwargs["data"]
+ assert request["requestId"] is not None
+ assert request["query"] == "SELECT * FROM test"
+ assert request["location"] == "asia-northeast1"
+ assert request["formatOptions"]["useInt64Timestamp"] is True
+ assert "timeoutMs" not in request
+
+
+def test_query_jobs_query_sets_format_options():
+ """Since jobs.query can return results, ensure we use the lossless
+ timestamp format.
+
+ See: https://github.com/googleapis/python-bigquery/issues/395
+ """
+ mock_client = mock.create_autospec(Client)
+ mock_retry = mock.create_autospec(retries.Retry)
+ mock_job_retry = mock.create_autospec(retries.Retry)
+ mock_client._call_api.return_value = {
+ "jobReference": {"projectId": "test-project", "jobId": "abc", "location": "US"}
+ }
+ _job_helpers.query_jobs_query(
+ mock_client,
+ "SELECT * FROM test",
+ None,
+ "US",
+ "test-project",
+ mock_retry,
+ None,
+ mock_job_retry,
+ )
+
+ assert mock_client._call_api.call_count == 1
+ _, call_kwargs = mock_client._call_api.call_args
+ # See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#QueryRequest
+ request = call_kwargs["data"]
+ assert request["formatOptions"]["useInt64Timestamp"] is True
+
+
+@pytest.mark.parametrize(
+ ("timeout", "expected_timeout"),
+ (
+ (-1, 0),
+ (0, 0),
+ (1, 1000 - _job_helpers._TIMEOUT_BUFFER_MILLIS),
+ ),
+)
+def test_query_jobs_query_sets_timeout(timeout, expected_timeout):
+ mock_client = mock.create_autospec(Client)
+ mock_retry = mock.create_autospec(retries.Retry)
+ mock_job_retry = mock.create_autospec(retries.Retry)
+ mock_client._call_api.return_value = {
+ "jobReference": {"projectId": "test-project", "jobId": "abc", "location": "US"}
+ }
+ _job_helpers.query_jobs_query(
+ mock_client,
+ "SELECT * FROM test",
+ None,
+ "US",
+ "test-project",
+ mock_retry,
+ timeout,
+ mock_job_retry,
+ )
+
+ assert mock_client._call_api.call_count == 1
+ _, call_kwargs = mock_client._call_api.call_args
+ # See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#QueryRequest
+ request = call_kwargs["data"]
+ assert request["timeoutMs"] == expected_timeout
+
+
+def test_query_and_wait_uses_jobs_insert():
+ """With unsupported features, call jobs.insert instead of jobs.query."""
+ client = mock.create_autospec(Client)
+ client._call_api.return_value = {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "abc",
+ "location": "response-location",
+ },
+ "query": {
+ "query": "SELECT 1",
+ },
+ # Make sure the job has "started"
+ "status": {"state": "DONE"},
+ "jobComplete": True,
+ }
+ job_config = job_query.QueryJobConfig(
+ destination="dest-project.dest_dset.dest_table",
+ )
+ _job_helpers.query_and_wait(
+ client,
+ query="SELECT 1",
+ location="request-location",
+ project="request-project",
+ job_config=job_config,
+ retry=None,
+ job_retry=None,
+ page_size=None,
+ max_results=None,
+ )
+
+ # We should call jobs.insert since jobs.query doesn't support destination.
+ request_path = "/projects/request-project/jobs"
+ client._call_api.assert_any_call(
+ None, # retry,
+ span_name="BigQuery.job.begin",
+ span_attributes={"path": request_path},
+ job_ref=mock.ANY,
+ method="POST",
+ path=request_path,
+ data={
+ "jobReference": {
+ "jobId": mock.ANY,
+ "projectId": "request-project",
+ "location": "request-location",
+ },
+ "configuration": {
+ "query": {
+ "destinationTable": {
+ "projectId": "dest-project",
+ "datasetId": "dest_dset",
+ "tableId": "dest_table",
+ },
+ "useLegacySql": False,
+ "query": "SELECT 1",
+ }
+ },
+ },
+ timeout=None,
+ )
+
+
+def test_query_and_wait_retries_job():
+ freezegun.freeze_time(auto_tick_seconds=100)
+ client = mock.create_autospec(Client)
+ client._call_api.__name__ = "_call_api"
+ client._call_api.__qualname__ = "Client._call_api"
+ client._call_api.__annotations__ = {}
+ client._call_api.__type_params__ = ()
+ client._call_api.side_effect = (
+ google.api_core.exceptions.BadGateway("retry me"),
+ google.api_core.exceptions.InternalServerError("job_retry me"),
+ google.api_core.exceptions.BadGateway("retry me"),
+ {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "abc",
+ "location": "response-location",
+ },
+ "jobComplete": True,
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INT64", "mode": "NULLABLE"},
+ ],
+ },
+ "rows": [
+ {"f": [{"v": "Whillma Phlyntstone"}, {"v": "27"}]},
+ {"f": [{"v": "Bhetty Rhubble"}, {"v": "28"}]},
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ],
+ },
+ )
+ rows = _job_helpers.query_and_wait(
+ client,
+ query="SELECT 1",
+ location="request-location",
+ project="request-project",
+ job_config=None,
+ page_size=None,
+ max_results=None,
+ retry=retries.Retry(
+ lambda exc: isinstance(exc, google.api_core.exceptions.BadGateway),
+ multiplier=1.0,
+ ).with_deadline(
+ 200.0
+ ), # Since auto_tick_seconds is 100, we should get at least 1 retry.
+ job_retry=retries.Retry(
+ lambda exc: isinstance(exc, google.api_core.exceptions.InternalServerError),
+ multiplier=1.0,
+ ).with_deadline(600.0),
+ )
+ assert len(list(rows)) == 4
+
+ # For this code path, where the query has finished immediately, we should
+ # only be calling the jobs.query API and no other request path.
+ request_path = "/projects/request-project/queries"
+ for call in client._call_api.call_args_list:
+ _, kwargs = call
+ assert kwargs["method"] == "POST"
+ assert kwargs["path"] == request_path
+
+
+@freezegun.freeze_time(auto_tick_seconds=100)
+def test_query_and_wait_retries_job_times_out():
+ client = mock.create_autospec(Client)
+ client._call_api.__name__ = "_call_api"
+ client._call_api.__qualname__ = "Client._call_api"
+ client._call_api.__annotations__ = {}
+ client._call_api.__type_params__ = ()
+ client._call_api.side_effect = (
+ google.api_core.exceptions.BadGateway("retry me"),
+ google.api_core.exceptions.InternalServerError("job_retry me"),
+ google.api_core.exceptions.BadGateway("retry me"),
+ google.api_core.exceptions.InternalServerError("job_retry me"),
+ )
+
+ with pytest.raises(google.api_core.exceptions.RetryError) as exc_info:
+ _job_helpers.query_and_wait(
+ client,
+ query="SELECT 1",
+ location="request-location",
+ project="request-project",
+ job_config=None,
+ page_size=None,
+ max_results=None,
+ retry=retries.Retry(
+ lambda exc: isinstance(exc, google.api_core.exceptions.BadGateway),
+ multiplier=1.0,
+ ).with_deadline(
+ 200.0
+ ), # Since auto_tick_seconds is 100, we should get at least 1 retry.
+ job_retry=retries.Retry(
+ lambda exc: isinstance(
+ exc, google.api_core.exceptions.InternalServerError
+ ),
+ multiplier=1.0,
+ ).with_deadline(400.0),
+ )
+
+ assert isinstance(
+ exc_info.value.cause, google.api_core.exceptions.InternalServerError
+ )
+
+
+def test_query_and_wait_sets_job_creation_mode(monkeypatch: pytest.MonkeyPatch):
+ monkeypatch.setenv(
+ "QUERY_PREVIEW_ENABLED",
+ # The comparison should be case insensitive.
+ "TrUe",
+ )
+ client = mock.create_autospec(Client)
+ client._call_api.return_value = {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "abc",
+ "location": "response-location",
+ },
+ "jobComplete": True,
+ }
+ _job_helpers.query_and_wait(
+ client,
+ query="SELECT 1",
+ location="request-location",
+ project="request-project",
+ job_config=None,
+ retry=None,
+ job_retry=None,
+ page_size=None,
+ max_results=None,
+ )
+
+ # We should only call jobs.query once, no additional row requests needed.
+ request_path = "/projects/request-project/queries"
+ client._call_api.assert_called_once_with(
+ None, # retry
+ span_name="BigQuery.query",
+ span_attributes={"path": request_path},
+ method="POST",
+ path=request_path,
+ data={
+ "query": "SELECT 1",
+ "location": "request-location",
+ "useLegacySql": False,
+ "formatOptions": {
+ "useInt64Timestamp": True,
+ },
+ "requestId": mock.ANY,
+ "jobCreationMode": "JOB_CREATION_OPTIONAL",
+ },
+ timeout=None,
+ )
+
+
+def test_query_and_wait_sets_location():
+ client = mock.create_autospec(Client)
+ client._call_api.return_value = {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "abc",
+ "location": "response-location",
+ },
+ "jobComplete": True,
+ }
+ rows = _job_helpers.query_and_wait(
+ client,
+ query="SELECT 1",
+ location="request-location",
+ project="request-project",
+ job_config=None,
+ retry=None,
+ job_retry=None,
+ page_size=None,
+ max_results=None,
+ )
+ assert rows.location == "response-location"
+
+ # We should only call jobs.query once, no additional row requests needed.
+ request_path = "/projects/request-project/queries"
+ client._call_api.assert_called_once_with(
+ None, # retry
+ span_name="BigQuery.query",
+ span_attributes={"path": request_path},
+ method="POST",
+ path=request_path,
+ data={
+ "query": "SELECT 1",
+ "location": "request-location",
+ "useLegacySql": False,
+ "formatOptions": {
+ "useInt64Timestamp": True,
+ },
+ "requestId": mock.ANY,
+ },
+ timeout=None,
+ )
+
+
+@pytest.mark.parametrize(
+ ("max_results", "page_size", "expected"),
+ [
+ (10, None, 10),
+ (None, 11, 11),
+ (12, 100, 12),
+ (100, 13, 13),
+ ],
+)
+def test_query_and_wait_sets_max_results(max_results, page_size, expected):
+ client = mock.create_autospec(Client)
+ client._call_api.return_value = {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "abc",
+ "location": "response-location",
+ },
+ "jobComplete": True,
+ }
+ rows = _job_helpers.query_and_wait(
+ client,
+ query="SELECT 1",
+ location="request-location",
+ project="request-project",
+ job_config=None,
+ retry=None,
+ job_retry=None,
+ page_size=page_size,
+ max_results=max_results,
+ )
+ assert rows.location == "response-location"
+
+ # We should only call jobs.query once, no additional row requests needed.
+ request_path = "/projects/request-project/queries"
+ client._call_api.assert_called_once_with(
+ None, # retry
+ span_name="BigQuery.query",
+ span_attributes={"path": request_path},
+ method="POST",
+ path=request_path,
+ data={
+ "query": "SELECT 1",
+ "location": "request-location",
+ "useLegacySql": False,
+ "formatOptions": {
+ "useInt64Timestamp": True,
+ },
+ "requestId": mock.ANY,
+ "maxResults": expected,
+ },
+ timeout=None,
+ )
+
+
+def test_query_and_wait_caches_completed_query_results_one_page():
+ client = mock.create_autospec(Client)
+ client._call_api.return_value = {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "abc",
+ "location": "US",
+ },
+ "jobComplete": True,
+ "queryId": "xyz",
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INT64", "mode": "NULLABLE"},
+ ],
+ },
+ "rows": [
+ {"f": [{"v": "Whillma Phlyntstone"}, {"v": "27"}]},
+ {"f": [{"v": "Bhetty Rhubble"}, {"v": "28"}]},
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ],
+ # Even though totalRows > len(rows), we should use the presence of a
+ # next page token to decide if there are any more pages.
+ "totalRows": 8,
+ }
+ rows = _job_helpers.query_and_wait(
+ client,
+ query="SELECT full_name, age FROM people;",
+ job_config=None,
+ location=None,
+ project="request-project",
+ retry=None,
+ job_retry=None,
+ page_size=None,
+ max_results=None,
+ )
+ rows_list = list(rows)
+ assert rows.project == "response-project"
+ assert rows.job_id == "abc"
+ assert rows.location == "US"
+ assert rows.query_id == "xyz"
+ assert rows.total_rows == 8
+ assert len(rows_list) == 4
+
+ # We should only call jobs.query once, no additional row requests needed.
+ request_path = "/projects/request-project/queries"
+ client._call_api.assert_called_once_with(
+ None, # retry
+ span_name="BigQuery.query",
+ span_attributes={"path": request_path},
+ method="POST",
+ path=request_path,
+ data={
+ "query": "SELECT full_name, age FROM people;",
+ "useLegacySql": False,
+ "formatOptions": {
+ "useInt64Timestamp": True,
+ },
+ "requestId": mock.ANY,
+ },
+ timeout=None,
+ )
+
+
+def test_query_and_wait_caches_completed_query_results_one_page_no_rows():
+ client = mock.create_autospec(Client)
+ client._call_api.return_value = {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "abc",
+ "location": "US",
+ },
+ "jobComplete": True,
+ "queryId": "xyz",
+ }
+ rows = _job_helpers.query_and_wait(
+ client,
+ query="CREATE TABLE abc;",
+ project="request-project",
+ job_config=None,
+ location=None,
+ retry=None,
+ job_retry=None,
+ page_size=None,
+ max_results=None,
+ )
+ assert rows.project == "response-project"
+ assert rows.job_id == "abc"
+ assert rows.location == "US"
+ assert rows.query_id == "xyz"
+ assert list(rows) == []
+
+ # We should only call jobs.query once, no additional row requests needed.
+ request_path = "/projects/request-project/queries"
+ client._call_api.assert_called_once_with(
+ None, # retry
+ span_name="BigQuery.query",
+ span_attributes={"path": request_path},
+ method="POST",
+ path=request_path,
+ data={
+ "query": "CREATE TABLE abc;",
+ "useLegacySql": False,
+ "formatOptions": {
+ "useInt64Timestamp": True,
+ },
+ "requestId": mock.ANY,
+ },
+ timeout=None,
+ )
+
+
+def test_query_and_wait_caches_completed_query_results_more_pages():
+ client = make_client()
+ conn = client._connection = make_connection(
+ {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "response-job-id",
+ "location": "response-location",
+ },
+ "jobComplete": True,
+ "queryId": "xyz",
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INT64", "mode": "NULLABLE"},
+ ],
+ },
+ "rows": [
+ {"f": [{"v": "Whillma Phlyntstone"}, {"v": "27"}]},
+ {"f": [{"v": "Bhetty Rhubble"}, {"v": "28"}]},
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ],
+ # Even though totalRows <= len(rows), we should use the presence of a
+ # next page token to decide if there are any more pages.
+ "totalRows": 2,
+ "pageToken": "page-2",
+ },
+ # TODO(swast): This is a case where we can avoid a call to jobs.get,
+ # but currently do so because the RowIterator might need the
+ # destination table, since results aren't fully cached.
+ {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "response-job-id",
+ "location": "response-location",
+ },
+ "status": {"state": "DONE"},
+ },
+ {
+ "rows": [
+ {"f": [{"v": "Pebbles Phlyntstone"}, {"v": "4"}]},
+ {"f": [{"v": "Bamm-Bamm Rhubble"}, {"v": "5"}]},
+ {"f": [{"v": "Joseph Rockhead"}, {"v": "32"}]},
+ {"f": [{"v": "Perry Masonry"}, {"v": "33"}]},
+ ],
+ "totalRows": 3,
+ "pageToken": "page-3",
+ },
+ {
+ "rows": [
+ {"f": [{"v": "Pearl Slaghoople"}, {"v": "53"}]},
+ ],
+ "totalRows": 4,
+ },
+ )
+ rows = _job_helpers.query_and_wait(
+ client,
+ query="SELECT full_name, age FROM people;",
+ project="request-project",
+ job_config=None,
+ location=None,
+ retry=None,
+ job_retry=None,
+ page_size=None,
+ max_results=None,
+ )
+ assert rows.total_rows == 2 # Match the API response.
+ rows_list = list(rows)
+ assert rows.total_rows == 4 # Match the final API response.
+ assert len(rows_list) == 9
+
+ # Start the query.
+ jobs_query_path = "/projects/request-project/queries"
+ conn.api_request.assert_any_call(
+ method="POST",
+ path=jobs_query_path,
+ data={
+ "query": "SELECT full_name, age FROM people;",
+ "useLegacySql": False,
+ "formatOptions": {
+ "useInt64Timestamp": True,
+ },
+ "requestId": mock.ANY,
+ },
+ timeout=None,
+ )
+
+ # Note: There is no get call to
+ # "/projects/response-project/jobs/response-job-id", because fetching job
+ # metadata isn't necessary in this case. The job already completed in
+ # jobs.query and we don't need the full job metadata in query_and_wait.
+
+ # Fetch the remaining two pages.
+ jobs_get_query_results_path = "/projects/response-project/queries/response-job-id"
+ conn.api_request.assert_any_call(
+ timeout=None,
+ method="GET",
+ path=jobs_get_query_results_path,
+ query_params={
+ "pageToken": "page-2",
+ "fields": "jobReference,totalRows,pageToken,rows",
+ "location": "response-location",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ )
+ conn.api_request.assert_any_call(
+ timeout=None,
+ method="GET",
+ path=jobs_get_query_results_path,
+ query_params={
+ "pageToken": "page-3",
+ "fields": "jobReference,totalRows,pageToken,rows",
+ "location": "response-location",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ )
+
+
+def test_query_and_wait_incomplete_query():
+ client = make_client()
+ conn = client._connection = make_connection(
+ # jobs.query
+ {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "response-job-id",
+ "location": "response-location",
+ },
+ "jobComplete": False,
+ },
+ # jobs.get
+ {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "response-job-id",
+ "location": "response-location",
+ },
+ "status": {"state": "RUNNING"},
+ },
+ # jobs.getQueryResults with max_results=0
+ {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "response-job-id",
+ "location": "response-location",
+ },
+ "jobComplete": True,
+ "totalRows": 2,
+ "queryId": "xyz",
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INT64", "mode": "NULLABLE"},
+ ],
+ },
+ },
+ # jobs.get
+ {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "response-job-id",
+ "location": "response-location",
+ },
+ "status": {"state": "DONE"},
+ },
+ # jobs.getQueryResults
+ # Note: No more jobs.getQueryResults with max_results=0 because the
+ # previous call to jobs.getQueryResults returned with jobComplete=True.
+ {
+ "rows": [
+ {"f": [{"v": "Whillma Phlyntstone"}, {"v": "27"}]},
+ {"f": [{"v": "Bhetty Rhubble"}, {"v": "28"}]},
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ],
+ # Even though totalRows <= len(rows), we should use the presence of a
+ # next page token to decide if there are any more pages.
+ "totalRows": 2,
+ "pageToken": "page-2",
+ },
+ # jobs.getQueryResults
+ {
+ "rows": [
+ {"f": [{"v": "Pearl Slaghoople"}, {"v": "53"}]},
+ ],
+ },
+ )
+ rows = _job_helpers.query_and_wait(
+ client,
+ query="SELECT full_name, age FROM people;",
+ project="request-project",
+ job_config=None,
+ location=None,
+ retry=None,
+ job_retry=None,
+ page_size=None,
+ max_results=None,
+ )
+ rows_list = list(rows)
+ assert rows.total_rows == 2 # Match the API response.
+ assert len(rows_list) == 5
+
+ # Start the query.
+ jobs_query_path = "/projects/request-project/queries"
+ conn.api_request.assert_any_call(
+ method="POST",
+ path=jobs_query_path,
+ data={
+ "query": "SELECT full_name, age FROM people;",
+ "useLegacySql": False,
+ "formatOptions": {
+ "useInt64Timestamp": True,
+ },
+ "requestId": mock.ANY,
+ },
+ timeout=None,
+ )
+
+ # Wait for the query to finish.
+ jobs_get_query_results_path = "/projects/response-project/queries/response-job-id"
+ conn.api_request.assert_any_call(
+ method="GET",
+ path=jobs_get_query_results_path,
+ query_params={
+ # job_query.QueryJob uses getQueryResults to wait for the query to finish.
+ # It avoids fetching the results because:
+ # (1) For large rows this can take a long time, much longer than
+ # our progress bar update frequency.
+ # See: https://github.com/googleapis/python-bigquery/issues/403
+ # (2) Caching the first page of results uses an unexpected increase in memory.
+ # See: https://github.com/googleapis/python-bigquery/issues/394
+ "maxResults": 0,
+ "location": "response-location",
+ },
+ timeout=None,
+ )
+
+ # Fetch the job metadata in case the RowIterator needs the destination table.
+ jobs_get_path = "/projects/response-project/jobs/response-job-id"
+ conn.api_request.assert_any_call(
+ method="GET",
+ path=jobs_get_path,
+ query_params={"projection": "full", "location": "response-location"},
+ timeout=retry.DEFAULT_GET_JOB_TIMEOUT,
+ )
+
+ # Fetch the remaining two pages.
+ conn.api_request.assert_any_call(
+ timeout=None,
+ method="GET",
+ path=jobs_get_query_results_path,
+ query_params={
+ "fields": "jobReference,totalRows,pageToken,rows",
+ "location": "response-location",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ )
+ conn.api_request.assert_any_call(
+ timeout=None,
+ method="GET",
+ path=jobs_get_query_results_path,
+ query_params={
+ "pageToken": "page-2",
+ "fields": "jobReference,totalRows,pageToken,rows",
+ "location": "response-location",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ )
+
+
+def test_make_job_id_wo_suffix():
+ job_id = _job_helpers.make_job_id("job_id")
+ assert job_id == "job_id"
+
+
+def test_make_job_id_w_suffix():
+ with mock.patch("uuid.uuid4", side_effect=["212345"]):
+ job_id = _job_helpers.make_job_id(None, prefix="job_id")
+
+ assert job_id == "job_id212345"
+
+
+def test_make_job_id_random():
+ with mock.patch("uuid.uuid4", side_effect=["212345"]):
+ job_id = _job_helpers.make_job_id(None)
+
+ assert job_id == "212345"
+
+
+def test_make_job_id_w_job_id_overrides_prefix():
+ job_id = _job_helpers.make_job_id("job_id", prefix="unused_prefix")
+ assert job_id == "job_id"
+
+
+@pytest.mark.parametrize(
+ ("job_config", "expected"),
+ (
+ pytest.param(None, True),
+ pytest.param(job_query.QueryJobConfig(), True, id="default"),
+ pytest.param(
+ job_query.QueryJobConfig(use_query_cache=False), True, id="use_query_cache"
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(maximum_bytes_billed=10_000_000),
+ True,
+ id="maximum_bytes_billed",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(clustering_fields=["a", "b", "c"]),
+ False,
+ id="clustering_fields",
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(destination="p.d.t"), False, id="destination"
+ ),
+ pytest.param(
+ job_query.QueryJobConfig(
+ destination_encryption_configuration=job_query.EncryptionConfiguration(
+ "key"
+ )
+ ),
+ False,
+ id="destination_encryption_configuration",
+ ),
+ # priority="BATCH" is not supported. See:
+ # https://github.com/googleapis/python-bigquery/issues/1867
+ pytest.param(
+ job_query.QueryJobConfig(
+ priority=enums.QueryPriority.BATCH,
+ ),
+ False,
+ id="priority=BATCH",
+ ),
+ ),
+)
+def test_supported_by_jobs_query_from_queryjobconfig(
+ job_config: Optional[job_query.QueryJobConfig], expected: bool
+):
+ request_body = _job_helpers._to_query_request(job_config, query="SELECT 1")
+ assert _job_helpers._supported_by_jobs_query(request_body) == expected
+
+
+def test_wait_or_cancel_no_exception():
+ job = mock.create_autospec(job_query.QueryJob, instance=True)
+ expected_rows = object()
+ job.result.return_value = expected_rows
+ retry = retries.Retry()
+
+ rows = _job_helpers._wait_or_cancel(
+ job,
+ api_timeout=123,
+ wait_timeout=456,
+ retry=retry,
+ page_size=789,
+ max_results=101112,
+ )
+
+ job.result.assert_called_once_with(
+ timeout=456,
+ retry=retry,
+ page_size=789,
+ max_results=101112,
+ )
+ assert rows is expected_rows
+
+
+def test_wait_or_cancel_exception_cancels_job():
+ job = mock.create_autospec(job_query.QueryJob, instance=True)
+ job.result.side_effect = google.api_core.exceptions.BadGateway("test error")
+ retry = retries.Retry()
+
+ with pytest.raises(google.api_core.exceptions.BadGateway):
+ _job_helpers._wait_or_cancel(
+ job,
+ api_timeout=123,
+ wait_timeout=456,
+ retry=retry,
+ page_size=789,
+ max_results=101112,
+ )
+
+ job.result.assert_called_once_with(
+ timeout=456,
+ retry=retry,
+ page_size=789,
+ max_results=101112,
+ )
+ job.cancel.assert_called_once_with(
+ timeout=123,
+ retry=retry,
+ )
+
+
+def test_wait_or_cancel_exception_raises_original_exception():
+ job = mock.create_autospec(job_query.QueryJob, instance=True)
+ job.result.side_effect = google.api_core.exceptions.BadGateway("test error")
+ job.cancel.side_effect = google.api_core.exceptions.NotFound("don't raise me")
+ retry = retries.Retry()
+
+ with pytest.raises(google.api_core.exceptions.BadGateway):
+ _job_helpers._wait_or_cancel(
+ job,
+ api_timeout=123,
+ wait_timeout=456,
+ retry=retry,
+ page_size=789,
+ max_results=101112,
+ )
+
+ job.result.assert_called_once_with(
+ timeout=456,
+ retry=retry,
+ page_size=789,
+ max_results=101112,
+ )
+ job.cancel.assert_called_once_with(
+ timeout=123,
+ retry=retry,
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test__pandas_helpers.py b/testbed/googleapis__python-bigquery/tests/unit/test__pandas_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..203cc1d1cec19975773b963be085bd668acdaf52
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test__pandas_helpers.py
@@ -0,0 +1,2055 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import datetime
+import decimal
+import functools
+import operator
+import queue
+from unittest import mock
+import warnings
+
+try:
+ import importlib.metadata as metadata
+except ImportError:
+ import importlib_metadata as metadata
+
+try:
+ import pandas
+ import pandas.api.types
+ import pandas.testing
+except ImportError:
+ pandas = None
+
+try:
+ import geopandas
+except ImportError:
+ geopandas = None
+
+import pytest
+
+from google import api_core
+
+from google.cloud.bigquery import exceptions
+from google.cloud.bigquery import _pyarrow_helpers
+from google.cloud.bigquery import _versions_helpers
+from google.cloud.bigquery import schema
+
+pyarrow = _versions_helpers.PYARROW_VERSIONS.try_import()
+
+if pyarrow:
+ import pyarrow.parquet
+ import pyarrow.types
+
+ _BIGNUMERIC_SUPPORT = True
+else:
+ # Mock out pyarrow when missing, because methods from pyarrow.types are
+ # used in test parameterization.
+ pyarrow = mock.Mock()
+ _BIGNUMERIC_SUPPORT = False
+
+bigquery_storage = _versions_helpers.BQ_STORAGE_VERSIONS.try_import()
+
+if pandas is not None:
+ PANDAS_INSTALLED_VERSION = metadata.version("pandas")
+else:
+ PANDAS_INSTALLED_VERSION = "0.0.0"
+
+
+skip_if_no_bignumeric = pytest.mark.skipif(
+ not _BIGNUMERIC_SUPPORT,
+ reason="BIGNUMERIC support requires pyarrow>=3.0.0",
+)
+
+
+@pytest.fixture
+def module_under_test():
+ from google.cloud.bigquery import _pandas_helpers
+
+ return _pandas_helpers
+
+
+def is_none(value):
+ return value is None
+
+
+def is_datetime(type_):
+ # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#datetime-type
+ return all_(
+ pyarrow.types.is_timestamp,
+ lambda type_: type_.unit == "us",
+ lambda type_: type_.tz is None,
+ )(type_)
+
+
+def is_numeric(type_):
+ # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
+ return all_(
+ pyarrow.types.is_decimal,
+ lambda type_: type_.precision == 38,
+ lambda type_: type_.scale == 9,
+ )(type_)
+
+
+def is_bignumeric(type_):
+ # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
+ return all_(
+ pyarrow.types.is_decimal,
+ lambda type_: type_.precision == 76,
+ lambda type_: type_.scale == 38,
+ )(type_)
+
+
+def is_timestamp(type_):
+ # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type
+ return all_(
+ pyarrow.types.is_timestamp,
+ lambda type_: type_.unit == "us",
+ lambda type_: type_.tz == "UTC",
+ )(type_)
+
+
+def do_all(functions, value):
+ return all((func(value) for func in functions))
+
+
+def all_(*functions):
+ return functools.partial(do_all, functions)
+
+
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_is_datetime():
+ assert is_datetime(pyarrow.timestamp("us", tz=None))
+ assert not is_datetime(pyarrow.timestamp("ms", tz=None))
+ assert not is_datetime(pyarrow.timestamp("us", tz="UTC"))
+ assert not is_datetime(pyarrow.timestamp("ns", tz="UTC"))
+ assert not is_datetime(pyarrow.string())
+
+
+def test_do_all():
+ assert do_all((lambda _: True, lambda _: True), None)
+ assert not do_all((lambda _: True, lambda _: False), None)
+ assert not do_all((lambda _: False,), None)
+
+
+def test_all_():
+ assert all_(lambda _: True, lambda _: True)(None)
+ assert not all_(lambda _: True, lambda _: False)(None)
+
+
+@pytest.mark.parametrize(
+ "bq_type,bq_mode,is_correct_type",
+ [
+ ("STRING", "NULLABLE", pyarrow.types.is_string),
+ ("STRING", None, pyarrow.types.is_string),
+ ("string", "NULLABLE", pyarrow.types.is_string),
+ ("StRiNg", "NULLABLE", pyarrow.types.is_string),
+ ("BYTES", "NULLABLE", pyarrow.types.is_binary),
+ ("INTEGER", "NULLABLE", pyarrow.types.is_int64),
+ ("INT64", "NULLABLE", pyarrow.types.is_int64),
+ ("FLOAT", "NULLABLE", pyarrow.types.is_float64),
+ ("FLOAT64", "NULLABLE", pyarrow.types.is_float64),
+ ("NUMERIC", "NULLABLE", is_numeric),
+ pytest.param(
+ "BIGNUMERIC",
+ "NULLABLE",
+ is_bignumeric,
+ marks=skip_if_no_bignumeric,
+ ),
+ ("BOOLEAN", "NULLABLE", pyarrow.types.is_boolean),
+ ("BOOL", "NULLABLE", pyarrow.types.is_boolean),
+ ("TIMESTAMP", "NULLABLE", is_timestamp),
+ ("DATE", "NULLABLE", pyarrow.types.is_date32),
+ ("TIME", "NULLABLE", pyarrow.types.is_time64),
+ ("DATETIME", "NULLABLE", is_datetime),
+ ("GEOGRAPHY", "NULLABLE", pyarrow.types.is_string),
+ ("UNKNOWN_TYPE", "NULLABLE", is_none),
+ # Use pyarrow.list_(item_type) for repeated (array) fields.
+ (
+ "STRING",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_string(type_.value_type),
+ ),
+ ),
+ (
+ "STRING",
+ "repeated",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_string(type_.value_type),
+ ),
+ ),
+ (
+ "STRING",
+ "RePeAtEd",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_string(type_.value_type),
+ ),
+ ),
+ (
+ "BYTES",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_binary(type_.value_type),
+ ),
+ ),
+ (
+ "INTEGER",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_int64(type_.value_type),
+ ),
+ ),
+ (
+ "INT64",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_int64(type_.value_type),
+ ),
+ ),
+ (
+ "FLOAT",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_float64(type_.value_type),
+ ),
+ ),
+ (
+ "FLOAT64",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_float64(type_.value_type),
+ ),
+ ),
+ (
+ "NUMERIC",
+ "REPEATED",
+ all_(pyarrow.types.is_list, lambda type_: is_numeric(type_.value_type)),
+ ),
+ pytest.param(
+ "BIGNUMERIC",
+ "REPEATED",
+ all_(pyarrow.types.is_list, lambda type_: is_bignumeric(type_.value_type)),
+ marks=skip_if_no_bignumeric,
+ ),
+ (
+ "BOOLEAN",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_boolean(type_.value_type),
+ ),
+ ),
+ (
+ "BOOL",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_boolean(type_.value_type),
+ ),
+ ),
+ (
+ "TIMESTAMP",
+ "REPEATED",
+ all_(pyarrow.types.is_list, lambda type_: is_timestamp(type_.value_type)),
+ ),
+ (
+ "DATE",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_date32(type_.value_type),
+ ),
+ ),
+ (
+ "TIME",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_time64(type_.value_type),
+ ),
+ ),
+ (
+ "DATETIME",
+ "REPEATED",
+ all_(pyarrow.types.is_list, lambda type_: is_datetime(type_.value_type)),
+ ),
+ (
+ "GEOGRAPHY",
+ "REPEATED",
+ all_(
+ pyarrow.types.is_list,
+ lambda type_: pyarrow.types.is_string(type_.value_type),
+ ),
+ ),
+ ("RECORD", "REPEATED", is_none),
+ ("UNKNOWN_TYPE", "REPEATED", is_none),
+ ],
+)
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_data_type(module_under_test, bq_type, bq_mode, is_correct_type):
+ field = schema.SchemaField("ignored_name", bq_type, mode=bq_mode)
+ actual = module_under_test.bq_to_arrow_data_type(field)
+ assert is_correct_type(actual)
+
+
+@pytest.mark.parametrize("bq_type", ["RECORD", "record", "STRUCT", "struct"])
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_data_type_w_struct(module_under_test, bq_type):
+ fields = (
+ schema.SchemaField("field01", "STRING"),
+ schema.SchemaField("field02", "BYTES"),
+ schema.SchemaField("field03", "INTEGER"),
+ schema.SchemaField("field04", "INT64"),
+ schema.SchemaField("field05", "FLOAT"),
+ schema.SchemaField("field06", "FLOAT64"),
+ schema.SchemaField("field07", "NUMERIC"),
+ schema.SchemaField("field08", "BIGNUMERIC"),
+ schema.SchemaField("field09", "BOOLEAN"),
+ schema.SchemaField("field10", "BOOL"),
+ schema.SchemaField("field11", "TIMESTAMP"),
+ schema.SchemaField("field12", "DATE"),
+ schema.SchemaField("field13", "TIME"),
+ schema.SchemaField("field14", "DATETIME"),
+ schema.SchemaField("field15", "GEOGRAPHY"),
+ )
+
+ field = schema.SchemaField("ignored_name", bq_type, mode="NULLABLE", fields=fields)
+ actual = module_under_test.bq_to_arrow_data_type(field)
+
+ expected = (
+ pyarrow.field("field01", pyarrow.string()),
+ pyarrow.field("field02", pyarrow.binary()),
+ pyarrow.field("field03", pyarrow.int64()),
+ pyarrow.field("field04", pyarrow.int64()),
+ pyarrow.field("field05", pyarrow.float64()),
+ pyarrow.field("field06", pyarrow.float64()),
+ pyarrow.field("field07", _pyarrow_helpers.pyarrow_numeric()),
+ pyarrow.field("field08", _pyarrow_helpers.pyarrow_bignumeric()),
+ pyarrow.field("field09", pyarrow.bool_()),
+ pyarrow.field("field10", pyarrow.bool_()),
+ pyarrow.field("field11", _pyarrow_helpers.pyarrow_timestamp()),
+ pyarrow.field("field12", pyarrow.date32()),
+ pyarrow.field("field13", _pyarrow_helpers.pyarrow_time()),
+ pyarrow.field("field14", _pyarrow_helpers.pyarrow_datetime()),
+ pyarrow.field("field15", pyarrow.string()),
+ )
+ expected = pyarrow.struct(expected)
+
+ assert pyarrow.types.is_struct(actual)
+ assert actual.num_fields == len(fields)
+ assert actual.equals(expected)
+
+
+@pytest.mark.parametrize("bq_type", ["RECORD", "record", "STRUCT", "struct"])
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_data_type_w_array_struct(module_under_test, bq_type):
+ fields = (
+ schema.SchemaField("field01", "STRING"),
+ schema.SchemaField("field02", "BYTES"),
+ schema.SchemaField("field03", "INTEGER"),
+ schema.SchemaField("field04", "INT64"),
+ schema.SchemaField("field05", "FLOAT"),
+ schema.SchemaField("field06", "FLOAT64"),
+ schema.SchemaField("field07", "NUMERIC"),
+ schema.SchemaField("field08", "BIGNUMERIC"),
+ schema.SchemaField("field09", "BOOLEAN"),
+ schema.SchemaField("field10", "BOOL"),
+ schema.SchemaField("field11", "TIMESTAMP"),
+ schema.SchemaField("field12", "DATE"),
+ schema.SchemaField("field13", "TIME"),
+ schema.SchemaField("field14", "DATETIME"),
+ schema.SchemaField("field15", "GEOGRAPHY"),
+ )
+
+ field = schema.SchemaField("ignored_name", bq_type, mode="REPEATED", fields=fields)
+ actual = module_under_test.bq_to_arrow_data_type(field)
+
+ expected = (
+ pyarrow.field("field01", pyarrow.string()),
+ pyarrow.field("field02", pyarrow.binary()),
+ pyarrow.field("field03", pyarrow.int64()),
+ pyarrow.field("field04", pyarrow.int64()),
+ pyarrow.field("field05", pyarrow.float64()),
+ pyarrow.field("field06", pyarrow.float64()),
+ pyarrow.field("field07", _pyarrow_helpers.pyarrow_numeric()),
+ pyarrow.field("field08", _pyarrow_helpers.pyarrow_bignumeric()),
+ pyarrow.field("field09", pyarrow.bool_()),
+ pyarrow.field("field10", pyarrow.bool_()),
+ pyarrow.field("field11", _pyarrow_helpers.pyarrow_timestamp()),
+ pyarrow.field("field12", pyarrow.date32()),
+ pyarrow.field("field13", _pyarrow_helpers.pyarrow_time()),
+ pyarrow.field("field14", _pyarrow_helpers.pyarrow_datetime()),
+ pyarrow.field("field15", pyarrow.string()),
+ )
+ expected_value_type = pyarrow.struct(expected)
+
+ assert pyarrow.types.is_list(actual)
+ assert pyarrow.types.is_struct(actual.value_type)
+ assert actual.value_type.num_fields == len(fields)
+ assert actual.value_type.equals(expected_value_type)
+
+
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_data_type_w_struct_unknown_subfield(module_under_test):
+ fields = (
+ schema.SchemaField("field1", "STRING"),
+ schema.SchemaField("field2", "INTEGER"),
+ # Don't know what to convert UNKNOWN_TYPE to, let type inference work,
+ # instead.
+ schema.SchemaField("field3", "UNKNOWN_TYPE"),
+ )
+ field = schema.SchemaField("ignored_name", "RECORD", mode="NULLABLE", fields=fields)
+
+ with warnings.catch_warnings(record=True) as warned:
+ actual = module_under_test.bq_to_arrow_data_type(field)
+
+ assert actual is None
+ assert len(warned) == 1
+ warning = warned[0]
+ assert "field3" in str(warning)
+
+
+@pytest.mark.parametrize(
+ "bq_type,rows",
+ [
+ ("STRING", ["abc", None, "def", None]),
+ ("BYTES", [b"abc", None, b"def", None]),
+ ("INTEGER", [123, None, 456, None]),
+ ("INT64", [-9223372036854775808, None, 9223372036854775807, 123]),
+ ("FLOAT", [1.25, None, 3.5, None]),
+ (
+ "NUMERIC",
+ [
+ decimal.Decimal("-99999999999999999999999999999.999999999"),
+ None,
+ decimal.Decimal("99999999999999999999999999999.999999999"),
+ decimal.Decimal("999.123456789"),
+ ],
+ ),
+ pytest.param(
+ "BIGNUMERIC",
+ [
+ decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
+ None,
+ decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
+ decimal.Decimal("3.141592653589793238462643383279"),
+ ],
+ ),
+ ("BOOLEAN", [True, None, False, None]),
+ ("BOOL", [False, None, True, None]),
+ (
+ "TIMESTAMP",
+ [
+ datetime.datetime(1, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
+ None,
+ datetime.datetime(
+ 9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc
+ ),
+ datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
+ ],
+ ),
+ (
+ "DATE",
+ [
+ datetime.date(1, 1, 1),
+ None,
+ datetime.date(9999, 12, 31),
+ datetime.date(1970, 1, 1),
+ ],
+ ),
+ (
+ "TIME",
+ [
+ datetime.time(0, 0, 0),
+ None,
+ datetime.time(23, 59, 59, 999999),
+ datetime.time(12, 0, 0),
+ ],
+ ),
+ (
+ "DATETIME",
+ [
+ datetime.datetime(1, 1, 1, 0, 0, 0),
+ datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),
+ None,
+ datetime.datetime(1970, 1, 1, 0, 0, 0),
+ datetime.datetime(1999, 3, 14, 15, 9, 26, 535898),
+ ],
+ ),
+ (
+ "GEOGRAPHY",
+ [
+ "POINT(30 10)",
+ None,
+ "LINESTRING (30 10, 10 30, 40 40)",
+ "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))",
+ ],
+ ),
+ ],
+)
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_array_w_nullable_scalars(module_under_test, bq_type, rows):
+ series = pandas.Series(rows, dtype="object")
+ bq_field = schema.SchemaField("field_name", bq_type)
+ arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
+ roundtrip = arrow_array.to_pylist()
+ assert rows == roundtrip
+
+
+@pytest.mark.parametrize(
+ "bq_type,rows",
+ [
+ (
+ "TIMESTAMP",
+ [
+ "1971-09-28T23:59:07+00:00",
+ "1975-04-09T23:59:02+00:00",
+ "1979-08-17T23:59:05+00:00",
+ "NaT",
+ "1983-05-09T13:00:00+00:00",
+ ],
+ ),
+ (
+ "DATETIME",
+ [
+ "1971-09-28T23:59:07",
+ "1975-04-09T23:59:02",
+ "1979-08-17T23:59:05",
+ "NaT",
+ "1983-05-09T13:00:00",
+ ],
+ ),
+ ],
+)
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(PANDAS_INSTALLED_VERSION[0:2] not in ["0.", "1."], reason="")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_array_w_pandas_timestamp(module_under_test, bq_type, rows):
+ rows = [pandas.Timestamp(row) for row in rows]
+ series = pandas.Series(rows)
+ bq_field = schema.SchemaField("field_name", bq_type)
+ arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
+ roundtrip = arrow_array.to_pandas()
+ assert series.equals(roundtrip)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_array_w_arrays(module_under_test):
+ rows = [[1, 2, 3], [], [4, 5, 6]]
+ series = pandas.Series(rows, name="test_col", dtype="object")
+ bq_field = schema.SchemaField("field_name", "INTEGER", mode="REPEATED")
+ arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
+ roundtrip = arrow_array.to_pylist()
+ assert rows == roundtrip
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(pyarrow is None, reason="Requires `pyarrow`")
+def test_bq_to_arrow_array_w_conversion_fail(module_under_test): # pragma: NO COVER
+ rows = [[1, 2, 3], [], [4, 5, 6]]
+ series = pandas.Series(rows, name="test_col", dtype="object")
+ bq_field = schema.SchemaField("field_name", "STRING", mode="REPEATED")
+ exc_msg = f"""Error converting Pandas column with name: "{series.name}" and datatype: "{series.dtype}" to an appropriate pyarrow datatype: Array, ListArray, or StructArray"""
+ with pytest.raises(pyarrow.ArrowTypeError, match=exc_msg):
+ module_under_test.bq_to_arrow_array(series, bq_field)
+ raise pyarrow.ArrowTypeError(exc_msg)
+
+
+@pytest.mark.parametrize("bq_type", ["RECORD", "record", "STRUCT", "struct"])
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_array_w_structs(module_under_test, bq_type):
+ rows = [
+ {"int_col": 123, "string_col": "abc"},
+ None,
+ {"int_col": 456, "string_col": "def"},
+ ]
+ series = pandas.Series(rows, name="test_col", dtype="object")
+ bq_field = schema.SchemaField(
+ "field_name",
+ bq_type,
+ fields=(
+ schema.SchemaField("int_col", "INTEGER"),
+ schema.SchemaField("string_col", "STRING"),
+ ),
+ )
+ arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
+ roundtrip = arrow_array.to_pylist()
+ assert rows == roundtrip
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_array_w_special_floats(module_under_test):
+ bq_field = schema.SchemaField("field_name", "FLOAT64")
+ rows = [float("-inf"), float("nan"), float("inf"), None]
+ series = pandas.Series(rows, dtype="object")
+ arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
+ roundtrip = arrow_array.to_pylist()
+ assert len(rows) == len(roundtrip)
+ assert roundtrip[0] == float("-inf")
+ # Since we are converting from pandas, NaN is treated as NULL in pyarrow
+ # due to pandas conventions.
+ # https://arrow.apache.org/docs/python/data.html#none-values-and-nan-handling
+ assert roundtrip[1] is None
+ assert roundtrip[2] == float("inf")
+ assert roundtrip[3] is None
+
+
+@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
+def test_bq_to_arrow_array_w_geography_dtype(module_under_test):
+ from shapely import wkb, wkt
+
+ bq_field = schema.SchemaField("field_name", "GEOGRAPHY")
+
+ series = geopandas.GeoSeries([None, wkt.loads("point(0 0)")])
+ array = module_under_test.bq_to_arrow_array(series, bq_field)
+ # The result is binary, because we use wkb format
+ assert array.type == pyarrow.binary()
+ assert array.to_pylist() == [None, wkb.dumps(series[1])]
+
+ # All na:
+ series = geopandas.GeoSeries([None, None])
+ array = module_under_test.bq_to_arrow_array(series, bq_field)
+ assert array.type == pyarrow.string()
+ assert array.to_pylist() == list(series)
+
+
+@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
+def test_bq_to_arrow_array_w_geography_type_shapely_data(module_under_test):
+ from shapely import wkb, wkt
+
+ bq_field = schema.SchemaField("field_name", "GEOGRAPHY")
+
+ series = pandas.Series([None, wkt.loads("point(0 0)")])
+ array = module_under_test.bq_to_arrow_array(series, bq_field)
+ # The result is binary, because we use wkb format
+ assert array.type == pyarrow.binary()
+ assert array.to_pylist() == [None, wkb.dumps(series[1])]
+
+ # All na:
+ series = pandas.Series([None, None])
+ array = module_under_test.bq_to_arrow_array(series, bq_field)
+ assert array.type == pyarrow.string()
+ assert array.to_pylist() == list(series)
+
+
+@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
+def test_bq_to_arrow_array_w_geography_type_wkb_data(module_under_test):
+ from shapely import wkb, wkt
+
+ bq_field = schema.SchemaField("field_name", "GEOGRAPHY")
+
+ series = pandas.Series([None, wkb.dumps(wkt.loads("point(0 0)"))])
+ array = module_under_test.bq_to_arrow_array(series, bq_field)
+ # The result is binary, because we use wkb format
+ assert array.type == pyarrow.binary()
+ assert array.to_pylist() == list(series)
+
+
+@pytest.mark.parametrize(
+ "bq_schema,expected",
+ [
+ (
+ schema.SchemaField(
+ "field1",
+ "RANGE",
+ range_element_type=schema.FieldElementType("DATE"),
+ mode="NULLABLE",
+ ),
+ pyarrow.struct(
+ [
+ ("start", pyarrow.date32()),
+ ("end", pyarrow.date32()),
+ ]
+ ),
+ ),
+ (
+ schema.SchemaField(
+ "field2",
+ "RANGE",
+ range_element_type=schema.FieldElementType("DATETIME"),
+ mode="NULLABLE",
+ ),
+ pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us", tz=None)),
+ ("end", pyarrow.timestamp("us", tz=None)),
+ ]
+ ),
+ ),
+ (
+ schema.SchemaField(
+ "field3",
+ "RANGE",
+ range_element_type=schema.FieldElementType("TIMESTAMP"),
+ mode="NULLABLE",
+ ),
+ pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us", tz="UTC")),
+ ("end", pyarrow.timestamp("us", tz="UTC")),
+ ]
+ ),
+ ),
+ ],
+)
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bq_to_arrow_data_type_w_range(module_under_test, bq_schema, expected):
+ actual = module_under_test.bq_to_arrow_data_type(bq_schema)
+ assert actual.equals(expected)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bq_to_arrow_data_type_w_range_no_element(module_under_test):
+ field = schema.SchemaField("field1", "RANGE", mode="NULLABLE")
+ with pytest.raises(ValueError, match="Range element type cannot be None"):
+ module_under_test.bq_to_arrow_data_type(field)
+
+
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_schema_w_unknown_type(module_under_test):
+ fields = (
+ schema.SchemaField("field1", "STRING"),
+ schema.SchemaField("field2", "INTEGER"),
+ # Don't know what to convert UNKNOWN_TYPE to, let type inference work,
+ # instead.
+ schema.SchemaField("field3", "UNKNOWN_TYPE"),
+ )
+ with warnings.catch_warnings(record=True) as warned:
+ actual = module_under_test.bq_to_arrow_schema(fields)
+ assert actual is None
+
+ assert len(warned) == 1
+ warning = warned[0]
+ assert "field3" in str(warning)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_get_column_or_index_not_found(module_under_test):
+ dataframe = pandas.DataFrame({"not_the_column_youre_looking_for": [1, 2, 3]})
+ with pytest.raises(ValueError, match="col_is_missing"):
+ module_under_test.get_column_or_index(dataframe, "col_is_missing")
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_get_column_or_index_with_multiindex_not_found(module_under_test):
+ dataframe = pandas.DataFrame(
+ {"column_name": [1, 2, 3, 4, 5, 6]},
+ index=pandas.MultiIndex.from_tuples(
+ [("a", 0), ("a", 1), ("b", 0), ("b", 1), ("c", 0), ("c", 1)]
+ ),
+ )
+ with pytest.raises(ValueError, match="not_in_df"):
+ module_under_test.get_column_or_index(dataframe, "not_in_df")
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_get_column_or_index_with_both_prefers_column(module_under_test):
+ dataframe = pandas.DataFrame(
+ {"some_name": [1, 2, 3]}, index=pandas.Index([0, 1, 2], name="some_name")
+ )
+ series = module_under_test.get_column_or_index(dataframe, "some_name")
+ expected = pandas.Series([1, 2, 3], name="some_name")
+ pandas.testing.assert_series_equal(series, expected)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_get_column_or_index_with_column(module_under_test):
+ dataframe = pandas.DataFrame({"column_name": [1, 2, 3], "other_column": [4, 5, 6]})
+ series = module_under_test.get_column_or_index(dataframe, "column_name")
+ expected = pandas.Series([1, 2, 3], name="column_name")
+ pandas.testing.assert_series_equal(series, expected)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_get_column_or_index_with_named_index(module_under_test):
+ dataframe = pandas.DataFrame(
+ {"column_name": [1, 2, 3]}, index=pandas.Index([4, 5, 6], name="index_name")
+ )
+ series = module_under_test.get_column_or_index(dataframe, "index_name")
+ expected = pandas.Series([4, 5, 6], name="index_name")
+ pandas.testing.assert_series_equal(series, expected)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_get_column_or_index_with_datetimeindex(module_under_test):
+ datetimes = [
+ datetime.datetime(2000, 1, 2, 3, 4, 5, 101),
+ datetime.datetime(2006, 7, 8, 9, 10, 11, 202),
+ datetime.datetime(2012, 1, 14, 15, 16, 17, 303),
+ ]
+ dataframe = pandas.DataFrame(
+ {"column_name": [1, 2, 3]},
+ index=pandas.DatetimeIndex(datetimes, name="index_name"),
+ )
+ series = module_under_test.get_column_or_index(dataframe, "index_name")
+ expected = pandas.Series(datetimes, name="index_name")
+ pandas.testing.assert_series_equal(series, expected)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_get_column_or_index_with_multiindex(module_under_test):
+ dataframe = pandas.DataFrame(
+ {"column_name": [1, 2, 3, 4, 5, 6]},
+ index=pandas.MultiIndex.from_tuples(
+ [("a", 0), ("a", 1), ("b", 0), ("b", 1), ("c", 0), ("c", 1)],
+ names=["letters", "numbers"],
+ ),
+ )
+
+ series = module_under_test.get_column_or_index(dataframe, "letters")
+ expected = pandas.Series(["a", "a", "b", "b", "c", "c"], name="letters")
+ pandas.testing.assert_series_equal(series, expected)
+
+ series = module_under_test.get_column_or_index(dataframe, "numbers")
+ expected = pandas.Series([0, 1, 0, 1, 0, 1], name="numbers")
+ pandas.testing.assert_series_equal(series, expected)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_list_columns_and_indexes_without_named_index(module_under_test):
+ df_data = collections.OrderedDict(
+ [
+ ("a_series", [1, 2, 3, 4]),
+ ("b_series", [0.1, 0.2, 0.3, 0.4]),
+ ("c_series", ["a", "b", "c", "d"]),
+ ]
+ )
+ dataframe = pandas.DataFrame(df_data)
+
+ columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
+ expected = [
+ ("a_series", pandas.api.types.pandas_dtype("int64")),
+ ("b_series", pandas.api.types.pandas_dtype("float64")),
+ ("c_series", pandas.api.types.pandas_dtype("object")),
+ ]
+ assert columns_and_indexes == expected
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_list_columns_and_indexes_with_named_index_same_as_column_name(
+ module_under_test,
+):
+ df_data = collections.OrderedDict(
+ [
+ ("a_series", [1, 2, 3, 4]),
+ ("b_series", [0.1, 0.2, 0.3, 0.4]),
+ ("c_series", ["a", "b", "c", "d"]),
+ ]
+ )
+ dataframe = pandas.DataFrame(
+ df_data,
+ # Use same name as an integer column but a different datatype so that
+ # we can verify that the column is listed but the index isn't.
+ index=pandas.Index([0.1, 0.2, 0.3, 0.4], name="a_series"),
+ )
+
+ columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
+ expected = [
+ ("a_series", pandas.api.types.pandas_dtype("int64")),
+ ("b_series", pandas.api.types.pandas_dtype("float64")),
+ ("c_series", pandas.api.types.pandas_dtype("object")),
+ ]
+ assert columns_and_indexes == expected
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_dataframe_to_json_generator(module_under_test):
+ utcnow = datetime.datetime.utcnow()
+ dataframe = pandas.DataFrame(
+ {
+ "a_series": [1, 2, 3, 4],
+ "b_series": [0.1, float("NaN"), 0.3, 0.4],
+ "c_series": ["a", "b", pandas.NA, "d"],
+ "d_series": [utcnow, utcnow, utcnow, pandas.NaT],
+ "e_series": [True, False, True, None],
+ # Support nullable dtypes.
+ # https://github.com/googleapis/python-bigquery/issues/1815
+ "boolean_series": pandas.Series(
+ [True, False, pandas.NA, False], dtype="boolean"
+ ),
+ "int64_series": pandas.Series([-1, pandas.NA, -3, -4], dtype="Int64"),
+ }
+ )
+
+ # Index is not included, even if it is not the default and has a name.
+ dataframe = dataframe.rename(index=lambda idx: idx + 4)
+ dataframe.index.name = "a_index"
+
+ rows = list(module_under_test.dataframe_to_json_generator(dataframe))
+ expected = [
+ {
+ "a_series": 1,
+ "b_series": 0.1,
+ "c_series": "a",
+ "d_series": utcnow,
+ "e_series": True,
+ "boolean_series": True,
+ "int64_series": -1,
+ },
+ {
+ "a_series": 2,
+ "c_series": "b",
+ "d_series": utcnow,
+ "e_series": False,
+ "boolean_series": False,
+ },
+ {
+ "a_series": 3,
+ "b_series": 0.3,
+ "d_series": utcnow,
+ "e_series": True,
+ "int64_series": -3,
+ },
+ {
+ "a_series": 4,
+ "b_series": 0.4,
+ "c_series": "d",
+ "boolean_series": False,
+ "int64_series": -4,
+ },
+ ]
+ assert rows == expected
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_dataframe_to_json_generator_repeated_field(module_under_test):
+ df_data = [
+ collections.OrderedDict(
+ [("repeated_col", [pandas.NA, 2, None, 4]), ("not_repeated_col", "first")]
+ ),
+ collections.OrderedDict(
+ [
+ ("repeated_col", ["a", "b", mock.sentinel.foo, "d"]),
+ ("not_repeated_col", "second"),
+ ]
+ ),
+ ]
+ dataframe = pandas.DataFrame(df_data)
+
+ rows = module_under_test.dataframe_to_json_generator(dataframe)
+
+ expected = [
+ {"repeated_col": [pandas.NA, 2, None, 4], "not_repeated_col": "first"},
+ {
+ "repeated_col": ["a", "b", mock.sentinel.foo, "d"],
+ "not_repeated_col": "second",
+ },
+ ]
+ assert list(rows) == expected
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_list_columns_and_indexes_with_named_index(module_under_test):
+ df_data = collections.OrderedDict(
+ [
+ ("a_series", [1, 2, 3, 4]),
+ ("b_series", [0.1, 0.2, 0.3, 0.4]),
+ ("c_series", ["a", "b", "c", "d"]),
+ ]
+ )
+ dataframe = pandas.DataFrame(
+ df_data, index=pandas.Index([4, 5, 6, 7], name="a_index")
+ )
+
+ columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
+ expected = [
+ ("a_index", pandas.api.types.pandas_dtype("int64")),
+ ("a_series", pandas.api.types.pandas_dtype("int64")),
+ ("b_series", pandas.api.types.pandas_dtype("float64")),
+ ("c_series", pandas.api.types.pandas_dtype("object")),
+ ]
+ assert columns_and_indexes == expected
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_list_columns_and_indexes_with_multiindex(module_under_test):
+ df_data = collections.OrderedDict(
+ [
+ ("a_series", [1, 2, 3, 4]),
+ ("b_series", [0.1, 0.2, 0.3, 0.4]),
+ ("c_series", ["a", "b", "c", "d"]),
+ ]
+ )
+ dataframe = pandas.DataFrame(
+ df_data,
+ index=pandas.MultiIndex.from_tuples(
+ [(0, 0, 41), (0, 0, 42), (1, 0, 41), (1, 1, 41)],
+ names=[
+ "a_index",
+ # Use same name as column, but different dtype so we can verify
+ # the column type is included.
+ "b_series",
+ "c_index",
+ ],
+ ),
+ )
+
+ columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
+ expected = [
+ ("a_index", pandas.api.types.pandas_dtype("int64")),
+ ("c_index", pandas.api.types.pandas_dtype("int64")),
+ ("a_series", pandas.api.types.pandas_dtype("int64")),
+ ("b_series", pandas.api.types.pandas_dtype("float64")),
+ ("c_series", pandas.api.types.pandas_dtype("object")),
+ ]
+ assert columns_and_indexes == expected
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_dataframe_to_arrow_with_multiindex(module_under_test):
+ bq_schema = (
+ schema.SchemaField("str_index", "STRING"),
+ # int_index is intentionally omitted, to verify that it's okay to be
+ # missing indexes from the schema.
+ schema.SchemaField("dt_index", "DATETIME"),
+ schema.SchemaField("int_col", "INTEGER"),
+ schema.SchemaField("nullable_int_col", "INTEGER"),
+ schema.SchemaField("str_col", "STRING"),
+ )
+ df_data = collections.OrderedDict(
+ [
+ ("int_col", [1, 2, 3, 4, 5, 6]),
+ ("nullable_int_col", [6.0, float("nan"), 7.0, float("nan"), 8.0, 9.0]),
+ ("str_col", ["apple", "banana", "cherry", "durian", "etrog", "fig"]),
+ ]
+ )
+ df_index = pandas.MultiIndex.from_tuples(
+ [
+ ("a", 0, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
+ ("a", 0, datetime.datetime(2000, 1, 1, 0, 0, 0)),
+ ("a", 1, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
+ ("b", 1, datetime.datetime(2000, 1, 1, 0, 0, 0)),
+ ("b", 0, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
+ ("b", 0, datetime.datetime(2000, 1, 1, 0, 0, 0)),
+ ],
+ names=["str_index", "int_index", "dt_index"],
+ )
+ dataframe = pandas.DataFrame(df_data, index=df_index)
+
+ arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)
+
+ assert arrow_table.schema.names == [
+ "str_index",
+ "dt_index",
+ "int_col",
+ "nullable_int_col",
+ "str_col",
+ ]
+ arrow_data = arrow_table.to_pydict()
+ assert arrow_data["str_index"] == ["a", "a", "a", "b", "b", "b"]
+ expected_dt_index = [
+ pandas.Timestamp(dt)
+ for dt in (
+ datetime.datetime(1999, 12, 31, 23, 59, 59, 999999),
+ datetime.datetime(2000, 1, 1, 0, 0, 0),
+ datetime.datetime(1999, 12, 31, 23, 59, 59, 999999),
+ datetime.datetime(2000, 1, 1, 0, 0, 0),
+ datetime.datetime(1999, 12, 31, 23, 59, 59, 999999),
+ datetime.datetime(2000, 1, 1, 0, 0, 0),
+ )
+ ]
+ assert arrow_data["dt_index"] == expected_dt_index
+ assert arrow_data["int_col"] == [1, 2, 3, 4, 5, 6]
+ assert arrow_data["nullable_int_col"] == [6, None, 7, None, 8, 9]
+ assert arrow_data["str_col"] == [
+ "apple",
+ "banana",
+ "cherry",
+ "durian",
+ "etrog",
+ "fig",
+ ]
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_dataframe_to_arrow_with_required_fields(module_under_test):
+ bq_schema = (
+ schema.SchemaField("field01", "STRING", mode="REQUIRED"),
+ schema.SchemaField("field02", "BYTES", mode="REQUIRED"),
+ schema.SchemaField("field03", "INTEGER", mode="REQUIRED"),
+ schema.SchemaField("field04", "INT64", mode="REQUIRED"),
+ schema.SchemaField("field05", "FLOAT", mode="REQUIRED"),
+ schema.SchemaField("field06", "FLOAT64", mode="REQUIRED"),
+ schema.SchemaField("field07", "NUMERIC", mode="REQUIRED"),
+ schema.SchemaField("field08", "BIGNUMERIC", mode="REQUIRED"),
+ schema.SchemaField("field09", "BOOLEAN", mode="REQUIRED"),
+ schema.SchemaField("field10", "BOOL", mode="REQUIRED"),
+ schema.SchemaField("field11", "TIMESTAMP", mode="REQUIRED"),
+ schema.SchemaField("field12", "DATE", mode="REQUIRED"),
+ schema.SchemaField("field13", "TIME", mode="REQUIRED"),
+ schema.SchemaField("field14", "DATETIME", mode="REQUIRED"),
+ schema.SchemaField("field15", "GEOGRAPHY", mode="REQUIRED"),
+ )
+
+ data = {
+ "field01": ["hello", None, "world"],
+ "field02": [b"abd", b"efg", b"hij"],
+ "field03": [1, 2, 3],
+ "field04": [4, None, 5],
+ "field05": [1.25, 0.0, 9.75],
+ "field06": [-1.75, None, -3.5],
+ "field07": [
+ decimal.Decimal("1.2345"),
+ decimal.Decimal("6.7891"),
+ -decimal.Decimal("10.111213"),
+ ],
+ "field08": [
+ decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
+ None,
+ decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
+ ],
+ "field09": [True, False, True],
+ "field10": [False, True, None],
+ "field11": [
+ datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
+ datetime.datetime(2012, 12, 21, 9, 7, 42, tzinfo=datetime.timezone.utc),
+ datetime.datetime(2022, 7, 14, 23, 59, 59, tzinfo=datetime.timezone.utc),
+ ],
+ "field12": [datetime.date(9999, 12, 31), None, datetime.date(1970, 1, 1)],
+ "field13": [datetime.time(23, 59, 59, 999999), None, datetime.time(12, 0, 0)],
+ "field14": [
+ datetime.datetime(1970, 1, 1, 0, 0, 0),
+ None,
+ datetime.datetime(2012, 12, 21, 9, 7, 42),
+ ],
+ "field15": [
+ None,
+ "POINT(30 10)",
+ "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))",
+ ],
+ }
+ dataframe = pandas.DataFrame(data)
+
+ arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)
+ arrow_schema = arrow_table.schema
+
+ assert len(arrow_schema) == len(bq_schema)
+ for arrow_field in arrow_schema:
+ # Even if the remote schema is REQUIRED, there's a chance there's
+ # local NULL values. Arrow will gladly interpret these NULL values
+ # as non-NULL and give you an arbitrary value. See:
+ # https://github.com/googleapis/python-bigquery/issues/1692
+ assert arrow_field.nullable
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_dataframe_to_arrow_with_unknown_type(module_under_test):
+ bq_schema = (
+ schema.SchemaField("field00", "UNKNOWN_TYPE"),
+ schema.SchemaField("field01", "STRING"),
+ schema.SchemaField("field02", "BYTES"),
+ schema.SchemaField("field03", "INTEGER"),
+ )
+ dataframe = pandas.DataFrame(
+ {
+ "field00": ["whoami", "whatami"],
+ "field01": ["hello", "world"],
+ "field02": [b"abd", b"efg"],
+ "field03": [1, 2],
+ }
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)
+ arrow_schema = arrow_table.schema
+
+ assert len(warned) == 1
+ warning = warned[0]
+ assert "field00" in str(warning)
+
+ assert len(arrow_schema) == len(bq_schema)
+ assert arrow_schema[0].name == "field00"
+ assert arrow_schema[1].name == "field01"
+ assert arrow_schema[2].name == "field02"
+ assert arrow_schema[3].name == "field03"
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_dataframe_to_arrow_dict_sequence_schema(module_under_test):
+ dict_schema = [
+ {"name": "field01", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "field02", "type": "BOOL", "mode": "NULLABLE"},
+ ]
+
+ dataframe = pandas.DataFrame(
+ {"field01": ["hello", "world"], "field02": [True, False]}
+ )
+
+ arrow_table = module_under_test.dataframe_to_arrow(dataframe, dict_schema)
+ arrow_schema = arrow_table.schema
+
+ expected_fields = [
+ # Even if the remote schema is REQUIRED, there's a chance there's
+ # local NULL values. Arrow will gladly interpret these NULL values
+ # as non-NULL and give you an arbitrary value. See:
+ # https://github.com/googleapis/python-bigquery/issues/1692
+ pyarrow.field("field01", "string", nullable=True),
+ pyarrow.field("field02", "bool", nullable=True),
+ ]
+ assert list(arrow_schema) == expected_fields
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_dataframe_to_parquet_without_pyarrow(module_under_test, monkeypatch):
+ mock_pyarrow_import = mock.Mock()
+ mock_pyarrow_import.side_effect = exceptions.LegacyPyarrowError(
+ "pyarrow not installed"
+ )
+ monkeypatch.setattr(
+ _versions_helpers.PYARROW_VERSIONS, "try_import", mock_pyarrow_import
+ )
+
+ with pytest.raises(exceptions.LegacyPyarrowError):
+ module_under_test.dataframe_to_parquet(pandas.DataFrame(), (), None)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_dataframe_to_parquet_w_extra_fields(module_under_test):
+ with pytest.raises(ValueError) as exc_context:
+ module_under_test.dataframe_to_parquet(
+ pandas.DataFrame(), (schema.SchemaField("not_in_df", "STRING"),), None
+ )
+ message = str(exc_context.value)
+ assert "bq_schema contains fields not present in dataframe" in message
+ assert "not_in_df" in message
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_dataframe_to_parquet_w_missing_fields(module_under_test):
+ with pytest.raises(ValueError) as exc_context:
+ module_under_test.dataframe_to_parquet(
+ pandas.DataFrame({"not_in_bq": [1, 2, 3]}), (), None
+ )
+ message = str(exc_context.value)
+ assert "bq_schema is missing fields from dataframe" in message
+ assert "not_in_bq" in message
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_dataframe_to_parquet_compression_method(module_under_test):
+ bq_schema = (schema.SchemaField("field00", "STRING"),)
+ dataframe = pandas.DataFrame({"field00": ["foo", "bar"]})
+
+ write_table_patch = mock.patch.object(
+ module_under_test.pyarrow.parquet, "write_table", autospec=True
+ )
+
+ with write_table_patch as fake_write_table:
+ module_under_test.dataframe_to_parquet(
+ dataframe, bq_schema, None, parquet_compression="ZSTD"
+ )
+
+ call_args = fake_write_table.call_args
+ assert call_args is not None
+ assert call_args[1].get("compression") == "ZSTD"
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_dataframe_to_bq_schema_w_named_index(module_under_test):
+ df_data = collections.OrderedDict(
+ [
+ ("str_column", ["hello", "world"]),
+ ("int_column", [42, 8]),
+ ("bool_column", [True, False]),
+ ]
+ )
+ index = pandas.Index(["a", "b"], name="str_index")
+ dataframe = pandas.DataFrame(df_data, index=index)
+
+ returned_schema = module_under_test.dataframe_to_bq_schema(dataframe, [])
+
+ expected_schema = (
+ schema.SchemaField("str_index", "STRING", "NULLABLE"),
+ schema.SchemaField("str_column", "STRING", "NULLABLE"),
+ schema.SchemaField("int_column", "INTEGER", "NULLABLE"),
+ schema.SchemaField("bool_column", "BOOLEAN", "NULLABLE"),
+ )
+ assert returned_schema == expected_schema
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_dataframe_to_bq_schema_w_multiindex(module_under_test):
+ df_data = collections.OrderedDict(
+ [
+ ("str_column", ["hello", "world"]),
+ ("int_column", [42, 8]),
+ ("bool_column", [True, False]),
+ ]
+ )
+ index = pandas.MultiIndex.from_tuples(
+ [
+ ("a", 0, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
+ ("a", 0, datetime.datetime(2000, 1, 1, 0, 0, 0)),
+ ],
+ names=["str_index", "int_index", "dt_index"],
+ )
+ dataframe = pandas.DataFrame(df_data, index=index)
+
+ returned_schema = module_under_test.dataframe_to_bq_schema(dataframe, [])
+
+ expected_schema = (
+ schema.SchemaField("str_index", "STRING", "NULLABLE"),
+ schema.SchemaField("int_index", "INTEGER", "NULLABLE"),
+ schema.SchemaField("dt_index", "DATETIME", "NULLABLE"),
+ schema.SchemaField("str_column", "STRING", "NULLABLE"),
+ schema.SchemaField("int_column", "INTEGER", "NULLABLE"),
+ schema.SchemaField("bool_column", "BOOLEAN", "NULLABLE"),
+ )
+ assert returned_schema == expected_schema
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_dataframe_to_bq_schema_w_bq_schema(module_under_test):
+ df_data = collections.OrderedDict(
+ [
+ ("str_column", ["hello", "world"]),
+ ("int_column", [42, 8]),
+ ("bool_column", [True, False]),
+ ]
+ )
+ dataframe = pandas.DataFrame(df_data)
+
+ dict_schema = [
+ {"name": "str_column", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "bool_column", "type": "BOOL", "mode": "REQUIRED"},
+ ]
+
+ returned_schema = module_under_test.dataframe_to_bq_schema(dataframe, dict_schema)
+
+ expected_schema = (
+ schema.SchemaField("str_column", "STRING", "NULLABLE"),
+ schema.SchemaField("int_column", "INTEGER", "NULLABLE"),
+ schema.SchemaField("bool_column", "BOOL", "REQUIRED"),
+ )
+ assert returned_schema == expected_schema
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_dataframe_to_bq_schema_fallback_needed_wo_pyarrow(module_under_test):
+ dataframe = pandas.DataFrame(
+ data=[
+ {"id": 10, "status": "FOO", "execution_date": datetime.date(2019, 5, 10)},
+ {"id": 20, "status": "BAR", "created_at": datetime.date(2018, 9, 12)},
+ ]
+ )
+
+ no_pyarrow_patch = mock.patch(module_under_test.__name__ + ".pyarrow", None)
+
+ with no_pyarrow_patch, warnings.catch_warnings(record=True) as warned:
+ detected_schema = module_under_test.dataframe_to_bq_schema(
+ dataframe, bq_schema=[]
+ )
+
+ assert detected_schema is None
+
+ # a warning should also be issued
+ expected_warnings = [
+ warning for warning in warned if "could not determine" in str(warning).lower()
+ ]
+ assert len(expected_warnings) == 1
+ msg = str(expected_warnings[0])
+ assert "execution_date" in msg and "created_at" in msg
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_dataframe_to_bq_schema_fallback_needed_w_pyarrow(module_under_test):
+ dataframe = pandas.DataFrame(
+ data=[
+ {"id": 10, "status": "FOO", "created_at": datetime.date(2019, 5, 10)},
+ {"id": 20, "status": "BAR", "created_at": datetime.date(2018, 9, 12)},
+ ]
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ detected_schema = module_under_test.dataframe_to_bq_schema(
+ dataframe, bq_schema=[]
+ )
+
+ expected_schema = (
+ schema.SchemaField("id", "INTEGER", mode="NULLABLE"),
+ schema.SchemaField("status", "STRING", mode="NULLABLE"),
+ schema.SchemaField("created_at", "DATE", mode="NULLABLE"),
+ )
+ by_name = operator.attrgetter("name")
+ assert sorted(detected_schema, key=by_name) == sorted(expected_schema, key=by_name)
+
+ # there should be no relevant warnings
+ unwanted_warnings = [
+ warning for warning in warned if "could not determine" in str(warning).lower()
+ ]
+ assert not unwanted_warnings
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_dataframe_to_bq_schema_pyarrow_fallback_fails(module_under_test):
+ dataframe = pandas.DataFrame(
+ data=[
+ {"struct_field": {"one": 2}, "status": "FOO"},
+ {"struct_field": {"two": "222"}, "status": "BAR"},
+ ]
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ detected_schema = module_under_test.dataframe_to_bq_schema(
+ dataframe, bq_schema=[]
+ )
+
+ assert detected_schema is None
+
+ # a warning should also be issued
+ expected_warnings = [
+ warning for warning in warned if "could not determine" in str(warning).lower()
+ ]
+ assert len(expected_warnings) == 1
+ assert "struct_field" in str(expected_warnings[0])
+
+
+@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
+def test_dataframe_to_bq_schema_geography(module_under_test):
+ from shapely import wkt
+
+ df = geopandas.GeoDataFrame(
+ pandas.DataFrame(
+ dict(
+ name=["foo", "bar"],
+ geo1=[None, None],
+ geo2=[None, wkt.loads("Point(1 1)")],
+ )
+ ),
+ geometry="geo1",
+ )
+ bq_schema = module_under_test.dataframe_to_bq_schema(df, [])
+ assert bq_schema == (
+ schema.SchemaField("name", "STRING"),
+ schema.SchemaField("geo1", "GEOGRAPHY"),
+ schema.SchemaField("geo2", "GEOGRAPHY"),
+ )
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test__first_array_valid_no_valid_items(module_under_test):
+ series = pandas.Series([None, pandas.NA, float("NaN")])
+ result = module_under_test._first_array_valid(series)
+ assert result is None
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test__first_array_valid_valid_item_exists(module_under_test):
+ series = pandas.Series([None, [0], [1], None])
+ result = module_under_test._first_array_valid(series)
+ assert result == 0
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test__first_array_valid_all_nan_items_in_first_valid_candidate(module_under_test):
+ import numpy
+
+ series = pandas.Series(
+ [
+ None,
+ [None, float("NaN"), pandas.NA, pandas.NaT, numpy.nan],
+ None,
+ [None, None],
+ [None, float("NaN"), pandas.NA, pandas.NaT, numpy.nan, 42, None],
+ [1, 2, 3],
+ None,
+ ]
+ )
+ result = module_under_test._first_array_valid(series)
+ assert result == 42
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test__first_array_valid_no_arrays_with_valid_items(module_under_test):
+ series = pandas.Series([[None, None], [None, None]])
+ result = module_under_test._first_array_valid(series)
+ assert result is None
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_augment_schema_type_detection_succeeds(module_under_test):
+ dataframe = pandas.DataFrame(
+ data=[
+ {
+ "bool_field": False,
+ "int_field": 123,
+ "float_field": 3.141592,
+ "time_field": datetime.time(17, 59, 47),
+ "timestamp_field": datetime.datetime(2005, 5, 31, 14, 25, 55),
+ "date_field": datetime.date(2005, 5, 31),
+ "bytes_field": b"some bytes",
+ "string_field": "some characters",
+ "numeric_field": decimal.Decimal("123.456"),
+ "bignumeric_field": decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
+ }
+ ]
+ )
+
+ # NOTE: In Pandas dataframe, the dtype of Python's datetime instances is
+ # set to "datetime64[ns]", and pyarrow converts that to pyarrow.TimestampArray.
+ # We thus cannot expect to get a DATETIME date when converting back to the
+ # BigQuery type.
+
+ current_schema = (
+ schema.SchemaField("bool_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("int_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("float_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("time_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("timestamp_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("date_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("bytes_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("string_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("numeric_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("bignumeric_field", field_type=None, mode="NULLABLE"),
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
+
+ # there should be no relevant warnings
+ unwanted_warnings = [
+ warning for warning in warned if "Pyarrow could not" in str(warning)
+ ]
+ assert not unwanted_warnings
+
+ # the augmented schema must match the expected
+ expected_schema = (
+ schema.SchemaField("bool_field", field_type="BOOL", mode="NULLABLE"),
+ schema.SchemaField("int_field", field_type="INT64", mode="NULLABLE"),
+ schema.SchemaField("float_field", field_type="FLOAT64", mode="NULLABLE"),
+ schema.SchemaField("time_field", field_type="TIME", mode="NULLABLE"),
+ schema.SchemaField("timestamp_field", field_type="TIMESTAMP", mode="NULLABLE"),
+ schema.SchemaField("date_field", field_type="DATE", mode="NULLABLE"),
+ schema.SchemaField("bytes_field", field_type="BYTES", mode="NULLABLE"),
+ schema.SchemaField("string_field", field_type="STRING", mode="NULLABLE"),
+ schema.SchemaField("numeric_field", field_type="NUMERIC", mode="NULLABLE"),
+ schema.SchemaField(
+ "bignumeric_field", field_type="BIGNUMERIC", mode="NULLABLE"
+ ),
+ )
+
+ by_name = operator.attrgetter("name")
+ assert sorted(augmented_schema, key=by_name) == sorted(expected_schema, key=by_name)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_augment_schema_repeated_fields(module_under_test):
+ dataframe = pandas.DataFrame(
+ data=[
+ # Include some values useless for type detection to make sure the logic
+ # indeed finds the value that is suitable.
+ {"string_array": None, "timestamp_array": None, "datetime_array": None},
+ {
+ "string_array": [None],
+ "timestamp_array": [None],
+ "datetime_array": [None],
+ },
+ {"string_array": None, "timestamp_array": None, "datetime_array": None},
+ {
+ "string_array": [None, "foo"],
+ "timestamp_array": [
+ None,
+ datetime.datetime(
+ 2005, 5, 31, 14, 25, 55, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ "datetime_array": [None, datetime.datetime(2005, 5, 31, 14, 25, 55)],
+ },
+ {"string_array": None, "timestamp_array": None, "datetime_array": None},
+ ]
+ )
+
+ current_schema = (
+ schema.SchemaField("string_array", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("timestamp_array", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("datetime_array", field_type=None, mode="NULLABLE"),
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
+
+ # there should be no relevant warnings
+ unwanted_warnings = [
+ warning for warning in warned if "Pyarrow could not" in str(warning)
+ ]
+ assert not unwanted_warnings
+
+ # the augmented schema must match the expected
+ expected_schema = (
+ schema.SchemaField("string_array", field_type="STRING", mode="REPEATED"),
+ schema.SchemaField("timestamp_array", field_type="TIMESTAMP", mode="REPEATED"),
+ schema.SchemaField("datetime_array", field_type="DATETIME", mode="REPEATED"),
+ )
+
+ by_name = operator.attrgetter("name")
+ assert sorted(augmented_schema, key=by_name) == sorted(expected_schema, key=by_name)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_augment_schema_type_detection_fails(module_under_test):
+ dataframe = pandas.DataFrame(
+ data=[
+ {
+ "status": "FOO",
+ "struct_field": {"one": 1},
+ "struct_field_2": {"foo": "123"},
+ },
+ {
+ "status": "BAR",
+ "struct_field": {"two": "111"},
+ "struct_field_2": {"bar": 27},
+ },
+ ]
+ )
+ current_schema = [
+ schema.SchemaField("status", field_type="STRING", mode="NULLABLE"),
+ schema.SchemaField("struct_field", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("struct_field_2", field_type=None, mode="NULLABLE"),
+ ]
+
+ with warnings.catch_warnings(record=True) as warned:
+ augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
+
+ assert augmented_schema is None
+
+ expected_warnings = [
+ warning for warning in warned if "could not determine" in str(warning)
+ ]
+ assert len(expected_warnings) == 1
+ warning_msg = str(expected_warnings[0])
+ assert "pyarrow" in warning_msg.lower()
+ assert "struct_field" in warning_msg and "struct_field_2" in warning_msg
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_augment_schema_type_detection_fails_array_data(module_under_test):
+ dataframe = pandas.DataFrame(
+ data=[{"all_none_array": [None, float("NaN")], "empty_array": []}]
+ )
+ current_schema = [
+ schema.SchemaField("all_none_array", field_type=None, mode="NULLABLE"),
+ schema.SchemaField("empty_array", field_type=None, mode="NULLABLE"),
+ ]
+
+ with warnings.catch_warnings(record=True) as warned:
+ augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
+
+ assert augmented_schema is None
+
+ expected_warnings = [
+ warning for warning in warned if "could not determine" in str(warning)
+ ]
+ assert len(expected_warnings) == 1
+ warning_msg = str(expected_warnings[0])
+ assert "pyarrow" in warning_msg.lower()
+ assert "all_none_array" in warning_msg and "empty_array" in warning_msg
+
+
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_dataframe_to_parquet_dict_sequence_schema(module_under_test):
+ pandas = pytest.importorskip("pandas")
+
+ dict_schema = [
+ {"name": "field01", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "field02", "type": "BOOL", "mode": "NULLABLE"},
+ ]
+
+ dataframe = pandas.DataFrame(
+ {"field01": ["hello", "world"], "field02": [True, False]}
+ )
+
+ write_table_patch = mock.patch.object(
+ module_under_test.pyarrow.parquet, "write_table", autospec=True
+ )
+ to_arrow_patch = mock.patch.object(
+ module_under_test, "dataframe_to_arrow", autospec=True
+ )
+
+ with write_table_patch, to_arrow_patch as fake_to_arrow:
+ module_under_test.dataframe_to_parquet(dataframe, dict_schema, None)
+
+ expected_schema_arg = [
+ schema.SchemaField("field01", "STRING", mode="REQUIRED"),
+ schema.SchemaField("field02", "BOOL", mode="NULLABLE"),
+ ]
+ schema_arg = fake_to_arrow.call_args[0][1]
+ assert schema_arg == expected_schema_arg
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test__download_table_bqstorage_stream_includes_read_session(
+ monkeypatch, module_under_test
+):
+ import google.cloud.bigquery_storage_v1.reader
+ import google.cloud.bigquery_storage_v1.types
+
+ monkeypatch.setattr(
+ _versions_helpers.BQ_STORAGE_VERSIONS, "_installed_version", None
+ )
+ monkeypatch.setattr(bigquery_storage, "__version__", "2.5.0")
+ bqstorage_client = mock.create_autospec(
+ bigquery_storage.BigQueryReadClient, instance=True
+ )
+ reader = mock.create_autospec(
+ google.cloud.bigquery_storage_v1.reader.ReadRowsStream, instance=True
+ )
+ bqstorage_client.read_rows.return_value = reader
+ session = google.cloud.bigquery_storage_v1.types.ReadSession()
+
+ module_under_test._download_table_bqstorage_stream(
+ module_under_test._DownloadState(),
+ bqstorage_client,
+ session,
+ google.cloud.bigquery_storage_v1.types.ReadStream(name="test"),
+ queue.Queue(),
+ mock.Mock(),
+ )
+
+ reader.rows.assert_called_once_with(session)
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None
+ or not _versions_helpers.BQ_STORAGE_VERSIONS.is_read_session_optional,
+ reason="Requires `google-cloud-bigquery-storage` >= 2.6.0",
+)
+def test__download_table_bqstorage_stream_omits_read_session(
+ monkeypatch, module_under_test
+):
+ import google.cloud.bigquery_storage_v1.reader
+ import google.cloud.bigquery_storage_v1.types
+
+ monkeypatch.setattr(
+ _versions_helpers.BQ_STORAGE_VERSIONS, "_installed_version", None
+ )
+ monkeypatch.setattr(bigquery_storage, "__version__", "2.6.0")
+ bqstorage_client = mock.create_autospec(
+ bigquery_storage.BigQueryReadClient, instance=True
+ )
+ reader = mock.create_autospec(
+ google.cloud.bigquery_storage_v1.reader.ReadRowsStream, instance=True
+ )
+ bqstorage_client.read_rows.return_value = reader
+ session = google.cloud.bigquery_storage_v1.types.ReadSession()
+
+ module_under_test._download_table_bqstorage_stream(
+ module_under_test._DownloadState(),
+ bqstorage_client,
+ session,
+ google.cloud.bigquery_storage_v1.types.ReadStream(name="test"),
+ queue.Queue(),
+ mock.Mock(),
+ )
+
+ reader.rows.assert_called_once_with()
+
+
+@pytest.mark.parametrize(
+ "stream_count,maxsize_kwarg,expected_call_count,expected_maxsize",
+ [
+ (3, {"max_queue_size": 2}, 3, 2), # custom queue size
+ (4, {}, 4, 4), # default queue size
+ (7, {"max_queue_size": None}, 7, 0), # infinite queue size
+ ],
+)
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test__download_table_bqstorage(
+ module_under_test,
+ stream_count,
+ maxsize_kwarg,
+ expected_call_count,
+ expected_maxsize,
+):
+ from google.cloud.bigquery import dataset
+ from google.cloud.bigquery import table
+
+ queue_used = None # A reference to the queue used by code under test.
+
+ bqstorage_client = mock.create_autospec(
+ bigquery_storage.BigQueryReadClient, instance=True
+ )
+ fake_session = mock.Mock(streams=["stream/s{i}" for i in range(stream_count)])
+ bqstorage_client.create_read_session.return_value = fake_session
+
+ table_ref = table.TableReference(
+ dataset.DatasetReference("project-x", "dataset-y"),
+ "table-z",
+ )
+
+ def fake_download_stream(
+ download_state, bqstorage_client, session, stream, worker_queue, page_to_item
+ ):
+ nonlocal queue_used
+ queue_used = worker_queue
+ try:
+ worker_queue.put_nowait("result_page")
+ except queue.Full: # pragma: NO COVER
+ pass
+
+ download_stream = mock.Mock(side_effect=fake_download_stream)
+
+ with mock.patch.object(
+ module_under_test, "_download_table_bqstorage_stream", new=download_stream
+ ):
+ result_gen = module_under_test._download_table_bqstorage(
+ "some-project", table_ref, bqstorage_client, **maxsize_kwarg
+ )
+ list(result_gen)
+
+ # Timing-safe, as the method under test should block until the pool shutdown is
+ # complete, at which point all download stream workers have already been submitted
+ # to the thread pool.
+ assert download_stream.call_count == stream_count # once for each stream
+ assert queue_used.maxsize == expected_maxsize
+
+
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_download_arrow_row_iterator_unknown_field_type(module_under_test):
+ fake_page = api_core.page_iterator.Page(
+ parent=mock.Mock(),
+ items=[{"page_data": "foo"}],
+ item_to_value=api_core.page_iterator._item_to_value_identity,
+ )
+ fake_page._columns = [[1, 10, 100], [2.2, 22.22, 222.222]]
+ pages = [fake_page]
+
+ bq_schema = [
+ schema.SchemaField("population_size", "INTEGER"),
+ schema.SchemaField("alien_field", "ALIEN_FLOAT_TYPE"),
+ ]
+
+ results_gen = module_under_test.download_arrow_row_iterator(pages, bq_schema)
+
+ with warnings.catch_warnings(record=True) as warned:
+ result = next(results_gen)
+
+ unwanted_warnings = [
+ warning
+ for warning in warned
+ if "please pass schema= explicitly" in str(warning).lower()
+ ]
+ assert not unwanted_warnings
+
+ assert len(result.columns) == 2
+ col = result.columns[0]
+ assert type(col) is pyarrow.lib.Int64Array
+ assert col.to_pylist() == [1, 10, 100]
+ col = result.columns[1]
+ assert type(col) is pyarrow.lib.DoubleArray
+ assert col.to_pylist() == [2.2, 22.22, 222.222]
+
+
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_download_arrow_row_iterator_known_field_type(module_under_test):
+ fake_page = api_core.page_iterator.Page(
+ parent=mock.Mock(),
+ items=[{"page_data": "foo"}],
+ item_to_value=api_core.page_iterator._item_to_value_identity,
+ )
+ fake_page._columns = [[1, 10, 100], ["2.2", "22.22", "222.222"]]
+ pages = [fake_page]
+
+ bq_schema = [
+ schema.SchemaField("population_size", "INTEGER"),
+ schema.SchemaField("non_alien_field", "STRING"),
+ ]
+
+ results_gen = module_under_test.download_arrow_row_iterator(pages, bq_schema)
+ with warnings.catch_warnings(record=True) as warned:
+ result = next(results_gen)
+
+ unwanted_warnings = [
+ warning
+ for warning in warned
+ if "please pass schema= explicitly" in str(warning).lower()
+ ]
+ assert not unwanted_warnings
+
+ assert len(result.columns) == 2
+ col = result.columns[0]
+ assert type(col) is pyarrow.lib.Int64Array
+ assert col.to_pylist() == [1, 10, 100]
+ col = result.columns[1]
+ assert type(col) is pyarrow.lib.StringArray
+ assert col.to_pylist() == ["2.2", "22.22", "222.222"]
+
+
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_download_arrow_row_iterator_dict_sequence_schema(module_under_test):
+ fake_page = api_core.page_iterator.Page(
+ parent=mock.Mock(),
+ items=[{"page_data": "foo"}],
+ item_to_value=api_core.page_iterator._item_to_value_identity,
+ )
+ fake_page._columns = [[1, 10, 100], ["2.2", "22.22", "222.222"]]
+ pages = [fake_page]
+
+ dict_schema = [
+ {"name": "population_size", "type": "INTEGER", "mode": "NULLABLE"},
+ {"name": "non_alien_field", "type": "STRING", "mode": "NULLABLE"},
+ ]
+
+ results_gen = module_under_test.download_arrow_row_iterator(pages, dict_schema)
+ result = next(results_gen)
+
+ assert len(result.columns) == 2
+ col = result.columns[0]
+ assert type(col) is pyarrow.lib.Int64Array
+ assert col.to_pylist() == [1, 10, 100]
+ col = result.columns[1]
+ assert type(col) is pyarrow.lib.StringArray
+ assert col.to_pylist() == ["2.2", "22.22", "222.222"]
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_download_dataframe_row_iterator_dict_sequence_schema(module_under_test):
+ fake_page = api_core.page_iterator.Page(
+ parent=mock.Mock(),
+ items=[{"page_data": "foo"}],
+ item_to_value=api_core.page_iterator._item_to_value_identity,
+ )
+ fake_page._columns = [[1, 10, 100], ["2.2", "22.22", "222.222"]]
+ pages = [fake_page]
+
+ dict_schema = [
+ {"name": "population_size", "type": "INTEGER", "mode": "NULLABLE"},
+ {"name": "non_alien_field", "type": "STRING", "mode": "NULLABLE"},
+ ]
+
+ results_gen = module_under_test.download_dataframe_row_iterator(
+ pages, dict_schema, dtypes={}
+ )
+ result = next(results_gen)
+
+ expected_result = pandas.DataFrame(
+ collections.OrderedDict(
+ [
+ ("population_size", [1, 10, 100]),
+ ("non_alien_field", ["2.2", "22.22", "222.222"]),
+ ]
+ )
+ )
+ assert result.equals(expected_result)
+
+ with pytest.raises(StopIteration):
+ result = next(results_gen)
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_table_data_listpage_to_dataframe_skips_stop_iteration(module_under_test):
+ dataframe = module_under_test._row_iterator_page_to_dataframe([], [], {})
+ assert isinstance(dataframe, pandas.DataFrame)
+
+
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_field_type_override(module_under_test):
+ # When loading pandas data, we may need to override the type
+ # decision based on data contents, because GEOGRAPHY data can be
+ # stored as either text or binary.
+
+ assert (
+ module_under_test.bq_to_arrow_field(schema.SchemaField("g", "GEOGRAPHY")).type
+ == pyarrow.string()
+ )
+
+ assert (
+ module_under_test.bq_to_arrow_field(
+ schema.SchemaField("g", "GEOGRAPHY"),
+ pyarrow.binary(),
+ ).type
+ == pyarrow.binary()
+ )
+
+
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_field_set_repeated_nullable_false(module_under_test):
+ assert (
+ module_under_test.bq_to_arrow_field(
+ schema.SchemaField("name", "STRING", mode="REPEATED")
+ ).nullable
+ is False
+ )
+
+ assert (
+ module_under_test.bq_to_arrow_field(
+ schema.SchemaField("name", "STRING", mode="NULLABLE")
+ ).nullable
+ is True
+ )
+
+
+@pytest.mark.parametrize(
+ "field_type, metadata",
+ [
+ ("datetime", {b"ARROW:extension:name": b"google:sqlType:datetime"}),
+ (
+ "geography",
+ {
+ b"ARROW:extension:name": b"google:sqlType:geography",
+ b"ARROW:extension:metadata": b'{"encoding": "WKT"}',
+ },
+ ),
+ ],
+)
+@pytest.mark.skipif(isinstance(pyarrow, mock.Mock), reason="Requires `pyarrow`")
+def test_bq_to_arrow_field_metadata(module_under_test, field_type, metadata):
+ assert (
+ module_under_test.bq_to_arrow_field(
+ schema.SchemaField("g", field_type)
+ ).metadata
+ == metadata
+ )
+
+
+def test_verify_pandas_imports_no_pandas(module_under_test, monkeypatch):
+ monkeypatch.setattr(module_under_test, "pandas", None)
+ with pytest.raises(ValueError, match="Please install the 'pandas' package"):
+ module_under_test.verify_pandas_imports()
+
+
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_verify_pandas_imports_no_db_dtypes(module_under_test, monkeypatch):
+ monkeypatch.setattr(module_under_test, "db_dtypes", None)
+ with pytest.raises(ValueError, match="Please install the 'db-dtypes' package"):
+ module_under_test.verify_pandas_imports()
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test__pyarrow_helpers.py b/testbed/googleapis__python-bigquery/tests/unit/test__pyarrow_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0a872c884143fe28c342cab0edd248925515d6f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test__pyarrow_helpers.py
@@ -0,0 +1,38 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+pyarrow = pytest.importorskip("pyarrow", minversion="3.0.0")
+
+
+@pytest.fixture
+def module_under_test():
+ from google.cloud.bigquery import _pyarrow_helpers
+
+ return _pyarrow_helpers
+
+
+def test_bq_to_arrow_scalars(module_under_test):
+ assert (
+ module_under_test.bq_to_arrow_scalars("BIGNUMERIC")
+ == module_under_test.pyarrow_bignumeric
+ )
+ assert module_under_test.bq_to_arrow_scalars("UNKNOWN_TYPE") is None
+
+
+def test_arrow_scalar_ids_to_bq(module_under_test):
+ assert module_under_test.arrow_scalar_ids_to_bq(pyarrow.bool_().id) == "BOOL"
+ assert module_under_test.arrow_scalar_ids_to_bq("UNKNOWN_TYPE") is None
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test__versions_helpers.py b/testbed/googleapis__python-bigquery/tests/unit/test__versions_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1d0ef1acc0ca4d4516cfa3620b064ef9a98a4eb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test__versions_helpers.py
@@ -0,0 +1,243 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+import pytest
+
+try:
+ import pyarrow # type: ignore
+except ImportError:
+ pyarrow = None
+
+try:
+ from google.cloud import bigquery_storage # type: ignore
+except ImportError:
+ bigquery_storage = None
+
+try:
+ import pandas # type: ignore
+except ImportError:
+ pandas = None
+
+from google.cloud.bigquery import _versions_helpers
+from google.cloud.bigquery import exceptions
+
+
+@pytest.mark.skipif(pyarrow is None, reason="pyarrow is not installed")
+def test_try_import_raises_no_error_w_recent_pyarrow():
+ versions = _versions_helpers.PyarrowVersions()
+ with mock.patch("pyarrow.__version__", new="5.0.0"):
+ pyarrow = versions.try_import(raise_if_error=True)
+ assert pyarrow is not None
+
+
+@pytest.mark.skipif(pyarrow is None, reason="pyarrow is not installed")
+def test_try_import_returns_none_w_legacy_pyarrow():
+ versions = _versions_helpers.PyarrowVersions()
+ with mock.patch("pyarrow.__version__", new="2.0.0"):
+ pyarrow = versions.try_import()
+ assert pyarrow is None
+
+
+@pytest.mark.skipif(pyarrow is None, reason="pyarrow is not installed")
+def test_try_import_raises_error_w_legacy_pyarrow():
+ versions = _versions_helpers.PyarrowVersions()
+ with mock.patch("pyarrow.__version__", new="2.0.0"):
+ with pytest.raises(exceptions.LegacyPyarrowError):
+ versions.try_import(raise_if_error=True)
+
+
+@pytest.mark.skipif(
+ pyarrow is not None,
+ reason="pyarrow is installed, but this test needs it not to be",
+)
+def test_try_import_raises_error_w_no_pyarrow():
+ versions = _versions_helpers.PyarrowVersions()
+ with pytest.raises(exceptions.LegacyPyarrowError):
+ versions.try_import(raise_if_error=True)
+
+
+@pytest.mark.skipif(pyarrow is None, reason="pyarrow is not installed")
+def test_installed_pyarrow_version_returns_cached():
+ versions = _versions_helpers.PyarrowVersions()
+ versions._installed_version = object()
+ assert versions.installed_version is versions._installed_version
+
+
+@pytest.mark.skipif(pyarrow is None, reason="pyarrow is not installed")
+def test_installed_pyarrow_version_returns_parsed_version():
+ versions = _versions_helpers.PyarrowVersions()
+ with mock.patch("pyarrow.__version__", new="1.2.3"):
+ version = versions.installed_version
+
+ assert version.major == 1
+ assert version.minor == 2
+ assert version.micro == 3
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_raises_no_error_w_recent_bqstorage():
+ with mock.patch("google.cloud.bigquery_storage.__version__", new="2.0.0"):
+ try:
+ bqstorage_versions = _versions_helpers.BQStorageVersions()
+ bqstorage_versions.try_import(raise_if_error=True)
+ except exceptions.LegacyBigQueryStorageError: # pragma: NO COVER
+ raise ("Legacy error raised with a non-legacy dependency version.")
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_raises_error_w_legacy_bqstorage():
+ with mock.patch("google.cloud.bigquery_storage.__version__", new="1.9.9"):
+ with pytest.raises(exceptions.LegacyBigQueryStorageError):
+ bqstorage_versions = _versions_helpers.BQStorageVersions()
+ bqstorage_versions.try_import(raise_if_error=True)
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_returns_none_with_legacy_bqstorage():
+ with mock.patch("google.cloud.bigquery_storage.__version__", new="1.9.9"):
+ try:
+ bqstorage_versions = _versions_helpers.BQStorageVersions()
+ bq_storage = bqstorage_versions.try_import()
+ except exceptions.LegacyBigQueryStorageError: # pragma: NO COVER
+ raise ("Legacy error raised when raise_if_error == False.")
+ assert bq_storage is None
+
+
+@pytest.mark.skipif(
+ bigquery_storage is not None,
+ reason="Tests behavior when `google-cloud-bigquery-storage` isn't installed",
+)
+def test_returns_none_with_bqstorage_uninstalled():
+ try:
+ bqstorage_versions = _versions_helpers.BQStorageVersions()
+ bq_storage = bqstorage_versions.try_import()
+ except exceptions.LegacyBigQueryStorageError: # pragma: NO COVER
+ raise ("NotFound error raised when raise_if_error == False.")
+ assert bq_storage is None
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_raises_error_w_unknown_bqstorage_version():
+ with mock.patch("google.cloud.bigquery_storage", autospec=True) as fake_module:
+ del fake_module.__version__
+ error_pattern = r"version found: 0.0.0"
+ with pytest.raises(exceptions.LegacyBigQueryStorageError, match=error_pattern):
+ bqstorage_versions = _versions_helpers.BQStorageVersions()
+ bqstorage_versions.try_import(raise_if_error=True)
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_installed_bqstorage_version_returns_cached():
+ bqstorage_versions = _versions_helpers.BQStorageVersions()
+ bqstorage_versions._installed_version = object()
+ assert bqstorage_versions.installed_version is bqstorage_versions._installed_version
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_installed_bqstorage_version_returns_parsed_version():
+ bqstorage_versions = _versions_helpers.BQStorageVersions()
+ with mock.patch("google.cloud.bigquery_storage.__version__", new="1.2.3"):
+ bqstorage_versions = bqstorage_versions.installed_version
+
+ assert bqstorage_versions.major == 1
+ assert bqstorage_versions.minor == 2
+ assert bqstorage_versions.micro == 3
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_bqstorage_is_read_session_optional_true():
+ bqstorage_versions = _versions_helpers.BQStorageVersions()
+ with mock.patch("google.cloud.bigquery_storage.__version__", new="2.6.0"):
+ assert bqstorage_versions.is_read_session_optional
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_bqstorage_is_read_session_optional_false():
+ bqstorage_versions = _versions_helpers.BQStorageVersions()
+ with mock.patch("google.cloud.bigquery_storage.__version__", new="2.5.0"):
+ assert not bqstorage_versions.is_read_session_optional
+
+
+@pytest.mark.skipif(pandas is None, reason="pandas is not installed")
+@pytest.mark.parametrize("version", ["1.5.0", "2.0.0", "2.1.0"])
+def test_try_import_raises_no_error_w_recent_pandas(version):
+ versions = _versions_helpers.PandasVersions()
+ with mock.patch("pandas.__version__", new=version):
+ try:
+ pandas = versions.try_import(raise_if_error=True)
+ assert pandas is not None
+ except exceptions.LegacyPandasError: # pragma: NO COVER
+ raise ("Legacy error raised with a non-legacy dependency version.")
+
+
+@pytest.mark.skipif(pandas is None, reason="pandas is not installed")
+def test_try_import_returns_none_w_legacy_pandas():
+ versions = _versions_helpers.PandasVersions()
+ with mock.patch("pandas.__version__", new="1.0.0"):
+ pandas = versions.try_import()
+ assert pandas is None
+
+
+@pytest.mark.skipif(pandas is None, reason="pandas is not installed")
+def test_try_import_raises_error_w_legacy_pandas():
+ versions = _versions_helpers.PandasVersions()
+ with mock.patch("pandas.__version__", new="1.0.0"):
+ with pytest.raises(exceptions.LegacyPandasError):
+ versions.try_import(raise_if_error=True)
+
+
+@pytest.mark.skipif(
+ pandas is not None,
+ reason="pandas is installed, but this test needs it not to be",
+)
+def test_try_import_raises_error_w_no_pandas():
+ versions = _versions_helpers.PandasVersions()
+ with pytest.raises(exceptions.LegacyPandasError):
+ versions.try_import(raise_if_error=True)
+
+
+@pytest.mark.skipif(pandas is None, reason="pandas is not installed")
+def test_installed_pandas_version_returns_cached():
+ versions = _versions_helpers.PandasVersions()
+ versions._installed_version = object()
+ assert versions.installed_version is versions._installed_version
+
+
+@pytest.mark.skipif(pandas is None, reason="pandas is not installed")
+def test_installed_pandas_version_returns_parsed_version():
+ versions = _versions_helpers.PandasVersions()
+ with mock.patch("pandas.__version__", new="1.1.0"):
+ version = versions.installed_version
+
+ assert version.major == 1
+ assert version.minor == 1
+ assert version.micro == 0
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_client.py b/testbed/googleapis__python-bigquery/tests/unit/test_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd336b73fa64a6c2745505b05feeebf920f3109c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_client.py
@@ -0,0 +1,9991 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import collections
+import datetime
+import decimal
+import email
+import gzip
+import http.client
+import io
+import itertools
+import json
+import operator
+import os
+import unittest
+from unittest import mock
+import warnings
+
+import requests
+import packaging
+import pytest
+
+
+try:
+ import opentelemetry
+except ImportError:
+ opentelemetry = None
+
+if opentelemetry is not None:
+ try:
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+ InMemorySpanExporter,
+ )
+ except (ImportError, AttributeError) as exc: # pragma: NO COVER
+ msg = "Error importing from opentelemetry, is the installed version compatible?"
+ raise ImportError(msg) from exc
+
+import google.api_core.exceptions
+from google.api_core import client_info
+import google.cloud._helpers
+from google.cloud import bigquery
+
+from google.cloud.bigquery.dataset import DatasetReference
+from google.cloud.bigquery import exceptions
+from google.cloud.bigquery import ParquetOptions
+import google.cloud.bigquery.retry
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+import google.cloud.bigquery.table
+
+from test_utils.imports import maybe_fail_import
+from tests.unit.helpers import make_connection
+
+
+def _make_credentials():
+ import google.auth.credentials
+
+ return mock.Mock(spec=google.auth.credentials.Credentials)
+
+
+def _make_list_partitons_meta_info(project, dataset_id, table_id, num_rows=0):
+ return {
+ "tableReference": {
+ "projectId": project,
+ "datasetId": dataset_id,
+ "tableId": "{}$__PARTITIONS_SUMMARY__".format(table_id),
+ },
+ "schema": {
+ "fields": [
+ {"name": "project_id", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "dataset_id", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "table_id", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "partition_id", "type": "STRING", "mode": "NULLABLE"},
+ ]
+ },
+ "etag": "ETAG",
+ "numRows": num_rows,
+ }
+
+
+class TestClient(unittest.TestCase):
+ PROJECT = "PROJECT"
+ DS_ID = "DATASET_ID"
+ TABLE_ID = "TABLE_ID"
+ MODEL_ID = "MODEL_ID"
+ TABLE_REF = DatasetReference(PROJECT, DS_ID).table(TABLE_ID)
+ KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
+ LOCATION = "us-central"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.client import Client
+
+ return Client
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def _make_table_resource(self):
+ return {
+ "id": "%s:%s:%s" % (self.PROJECT, self.DS_ID, self.TABLE_ID),
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ }
+
+ def test_ctor_defaults(self):
+ from google.cloud.bigquery._http import Connection
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ self.assertIsInstance(client._connection, Connection)
+ self.assertIs(client._connection.credentials, creds)
+ self.assertIs(client._connection.http, http)
+ self.assertIsNone(client.location)
+ self.assertEqual(
+ client._connection.API_BASE_URL, Connection.DEFAULT_API_ENDPOINT
+ )
+
+ def test_ctor_w_empty_client_options(self):
+ from google.api_core.client_options import ClientOptions
+
+ creds = _make_credentials()
+ http = object()
+ client_options = ClientOptions()
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ client_options=client_options,
+ )
+ self.assertEqual(
+ client._connection.API_BASE_URL, client._connection.DEFAULT_API_ENDPOINT
+ )
+
+ @mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"})
+ def test_ctor_w_only_env_universe(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ )
+ self.assertEqual(client._connection.API_BASE_URL, "https://bigquery.foo.com")
+
+ def test_ctor_w_client_options_dict(self):
+ creds = _make_credentials()
+ http = object()
+ client_options = {"api_endpoint": "https://www.foo-googleapis.com"}
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ client_options=client_options,
+ )
+ self.assertEqual(
+ client._connection.API_BASE_URL, "https://www.foo-googleapis.com"
+ )
+
+ def test_ctor_w_client_options_object(self):
+ from google.api_core.client_options import ClientOptions
+
+ creds = _make_credentials()
+ http = object()
+ client_options = ClientOptions(api_endpoint="https://www.foo-googleapis.com")
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ client_options=client_options,
+ )
+ self.assertEqual(
+ client._connection.API_BASE_URL, "https://www.foo-googleapis.com"
+ )
+
+ @pytest.mark.skipif(
+ packaging.version.parse(getattr(google.api_core, "__version__", "0.0.0"))
+ < packaging.version.Version("2.15.0"),
+ reason="universe_domain not supported with google-api-core < 2.15.0",
+ )
+ def test_ctor_w_client_options_universe(self):
+ creds = _make_credentials()
+ http = object()
+ client_options = {"universe_domain": "foo.com"}
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ client_options=client_options,
+ )
+ self.assertEqual(client._connection.API_BASE_URL, "https://bigquery.foo.com")
+
+ def test_ctor_w_location(self):
+ from google.cloud.bigquery._http import Connection
+
+ creds = _make_credentials()
+ http = object()
+ location = "us-central"
+ client = self._make_one(
+ project=self.PROJECT, credentials=creds, _http=http, location=location
+ )
+ self.assertIsInstance(client._connection, Connection)
+ self.assertIs(client._connection.credentials, creds)
+ self.assertIs(client._connection.http, http)
+ self.assertEqual(client.location, location)
+
+ def test_ctor_w_query_job_config(self):
+ from google.cloud.bigquery._http import Connection
+ from google.cloud.bigquery import QueryJobConfig
+
+ creds = _make_credentials()
+ http = object()
+ location = "us-central"
+ job_config = QueryJobConfig()
+ job_config.dry_run = True
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ location=location,
+ default_query_job_config=job_config,
+ )
+ self.assertIsInstance(client._connection, Connection)
+ self.assertIs(client._connection.credentials, creds)
+ self.assertIs(client._connection.http, http)
+ self.assertEqual(client.location, location)
+
+ self.assertIsInstance(client._default_query_job_config, QueryJobConfig)
+ self.assertTrue(client._default_query_job_config.dry_run)
+
+ def test_ctor_w_load_job_config(self):
+ from google.cloud.bigquery._http import Connection
+ from google.cloud.bigquery import LoadJobConfig
+
+ creds = _make_credentials()
+ http = object()
+ location = "us-central"
+ job_config = LoadJobConfig()
+ job_config.create_session = True
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ location=location,
+ default_load_job_config=job_config,
+ )
+ self.assertIsInstance(client._connection, Connection)
+ self.assertIs(client._connection.credentials, creds)
+ self.assertIs(client._connection.http, http)
+ self.assertEqual(client.location, location)
+
+ self.assertIsInstance(client._default_load_job_config, LoadJobConfig)
+ self.assertTrue(client._default_load_job_config.create_session)
+
+ def test__call_api_extra_headers(self):
+ # Note: We test at a lower layer to ensure that extra headers are
+ # populated when we actually make the call in requests.
+ # Arrange
+ http = mock.create_autospec(requests.Session, instance=True)
+ http.is_mtls = False
+ response = mock.create_autospec(requests.Response, instance=True)
+ response.status_code = 200
+ http.request.return_value = response
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ # Act
+ client._connection.extra_headers = {"x-goog-request-reason": "because-friday"}
+ client._call_api(
+ retry=None, method="GET", path="/bigquery/v2/projects/my-proj/jobs/my-job"
+ )
+
+ # Assert
+ http.request.assert_called_once()
+ _, kwargs = http.request.call_args
+ headers = kwargs["headers"]
+ assert headers["x-goog-request-reason"] == "because-friday"
+
+ def test__call_api_applying_custom_retry_on_timeout(self):
+ from concurrent.futures import TimeoutError
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+
+ api_request_patcher = mock.patch.object(
+ client._connection,
+ "api_request",
+ side_effect=[TimeoutError, "result"],
+ )
+ retry = DEFAULT_RETRY.with_deadline(1).with_predicate(
+ lambda exc: isinstance(exc, TimeoutError)
+ )
+
+ with api_request_patcher as fake_api_request:
+ result = client._call_api(retry, foo="bar")
+
+ self.assertEqual(result, "result")
+ self.assertEqual(
+ fake_api_request.call_args_list,
+ [mock.call(foo="bar"), mock.call(foo="bar")], # was retried once
+ )
+
+ def test__call_api_span_creator_not_called(self):
+ from concurrent.futures import TimeoutError
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+
+ api_request_patcher = mock.patch.object(
+ client._connection,
+ "api_request",
+ side_effect=[TimeoutError, "result"],
+ )
+ retry = DEFAULT_RETRY.with_deadline(1).with_predicate(
+ lambda exc: isinstance(exc, TimeoutError)
+ )
+
+ with api_request_patcher:
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client._call_api(retry)
+
+ final_attributes.assert_not_called()
+
+ def test__call_api_span_creator_called(self):
+ from concurrent.futures import TimeoutError
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+
+ api_request_patcher = mock.patch.object(
+ client._connection,
+ "api_request",
+ side_effect=[TimeoutError, "result"],
+ )
+ retry = DEFAULT_RETRY.with_deadline(1).with_predicate(
+ lambda exc: isinstance(exc, TimeoutError)
+ )
+
+ with api_request_patcher:
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client._call_api(
+ retry,
+ span_name="test_name",
+ span_attributes={"test_attribute": "test_attribute-value"},
+ )
+
+ final_attributes.assert_called_once()
+
+ def test__get_query_results_miss_w_explicit_project_and_timeout(self):
+ from google.cloud.exceptions import NotFound
+
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection()
+ path = "/projects/other-project/queries/nothere"
+ with self.assertRaises(NotFound):
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client._get_query_results(
+ "nothere",
+ None,
+ project="other-project",
+ location=self.LOCATION,
+ timeout_ms=500,
+ timeout=420,
+ )
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=path,
+ query_params={"maxResults": 0, "timeoutMs": 500, "location": self.LOCATION},
+ timeout=420,
+ )
+
+ def test__get_query_results_miss_w_short_timeout(self):
+ import google.cloud.bigquery.client
+ from google.cloud.exceptions import NotFound
+
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection()
+ path = "/projects/other-project/queries/nothere"
+ with self.assertRaises(NotFound):
+ client._get_query_results(
+ "nothere",
+ None,
+ project="other-project",
+ location=self.LOCATION,
+ timeout_ms=500,
+ timeout=1,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=path,
+ query_params={"maxResults": 0, "timeoutMs": 500, "location": self.LOCATION},
+ timeout=google.cloud.bigquery.client._MIN_GET_QUERY_RESULTS_TIMEOUT,
+ )
+
+ def test__get_query_results_miss_w_default_timeout(self):
+ import google.cloud.bigquery.client
+ from google.cloud.exceptions import NotFound
+
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection()
+ path = "/projects/other-project/queries/nothere"
+ with self.assertRaises(NotFound):
+ client._get_query_results(
+ "nothere",
+ None,
+ project="other-project",
+ location=self.LOCATION,
+ timeout_ms=500,
+ timeout=object(), # the api core default timeout
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=path,
+ query_params={"maxResults": 0, "timeoutMs": 500, "location": self.LOCATION},
+ timeout=google.cloud.bigquery.client._MIN_GET_QUERY_RESULTS_TIMEOUT,
+ )
+
+ def test__get_query_results_miss_w_client_location(self):
+ from google.cloud.exceptions import NotFound
+
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds, location=self.LOCATION)
+ conn = client._connection = make_connection()
+
+ with self.assertRaises(NotFound):
+ client._get_query_results("nothere", None)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/PROJECT/queries/nothere",
+ query_params={"maxResults": 0, "location": self.LOCATION},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test__get_query_results_hit(self):
+ job_id = "query_job"
+ data = {
+ "kind": "bigquery#getQueryResultsResponse",
+ "etag": "some-tag",
+ "schema": {
+ "fields": [
+ {"name": "title", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "unique_words", "type": "INTEGER", "mode": "NULLABLE"},
+ ]
+ },
+ "jobReference": {"projectId": self.PROJECT, "jobId": job_id},
+ "totalRows": "10",
+ "totalBytesProcessed": "2464625",
+ "jobComplete": True,
+ "cacheHit": False,
+ }
+
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ client._connection = make_connection(data)
+ query_results = client._get_query_results(job_id, None)
+
+ self.assertEqual(query_results.total_rows, 10)
+ self.assertTrue(query_results.complete)
+
+ def test__list_rows_from_query_results_w_none_timeout(self):
+ from google.cloud.exceptions import NotFound
+ from google.cloud.bigquery.schema import SchemaField
+
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection()
+ path = "/projects/project/queries/nothere"
+ iterator = client._list_rows_from_query_results(
+ "nothere",
+ location=None,
+ project="project",
+ schema=[
+ SchemaField("f1", "STRING", mode="REQUIRED"),
+ SchemaField("f2", "INTEGER", mode="REQUIRED"),
+ ],
+ timeout=None,
+ )
+
+ # trigger the iterator to request data
+ with self.assertRaises(NotFound):
+ iterator._get_next_page_response()
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=path,
+ query_params={
+ "fields": "jobReference,totalRows,pageToken,rows",
+ "location": None,
+ "formatOptions.useInt64Timestamp": True,
+ },
+ timeout=None,
+ )
+
+ def test__list_rows_from_query_results_w_default_timeout(self):
+ import google.cloud.bigquery.client
+ from google.cloud.exceptions import NotFound
+ from google.cloud.bigquery.schema import SchemaField
+
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection()
+ path = "/projects/project/queries/nothere"
+ iterator = client._list_rows_from_query_results(
+ "nothere",
+ location=None,
+ project="project",
+ schema=[
+ SchemaField("f1", "STRING", mode="REQUIRED"),
+ SchemaField("f2", "INTEGER", mode="REQUIRED"),
+ ],
+ timeout=object(),
+ )
+
+ # trigger the iterator to request data
+ with self.assertRaises(NotFound):
+ iterator._get_next_page_response()
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=path,
+ query_params={
+ "fields": "jobReference,totalRows,pageToken,rows",
+ "location": None,
+ "formatOptions.useInt64Timestamp": True,
+ },
+ timeout=google.cloud.bigquery.client._MIN_GET_QUERY_RESULTS_TIMEOUT,
+ )
+
+ def test_default_query_job_config(self):
+ from google.cloud.bigquery import QueryJobConfig
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ self.assertIsNone(client.default_query_job_config)
+
+ job_config = QueryJobConfig()
+ job_config.dry_run = True
+ client.default_query_job_config = job_config
+ self.assertIsInstance(client.default_query_job_config, QueryJobConfig)
+
+ def test_default_load_job_config(self):
+ from google.cloud.bigquery import LoadJobConfig
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ self.assertIsNone(client.default_load_job_config)
+
+ job_config = LoadJobConfig()
+ job_config.create_session = True
+ client.default_load_job_config = job_config
+ self.assertIsInstance(client.default_load_job_config, LoadJobConfig)
+
+ def test_get_service_account_email(self):
+ path = "/projects/%s/serviceAccount" % (self.PROJECT,)
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ email = "bq-123@bigquery-encryption.iam.gserviceaccount.com"
+ resource = {"kind": "bigquery#getServiceAccountResponse", "email": email}
+ conn = client._connection = make_connection(resource)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ service_account_email = client.get_service_account_email(timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+ conn.api_request.assert_called_once_with(method="GET", path=path, timeout=7.5)
+ self.assertEqual(service_account_email, email)
+
+ def test_get_service_account_email_w_alternate_project(self):
+ project = "my-alternate-project"
+ path = "/projects/%s/serviceAccount" % (project,)
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ email = "bq-123@bigquery-encryption.iam.gserviceaccount.com"
+ resource = {"kind": "bigquery#getServiceAccountResponse", "email": email}
+ conn = client._connection = make_connection(resource)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ service_account_email = client.get_service_account_email(project=project)
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+ conn.api_request.assert_called_once_with(
+ method="GET", path=path, timeout=DEFAULT_TIMEOUT
+ )
+ self.assertEqual(service_account_email, email)
+
+ def test_get_service_account_email_w_custom_retry(self):
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ api_path = "/projects/{}/serviceAccount".format(self.PROJECT)
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ resource = {
+ "kind": "bigquery#getServiceAccountResponse",
+ "email": "bq-123@bigquery-encryption.iam.gserviceaccount.com",
+ }
+ api_request_patcher = mock.patch.object(
+ client._connection,
+ "api_request",
+ side_effect=[ValueError, resource],
+ )
+
+ retry = DEFAULT_RETRY.with_deadline(1).with_predicate(
+ lambda exc: isinstance(exc, ValueError)
+ )
+
+ with api_request_patcher as fake_api_request:
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ service_account_email = client.get_service_account_email(
+ retry=retry, timeout=7.5
+ )
+
+ final_attributes.assert_called_once_with({"path": api_path}, client, None)
+ self.assertEqual(
+ service_account_email, "bq-123@bigquery-encryption.iam.gserviceaccount.com"
+ )
+ self.assertEqual(
+ fake_api_request.call_args_list,
+ [
+ mock.call(method="GET", path=api_path, timeout=7.5),
+ mock.call(method="GET", path=api_path, timeout=7.5), # was retried once
+ ],
+ )
+
+ def test_dataset_with_specified_project(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ catch_warnings = warnings.catch_warnings(record=True)
+
+ with catch_warnings as warned:
+ dataset = client.dataset(self.DS_ID, self.PROJECT)
+
+ matches = [
+ warning
+ for warning in warned
+ if warning.category in (DeprecationWarning, PendingDeprecationWarning)
+ and "Client.dataset" in str(warning)
+ and "my_project.my_dataset" in str(warning)
+ ]
+ assert matches, "A Client.dataset deprecation warning was not raised."
+ self.assertIsInstance(dataset, DatasetReference)
+ self.assertEqual(dataset.dataset_id, self.DS_ID)
+ self.assertEqual(dataset.project, self.PROJECT)
+
+ def test_dataset_with_default_project(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ catch_warnings = warnings.catch_warnings(record=True)
+
+ with catch_warnings as warned:
+ dataset = client.dataset(self.DS_ID)
+
+ matches = [
+ warning
+ for warning in warned
+ if warning.category in (DeprecationWarning, PendingDeprecationWarning)
+ and "Client.dataset" in str(warning)
+ and "my_project.my_dataset" in str(warning)
+ ]
+ assert matches, "A Client.dataset deprecation warning was not raised."
+ self.assertIsInstance(dataset, DatasetReference)
+ self.assertEqual(dataset.dataset_id, self.DS_ID)
+ self.assertEqual(dataset.project, self.PROJECT)
+
+ def test_get_dataset(self):
+ from google.cloud.exceptions import ServerError
+
+ path = "projects/%s/datasets/%s" % (self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ resource = {
+ "id": "%s:%s" % (self.PROJECT, self.DS_ID),
+ "datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
+ }
+ conn = client._connection = make_connection(resource)
+ dataset_ref = DatasetReference(self.PROJECT, self.DS_ID)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ dataset = client.get_dataset(dataset_ref, timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path="/%s" % path, timeout=7.5
+ )
+ self.assertEqual(dataset.dataset_id, self.DS_ID)
+
+ # Test retry.
+
+ # Not a cloud API exception (missing 'errors' field).
+ client._connection = make_connection(Exception(""), resource)
+ with self.assertRaises(Exception):
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.get_dataset(dataset_ref)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ # Zero-length errors field.
+ client._connection = make_connection(ServerError(""), resource)
+ with self.assertRaises(ServerError):
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.get_dataset(dataset_ref)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ # Non-retryable reason.
+ client._connection = make_connection(
+ ServerError("", errors=[{"reason": "serious"}]), resource
+ )
+ with self.assertRaises(ServerError):
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.get_dataset(dataset_ref)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ # Retryable reason, but retry is disabled.
+ client._connection = make_connection(
+ ServerError("", errors=[{"reason": "backendError"}]), resource
+ )
+ with self.assertRaises(ServerError):
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.get_dataset(dataset_ref, retry=None)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ # Retryable reason, default retry: success.
+ client._connection = make_connection(
+ ServerError("", errors=[{"reason": "backendError"}]), resource
+ )
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ dataset = client.get_dataset(
+ # Test with a string for dataset ID.
+ dataset_ref.dataset_id
+ )
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ self.assertEqual(dataset.dataset_id, self.DS_ID)
+
+ def test_ensure_bqstorage_client_creating_new_instance(self):
+ bigquery_storage = pytest.importorskip("google.cloud.bigquery_storage")
+
+ mock_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ mock_client_instance = object()
+ mock_client.return_value = mock_client_instance
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+
+ with mock.patch(
+ "google.cloud.bigquery_storage.BigQueryReadClient", mock_client
+ ):
+ bqstorage_client = client._ensure_bqstorage_client(
+ client_options=mock.sentinel.client_options,
+ client_info=mock.sentinel.client_info,
+ )
+
+ self.assertIs(bqstorage_client, mock_client_instance)
+ mock_client.assert_called_once_with(
+ credentials=creds,
+ client_options=mock.sentinel.client_options,
+ client_info=mock.sentinel.client_info,
+ )
+
+ def test_ensure_bqstorage_client_missing_dependency(self):
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+
+ def fail_bqstorage_import(name, globals, locals, fromlist, level):
+ # NOTE: *very* simplified, assuming a straightforward absolute import
+ return "bigquery_storage" in name or (
+ fromlist is not None and "bigquery_storage" in fromlist
+ )
+
+ no_bqstorage = maybe_fail_import(predicate=fail_bqstorage_import)
+
+ with no_bqstorage, warnings.catch_warnings(record=True) as warned:
+ bqstorage_client = client._ensure_bqstorage_client()
+
+ self.assertIsNone(bqstorage_client)
+ matching_warnings = [
+ warning
+ for warning in warned
+ if "not installed" in str(warning)
+ and "google-cloud-bigquery-storage" in str(warning)
+ ]
+ assert matching_warnings, "Missing dependency warning not raised."
+
+ def test_ensure_bqstorage_client_obsolete_dependency(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+
+ patcher = mock.patch(
+ "google.cloud.bigquery.client._versions_helpers.BQ_STORAGE_VERSIONS.try_import",
+ side_effect=exceptions.LegacyBigQueryStorageError("BQ Storage too old"),
+ )
+ with patcher, warnings.catch_warnings(record=True) as warned:
+ bqstorage_client = client._ensure_bqstorage_client()
+
+ self.assertIsNone(bqstorage_client)
+ matching_warnings = [
+ warning for warning in warned if "BQ Storage too old" in str(warning)
+ ]
+ assert matching_warnings, "Obsolete dependency warning not raised."
+
+ def test_ensure_bqstorage_client_existing_client_check_passes(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ mock_storage_client = mock.sentinel.mock_storage_client
+
+ bqstorage_client = client._ensure_bqstorage_client(
+ bqstorage_client=mock_storage_client
+ )
+
+ self.assertIs(bqstorage_client, mock_storage_client)
+
+ def test_ensure_bqstorage_client_is_none(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ bqstorage_client = None
+
+ assert bqstorage_client is None
+ bqstorage_client = client._ensure_bqstorage_client(
+ bqstorage_client=bqstorage_client,
+ )
+
+ assert isinstance(
+ bqstorage_client, google.cloud.bigquery_storage_v1.BigQueryReadClient
+ )
+
+ def test_ensure_bqstorage_client_existing_client_check_fails(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ mock_storage_client = mock.sentinel.mock_storage_client
+
+ patcher = mock.patch(
+ "google.cloud.bigquery.client._versions_helpers.BQ_STORAGE_VERSIONS.try_import",
+ side_effect=exceptions.LegacyBigQueryStorageError("BQ Storage too old"),
+ )
+ with patcher, warnings.catch_warnings(record=True) as warned:
+ bqstorage_client = client._ensure_bqstorage_client(mock_storage_client)
+
+ self.assertIsNone(bqstorage_client)
+ matching_warnings = [
+ warning for warning in warned if "BQ Storage too old" in str(warning)
+ ]
+ assert matching_warnings, "Obsolete dependency warning not raised."
+
+ def test_create_routine_w_minimal_resource(self):
+ from google.cloud.bigquery.routine import Routine
+ from google.cloud.bigquery.routine import RoutineReference
+
+ creds = _make_credentials()
+ path = "/projects/test-routine-project/datasets/test_routines/routines"
+ resource = {
+ "routineReference": {
+ "projectId": "test-routine-project",
+ "datasetId": "test_routines",
+ "routineId": "minimal_routine",
+ }
+ }
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource)
+ full_routine_id = "test-routine-project.test_routines.minimal_routine"
+ routine = Routine(full_routine_id)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ actual_routine = client.create_routine(routine, timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data=resource,
+ timeout=7.5,
+ )
+ self.assertEqual(
+ actual_routine.reference, RoutineReference.from_string(full_routine_id)
+ )
+
+ def test_create_routine_w_conflict(self):
+ from google.cloud.bigquery.routine import Routine
+
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.AlreadyExists("routine already exists")
+ )
+ path = "/projects/test-routine-project/datasets/test_routines/routines"
+ full_routine_id = "test-routine-project.test_routines.minimal_routine"
+ routine = Routine(full_routine_id)
+
+ with pytest.raises(google.api_core.exceptions.AlreadyExists):
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.create_routine(routine)
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ resource = {
+ "routineReference": {
+ "projectId": "test-routine-project",
+ "datasetId": "test_routines",
+ "routineId": "minimal_routine",
+ }
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_span_status_is_set(self):
+ pytest.importorskip("opentelemetry")
+ from google.cloud.bigquery.routine import Routine
+
+ tracer_provider = TracerProvider()
+ memory_exporter = InMemorySpanExporter()
+ span_processor = SimpleSpanProcessor(memory_exporter)
+ tracer_provider.add_span_processor(span_processor)
+
+ # OpenTelemetry API >= 0.12b0 does not allow overriding the tracer once
+ # initialized, thus directly override the internal global var.
+ tracer_patcher = mock.patch.object(trace, "_TRACER_PROVIDER", tracer_provider)
+
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.AlreadyExists("routine already exists")
+ )
+ path = "/projects/test-routine-project/datasets/test_routines/routines"
+ full_routine_id = "test-routine-project.test_routines.minimal_routine"
+ routine = Routine(full_routine_id)
+
+ with pytest.raises(google.api_core.exceptions.AlreadyExists), tracer_patcher:
+ client.create_routine(routine)
+
+ span_list = memory_exporter.get_finished_spans()
+ self.assertTrue(span_list[0].status is not None)
+
+ resource = {
+ "routineReference": {
+ "projectId": "test-routine-project",
+ "datasetId": "test_routines",
+ "routineId": "minimal_routine",
+ }
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_create_routine_w_conflict_exists_ok(self):
+ from google.cloud.bigquery.routine import Routine
+
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = {
+ "routineReference": {
+ "projectId": "test-routine-project",
+ "datasetId": "test_routines",
+ "routineId": "minimal_routine",
+ }
+ }
+ path = "/projects/test-routine-project/datasets/test_routines/routines"
+
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.AlreadyExists("routine already exists"), resource
+ )
+ full_routine_id = "test-routine-project.test_routines.minimal_routine"
+ routine = Routine(full_routine_id)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ actual_routine = client.create_routine(routine, exists_ok=True)
+
+ final_attributes.assert_called_with(
+ {"path": "%s/minimal_routine" % path}, client, None
+ )
+
+ self.assertEqual(actual_routine.project, "test-routine-project")
+ self.assertEqual(actual_routine.dataset_id, "test_routines")
+ self.assertEqual(actual_routine.routine_id, "minimal_routine")
+ conn.api_request.assert_has_calls(
+ [
+ mock.call(
+ method="POST",
+ path=path,
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ ),
+ mock.call(
+ method="GET",
+ path="/projects/test-routine-project/datasets/test_routines/routines/minimal_routine",
+ timeout=DEFAULT_TIMEOUT,
+ ),
+ ]
+ )
+
+ def test_create_table_w_day_partition(self):
+ from google.cloud.bigquery.table import Table
+ from google.cloud.bigquery.table import TimePartitioning
+
+ path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = self._make_table_resource()
+ conn = client._connection = make_connection(resource)
+ table = Table(self.TABLE_REF)
+ table.time_partitioning = TimePartitioning()
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table(table, timeout=7.5)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "dataset_id": table.dataset_id}, client, None
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "timePartitioning": {"type": "DAY"},
+ "labels": {},
+ },
+ timeout=7.5,
+ )
+ self.assertEqual(table.time_partitioning.type_, "DAY")
+ self.assertEqual(got.table_id, self.TABLE_ID)
+
+ def test_create_table_w_custom_property(self):
+ # The library should handle sending properties to the API that are not
+ # yet part of the library
+ from google.cloud.bigquery.table import Table
+
+ path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = self._make_table_resource()
+ resource["newAlphaProperty"] = "unreleased property"
+ conn = client._connection = make_connection(resource)
+ table = Table(self.TABLE_REF)
+ table._properties["newAlphaProperty"] = "unreleased property"
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table(table)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "dataset_id": table.dataset_id}, client, None
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "newAlphaProperty": "unreleased property",
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(got._properties["newAlphaProperty"], "unreleased property")
+ self.assertEqual(got.table_id, self.TABLE_ID)
+
+ def test_create_table_w_encryption_configuration(self):
+ from google.cloud.bigquery.encryption_configuration import (
+ EncryptionConfiguration,
+ )
+ from google.cloud.bigquery.table import Table
+
+ path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = self._make_table_resource()
+ conn = client._connection = make_connection(resource)
+ table = Table(self.TABLE_REF)
+ table.encryption_configuration = EncryptionConfiguration(
+ kms_key_name=self.KMS_KEY_NAME
+ )
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table(table)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "dataset_id": table.dataset_id}, client, None
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "labels": {},
+ "encryptionConfiguration": {"kmsKeyName": self.KMS_KEY_NAME},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(got.table_id, self.TABLE_ID)
+
+ def test_create_table_w_day_partition_and_expire(self):
+ from google.cloud.bigquery.table import Table
+ from google.cloud.bigquery.table import TimePartitioning
+
+ path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = self._make_table_resource()
+ conn = client._connection = make_connection(resource)
+ table = Table(self.TABLE_REF)
+ table.time_partitioning = TimePartitioning(expiration_ms=100)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table(table)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "dataset_id": table.dataset_id}, client, None
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "timePartitioning": {"type": "DAY", "expirationMs": "100"},
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(table.time_partitioning.type_, "DAY")
+ self.assertEqual(table.time_partitioning.expiration_ms, 100)
+ self.assertEqual(got.table_id, self.TABLE_ID)
+
+ def test_create_table_w_schema_and_query(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
+ query = "SELECT * from %s:%s" % (self.DS_ID, self.TABLE_ID)
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = self._make_table_resource()
+ resource.update(
+ {
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INTEGER", "mode": "REQUIRED"},
+ ]
+ },
+ "view": {"query": query},
+ }
+ )
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ conn = client._connection = make_connection(resource)
+ table = Table(self.TABLE_REF, schema=schema)
+ table.view_query = query
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table(table)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "dataset_id": table.dataset_id}, client, None
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INTEGER", "mode": "REQUIRED"},
+ ]
+ },
+ "view": {"query": query, "useLegacySql": False},
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(got.table_id, self.TABLE_ID)
+ self.assertEqual(got.project, self.PROJECT)
+ self.assertEqual(got.dataset_id, self.DS_ID)
+ self.assertEqual(got.schema, schema)
+ self.assertEqual(got.view_query, query)
+
+ def test_create_table_w_external(self):
+ from google.cloud.bigquery.external_config import ExternalConfig
+ from google.cloud.bigquery.job import SourceFormat
+ from google.cloud.bigquery.table import Table
+
+ path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = self._make_table_resource()
+ resource.update(
+ {
+ "externalDataConfiguration": {
+ "sourceFormat": SourceFormat.CSV,
+ "autodetect": True,
+ }
+ }
+ )
+ conn = client._connection = make_connection(resource)
+ table = Table(self.TABLE_REF)
+ ec = ExternalConfig("CSV")
+ ec.autodetect = True
+ table.external_data_configuration = ec
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table(table)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "dataset_id": table.dataset_id}, client, None
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "externalDataConfiguration": {
+ "sourceFormat": SourceFormat.CSV,
+ "autodetect": True,
+ },
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(got.table_id, self.TABLE_ID)
+ self.assertEqual(got.project, self.PROJECT)
+ self.assertEqual(got.dataset_id, self.DS_ID)
+ self.assertEqual(
+ got.external_data_configuration.source_format, SourceFormat.CSV
+ )
+ self.assertEqual(got.external_data_configuration.autodetect, True)
+
+ def test_create_table_w_reference(self):
+ path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = self._make_table_resource()
+ conn = client._connection = make_connection(resource)
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table(self.TABLE_REF)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "dataset_id": self.TABLE_REF.dataset_id},
+ client,
+ None,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(got.table_id, self.TABLE_ID)
+
+ def test_create_table_w_fully_qualified_string(self):
+ path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = self._make_table_resource()
+ conn = client._connection = make_connection(resource)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table(
+ "{}.{}.{}".format(self.PROJECT, self.DS_ID, self.TABLE_ID)
+ )
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "dataset_id": self.TABLE_REF.dataset_id},
+ client,
+ None,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(got.table_id, self.TABLE_ID)
+
+ def test_create_table_w_string(self):
+ path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ resource = self._make_table_resource()
+ conn = client._connection = make_connection(resource)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table("{}.{}".format(self.DS_ID, self.TABLE_ID))
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "dataset_id": self.TABLE_REF.dataset_id},
+ client,
+ None,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(got.table_id, self.TABLE_ID)
+
+ def test_create_table_alreadyexists_w_exists_ok_false(self):
+ post_path = "/projects/{}/datasets/{}/tables".format(self.PROJECT, self.DS_ID)
+ creds = _make_credentials()
+ client = self._make_one(
+ project=self.PROJECT, credentials=creds, location=self.LOCATION
+ )
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.AlreadyExists("table already exists")
+ )
+
+ with pytest.raises(google.api_core.exceptions.AlreadyExists):
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.create_table("{}.{}".format(self.DS_ID, self.TABLE_ID))
+
+ final_attributes.assert_called_with(
+ {"path": post_path, "dataset_id": self.TABLE_REF.dataset_id},
+ client,
+ None,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=post_path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_create_table_alreadyexists_w_exists_ok_true(self):
+ post_path = "/projects/{}/datasets/{}/tables".format(self.PROJECT, self.DS_ID)
+ get_path = "/projects/{}/datasets/{}/tables/{}".format(
+ self.PROJECT, self.DS_ID, self.TABLE_ID
+ )
+ resource = self._make_table_resource()
+ creds = _make_credentials()
+ client = self._make_one(
+ project=self.PROJECT, credentials=creds, location=self.LOCATION
+ )
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.AlreadyExists("table already exists"), resource
+ )
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.create_table(
+ "{}.{}".format(self.DS_ID, self.TABLE_ID), exists_ok=True
+ )
+
+ final_attributes.assert_called_with({"path": get_path}, client, None)
+
+ self.assertEqual(got.project, self.PROJECT)
+ self.assertEqual(got.dataset_id, self.DS_ID)
+ self.assertEqual(got.table_id, self.TABLE_ID)
+
+ conn.api_request.assert_has_calls(
+ [
+ mock.call(
+ method="POST",
+ path=post_path,
+ data={
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ ),
+ mock.call(method="GET", path=get_path, timeout=DEFAULT_TIMEOUT),
+ ]
+ )
+
+ def test_close(self):
+ creds = _make_credentials()
+ http = mock.Mock()
+ http._auth_request.session = mock.Mock()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ client.close()
+
+ http.close.assert_called_once()
+ http._auth_request.session.close.assert_called_once()
+
+ def test_get_model(self):
+ path = "projects/%s/datasets/%s/models/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.MODEL_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ resource = {
+ "modelReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "modelId": self.MODEL_ID,
+ }
+ }
+ conn = client._connection = make_connection(resource)
+
+ model_ref = DatasetReference(self.PROJECT, self.DS_ID).model(self.MODEL_ID)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.get_model(model_ref, timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path="/%s" % path, timeout=7.5
+ )
+ self.assertEqual(got.model_id, self.MODEL_ID)
+
+ def test_get_model_w_string(self):
+ path = "projects/%s/datasets/%s/models/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.MODEL_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ resource = {
+ "modelReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "modelId": self.MODEL_ID,
+ }
+ }
+ conn = client._connection = make_connection(resource)
+
+ model_id = "{}.{}.{}".format(self.PROJECT, self.DS_ID, self.MODEL_ID)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ got = client.get_model(model_id)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path="/%s" % path, timeout=DEFAULT_TIMEOUT
+ )
+ self.assertEqual(got.model_id, self.MODEL_ID)
+
+ def test_get_routine(self):
+ from google.cloud.bigquery.routine import Routine
+ from google.cloud.bigquery.routine import RoutineReference
+
+ full_routine_id = "test-routine-project.test_routines.minimal_routine"
+ routines = [
+ full_routine_id,
+ Routine(full_routine_id),
+ RoutineReference.from_string(full_routine_id),
+ ]
+ for routine in routines:
+ creds = _make_credentials()
+ resource = {
+ "etag": "im-an-etag",
+ "routineReference": {
+ "projectId": "test-routine-project",
+ "datasetId": "test_routines",
+ "routineId": "minimal_routine",
+ },
+ "routineType": "SCALAR_FUNCTION",
+ }
+ path = "/projects/test-routine-project/datasets/test_routines/routines/minimal_routine"
+
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource)
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ actual_routine = client.get_routine(routine, timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=path,
+ timeout=7.5,
+ )
+ self.assertEqual(
+ actual_routine.reference,
+ RoutineReference.from_string(full_routine_id),
+ msg="routine={}".format(repr(routine)),
+ )
+ self.assertEqual(
+ actual_routine.etag,
+ "im-an-etag",
+ msg="routine={}".format(repr(routine)),
+ )
+ self.assertEqual(
+ actual_routine.type_,
+ "SCALAR_FUNCTION",
+ msg="routine={}".format(repr(routine)),
+ )
+
+ def test_get_table(self):
+ path = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ resource = self._make_table_resource()
+ conn = client._connection = make_connection(resource)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ table = client.get_table(self.TABLE_REF, timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path="/%s" % path, timeout=7.5
+ )
+ self.assertEqual(table.table_id, self.TABLE_ID)
+
+ def test_get_table_sets_user_agent(self):
+ creds = _make_credentials()
+ http = mock.create_autospec(requests.Session)
+ mock_response = http.request(
+ url=mock.ANY, method=mock.ANY, headers=mock.ANY, data=mock.ANY
+ )
+ http.reset_mock()
+ http.is_mtls = False
+ mock_response.status_code = 200
+ mock_response.json.return_value = self._make_table_resource()
+ user_agent_override = client_info.ClientInfo(user_agent="my-application/1.2.3")
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ client_info=user_agent_override,
+ _http=http,
+ )
+
+ client.get_table(self.TABLE_REF)
+
+ expected_user_agent = user_agent_override.to_user_agent()
+ http.request.assert_called_once_with(
+ url=mock.ANY,
+ method="GET",
+ headers={
+ "X-Goog-API-Client": expected_user_agent,
+ "Accept-Encoding": "gzip",
+ "User-Agent": expected_user_agent,
+ },
+ data=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertIn("my-application/1.2.3", expected_user_agent)
+
+ def test_get_iam_policy(self):
+ from google.cloud.bigquery.iam import BIGQUERY_DATA_OWNER_ROLE
+ from google.cloud.bigquery.iam import BIGQUERY_DATA_EDITOR_ROLE
+ from google.cloud.bigquery.iam import BIGQUERY_DATA_VIEWER_ROLE
+ from google.api_core.iam import Policy
+
+ PATH = "/projects/{}/datasets/{}/tables/{}:getIamPolicy".format(
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ BODY = {"options": {"requestedPolicyVersion": 1}}
+ ETAG = "CARDI"
+ VERSION = 1
+ OWNER1 = "user:phred@example.com"
+ OWNER2 = "group:cloud-logs@google.com"
+ EDITOR1 = "domain:google.com"
+ EDITOR2 = "user:phred@example.com"
+ VIEWER1 = "serviceAccount:1234-abcdef@service.example.com"
+ VIEWER2 = "user:phred@example.com"
+ RETURNED = {
+ "resourceId": PATH,
+ "etag": ETAG,
+ "version": VERSION,
+ "bindings": [
+ {"role": BIGQUERY_DATA_OWNER_ROLE, "members": [OWNER1, OWNER2]},
+ {"role": BIGQUERY_DATA_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]},
+ {"role": BIGQUERY_DATA_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]},
+ ],
+ }
+ EXPECTED = {
+ binding["role"]: set(binding["members"]) for binding in RETURNED["bindings"]
+ }
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RETURNED)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ policy = client.get_iam_policy(self.TABLE_REF, timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": PATH}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="POST", path=PATH, data=BODY, timeout=7.5
+ )
+
+ self.assertIsInstance(policy, Policy)
+ self.assertEqual(policy.etag, RETURNED["etag"])
+ self.assertEqual(policy.version, RETURNED["version"])
+ self.assertEqual(dict(policy), EXPECTED)
+
+ def test_get_iam_policy_w_invalid_table(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ table_resource_string = "projects/{}/datasets/{}/tables/{}".format(
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+
+ with self.assertRaises(ValueError):
+ client.get_iam_policy(table_resource_string)
+
+ def test_get_iam_policy_w_invalid_version(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ with self.assertRaises(ValueError):
+ client.get_iam_policy(self.TABLE_REF, requested_policy_version=2)
+
+ def test_set_iam_policy(self):
+ from google.cloud.bigquery.iam import BIGQUERY_DATA_OWNER_ROLE
+ from google.cloud.bigquery.iam import BIGQUERY_DATA_EDITOR_ROLE
+ from google.cloud.bigquery.iam import BIGQUERY_DATA_VIEWER_ROLE
+ from google.api_core.iam import Policy
+
+ PATH = "/projects/%s/datasets/%s/tables/%s:setIamPolicy" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ ETAG = "foo"
+ VERSION = 1
+ OWNER1 = "user:phred@example.com"
+ OWNER2 = "group:cloud-logs@google.com"
+ EDITOR1 = "domain:google.com"
+ EDITOR2 = "user:phred@example.com"
+ VIEWER1 = "serviceAccount:1234-abcdef@service.example.com"
+ VIEWER2 = "user:phred@example.com"
+ BINDINGS = [
+ {"role": BIGQUERY_DATA_OWNER_ROLE, "members": [OWNER1, OWNER2]},
+ {"role": BIGQUERY_DATA_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]},
+ {"role": BIGQUERY_DATA_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]},
+ ]
+ FIELDS = ("bindings", "etag")
+ RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS}
+
+ policy = Policy()
+ for binding in BINDINGS:
+ policy[binding["role"]] = binding["members"]
+
+ BODY = {"policy": policy.to_api_repr(), "updateMask": "bindings,etag"}
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RETURNED)
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ returned_policy = client.set_iam_policy(
+ self.TABLE_REF, policy, fields=FIELDS, timeout=7.5
+ )
+
+ final_attributes.assert_called_once_with({"path": PATH}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="POST", path=PATH, data=BODY, timeout=7.5
+ )
+ self.assertEqual(returned_policy.etag, ETAG)
+ self.assertEqual(returned_policy.version, VERSION)
+ self.assertEqual(dict(returned_policy), dict(policy))
+
+ def test_set_iam_policy_updateMask(self):
+ from google.cloud.bigquery.iam import BIGQUERY_DATA_OWNER_ROLE
+ from google.cloud.bigquery.iam import BIGQUERY_DATA_EDITOR_ROLE
+ from google.cloud.bigquery.iam import BIGQUERY_DATA_VIEWER_ROLE
+ from google.api_core.iam import Policy
+
+ PATH = "/projects/%s/datasets/%s/tables/%s:setIamPolicy" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ ETAG = "foo"
+ VERSION = 1
+ OWNER1 = "user:phred@example.com"
+ OWNER2 = "group:cloud-logs@google.com"
+ EDITOR1 = "domain:google.com"
+ EDITOR2 = "user:phred@example.com"
+ VIEWER1 = "serviceAccount:1234-abcdef@service.example.com"
+ VIEWER2 = "user:phred@example.com"
+ BINDINGS = [
+ {"role": BIGQUERY_DATA_OWNER_ROLE, "members": [OWNER1, OWNER2]},
+ {"role": BIGQUERY_DATA_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]},
+ {"role": BIGQUERY_DATA_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]},
+ ]
+ MASK = "bindings,etag"
+ RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS}
+
+ policy = Policy()
+ for binding in BINDINGS:
+ policy[binding["role"]] = binding["members"]
+
+ BODY = {"policy": policy.to_api_repr(), "updateMask": MASK}
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RETURNED)
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ returned_policy = client.set_iam_policy(
+ self.TABLE_REF, policy, updateMask=MASK, timeout=7.5
+ )
+
+ final_attributes.assert_called_once_with({"path": PATH}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="POST", path=PATH, data=BODY, timeout=7.5
+ )
+ self.assertEqual(returned_policy.etag, ETAG)
+ self.assertEqual(returned_policy.version, VERSION)
+ self.assertEqual(dict(returned_policy), dict(policy))
+
+ def test_set_iam_policy_no_mask(self):
+ from google.api_core.iam import Policy
+
+ PATH = "/projects/%s/datasets/%s/tables/%s:setIamPolicy" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ RETURNED = {"etag": "foo", "version": 1, "bindings": []}
+
+ policy = Policy()
+ BODY = {"policy": policy.to_api_repr()}
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RETURNED)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.set_iam_policy(self.TABLE_REF, policy, timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": PATH}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="POST", path=PATH, data=BODY, timeout=7.5
+ )
+
+ def test_set_ia_policy_updateMask_and_fields(self):
+ from google.api_core.iam import Policy
+
+ policy = Policy()
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ with pytest.raises(ValueError, match="updateMask"):
+ client.set_iam_policy(
+ self.TABLE_REF, policy, updateMask="bindings", fields=("bindings",)
+ )
+
+ def test_set_iam_policy_invalid_policy(self):
+ from google.api_core.iam import Policy
+
+ policy = Policy()
+ invalid_policy_repr = policy.to_api_repr()
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ with self.assertRaises(TypeError):
+ client.set_iam_policy(self.TABLE_REF, invalid_policy_repr)
+
+ def test_set_iam_policy_w_invalid_table(self):
+ from google.api_core.iam import Policy
+
+ policy = Policy()
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ table_resource_string = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+
+ with self.assertRaises(ValueError):
+ client.set_iam_policy(table_resource_string, policy)
+
+ def test_test_iam_permissions(self):
+ PATH = "/projects/%s/datasets/%s/tables/%s:testIamPermissions" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+
+ PERMISSIONS = ["bigquery.tables.get", "bigquery.tables.update"]
+ BODY = {"permissions": PERMISSIONS}
+ RETURNED = {"permissions": PERMISSIONS}
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RETURNED)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.test_iam_permissions(self.TABLE_REF, PERMISSIONS, timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": PATH}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="POST", path=PATH, data=BODY, timeout=7.5
+ )
+
+ def test_test_iam_permissions_w_invalid_table(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ table_resource_string = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+
+ PERMISSIONS = ["bigquery.tables.get", "bigquery.tables.update"]
+
+ with self.assertRaises(ValueError):
+ client.test_iam_permissions(table_resource_string, PERMISSIONS)
+
+ def test_update_dataset_w_invalid_field(self):
+ from google.cloud.bigquery.dataset import Dataset
+
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ with self.assertRaises(ValueError):
+ client.update_dataset(
+ Dataset("{}.{}".format(self.PROJECT, self.DS_ID)), ["foo"]
+ )
+
+ def test_update_dataset(self):
+ from google.cloud.bigquery.dataset import Dataset, AccessEntry
+
+ PATH = "projects/%s/datasets/%s" % (self.PROJECT, self.DS_ID)
+ DESCRIPTION = "DESCRIPTION"
+ FRIENDLY_NAME = "TITLE"
+ LOCATION = "loc"
+ LABELS = {"priority": "high"}
+ ACCESS = [{"role": "OWNER", "userByEmail": "phred@example.com"}]
+ EXP = 17
+ RESOURCE = {
+ "datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
+ "etag": "etag",
+ "description": DESCRIPTION,
+ "friendlyName": FRIENDLY_NAME,
+ "location": LOCATION,
+ "defaultTableExpirationMs": EXP,
+ "labels": LABELS,
+ "access": ACCESS,
+ }
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(RESOURCE, RESOURCE)
+ ds = Dataset(DatasetReference(self.PROJECT, self.DS_ID))
+ ds.description = DESCRIPTION
+ ds.friendly_name = FRIENDLY_NAME
+ ds.location = LOCATION
+ ds.default_table_expiration_ms = EXP
+ ds.labels = LABELS
+ ds.access_entries = [AccessEntry("OWNER", "userByEmail", "phred@example.com")]
+ fields = [
+ "description",
+ "friendly_name",
+ "location",
+ "labels",
+ "access_entries",
+ ]
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ ds2 = client.update_dataset(
+ ds,
+ fields=fields,
+ timeout=7.5,
+ )
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % PATH, "fields": fields}, client, None
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="PATCH",
+ data={
+ "description": DESCRIPTION,
+ "friendlyName": FRIENDLY_NAME,
+ "location": LOCATION,
+ "labels": LABELS,
+ "access": ACCESS,
+ },
+ path="/" + PATH,
+ timeout=7.5,
+ )
+ self.assertEqual(ds2.description, ds.description)
+ self.assertEqual(ds2.friendly_name, ds.friendly_name)
+ self.assertEqual(ds2.location, ds.location)
+ self.assertEqual(ds2.labels, ds.labels)
+ self.assertEqual(ds2.access_entries, ds.access_entries)
+
+ # ETag becomes If-Match header.
+ ds._properties["etag"] = "etag"
+ client.update_dataset(ds, [])
+ req = conn.api_request.call_args
+ self.assertEqual(req[1]["headers"]["If-Match"], "etag")
+
+ def test_update_dataset_w_custom_property(self):
+ # The library should handle sending properties to the API that are not
+ # yet part of the library
+ from google.cloud.bigquery.dataset import Dataset
+
+ path = "/projects/%s/datasets/%s" % (self.PROJECT, self.DS_ID)
+ resource = {
+ "datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
+ "newAlphaProperty": "unreleased property",
+ }
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource)
+ dataset = Dataset(DatasetReference(self.PROJECT, self.DS_ID))
+ dataset._properties["newAlphaProperty"] = "unreleased property"
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ dataset = client.update_dataset(dataset, ["newAlphaProperty"])
+
+ final_attributes.assert_called_once_with(
+ {"path": path, "fields": ["newAlphaProperty"]}, client, None
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="PATCH",
+ data={"newAlphaProperty": "unreleased property"},
+ path=path,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ self.assertEqual(dataset.dataset_id, self.DS_ID)
+ self.assertEqual(dataset.project, self.PROJECT)
+ self.assertEqual(dataset._properties["newAlphaProperty"], "unreleased property")
+
+ def test_update_model(self):
+ from google.cloud.bigquery.model import Model
+
+ path = "projects/%s/datasets/%s/models/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.MODEL_ID,
+ )
+ description = "description"
+ title = "title"
+ expires = datetime.datetime(
+ 2012, 12, 21, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ resource = {
+ "modelReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "modelId": self.MODEL_ID,
+ },
+ "description": description,
+ "etag": "etag",
+ "expirationTime": str(google.cloud._helpers._millis(expires)),
+ "friendlyName": title,
+ "labels": {"x": "y"},
+ }
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource, resource)
+ model_id = "{}.{}.{}".format(self.PROJECT, self.DS_ID, self.MODEL_ID)
+ model = Model(model_id)
+ model.description = description
+ model.friendly_name = title
+ model.expires = expires
+ model.labels = {"x": "y"}
+ fields = ["description", "friendly_name", "labels", "expires"]
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ updated_model = client.update_model(model, fields, timeout=7.5)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "fields": fields}, client, None
+ )
+
+ sent = {
+ "description": description,
+ "expirationTime": str(google.cloud._helpers._millis(expires)),
+ "friendlyName": title,
+ "labels": {"x": "y"},
+ }
+ conn.api_request.assert_called_once_with(
+ method="PATCH", data=sent, path="/" + path, timeout=7.5
+ )
+ self.assertEqual(updated_model.model_id, model.model_id)
+ self.assertEqual(updated_model.description, model.description)
+ self.assertEqual(updated_model.friendly_name, model.friendly_name)
+ self.assertEqual(updated_model.labels, model.labels)
+ self.assertEqual(updated_model.expires, model.expires)
+
+ # ETag becomes If-Match header.
+ model._properties["etag"] = "etag"
+ client.update_model(model, [])
+ req = conn.api_request.call_args
+ self.assertEqual(req[1]["headers"]["If-Match"], "etag")
+
+ def test_update_routine(self):
+ from google.cloud.bigquery.routine import Routine
+ from google.cloud.bigquery.routine import RoutineArgument
+
+ full_routine_id = "routines-project.test_routines.updated_routine"
+ resource = {
+ "routineReference": {
+ "projectId": "routines-project",
+ "datasetId": "test_routines",
+ "routineId": "updated_routine",
+ },
+ "routineType": "SCALAR_FUNCTION",
+ "language": "SQL",
+ "definitionBody": "x * 3",
+ "arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
+ "returnType": None,
+ "someNewField": "someValue",
+ }
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource, resource)
+ routine = Routine(full_routine_id)
+ routine.arguments = [
+ RoutineArgument(
+ name="x",
+ data_type=bigquery.standard_sql.StandardSqlDataType(
+ type_kind=bigquery.StandardSqlTypeNames.INT64
+ ),
+ )
+ ]
+ routine.body = "x * 3"
+ routine.language = "SQL"
+ routine.type_ = "SCALAR_FUNCTION"
+ routine._properties["someNewField"] = "someValue"
+ fields = [
+ "arguments",
+ "language",
+ "body",
+ "type_",
+ "return_type",
+ "someNewField",
+ ]
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ actual_routine = client.update_routine(
+ routine,
+ fields,
+ timeout=7.5,
+ )
+
+ final_attributes.assert_called_once_with(
+ {"path": routine.path, "fields": fields}, client, None
+ )
+
+ # TODO: routineReference isn't needed when the Routines API supports
+ # partial updates.
+ sent = resource
+ conn.api_request.assert_called_once_with(
+ method="PUT",
+ data=sent,
+ path="/projects/routines-project/datasets/test_routines/routines/updated_routine",
+ timeout=7.5,
+ )
+ self.assertEqual(actual_routine.arguments, routine.arguments)
+ self.assertEqual(actual_routine.body, routine.body)
+ self.assertEqual(actual_routine.language, routine.language)
+ self.assertEqual(actual_routine.type_, routine.type_)
+
+ # ETag becomes If-Match header.
+ routine._properties["etag"] = "im-an-etag"
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.update_routine(routine, [])
+
+ final_attributes.assert_called_once_with(
+ {"path": routine.path, "fields": []}, client, None
+ )
+
+ req = conn.api_request.call_args
+ self.assertEqual(req[1]["headers"]["If-Match"], "im-an-etag")
+
+ def test_update_table(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.schema import PolicyTagList
+ from google.cloud.bigquery.table import Table
+
+ path = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ description = "description"
+ title = "title"
+ resource = self._make_table_resource()
+ resource.update(
+ {
+ "schema": {
+ "fields": [
+ {
+ "name": "full_name",
+ "type": "STRING",
+ "mode": "REQUIRED",
+ "description": None,
+ "policyTags": {"names": []},
+ },
+ {
+ "name": "age",
+ "type": "INTEGER",
+ "mode": "REQUIRED",
+ "description": "New field description",
+ },
+ ]
+ },
+ "etag": "etag",
+ "description": description,
+ "friendlyName": title,
+ "labels": {"x": "y"},
+ }
+ )
+ schema = [
+ # Explicly setting policyTags to no names should be included in the sent resource.
+ # https://github.com/googleapis/python-bigquery/issues/981
+ SchemaField(
+ "full_name",
+ "STRING",
+ mode="REQUIRED",
+ description=None,
+ policy_tags=PolicyTagList(names=()),
+ ),
+ SchemaField(
+ "age", "INTEGER", mode="REQUIRED", description="New field description"
+ ),
+ ]
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource, resource)
+ table = Table(self.TABLE_REF, schema=schema)
+ table.description = description
+ table.friendly_name = title
+ table.labels = {"x": "y"}
+ fields = ["schema", "description", "friendly_name", "labels"]
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ updated_table = client.update_table(table, fields, timeout=7.5)
+ span_path = "/%s" % path
+
+ final_attributes.assert_called_once_with(
+ {"path": span_path, "fields": fields}, client, None
+ )
+
+ sent = {
+ "schema": {
+ "fields": [
+ {
+ "name": "full_name",
+ "type": "STRING",
+ "mode": "REQUIRED",
+ "description": None,
+ "policyTags": {"names": []},
+ },
+ {
+ "name": "age",
+ "type": "INTEGER",
+ "mode": "REQUIRED",
+ "description": "New field description",
+ },
+ ]
+ },
+ "description": description,
+ "friendlyName": title,
+ "labels": {"x": "y"},
+ }
+ conn.api_request.assert_called_once_with(
+ method="PATCH", data=sent, path="/" + path, timeout=7.5
+ )
+ self.assertEqual(updated_table.description, table.description)
+ self.assertEqual(updated_table.friendly_name, table.friendly_name)
+ self.assertEqual(updated_table.schema, table.schema)
+ self.assertEqual(updated_table.labels, table.labels)
+
+ # ETag becomes If-Match header.
+ table._properties["etag"] = "etag"
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.update_table(table, [])
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "fields": []}, client, None
+ )
+
+ req = conn.api_request.call_args
+ self.assertEqual(req[1]["headers"]["If-Match"], "etag")
+
+ def test_update_table_w_custom_property(self):
+ from google.cloud.bigquery.table import Table
+
+ path = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ resource = self._make_table_resource()
+ resource["newAlphaProperty"] = "unreleased property"
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource)
+ table = Table(self.TABLE_REF)
+ table._properties["newAlphaProperty"] = "unreleased property"
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ updated_table = client.update_table(table, ["newAlphaProperty"])
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "fields": ["newAlphaProperty"]},
+ client,
+ None,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="PATCH",
+ path="/%s" % path,
+ data={"newAlphaProperty": "unreleased property"},
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(
+ updated_table._properties["newAlphaProperty"], "unreleased property"
+ )
+
+ def test_update_table_only_use_legacy_sql(self):
+ from google.cloud.bigquery.table import Table
+
+ path = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ resource = self._make_table_resource()
+ resource["view"] = {"useLegacySql": True}
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource)
+ table = Table(self.TABLE_REF)
+ table.view_use_legacy_sql = True
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ updated_table = client.update_table(table, ["view_use_legacy_sql"])
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "fields": ["view_use_legacy_sql"]},
+ client,
+ None,
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="PATCH",
+ path="/%s" % path,
+ data={"view": {"useLegacySql": True}},
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(updated_table.view_use_legacy_sql, table.view_use_legacy_sql)
+
+ def test_update_table_w_query(self):
+ import datetime
+ from google.cloud._helpers import UTC
+ from google.cloud._helpers import _millis
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ path = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ query = "select fullname, age from person_ages"
+ location = "EU"
+ exp_time = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)
+ schema_resource = {
+ "fields": [
+ {
+ "name": "full_name",
+ "type": "STRING",
+ "mode": "REQUIRED",
+ "description": None,
+ },
+ {
+ "name": "age",
+ "type": "INTEGER",
+ "mode": "REQUIRED",
+ "description": "this is a column",
+ },
+ {"name": "country", "type": "STRING", "mode": "NULLABLE"},
+ ]
+ }
+ schema = [
+ SchemaField(
+ "full_name",
+ "STRING",
+ mode="REQUIRED",
+ # Explicitly unset the description.
+ description=None,
+ ),
+ SchemaField(
+ "age", "INTEGER", mode="REQUIRED", description="this is a column"
+ ),
+ # Omit the description to not make updates to it.
+ SchemaField("country", "STRING"),
+ ]
+ resource = self._make_table_resource()
+ resource.update(
+ {
+ "schema": schema_resource,
+ "view": {"query": query, "useLegacySql": True},
+ "location": location,
+ "expirationTime": _millis(exp_time),
+ }
+ )
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource)
+ table = Table(self.TABLE_REF, schema=schema)
+ table.expires = exp_time
+ table.view_query = query
+ table.view_use_legacy_sql = True
+ updated_properties = ["schema", "view_query", "expires", "view_use_legacy_sql"]
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ updated_table = client.update_table(table, updated_properties)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "fields": updated_properties},
+ client,
+ None,
+ )
+
+ self.assertEqual(updated_table.schema, table.schema)
+ self.assertEqual(updated_table.view_query, table.view_query)
+ self.assertEqual(updated_table.expires, table.expires)
+ self.assertEqual(updated_table.view_use_legacy_sql, table.view_use_legacy_sql)
+ self.assertEqual(updated_table.location, location)
+
+ conn.api_request.assert_called_once_with(
+ method="PATCH",
+ path="/%s" % path,
+ data={
+ "view": {"query": query, "useLegacySql": True},
+ "expirationTime": str(_millis(exp_time)),
+ "schema": schema_resource,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_update_table_w_schema_None(self):
+ # Simulate deleting schema: not sure if back-end will actually
+ # allow this operation, but the spec says it is optional.
+ path = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ resource1 = self._make_table_resource()
+ resource1.update(
+ {
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INTEGER", "mode": "REQUIRED"},
+ ]
+ }
+ }
+ )
+ resource2 = self._make_table_resource()
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource1, resource2)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ table = client.get_table(
+ # Test with string for table ID
+ "{}.{}.{}".format(
+ self.TABLE_REF.project,
+ self.TABLE_REF.dataset_id,
+ self.TABLE_REF.table_id,
+ )
+ )
+
+ final_attributes.assert_called_once_with({"path": "/%s" % path}, client, None)
+
+ table.schema = None
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ updated_table = client.update_table(table, ["schema"])
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "fields": ["schema"]}, client, None
+ )
+
+ self.assertEqual(len(conn.api_request.call_args_list), 2)
+ req = conn.api_request.call_args_list[1]
+ self.assertEqual(req[1]["method"], "PATCH")
+ sent = {"schema": None}
+ self.assertEqual(req[1]["data"], sent)
+ self.assertEqual(req[1]["path"], "/%s" % path)
+ self.assertEqual(len(updated_table.schema), 0)
+
+ def test_update_table_delete_property(self):
+ from google.cloud.bigquery.table import Table
+
+ description = "description"
+ title = "title"
+ path = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ resource1 = self._make_table_resource()
+ resource1.update({"description": description, "friendlyName": title})
+ resource2 = self._make_table_resource()
+ resource2["description"] = None
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ conn = client._connection = make_connection(resource1, resource2)
+ table = Table(self.TABLE_REF)
+ table.description = description
+ table.friendly_name = title
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ table2 = client.update_table(table, ["description", "friendly_name"])
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "fields": ["description", "friendly_name"]},
+ client,
+ None,
+ )
+
+ self.assertEqual(table2.description, table.description)
+ table2.description = None
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ table3 = client.update_table(table2, ["description"])
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path, "fields": ["description"]}, client, None
+ )
+
+ self.assertEqual(len(conn.api_request.call_args_list), 2)
+ req = conn.api_request.call_args_list[1]
+ self.assertEqual(req[1]["method"], "PATCH")
+ self.assertEqual(req[1]["path"], "/%s" % path)
+ sent = {"description": None}
+ self.assertEqual(req[1]["data"], sent)
+ self.assertIsNone(table3.description)
+
+ def test_delete_job_metadata_not_found(self):
+ creds = _make_credentials()
+ client = self._make_one("client-proj", creds, location="client-loc")
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.NotFound("job not found"),
+ google.api_core.exceptions.NotFound("job not found"),
+ )
+
+ with self.assertRaises(google.api_core.exceptions.NotFound):
+ client.delete_job_metadata("my-job")
+
+ conn.api_request.reset_mock()
+ client.delete_job_metadata("my-job", not_found_ok=True)
+
+ conn.api_request.assert_called_once_with(
+ method="DELETE",
+ path="/projects/client-proj/jobs/my-job/delete",
+ query_params={"location": "client-loc"},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_delete_job_metadata_with_id(self):
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection({})
+
+ client.delete_job_metadata("my-job", project="param-proj", location="param-loc")
+
+ conn.api_request.assert_called_once_with(
+ method="DELETE",
+ path="/projects/param-proj/jobs/my-job/delete",
+ query_params={"location": "param-loc"},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_delete_job_metadata_with_resource(self):
+ from google.cloud.bigquery.job import QueryJob
+
+ query_resource = {
+ "jobReference": {
+ "projectId": "job-based-proj",
+ "jobId": "query_job",
+ "location": "us-east1",
+ },
+ "configuration": {"query": {}},
+ }
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection(query_resource)
+ job_from_resource = QueryJob.from_api_repr(query_resource, client)
+
+ client.delete_job_metadata(job_from_resource)
+
+ conn.api_request.assert_called_once_with(
+ method="DELETE",
+ path="/projects/job-based-proj/jobs/query_job/delete",
+ query_params={"location": "us-east1"},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_delete_model(self):
+ from google.cloud.bigquery.model import Model
+
+ path = "projects/%s/datasets/%s/models/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.MODEL_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ model_id = "{}.{}.{}".format(self.PROJECT, self.DS_ID, self.MODEL_ID)
+ models = (
+ model_id,
+ DatasetReference(self.PROJECT, self.DS_ID).model(self.MODEL_ID),
+ Model(model_id),
+ )
+ conn = client._connection = make_connection(*([{}] * len(models)))
+
+ for arg in models:
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.delete_model(arg, timeout=7.5)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path}, client, None
+ )
+ conn.api_request.assert_called_with(
+ method="DELETE", path="/%s" % path, timeout=7.5
+ )
+
+ def test_delete_model_w_wrong_type(self):
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ with self.assertRaises(TypeError):
+ client.delete_model(DatasetReference(self.PROJECT, self.DS_ID))
+
+ def test_delete_model_w_not_found_ok_false(self):
+ path = "/projects/{}/datasets/{}/models/{}".format(
+ self.PROJECT, self.DS_ID, self.MODEL_ID
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.NotFound("model not found")
+ )
+
+ with self.assertRaises(google.api_core.exceptions.NotFound):
+ client.delete_model("{}.{}".format(self.DS_ID, self.MODEL_ID))
+
+ conn.api_request.assert_called_with(
+ method="DELETE", path=path, timeout=DEFAULT_TIMEOUT
+ )
+
+ def test_delete_model_w_not_found_ok_true(self):
+ path = "/projects/{}/datasets/{}/models/{}".format(
+ self.PROJECT, self.DS_ID, self.MODEL_ID
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.NotFound("model not found")
+ )
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.delete_model(
+ "{}.{}".format(self.DS_ID, self.MODEL_ID), not_found_ok=True
+ )
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ conn.api_request.assert_called_with(
+ method="DELETE", path=path, timeout=DEFAULT_TIMEOUT
+ )
+
+ def test_delete_routine(self):
+ from google.cloud.bigquery.routine import Routine
+ from google.cloud.bigquery.routine import RoutineReference
+
+ full_routine_id = "test-routine-project.test_routines.minimal_routine"
+ routines = [
+ full_routine_id,
+ Routine(full_routine_id),
+ RoutineReference.from_string(full_routine_id),
+ ]
+ creds = _make_credentials()
+ http = object()
+ path = "/projects/test-routine-project/datasets/test_routines/routines/minimal_routine"
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(*([{}] * len(routines)))
+
+ for routine in routines:
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.delete_routine(routine, timeout=7.5)
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ conn.api_request.assert_called_with(
+ method="DELETE",
+ path=path,
+ timeout=7.5,
+ )
+
+ def test_delete_routine_w_wrong_type(self):
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ with self.assertRaises(TypeError):
+ client.delete_routine(DatasetReference(self.PROJECT, self.DS_ID))
+
+ def test_delete_routine_w_not_found_ok_false(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.NotFound("routine not found")
+ )
+ path = "/projects/routines-project/datasets/test_routines/routines/test_routine"
+
+ with self.assertRaises(google.api_core.exceptions.NotFound):
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.delete_routine("routines-project.test_routines.test_routine")
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ conn.api_request.assert_called_with(
+ method="DELETE",
+ path=path,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_delete_routine_w_not_found_ok_true(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.NotFound("routine not found")
+ )
+ path = "/projects/routines-project/datasets/test_routines/routines/test_routine"
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.delete_routine(
+ "routines-project.test_routines.test_routine", not_found_ok=True
+ )
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ conn.api_request.assert_called_with(
+ method="DELETE",
+ path=path,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_delete_table(self):
+ from google.cloud.bigquery.table import Table
+
+ tables = (
+ self.TABLE_REF,
+ Table(self.TABLE_REF),
+ "{}.{}.{}".format(
+ self.TABLE_REF.project,
+ self.TABLE_REF.dataset_id,
+ self.TABLE_REF.table_id,
+ ),
+ )
+ path = "projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(*([{}] * len(tables)))
+
+ for arg in tables:
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.delete_table(arg, timeout=7.5)
+
+ final_attributes.assert_called_once_with(
+ {"path": "/%s" % path}, client, None
+ )
+
+ conn.api_request.assert_called_with(
+ method="DELETE", path="/%s" % path, timeout=7.5
+ )
+
+ def test_delete_table_w_wrong_type(self):
+ creds = _make_credentials()
+ client = self._make_one(project=self.PROJECT, credentials=creds)
+ with self.assertRaises(TypeError):
+ client.delete_table(DatasetReference(self.PROJECT, self.DS_ID))
+
+ def test_delete_table_w_not_found_ok_false(self):
+ path = "/projects/{}/datasets/{}/tables/{}".format(
+ self.PROJECT, self.DS_ID, self.TABLE_ID
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.NotFound("table not found")
+ )
+
+ with self.assertRaises(google.api_core.exceptions.NotFound):
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.delete_table("{}.{}".format(self.DS_ID, self.TABLE_ID))
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ conn.api_request.assert_called_with(
+ method="DELETE", path=path, timeout=DEFAULT_TIMEOUT
+ )
+
+ def test_delete_table_w_not_found_ok_true(self):
+ path = "/projects/{}/datasets/{}/tables/{}".format(
+ self.PROJECT, self.DS_ID, self.TABLE_ID
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.NotFound("table not found")
+ )
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ client.delete_table(
+ "{}.{}".format(self.DS_ID, self.TABLE_ID), not_found_ok=True
+ )
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ conn.api_request.assert_called_with(
+ method="DELETE", path=path, timeout=DEFAULT_TIMEOUT
+ )
+
+ def _create_job_helper(self, job_config):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ resource = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": "random-id"},
+ "configuration": job_config,
+ }
+ expected = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": mock.ANY},
+ "configuration": job_config,
+ }
+ conn = client._connection = make_connection(resource)
+ client.create_job(job_config=job_config)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/%s/jobs" % self.PROJECT,
+ data=expected,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_create_job_load_config(self):
+ configuration = {
+ "load": {
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": "source_table",
+ },
+ "sourceUris": ["gs://test_bucket/src_object*"],
+ }
+ }
+
+ self._create_job_helper(configuration)
+
+ def test_create_job_copy_config(self):
+ configuration = {
+ "copy": {
+ "sourceTables": [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": "source_table",
+ }
+ ],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": "destination_table",
+ },
+ }
+ }
+
+ self._create_job_helper(configuration)
+
+ def test_create_job_copy_config_w_single_source(self):
+ configuration = {
+ "copy": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": "source_table",
+ },
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": "destination_table",
+ },
+ }
+ }
+
+ self._create_job_helper(configuration)
+
+ def test_create_job_extract_config(self):
+ configuration = {
+ "extract": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": "source_table",
+ },
+ "destinationUris": ["gs://test_bucket/dst_object*"],
+ }
+ }
+ self._create_job_helper(configuration)
+
+ def test_create_job_extract_config_for_model(self):
+ configuration = {
+ "extract": {
+ "sourceModel": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "modelId": "source_model",
+ },
+ "destinationUris": ["gs://test_bucket/dst_object*"],
+ }
+ }
+ self._create_job_helper(configuration)
+
+ def test_create_job_query_config(self):
+ configuration = {
+ "query": {
+ "query": "query",
+ "destinationTable": {"tableId": "table_id"},
+ "useLegacySql": False,
+ }
+ }
+ self._create_job_helper(configuration)
+
+ def test_create_job_query_config_w_rateLimitExceeded_error(self):
+ from google.cloud.exceptions import Forbidden
+ from google.cloud.bigquery.retry import DEFAULT_RETRY
+
+ query = "select count(*) from persons"
+ configuration = {
+ "query": {
+ "query": query,
+ "useLegacySql": False,
+ "destinationTable": {"tableId": "table_id"},
+ }
+ }
+ resource = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": mock.ANY},
+ "configuration": {
+ "query": {
+ "query": query,
+ "useLegacySql": False,
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": "query_destination_table",
+ },
+ }
+ },
+ }
+ data_without_destination = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": mock.ANY},
+ "configuration": configuration,
+ }
+
+ creds = _make_credentials()
+ http = object()
+ retry = DEFAULT_RETRY.with_deadline(1).with_predicate(
+ lambda exc: isinstance(exc, Forbidden)
+ )
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ api_request_patcher = mock.patch.object(
+ client._connection,
+ "api_request",
+ side_effect=[
+ Forbidden("", errors=[{"reason": "rateLimitExceeded"}]),
+ resource,
+ ],
+ )
+
+ with api_request_patcher as fake_api_request:
+ job = client.create_job(job_config=configuration, retry=retry)
+
+ self.assertEqual(job.destination.table_id, "query_destination_table")
+ self.assertEqual(len(fake_api_request.call_args_list), 2) # was retried once
+ self.assertEqual(
+ fake_api_request.call_args_list[1],
+ mock.call(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=data_without_destination,
+ timeout=DEFAULT_TIMEOUT,
+ ),
+ )
+
+ def test_create_job_w_invalid_job_config(self):
+ configuration = {"unknown": {}}
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ with self.assertRaises(TypeError) as exc:
+ client.create_job(job_config=configuration)
+
+ self.assertIn("Invalid job configuration", exc.exception.args[0])
+
+ def test_job_from_resource_unknown_type(self):
+ from google.cloud.bigquery.job import UnknownJob
+
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ got = client.job_from_resource({}) # Can parse redacted job.
+ self.assertIsInstance(got, UnknownJob)
+ self.assertEqual(got.project, self.PROJECT)
+
+ def test_get_job_miss_w_explict_project(self):
+ from google.cloud.exceptions import NotFound
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ OTHER_PROJECT = "OTHER_PROJECT"
+ JOB_ID = "NONESUCH"
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection()
+
+ with self.assertRaises(NotFound):
+ client.get_job(JOB_ID, project=OTHER_PROJECT)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/OTHER_PROJECT/jobs/NONESUCH",
+ query_params={"projection": "full"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+
+ def test_get_job_miss_w_client_location(self):
+ from google.cloud.exceptions import NotFound
+ from google.cloud.bigquery.retry import DEFAULT_GET_JOB_TIMEOUT
+
+ JOB_ID = "NONESUCH"
+ creds = _make_credentials()
+ client = self._make_one("client-proj", creds, location="client-loc")
+ conn = client._connection = make_connection()
+
+ with self.assertRaises(NotFound):
+ client.get_job(JOB_ID)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/client-proj/jobs/NONESUCH",
+ query_params={"projection": "full", "location": "client-loc"},
+ timeout=DEFAULT_GET_JOB_TIMEOUT,
+ )
+
+ def test_get_job_hit_w_timeout(self):
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import QueryJob
+ from google.cloud.bigquery.job import WriteDisposition
+
+ JOB_ID = "query_job"
+ QUERY_DESTINATION_TABLE = "query_destination_table"
+ QUERY = "SELECT * from test_dataset:test_table"
+ ASYNC_QUERY_DATA = {
+ "id": "{}:{}".format(self.PROJECT, JOB_ID),
+ "jobReference": {
+ "projectId": "resource-proj",
+ "jobId": "query_job",
+ "location": "us-east1",
+ },
+ "state": "DONE",
+ "configuration": {
+ "query": {
+ "query": QUERY,
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": QUERY_DESTINATION_TABLE,
+ },
+ "createDisposition": CreateDisposition.CREATE_IF_NEEDED,
+ "writeDisposition": WriteDisposition.WRITE_TRUNCATE,
+ }
+ },
+ }
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection(ASYNC_QUERY_DATA)
+ job_from_resource = QueryJob.from_api_repr(ASYNC_QUERY_DATA, client)
+
+ job = client.get_job(job_from_resource, timeout=7.5)
+
+ self.assertIsInstance(job, QueryJob)
+ self.assertEqual(job.job_id, JOB_ID)
+ self.assertEqual(job.project, "resource-proj")
+ self.assertEqual(job.location, "us-east1")
+ self.assertEqual(job.create_disposition, CreateDisposition.CREATE_IF_NEEDED)
+ self.assertEqual(job.write_disposition, WriteDisposition.WRITE_TRUNCATE)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/resource-proj/jobs/query_job",
+ query_params={"projection": "full", "location": "us-east1"},
+ timeout=7.5,
+ )
+
+ def test_cancel_job_miss_w_explict_project(self):
+ from google.cloud.exceptions import NotFound
+
+ OTHER_PROJECT = "OTHER_PROJECT"
+ JOB_ID = "NONESUCH"
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection()
+
+ with self.assertRaises(NotFound):
+ client.cancel_job(JOB_ID, project=OTHER_PROJECT, location=self.LOCATION)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/OTHER_PROJECT/jobs/NONESUCH/cancel",
+ query_params={"projection": "full", "location": self.LOCATION},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_cancel_job_miss_w_client_location(self):
+ from google.cloud.exceptions import NotFound
+
+ OTHER_PROJECT = "OTHER_PROJECT"
+ JOB_ID = "NONESUCH"
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds, location=self.LOCATION)
+ conn = client._connection = make_connection()
+
+ with self.assertRaises(NotFound):
+ client.cancel_job(JOB_ID, project=OTHER_PROJECT)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/OTHER_PROJECT/jobs/NONESUCH/cancel",
+ query_params={"projection": "full", "location": self.LOCATION},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_cancel_job_hit(self):
+ from google.cloud.bigquery.job import QueryJob
+
+ JOB_ID = "query_job"
+ QUERY = "SELECT * from test_dataset:test_table"
+ QUERY_JOB_RESOURCE = {
+ "id": "{}:{}".format(self.PROJECT, JOB_ID),
+ "jobReference": {
+ "projectId": "job-based-proj",
+ "jobId": "query_job",
+ "location": "asia-northeast1",
+ },
+ "state": "RUNNING",
+ "configuration": {"query": {"query": QUERY}},
+ }
+ RESOURCE = {"job": QUERY_JOB_RESOURCE}
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection(RESOURCE)
+ job_from_resource = QueryJob.from_api_repr(QUERY_JOB_RESOURCE, client)
+
+ job = client.cancel_job(job_from_resource)
+
+ self.assertIsInstance(job, QueryJob)
+ self.assertEqual(job.job_id, JOB_ID)
+ self.assertEqual(job.project, "job-based-proj")
+ self.assertEqual(job.location, "asia-northeast1")
+ self.assertEqual(job.query, QUERY)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/job-based-proj/jobs/query_job/cancel",
+ query_params={"projection": "full", "location": "asia-northeast1"},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_cancel_job_w_timeout(self):
+ JOB_ID = "query_job"
+ QUERY = "SELECT * from test_dataset:test_table"
+ QUERY_JOB_RESOURCE = {
+ "id": "{}:{}".format(self.PROJECT, JOB_ID),
+ "jobReference": {"projectId": self.PROJECT, "jobId": "query_job"},
+ "state": "RUNNING",
+ "configuration": {"query": {"query": QUERY}},
+ }
+ RESOURCE = {"job": QUERY_JOB_RESOURCE}
+
+ creds = _make_credentials()
+ client = self._make_one(self.PROJECT, creds)
+ conn = client._connection = make_connection(RESOURCE)
+
+ client.cancel_job(JOB_ID, timeout=7.5)
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/{}/jobs/query_job/cancel".format(self.PROJECT),
+ query_params={"projection": "full"},
+ timeout=7.5,
+ )
+
+ def test_load_table_from_uri(self):
+ from google.cloud.bigquery.job import LoadJob, LoadJobConfig
+
+ JOB = "job_name"
+ DESTINATION = "destination_table"
+ SOURCE_URI = "http://example.com/source.csv"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "load": {
+ "sourceUris": [SOURCE_URI],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": DESTINATION,
+ },
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ job_config = LoadJobConfig()
+ original_config_copy = copy.deepcopy(job_config)
+
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ destination = DatasetReference(self.PROJECT, self.DS_ID).table(DESTINATION)
+
+ job = client.load_table_from_uri(
+ SOURCE_URI, destination, job_id=JOB, job_config=job_config, timeout=7.5
+ )
+
+ # Check that load_table_from_uri actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/%s/jobs" % self.PROJECT,
+ data=RESOURCE,
+ timeout=7.5,
+ )
+
+ # the original config object should not have been modified
+ self.assertEqual(job_config.to_api_repr(), original_config_copy.to_api_repr())
+
+ self.assertIsInstance(job, LoadJob)
+ self.assertIsInstance(job.configuration, LoadJobConfig)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_id, JOB)
+ self.assertEqual(list(job.source_uris), [SOURCE_URI])
+ self.assertEqual(job.destination, destination)
+
+ conn = client._connection = make_connection(RESOURCE)
+
+ job = client.load_table_from_uri([SOURCE_URI], destination, job_id=JOB)
+ self.assertIsInstance(job, LoadJob)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_id, JOB)
+ self.assertEqual(list(job.source_uris), [SOURCE_URI])
+ self.assertEqual(job.destination, destination)
+
+ def test_load_table_from_uri_w_explicit_project(self):
+ job_id = "this-is-a-job-id"
+ destination_id = "destination_table"
+ source_uri = "gs://example/source.csv"
+ resource = {
+ "jobReference": {
+ "projectId": "other-project",
+ "location": self.LOCATION,
+ "jobId": job_id,
+ },
+ "configuration": {
+ "load": {
+ "sourceUris": [source_uri],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": destination_id,
+ },
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(resource)
+ destination = DatasetReference(self.PROJECT, self.DS_ID).table(destination_id)
+
+ client.load_table_from_uri(
+ source_uri,
+ destination,
+ job_id=job_id,
+ project="other-project",
+ location=self.LOCATION,
+ )
+
+ # Check that load_table_from_uri actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/other-project/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_load_table_from_uri_w_client_location(self):
+ job_id = "this-is-a-job-id"
+ destination_id = "destination_table"
+ source_uri = "gs://example/source.csv"
+ resource = {
+ "jobReference": {
+ "projectId": "other-project",
+ "location": self.LOCATION,
+ "jobId": job_id,
+ },
+ "configuration": {
+ "load": {
+ "sourceUris": [source_uri],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": destination_id,
+ },
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project=self.PROJECT, credentials=creds, _http=http, location=self.LOCATION
+ )
+ conn = client._connection = make_connection(resource)
+
+ client.load_table_from_uri(
+ source_uri,
+ # Test with string for table ID.
+ "{}.{}".format(self.DS_ID, destination_id),
+ job_id=job_id,
+ project="other-project",
+ )
+
+ # Check that load_table_from_uri actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/other-project/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_load_table_from_uri_w_invalid_job_config(self):
+ from google.cloud.bigquery import job
+
+ JOB = "job_name"
+ DESTINATION = "destination_table"
+ SOURCE_URI = "http://example.com/source.csv"
+
+ creds = _make_credentials()
+ http = object()
+ job_config = job.CopyJobConfig()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ destination = DatasetReference(self.PROJECT, self.DS_ID).table(DESTINATION)
+
+ with self.assertRaises(TypeError) as exc:
+ client.load_table_from_uri(
+ SOURCE_URI, destination, job_id=JOB, job_config=job_config
+ )
+
+ self.assertIn("Expected an instance of LoadJobConfig", exc.exception.args[0])
+
+ def test_load_table_from_uri_w_explicit_job_config(self):
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ JOB = "job_name"
+ DESTINATION = "destination_table"
+ SOURCE_URI = "http://example.com/source.csv"
+ RESOURCE = {
+ "jobReference": {"jobId": JOB, "projectId": self.PROJECT},
+ "configuration": {
+ "load": {
+ "sourceUris": [SOURCE_URI],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": DESTINATION,
+ },
+ "createSession": True,
+ "encoding": "UTF-8",
+ }
+ },
+ }
+
+ creds = _make_credentials()
+ http = object()
+
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ destination = DatasetReference(self.PROJECT, self.DS_ID).table(DESTINATION)
+
+ job_config = LoadJobConfig()
+ job_config.create_session = True
+ job_config.encoding = "UTF-8"
+ client.load_table_from_uri(
+ SOURCE_URI, destination, job_id=JOB, job_config=job_config
+ )
+
+ # Check that load_table_from_uri actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/%s/jobs" % self.PROJECT,
+ data=RESOURCE,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_load_table_from_uri_w_explicit_job_config_override(self):
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ JOB = "job_name"
+ DESTINATION = "destination_table"
+ SOURCE_URI = "http://example.com/source.csv"
+ RESOURCE = {
+ "jobReference": {"jobId": JOB, "projectId": self.PROJECT},
+ "configuration": {
+ "load": {
+ "sourceUris": [SOURCE_URI],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": DESTINATION,
+ },
+ "createSession": False,
+ "encoding": "ISO-8859-1",
+ }
+ },
+ }
+
+ creds = _make_credentials()
+ http = object()
+ default_job_config = LoadJobConfig()
+ default_job_config.create_session = True
+ default_job_config.encoding = "ISO-8859-1"
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ default_load_job_config=default_job_config,
+ )
+ conn = client._connection = make_connection(RESOURCE)
+ destination = DatasetReference(self.PROJECT, self.DS_ID).table(DESTINATION)
+
+ job_config = LoadJobConfig()
+ job_config.create_session = False
+ client.load_table_from_uri(
+ SOURCE_URI, destination, job_id=JOB, job_config=job_config
+ )
+
+ # Check that load_table_from_uri actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/%s/jobs" % self.PROJECT,
+ data=RESOURCE,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_load_table_from_uri_w_default_load_config(self):
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ JOB = "job_name"
+ DESTINATION = "destination_table"
+ SOURCE_URI = "http://example.com/source.csv"
+ RESOURCE = {
+ "jobReference": {"jobId": JOB, "projectId": self.PROJECT},
+ "configuration": {
+ "load": {
+ "sourceUris": [SOURCE_URI],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": DESTINATION,
+ },
+ "encoding": "ISO-8859-1",
+ }
+ },
+ }
+
+ creds = _make_credentials()
+ http = object()
+ default_job_config = LoadJobConfig()
+ default_job_config.encoding = "ISO-8859-1"
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ default_load_job_config=default_job_config,
+ )
+ conn = client._connection = make_connection(RESOURCE)
+ destination = DatasetReference(self.PROJECT, self.DS_ID).table(DESTINATION)
+
+ client.load_table_from_uri(SOURCE_URI, destination, job_id=JOB)
+
+ # Check that load_table_from_uri actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/%s/jobs" % self.PROJECT,
+ data=RESOURCE,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ @staticmethod
+ def _mock_requests_response(status_code, headers, content=b""):
+ return mock.Mock(
+ content=content,
+ headers=headers,
+ status_code=status_code,
+ spec=["content", "headers", "status_code"],
+ )
+
+ def _mock_transport(self, status_code, headers, content=b""):
+ fake_transport = mock.Mock(spec=["request"])
+ fake_response = self._mock_requests_response(
+ status_code, headers, content=content
+ )
+ fake_transport.request.return_value = fake_response
+ return fake_transport
+
+ def _initiate_resumable_upload_helper(self, num_retries=None, mtls=False):
+ from google.resumable_media.requests import ResumableUpload
+ from google.cloud.bigquery.client import _DEFAULT_CHUNKSIZE
+ from google.cloud.bigquery.client import _GENERIC_CONTENT_TYPE
+ from google.cloud.bigquery.client import _get_upload_headers
+ from google.cloud.bigquery.job import LoadJob
+ from google.cloud.bigquery.job import LoadJobConfig
+ from google.cloud.bigquery.job import SourceFormat
+
+ # Create mocks to be checked for doing transport.
+ resumable_url = "http://test.invalid?upload_id=hey-you"
+ response_headers = {"location": resumable_url}
+ fake_transport = self._mock_transport(http.client.OK, response_headers)
+ client = self._make_one(project=self.PROJECT, _http=fake_transport)
+ conn = client._connection = make_connection()
+ if mtls:
+ conn.get_api_base_url_for_mtls = mock.Mock(return_value="https://foo.mtls")
+
+ # Create some mock arguments and call the method under test.
+ data = b"goodbye gudbi gootbee"
+ stream = io.BytesIO(data)
+ config = LoadJobConfig()
+ config.source_format = SourceFormat.CSV
+ job = LoadJob(None, None, self.TABLE_REF, client, job_config=config)
+ metadata = job.to_api_repr()
+ upload, transport = client._initiate_resumable_upload(
+ stream, metadata, num_retries, None
+ )
+
+ # Check the returned values.
+ self.assertIsInstance(upload, ResumableUpload)
+
+ host_name = "https://foo.mtls" if mtls else "https://bigquery.googleapis.com"
+ upload_url = (
+ f"{host_name}/upload/bigquery/v2/projects/{self.PROJECT}"
+ "/jobs?uploadType=resumable"
+ )
+ self.assertEqual(upload.upload_url, upload_url)
+ expected_headers = _get_upload_headers(conn.user_agent)
+ self.assertEqual(upload._headers, expected_headers)
+ self.assertFalse(upload.finished)
+ self.assertEqual(upload._chunk_size, _DEFAULT_CHUNKSIZE)
+ self.assertIs(upload._stream, stream)
+ self.assertIsNone(upload._total_bytes)
+ self.assertEqual(upload._content_type, _GENERIC_CONTENT_TYPE)
+ self.assertEqual(upload.resumable_url, resumable_url)
+
+ retry_strategy = upload._retry_strategy
+ self.assertEqual(retry_strategy.max_sleep, 64.0)
+ if num_retries is None:
+ self.assertEqual(retry_strategy.max_cumulative_retry, 600.0)
+ self.assertIsNone(retry_strategy.max_retries)
+ else:
+ self.assertIsNone(retry_strategy.max_cumulative_retry)
+ self.assertEqual(retry_strategy.max_retries, num_retries)
+ self.assertIs(transport, fake_transport)
+ # Make sure we never read from the stream.
+ self.assertEqual(stream.tell(), 0)
+
+ # Check the mocks.
+ request_headers = expected_headers.copy()
+ request_headers["x-upload-content-type"] = _GENERIC_CONTENT_TYPE
+ fake_transport.request.assert_called_once_with(
+ "POST",
+ upload_url,
+ data=json.dumps(metadata).encode("utf-8"),
+ headers=request_headers,
+ timeout=mock.ANY,
+ )
+
+ def test__initiate_resumable_upload(self):
+ self._initiate_resumable_upload_helper()
+
+ def test__initiate_resumable_upload_mtls(self):
+ self._initiate_resumable_upload_helper(mtls=True)
+
+ def test__initiate_resumable_upload_with_retry(self):
+ self._initiate_resumable_upload_helper(num_retries=11)
+
+ def _do_multipart_upload_success_helper(
+ self, get_boundary, num_retries=None, project=None, mtls=False
+ ):
+ from google.cloud.bigquery.client import _get_upload_headers
+ from google.cloud.bigquery.job import LoadJob
+ from google.cloud.bigquery.job import LoadJobConfig
+ from google.cloud.bigquery.job import SourceFormat
+
+ fake_transport = self._mock_transport(http.client.OK, {})
+ client = self._make_one(project=self.PROJECT, _http=fake_transport)
+ conn = client._connection = make_connection()
+ if mtls:
+ conn.get_api_base_url_for_mtls = mock.Mock(return_value="https://foo.mtls")
+
+ if project is None:
+ project = self.PROJECT
+
+ # Create some mock arguments.
+ data = b"Bzzzz-zap \x00\x01\xf4"
+ stream = io.BytesIO(data)
+ config = LoadJobConfig()
+ config.source_format = SourceFormat.CSV
+ job = LoadJob(None, None, self.TABLE_REF, client, job_config=config)
+ metadata = job.to_api_repr()
+ size = len(data)
+
+ response = client._do_multipart_upload(
+ stream, metadata, size, num_retries, None, project=project
+ )
+
+ # Check the mocks and the returned value.
+ self.assertIs(response, fake_transport.request.return_value)
+ self.assertEqual(stream.tell(), size)
+ get_boundary.assert_called_once_with()
+
+ host_name = "https://foo.mtls" if mtls else "https://bigquery.googleapis.com"
+ upload_url = (
+ f"{host_name}/upload/bigquery/v2/projects/{project}"
+ "/jobs?uploadType=multipart"
+ )
+ payload = (
+ b"--==0==\r\n"
+ b"content-type: application/json; charset=UTF-8\r\n\r\n"
+ b"%(json_metadata)s"
+ b"\r\n"
+ b"--==0==\r\n"
+ b"content-type: */*\r\n\r\n"
+ b"%(data)s"
+ b"\r\n"
+ b"--==0==--"
+ ) % {b"json_metadata": json.dumps(metadata).encode("utf-8"), b"data": data}
+
+ headers = _get_upload_headers(conn.user_agent)
+ headers["content-type"] = b'multipart/related; boundary="==0=="'
+ fake_transport.request.assert_called_once_with(
+ "POST", upload_url, data=payload, headers=headers, timeout=mock.ANY
+ )
+
+ @mock.patch("google.resumable_media._upload.get_boundary", return_value=b"==0==")
+ def test__do_multipart_upload(self, get_boundary):
+ self._do_multipart_upload_success_helper(get_boundary)
+
+ @mock.patch("google.resumable_media._upload.get_boundary", return_value=b"==0==")
+ def test__do_multipart_upload_mtls(self, get_boundary):
+ self._do_multipart_upload_success_helper(get_boundary, mtls=True)
+
+ @mock.patch("google.resumable_media._upload.get_boundary", return_value=b"==0==")
+ def test__do_multipart_upload_with_retry(self, get_boundary):
+ self._do_multipart_upload_success_helper(get_boundary, num_retries=8)
+
+ @mock.patch("google.resumable_media._upload.get_boundary", return_value=b"==0==")
+ def test__do_multipart_upload_with_custom_project(self, get_boundary):
+ self._do_multipart_upload_success_helper(get_boundary, project="custom-project")
+
+ def test_copy_table(self):
+ from google.cloud.bigquery.job import CopyJob
+
+ JOB = "job_name"
+ SOURCE = "source_table"
+ DESTINATION = "destination_table"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "copy": {
+ "sourceTables": [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": SOURCE,
+ }
+ ],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": DESTINATION,
+ },
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.table(SOURCE)
+ destination = dataset.table(DESTINATION)
+
+ job = client.copy_table(source, destination, job_id=JOB, timeout=7.5)
+
+ # Check that copy_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/%s/jobs" % self.PROJECT,
+ data=RESOURCE,
+ timeout=7.5,
+ )
+
+ self.assertIsInstance(job, CopyJob)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_id, JOB)
+ self.assertEqual(list(job.sources), [source])
+ self.assertEqual(job.destination, destination)
+
+ def test_copy_table_w_multiple_sources(self):
+ from google.cloud.bigquery.job import CopyJob
+ from google.cloud.bigquery.table import TableReference
+
+ job_id = "job_name"
+ source_id = "my-project.my_dataset.source_table"
+ source_id2 = "my-project.my_dataset.source_table2"
+ destination_id = "my-other-project.another_dataset.destination_table"
+ expected_resource = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": job_id},
+ "configuration": {
+ "copy": {
+ "sourceTables": [
+ {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "tableId": "source_table",
+ },
+ {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "tableId": "source_table2",
+ },
+ ],
+ "destinationTable": {
+ "projectId": "my-other-project",
+ "datasetId": "another_dataset",
+ "tableId": "destination_table",
+ },
+ }
+ },
+ }
+ returned_resource = expected_resource.copy()
+ returned_resource["statistics"] = {}
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(returned_resource)
+
+ job = client.copy_table([source_id, source_id2], destination_id, job_id=job_id)
+
+ # Check that copy_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/%s/jobs" % self.PROJECT,
+ data=expected_resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertIsInstance(job, CopyJob)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_id, job_id)
+ self.assertEqual(
+ list(sorted(job.sources, key=lambda tbl: tbl.table_id)),
+ [
+ TableReference.from_string(source_id),
+ TableReference.from_string(source_id2),
+ ],
+ )
+ self.assertEqual(job.destination, TableReference.from_string(destination_id))
+
+ def test_copy_table_w_explicit_project(self):
+ job_id = "this-is-a-job-id"
+ source_id = "source_table"
+ destination_id = "destination_table"
+ resource = {
+ "jobReference": {
+ "projectId": "other-project",
+ "location": self.LOCATION,
+ "jobId": job_id,
+ },
+ "configuration": {
+ "copy": {
+ "sourceTables": [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": source_id,
+ }
+ ],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": destination_id,
+ },
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(resource)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.table(source_id)
+ destination = dataset.table(destination_id)
+
+ client.copy_table(
+ source,
+ destination,
+ job_id=job_id,
+ project="other-project",
+ location=self.LOCATION,
+ )
+
+ # Check that copy_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/other-project/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_copy_table_w_client_location(self):
+ job_id = "this-is-a-job-id"
+ source_id = "source_table"
+ destination_id = "destination_table"
+ resource = {
+ "jobReference": {
+ "projectId": "other-project",
+ "location": self.LOCATION,
+ "jobId": job_id,
+ },
+ "configuration": {
+ "copy": {
+ "sourceTables": [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": source_id,
+ }
+ ],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": destination_id,
+ },
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project=self.PROJECT, credentials=creds, _http=http, location=self.LOCATION
+ )
+ conn = client._connection = make_connection(resource)
+
+ client.copy_table(
+ # Test with string for table IDs.
+ "{}.{}".format(self.DS_ID, source_id),
+ "{}.{}".format(self.DS_ID, destination_id),
+ job_id=job_id,
+ project="other-project",
+ )
+
+ # Check that copy_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/other-project/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_copy_table_w_source_strings(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({})
+ sources = [
+ "dataset_wo_proj.some_table",
+ "other_project.other_dataset.other_table",
+ DatasetReference(client.project, "dataset_from_ref").table(
+ "table_from_ref"
+ ),
+ ]
+ destination = "some_project.some_dataset.destination_table"
+
+ job = client.copy_table(sources, destination)
+
+ # Replace job with the request instead of response so we can verify those properties.
+ _, kwargs = conn.api_request.call_args
+ request = kwargs["data"]
+ job._properties = request
+
+ expected_sources = [
+ DatasetReference(client.project, "dataset_wo_proj").table("some_table"),
+ DatasetReference("other_project", "other_dataset").table("other_table"),
+ DatasetReference(client.project, "dataset_from_ref").table(
+ "table_from_ref"
+ ),
+ ]
+ self.assertEqual(list(job.sources), expected_sources)
+ expected_destination = DatasetReference("some_project", "some_dataset").table(
+ "destination_table"
+ )
+ self.assertEqual(job.destination, expected_destination)
+
+ def test_copy_table_w_invalid_job_config(self):
+ from google.cloud.bigquery import job
+
+ JOB = "job_name"
+ SOURCE = "source_table"
+ DESTINATION = "destination_table"
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ job_config = job.ExtractJobConfig()
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.table(SOURCE)
+ destination = dataset.table(DESTINATION)
+ with self.assertRaises(TypeError) as exc:
+ client.copy_table(source, destination, job_id=JOB, job_config=job_config)
+
+ self.assertIn("Expected an instance of CopyJobConfig", exc.exception.args[0])
+
+ def test_copy_table_w_valid_job_config(self):
+ from google.cloud.bigquery.job import CopyJobConfig
+
+ JOB = "job_name"
+ SOURCE = "source_table"
+ DESTINATION = "destination_table"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "copy": {
+ "sourceTables": [
+ {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": SOURCE,
+ }
+ ],
+ "destinationTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": DESTINATION,
+ },
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.table(SOURCE)
+ destination = dataset.table(DESTINATION)
+
+ job_config = CopyJobConfig()
+ original_config_copy = copy.deepcopy(job_config)
+ job = client.copy_table(source, destination, job_id=JOB, job_config=job_config)
+
+ # Check that copy_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/%s/jobs" % self.PROJECT,
+ data=RESOURCE,
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertIsInstance(job.configuration, CopyJobConfig)
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_extract_table(self):
+ from google.cloud.bigquery.job import ExtractJob
+
+ JOB = "job_id"
+ SOURCE = "source_table"
+ DESTINATION = "gs://bucket_name/object_name"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "extract": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": SOURCE,
+ },
+ "destinationUris": [DESTINATION],
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.table(SOURCE)
+
+ job = client.extract_table(source, DESTINATION, job_id=JOB, timeout=7.5)
+
+ # Check that extract_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=RESOURCE,
+ timeout=7.5,
+ )
+
+ # Check the job resource.
+ self.assertIsInstance(job, ExtractJob)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_id, JOB)
+ self.assertEqual(job.source, source)
+ self.assertEqual(list(job.destination_uris), [DESTINATION])
+
+ def test_extract_table_w_invalid_job_config(self):
+ from google.cloud.bigquery import job
+
+ JOB = "job_id"
+ SOURCE = "source_table"
+ DESTINATION = "gs://bucket_name/object_name"
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.table(SOURCE)
+ job_config = job.LoadJobConfig()
+ with self.assertRaises(TypeError) as exc:
+ client.extract_table(source, DESTINATION, job_id=JOB, job_config=job_config)
+
+ self.assertIn("Expected an instance of ExtractJobConfig", exc.exception.args[0])
+
+ def test_extract_table_w_explicit_project(self):
+ job_id = "job_id"
+ source_id = "source_table"
+ destination = "gs://bucket_name/object_name"
+ resource = {
+ "jobReference": {
+ "projectId": "other-project",
+ "location": self.LOCATION,
+ "jobId": job_id,
+ },
+ "configuration": {
+ "extract": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": source_id,
+ },
+ "destinationUris": [destination],
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(resource)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.table(source_id)
+
+ client.extract_table(
+ source,
+ destination,
+ job_id=job_id,
+ project="other-project",
+ location=self.LOCATION,
+ )
+
+ # Check that extract_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/other-project/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_extract_table_w_client_location(self):
+ job_id = "job_id"
+ source_id = "source_table"
+ destination = "gs://bucket_name/object_name"
+ resource = {
+ "jobReference": {
+ "projectId": "other-project",
+ "location": self.LOCATION,
+ "jobId": job_id,
+ },
+ "configuration": {
+ "extract": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": source_id,
+ },
+ "destinationUris": [destination],
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project=self.PROJECT, credentials=creds, _http=http, location=self.LOCATION
+ )
+ conn = client._connection = make_connection(resource)
+
+ client.extract_table(
+ # Test with string for table ID.
+ "{}.{}".format(self.DS_ID, source_id),
+ destination,
+ job_id=job_id,
+ project="other-project",
+ )
+
+ # Check that extract_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/other-project/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_extract_table_generated_job_id(self):
+ from google.cloud.bigquery.job import ExtractJob
+ from google.cloud.bigquery.job import ExtractJobConfig
+ from google.cloud.bigquery.job import DestinationFormat
+
+ JOB = "job_id"
+ SOURCE = "source_table"
+ DESTINATION = "gs://bucket_name/object_name"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "extract": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": SOURCE,
+ },
+ "destinationUris": [DESTINATION],
+ "destinationFormat": "NEWLINE_DELIMITED_JSON",
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.table(SOURCE)
+ job_config = ExtractJobConfig()
+ job_config.destination_format = DestinationFormat.NEWLINE_DELIMITED_JSON
+ original_config_copy = copy.deepcopy(job_config)
+
+ job = client.extract_table(source, DESTINATION, job_config=job_config)
+
+ # Check that extract_table actually starts the job.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], "/projects/PROJECT/jobs")
+ self.assertIsInstance(req["data"]["jobReference"]["jobId"], str)
+ self.assertEqual(req["timeout"], DEFAULT_TIMEOUT)
+
+ # Check the job resource.
+ self.assertIsInstance(job, ExtractJob)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.source, source)
+ self.assertEqual(list(job.destination_uris), [DESTINATION])
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_extract_table_w_destination_uris(self):
+ from google.cloud.bigquery.job import ExtractJob
+
+ JOB = "job_id"
+ SOURCE = "source_table"
+ DESTINATION1 = "gs://bucket_name/object_one"
+ DESTINATION2 = "gs://bucket_name/object_two"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "extract": {
+ "sourceTable": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": SOURCE,
+ },
+ "destinationUris": [DESTINATION1, DESTINATION2],
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.table(SOURCE)
+
+ job = client.extract_table(source, [DESTINATION1, DESTINATION2], job_id=JOB)
+
+ # Check that extract_table actually starts the job.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], "/projects/PROJECT/jobs")
+ self.assertEqual(req["timeout"], DEFAULT_TIMEOUT)
+
+ # Check the job resource.
+ self.assertIsInstance(job, ExtractJob)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_id, JOB)
+ self.assertEqual(job.source, source)
+ self.assertEqual(list(job.destination_uris), [DESTINATION1, DESTINATION2])
+
+ def test_extract_table_for_source_type_model(self):
+ from google.cloud.bigquery.job import ExtractJob
+
+ JOB = "job_id"
+ SOURCE = "source_model"
+ DESTINATION = "gs://bucket_name/object_name"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "extract": {
+ "sourceModel": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "modelId": SOURCE,
+ },
+ "destinationUris": [DESTINATION],
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.model(SOURCE)
+
+ job = client.extract_table(
+ source, DESTINATION, job_id=JOB, timeout=7.5, source_type="Model"
+ )
+
+ # Check that extract_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=RESOURCE,
+ timeout=7.5,
+ )
+
+ # Check the job resource.
+ self.assertIsInstance(job, ExtractJob)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_id, JOB)
+ self.assertEqual(job.source, source)
+ self.assertEqual(list(job.destination_uris), [DESTINATION])
+
+ def test_extract_table_for_source_type_model_w_string_model_id(self):
+ JOB = "job_id"
+ source_id = "source_model"
+ DESTINATION = "gs://bucket_name/object_name"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "extract": {
+ "sourceModel": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "modelId": source_id,
+ },
+ "destinationUris": [DESTINATION],
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+
+ client.extract_table(
+ # Test with string for model ID.
+ "{}.{}".format(self.DS_ID, source_id),
+ DESTINATION,
+ job_id=JOB,
+ timeout=7.5,
+ source_type="Model",
+ )
+
+ # Check that extract_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=RESOURCE,
+ timeout=7.5,
+ )
+
+ def test_extract_table_for_source_type_model_w_model_object(self):
+ from google.cloud.bigquery.model import Model
+
+ JOB = "job_id"
+ DESTINATION = "gs://bucket_name/object_name"
+ model_id = "{}.{}.{}".format(self.PROJECT, self.DS_ID, self.MODEL_ID)
+ model = Model(model_id)
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "extract": {
+ "sourceModel": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "modelId": self.MODEL_ID,
+ },
+ "destinationUris": [DESTINATION],
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+
+ client.extract_table(
+ # Test with Model class object.
+ model,
+ DESTINATION,
+ job_id=JOB,
+ timeout=7.5,
+ source_type="Model",
+ )
+
+ # Check that extract_table actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=RESOURCE,
+ timeout=7.5,
+ )
+
+ def test_extract_table_for_invalid_source_type_model(self):
+ JOB = "job_id"
+ SOURCE = "source_model"
+ DESTINATION = "gs://bucket_name/object_name"
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ source = dataset.model(SOURCE)
+
+ with self.assertRaises(ValueError) as exc:
+ client.extract_table(
+ source, DESTINATION, job_id=JOB, timeout=7.5, source_type="foo"
+ )
+
+ self.assertIn("Cannot pass", exc.exception.args[0])
+
+ def test_query_defaults(self):
+ from google.cloud.bigquery.job import QueryJob
+
+ QUERY = "select count(*) from persons"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": "some-random-id"},
+ "configuration": {"query": {"query": QUERY, "useLegacySql": False}},
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+
+ job = client.query(QUERY)
+
+ self.assertIsInstance(job, QueryJob)
+ self.assertIsInstance(job.job_id, str)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.query, QUERY)
+ self.assertEqual(job.udf_resources, [])
+ self.assertEqual(job.query_parameters, [])
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], "/projects/PROJECT/jobs")
+ self.assertEqual(req["timeout"], DEFAULT_TIMEOUT)
+ sent = req["data"]
+ self.assertIsInstance(sent["jobReference"]["jobId"], str)
+ sent_config = sent["configuration"]["query"]
+ self.assertEqual(sent_config["query"], QUERY)
+ self.assertFalse(sent_config["useLegacySql"])
+
+ def test_query_w_api_method_query(self):
+ query = "select count(*) from persons"
+ response = {
+ "jobReference": {
+ "projectId": self.PROJECT,
+ "location": "EU",
+ "jobId": "abcd",
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(response)
+
+ job = client.query(query, location="EU", api_method="QUERY")
+
+ self.assertEqual(job.query, query)
+ self.assertEqual(job.job_id, "abcd")
+ self.assertEqual(job.location, "EU")
+
+ # Check that query actually starts the job.
+ expected_resource = {
+ "query": query,
+ "useLegacySql": False,
+ "location": "EU",
+ "formatOptions": {"useInt64Timestamp": True},
+ "requestId": mock.ANY,
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=f"/projects/{self.PROJECT}/queries",
+ data=expected_resource,
+ timeout=None,
+ )
+
+ def test_query_w_api_method_query_legacy_sql(self):
+ from google.cloud.bigquery import QueryJobConfig
+
+ query = "select count(*) from persons"
+ response = {
+ "jobReference": {
+ "projectId": self.PROJECT,
+ "location": "EU",
+ "jobId": "abcd",
+ },
+ }
+ job_config = QueryJobConfig()
+ job_config.use_legacy_sql = True
+ job_config.maximum_bytes_billed = 100
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(response)
+
+ job = client.query(
+ query, location="EU", job_config=job_config, api_method="QUERY"
+ )
+
+ self.assertEqual(job.query, query)
+ self.assertEqual(job.job_id, "abcd")
+ self.assertEqual(job.location, "EU")
+
+ # Check that query actually starts the job.
+ expected_resource = {
+ "query": query,
+ "useLegacySql": True,
+ "location": "EU",
+ "formatOptions": {"useInt64Timestamp": True},
+ "requestId": mock.ANY,
+ "maximumBytesBilled": "100",
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=f"/projects/{self.PROJECT}/queries",
+ data=expected_resource,
+ timeout=None,
+ )
+
+ def test_query_w_api_method_query_parameters(self):
+ from google.cloud.bigquery import QueryJobConfig, ScalarQueryParameter
+
+ query = "select count(*) from persons"
+ response = {
+ "jobReference": {
+ "projectId": self.PROJECT,
+ "location": "EU",
+ "jobId": "abcd",
+ },
+ }
+ job_config = QueryJobConfig()
+ job_config.dry_run = True
+ job_config.query_parameters = [ScalarQueryParameter("param1", "INTEGER", 123)]
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(response)
+
+ job = client.query(
+ query, location="EU", job_config=job_config, api_method="QUERY"
+ )
+
+ self.assertEqual(job.query, query)
+ self.assertEqual(job.job_id, "abcd")
+ self.assertEqual(job.location, "EU")
+
+ # Check that query actually starts the job.
+ expected_resource = {
+ "query": query,
+ "dryRun": True,
+ "useLegacySql": False,
+ "location": "EU",
+ "formatOptions": {"useInt64Timestamp": True},
+ "requestId": mock.ANY,
+ "parameterMode": "NAMED",
+ "queryParameters": [
+ {
+ "name": "param1",
+ "parameterType": {"type": "INTEGER"},
+ "parameterValue": {"value": "123"},
+ },
+ ],
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=f"/projects/{self.PROJECT}/queries",
+ data=expected_resource,
+ timeout=None,
+ )
+
+ def test_query_w_api_method_query_and_job_id_fails(self):
+ query = "select count(*) from persons"
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ client._connection = make_connection({})
+
+ with self.assertRaises(TypeError) as exc:
+ client.query(query, job_id="abcd", api_method="QUERY")
+ self.assertIn(
+ "`job_id` was provided, but the 'QUERY' `api_method` was requested",
+ exc.exception.args[0],
+ )
+
+ def test_query_w_api_method_unknown(self):
+ query = "select count(*) from persons"
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ client._connection = make_connection({})
+
+ with self.assertRaises(ValueError) as exc:
+ client.query(query, api_method="UNKNOWN")
+ self.assertIn("Got unexpected value for api_method: ", exc.exception.args[0])
+
+ def test_query_w_explicit_timeout(self):
+ query = "select count(*) from persons"
+ resource = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": mock.ANY},
+ "configuration": {"query": {"query": query, "useLegacySql": False}},
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(resource)
+
+ client.query(query, timeout=7.5)
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/{}/jobs".format(self.PROJECT),
+ data=resource,
+ timeout=7.5,
+ )
+
+ def test_query_w_explicit_project(self):
+ job_id = "some-job-id"
+ query = "select count(*) from persons"
+ resource = {
+ "jobReference": {
+ "projectId": "other-project",
+ "location": self.LOCATION,
+ "jobId": job_id,
+ },
+ "configuration": {"query": {"query": query, "useLegacySql": False}},
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(resource)
+
+ client.query(
+ query, job_id=job_id, project="other-project", location=self.LOCATION
+ )
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/other-project/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_query_w_explicit_job_config(self):
+ job_id = "some-job-id"
+ query = "select count(*) from persons"
+ resource = {
+ "jobReference": {
+ "jobId": job_id,
+ "projectId": self.PROJECT,
+ "location": self.LOCATION,
+ },
+ "configuration": {
+ "query": {
+ "query": query,
+ "defaultDataset": {
+ "projectId": self.PROJECT,
+ "datasetId": "some-dataset",
+ },
+ "useLegacySql": False,
+ "useQueryCache": True,
+ "maximumBytesBilled": "2000",
+ }
+ },
+ }
+
+ creds = _make_credentials()
+ http = object()
+
+ from google.cloud.bigquery import QueryJobConfig, DatasetReference
+
+ default_job_config = QueryJobConfig()
+ default_job_config.default_dataset = DatasetReference(
+ self.PROJECT, "some-dataset"
+ )
+ default_job_config.maximum_bytes_billed = 1000
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ default_query_job_config=default_job_config,
+ )
+ conn = client._connection = make_connection(resource)
+
+ job_config = QueryJobConfig()
+ job_config.use_query_cache = True
+ job_config.maximum_bytes_billed = 2000
+ original_config_copy = copy.deepcopy(job_config)
+
+ client.query(
+ query, job_id=job_id, location=self.LOCATION, job_config=job_config
+ )
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_query_preserving_explicit_job_config(self):
+ job_id = "some-job-id"
+ query = "select count(*) from persons"
+ resource = {
+ "jobReference": {
+ "jobId": job_id,
+ "projectId": self.PROJECT,
+ "location": self.LOCATION,
+ },
+ "configuration": {
+ "query": {
+ "query": query,
+ "useLegacySql": False,
+ "useQueryCache": True,
+ "maximumBytesBilled": "2000",
+ }
+ },
+ }
+
+ creds = _make_credentials()
+ http = object()
+
+ from google.cloud.bigquery import QueryJobConfig
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ )
+ conn = client._connection = make_connection(resource)
+
+ job_config = QueryJobConfig()
+ job_config.use_query_cache = True
+ job_config.maximum_bytes_billed = 2000
+ original_config_copy = copy.deepcopy(job_config)
+
+ client.query(
+ query, job_id=job_id, location=self.LOCATION, job_config=job_config
+ )
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_query_preserving_explicit_default_job_config(self):
+ job_id = "some-job-id"
+ query = "select count(*) from persons"
+ resource = {
+ "jobReference": {
+ "jobId": job_id,
+ "projectId": self.PROJECT,
+ "location": self.LOCATION,
+ },
+ "configuration": {
+ "query": {
+ "query": query,
+ "defaultDataset": {
+ "projectId": self.PROJECT,
+ "datasetId": "some-dataset",
+ },
+ "useLegacySql": False,
+ "maximumBytesBilled": "1000",
+ }
+ },
+ }
+
+ creds = _make_credentials()
+ http = object()
+
+ from google.cloud.bigquery import QueryJobConfig, DatasetReference
+
+ default_job_config = QueryJobConfig()
+ default_job_config.default_dataset = DatasetReference(
+ self.PROJECT, "some-dataset"
+ )
+ default_job_config.maximum_bytes_billed = 1000
+ default_config_copy = copy.deepcopy(default_job_config)
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ default_query_job_config=default_job_config,
+ )
+ conn = client._connection = make_connection(resource)
+
+ client.query(query, job_id=job_id, location=self.LOCATION, job_config=None)
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ # the original default config object should not have been modified
+ assert default_job_config.to_api_repr() == default_config_copy.to_api_repr()
+
+ def test_query_w_invalid_job_config(self):
+ from google.cloud.bigquery import QueryJobConfig, DatasetReference
+ from google.cloud.bigquery import job
+
+ job_id = "some-job-id"
+ query = "select count(*) from persons"
+ creds = _make_credentials()
+ http = object()
+ default_job_config = QueryJobConfig()
+ default_job_config.default_dataset = DatasetReference(
+ self.PROJECT, "some-dataset"
+ )
+ default_job_config.maximum_bytes_billed = 1000
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ default_query_job_config=default_job_config,
+ )
+
+ job_config = job.LoadJobConfig()
+
+ with self.assertRaises(TypeError) as exc:
+ client.query(
+ query, job_id=job_id, location=self.LOCATION, job_config=job_config
+ )
+ self.assertIn("Expected an instance of QueryJobConfig", exc.exception.args[0])
+
+ def test_query_w_explicit_job_config_override(self):
+ job_id = "some-job-id"
+ query = "select count(*) from persons"
+ resource = {
+ "jobReference": {
+ "jobId": job_id,
+ "projectId": self.PROJECT,
+ "location": self.LOCATION,
+ },
+ "configuration": {
+ "query": {
+ "query": query,
+ "defaultDataset": None,
+ "useLegacySql": False,
+ "useQueryCache": True,
+ "maximumBytesBilled": "2000",
+ }
+ },
+ }
+
+ creds = _make_credentials()
+ http = object()
+
+ from google.cloud.bigquery import QueryJobConfig, DatasetReference
+
+ default_job_config = QueryJobConfig()
+ default_job_config.default_dataset = DatasetReference(
+ self.PROJECT, "some-dataset"
+ )
+ default_job_config.maximum_bytes_billed = 1000
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ default_query_job_config=default_job_config,
+ )
+ conn = client._connection = make_connection(resource)
+
+ job_config = QueryJobConfig()
+ job_config.use_query_cache = True
+ job_config.maximum_bytes_billed = 2000
+ job_config.default_dataset = None
+
+ client.query(
+ query, job_id=job_id, location=self.LOCATION, job_config=job_config
+ )
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_query_w_client_default_config_no_incoming(self):
+ job_id = "some-job-id"
+ query = "select count(*) from persons"
+ resource = {
+ "jobReference": {
+ "jobId": job_id,
+ "projectId": self.PROJECT,
+ "location": self.LOCATION,
+ },
+ "configuration": {
+ "query": {
+ "query": query,
+ "useLegacySql": False,
+ "maximumBytesBilled": "1000",
+ }
+ },
+ }
+
+ creds = _make_credentials()
+ http = object()
+
+ from google.cloud.bigquery import QueryJobConfig
+
+ default_job_config = QueryJobConfig()
+ default_job_config.maximum_bytes_billed = 1000
+
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ default_query_job_config=default_job_config,
+ )
+ conn = client._connection = make_connection(resource)
+
+ client.query(query, job_id=job_id, location=self.LOCATION)
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_query_w_invalid_default_job_config(self):
+ creds = _make_credentials()
+ http = object()
+ default_job_config = object()
+
+ with self.assertRaises(TypeError) as exc:
+ self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ default_query_job_config=default_job_config,
+ )
+ self.assertIn("Expected an instance of QueryJobConfig", exc.exception.args[0])
+
+ def test_query_w_client_location(self):
+ job_id = "some-job-id"
+ query = "select count(*) from persons"
+ resource = {
+ "jobReference": {
+ "projectId": "other-project",
+ "location": self.LOCATION,
+ "jobId": job_id,
+ },
+ "configuration": {"query": {"query": query, "useLegacySql": False}},
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project=self.PROJECT, credentials=creds, _http=http, location=self.LOCATION
+ )
+ conn = client._connection = make_connection(resource)
+
+ client.query(query, job_id=job_id, project="other-project")
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/other-project/jobs",
+ data=resource,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_query_detect_location(self):
+ query = "select count(*) from persons"
+ resource_location = "EU"
+ resource = {
+ "jobReference": {
+ "projectId": self.PROJECT,
+ # Location not set in request, but present in the response.
+ "location": resource_location,
+ "jobId": "some-random-id",
+ },
+ "configuration": {"query": {"query": query, "useLegacySql": False}},
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(resource)
+
+ job = client.query(query)
+
+ self.assertEqual(job.location, resource_location)
+
+ # Check that request did not contain a location.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ sent = req["data"]
+ self.assertIsNone(sent["jobReference"].get("location"))
+
+ def test_query_w_udf_resources(self):
+ from google.cloud.bigquery.job import QueryJob
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.query import UDFResource
+
+ RESOURCE_URI = "gs://some-bucket/js/lib.js"
+ JOB = "job_name"
+ QUERY = "select count(*) from persons"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "query": {
+ "query": QUERY,
+ "useLegacySql": True,
+ "userDefinedFunctionResources": [{"resourceUri": RESOURCE_URI}],
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ udf_resources = [UDFResource("resourceUri", RESOURCE_URI)]
+ config = QueryJobConfig()
+ config.udf_resources = udf_resources
+ config.use_legacy_sql = True
+
+ job = client.query(QUERY, job_config=config, job_id=JOB)
+
+ self.assertIsInstance(job, QueryJob)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_id, JOB)
+ self.assertEqual(job.query, QUERY)
+ self.assertEqual(job.udf_resources, udf_resources)
+ self.assertEqual(job.query_parameters, [])
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], "/projects/PROJECT/jobs")
+ self.assertEqual(req["timeout"], DEFAULT_TIMEOUT)
+ sent = req["data"]
+ self.assertIsInstance(sent["jobReference"]["jobId"], str)
+ sent_config = sent["configuration"]["query"]
+ self.assertEqual(sent_config["query"], QUERY)
+ self.assertTrue(sent_config["useLegacySql"])
+ self.assertEqual(
+ sent_config["userDefinedFunctionResources"][0],
+ {"resourceUri": RESOURCE_URI},
+ )
+
+ def test_query_w_query_parameters(self):
+ from google.cloud.bigquery.job import QueryJob
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.query import ScalarQueryParameter
+
+ JOB = "job_name"
+ QUERY = "select count(*) from persons"
+ RESOURCE = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": JOB},
+ "configuration": {
+ "query": {
+ "query": QUERY,
+ "useLegacySql": False,
+ "queryParameters": [
+ {
+ "name": "foo",
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": "123"},
+ }
+ ],
+ }
+ },
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESOURCE)
+ query_parameters = [ScalarQueryParameter("foo", "INT64", 123)]
+ config = QueryJobConfig()
+ config.query_parameters = query_parameters
+
+ job = client.query(QUERY, job_config=config, job_id=JOB)
+
+ self.assertIsInstance(job, QueryJob)
+ self.assertIs(job._client, client)
+ self.assertEqual(job.job_id, JOB)
+ self.assertEqual(job.query, QUERY)
+ self.assertEqual(job.udf_resources, [])
+ self.assertEqual(job.query_parameters, query_parameters)
+
+ # Check that query actually starts the job.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], "/projects/PROJECT/jobs")
+ self.assertEqual(req["timeout"], DEFAULT_TIMEOUT)
+ sent = req["data"]
+ self.assertEqual(sent["jobReference"]["jobId"], JOB)
+ sent_config = sent["configuration"]["query"]
+ self.assertEqual(sent_config["query"], QUERY)
+ self.assertFalse(sent_config["useLegacySql"])
+ self.assertEqual(
+ sent_config["queryParameters"][0],
+ {
+ "name": "foo",
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": "123"},
+ },
+ )
+
+ def test_query_job_rpc_fail_w_random_error(self):
+ from google.api_core.exceptions import Unknown
+ from google.cloud.bigquery.job import QueryJob
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ job_create_error = Unknown("Not sure what went wrong.")
+ job_begin_patcher = mock.patch.object(
+ QueryJob, "_begin", side_effect=job_create_error
+ )
+ with job_begin_patcher:
+ with pytest.raises(Unknown, match="Not sure what went wrong."):
+ client.query("SELECT 1;", job_id="123")
+
+ def test_query_job_rpc_fail_w_conflict_job_id_given(self):
+ from google.api_core.exceptions import Conflict
+ from google.cloud.bigquery.job import QueryJob
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ job_create_error = Conflict("Job already exists.")
+ job_begin_patcher = mock.patch.object(
+ QueryJob, "_begin", side_effect=job_create_error
+ )
+ with job_begin_patcher:
+ with pytest.raises(Conflict, match="Job already exists."):
+ client.query("SELECT 1;", job_id="123")
+
+ def test_query_job_rpc_fail_w_conflict_random_id_job_fetch_fails(self):
+ from google.api_core.exceptions import Conflict
+ from google.api_core.exceptions import DataLoss
+ from google.cloud.bigquery.job import QueryJob
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ job_create_error = Conflict("Job already exists.")
+ job_begin_patcher = mock.patch.object(
+ QueryJob, "_begin", side_effect=job_create_error
+ )
+ get_job_patcher = mock.patch.object(
+ client, "get_job", side_effect=DataLoss("we lost your job, sorry")
+ )
+
+ with job_begin_patcher, get_job_patcher:
+ # If get job request fails but supposedly there does exist a job
+ # with this ID already, raise the exception explaining why we
+ # couldn't recover the job.
+ with pytest.raises(DataLoss, match="we lost your job, sorry"):
+ client.query("SELECT 1;", job_id=None)
+
+ def test_query_job_rpc_fail_w_conflict_random_id_job_fetch_succeeds(self):
+ from google.api_core.exceptions import Conflict
+ from google.cloud.bigquery.job import QueryJob
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ job_create_error = Conflict("Job already exists.")
+ job_begin_patcher = mock.patch.object(
+ QueryJob, "_begin", side_effect=job_create_error
+ )
+ get_job_patcher = mock.patch.object(
+ client, "get_job", return_value=mock.sentinel.query_job
+ )
+
+ with job_begin_patcher, get_job_patcher:
+ result = client.query("SELECT 1;", job_id=None)
+
+ assert result is mock.sentinel.query_job
+
+ def test_query_and_wait_defaults(self):
+ query = "select count(*) from `bigquery-public-data.usa_names.usa_1910_2013`"
+ jobs_query_response = {
+ "jobComplete": True,
+ "schema": {
+ "fields": [
+ {
+ "name": "f0_",
+ "type": "INTEGER",
+ "mode": "NULLABLE",
+ },
+ ],
+ },
+ "totalRows": "1",
+ "rows": [{"f": [{"v": "5552452"}]}],
+ "queryId": "job_abcDEF_",
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(jobs_query_response)
+
+ rows = client.query_and_wait(query)
+
+ self.assertIsInstance(rows, google.cloud.bigquery.table.RowIterator)
+ self.assertEqual(rows.query_id, "job_abcDEF_")
+ self.assertEqual(rows.total_rows, 1)
+ # No job reference in the response should be OK for completed query.
+ self.assertIsNone(rows.job_id)
+ self.assertIsNone(rows.project)
+ self.assertIsNone(rows.location)
+
+ # Verify the request we send is to jobs.query.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], "/projects/PROJECT/queries")
+ self.assertEqual(req["timeout"], DEFAULT_TIMEOUT)
+ sent = req["data"]
+ self.assertEqual(sent["query"], query)
+ self.assertFalse(sent["useLegacySql"])
+
+ def test_query_and_wait_w_default_query_job_config(self):
+ from google.cloud.bigquery import job
+
+ query = "select count(*) from `bigquery-public-data.usa_names.usa_1910_2013`"
+ jobs_query_response = {
+ "jobComplete": True,
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ default_query_job_config=job.QueryJobConfig(
+ labels={
+ "default-label": "default-value",
+ },
+ ),
+ )
+ conn = client._connection = make_connection(jobs_query_response)
+
+ _ = client.query_and_wait(query)
+
+ # Verify the request we send is to jobs.query.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], f"/projects/{self.PROJECT}/queries")
+ sent = req["data"]
+ self.assertEqual(sent["labels"], {"default-label": "default-value"})
+
+ def test_query_and_wait_w_job_config(self):
+ from google.cloud.bigquery import job
+
+ query = "select count(*) from `bigquery-public-data.usa_names.usa_1910_2013`"
+ jobs_query_response = {
+ "jobComplete": True,
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project=self.PROJECT,
+ credentials=creds,
+ _http=http,
+ )
+ conn = client._connection = make_connection(jobs_query_response)
+
+ _ = client.query_and_wait(
+ query,
+ job_config=job.QueryJobConfig(
+ labels={
+ "job_config-label": "job_config-value",
+ },
+ ),
+ )
+
+ # Verify the request we send is to jobs.query.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], f"/projects/{self.PROJECT}/queries")
+ sent = req["data"]
+ self.assertEqual(sent["labels"], {"job_config-label": "job_config-value"})
+
+ def test_query_and_wait_w_location(self):
+ query = "select count(*) from `bigquery-public-data.usa_names.usa_1910_2013`"
+ jobs_query_response = {
+ "jobComplete": True,
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(jobs_query_response)
+
+ _ = client.query_and_wait(query, location="not-the-client-location")
+
+ # Verify the request we send is to jobs.query.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], f"/projects/{self.PROJECT}/queries")
+ sent = req["data"]
+ self.assertEqual(sent["location"], "not-the-client-location")
+
+ def test_query_and_wait_w_max_results(self):
+ query = "select count(*) from `bigquery-public-data.usa_names.usa_1910_2013`"
+ jobs_query_response = {
+ "jobComplete": True,
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(jobs_query_response)
+
+ _ = client.query_and_wait(query, max_results=11)
+
+ # Verify the request we send is to jobs.query.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], f"/projects/{self.PROJECT}/queries")
+ sent = req["data"]
+ self.assertTrue(sent["formatOptions"]["useInt64Timestamp"])
+ self.assertTrue(sent["maxResults"], 11)
+
+ def test_query_and_wait_w_page_size(self):
+ query = "select count(*) from `bigquery-public-data.usa_names.usa_1910_2013`"
+ jobs_query_response = {
+ "jobComplete": True,
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(jobs_query_response)
+
+ _ = client.query_and_wait(query, page_size=11)
+
+ # Verify the request we send is to jobs.query.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], f"/projects/{self.PROJECT}/queries")
+ sent = req["data"]
+ self.assertTrue(sent["formatOptions"]["useInt64Timestamp"])
+ self.assertTrue(sent["maxResults"], 11)
+
+ def test_query_and_wait_w_page_size_multiple_requests(self):
+ """
+ For queries that last longer than the intial (about 10s) call to
+ jobs.query, we should still pass through the page size to the
+ subsequent calls to jobs.getQueryResults.
+
+ See internal issue 344008814.
+ """
+ query = "select count(*) from `bigquery-public-data.usa_names.usa_1910_2013`"
+ job_reference = {
+ "projectId": "my-jobs-project",
+ "location": "my-jobs-location",
+ "jobId": "my-jobs-id",
+ }
+ jobs_query_response = {
+ "jobComplete": False,
+ "jobReference": job_reference,
+ }
+ jobs_get_response = {
+ "jobReference": job_reference,
+ "status": {"state": "DONE"},
+ }
+ get_query_results_response = {
+ "jobComplete": True,
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(
+ jobs_query_response,
+ jobs_get_response,
+ get_query_results_response,
+ )
+
+ _ = client.query_and_wait(query, page_size=11)
+
+ conn.api_request.assert_has_calls(
+ [
+ # Verify the request we send is to jobs.query.
+ mock.call(
+ method="POST",
+ path=f"/projects/{self.PROJECT}/queries",
+ data={
+ "useLegacySql": False,
+ "query": query,
+ "formatOptions": {"useInt64Timestamp": True},
+ "maxResults": 11,
+ "requestId": mock.ANY,
+ },
+ timeout=None,
+ ),
+ # jobs.get: Check if the job has finished.
+ mock.call(
+ method="GET",
+ path="/projects/my-jobs-project/jobs/my-jobs-id",
+ query_params={
+ "projection": "full",
+ "location": "my-jobs-location",
+ },
+ timeout=google.cloud.bigquery.retry.DEFAULT_GET_JOB_TIMEOUT,
+ ),
+ # jobs.getQueryResults: wait for the query / fetch first page
+ mock.call(
+ method="GET",
+ path="/projects/my-jobs-project/queries/my-jobs-id",
+ query_params={
+ # We should still pass through the page size to the
+ # subsequent calls to jobs.getQueryResults.
+ #
+ # See internal issue 344008814.
+ "maxResults": 11,
+ "formatOptions.useInt64Timestamp": True,
+ "location": "my-jobs-location",
+ },
+ timeout=None,
+ ),
+ ]
+ )
+
+ def test_query_and_wait_w_project(self):
+ query = "select count(*) from `bigquery-public-data.usa_names.usa_1910_2013`"
+ jobs_query_response = {
+ "jobComplete": True,
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(jobs_query_response)
+
+ _ = client.query_and_wait(query, project="not-the-client-project")
+
+ # Verify the request we send is to jobs.query.
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], "/projects/not-the-client-project/queries")
+
+ def test_insert_rows_w_timeout(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({})
+ table = Table(self.TABLE_REF)
+
+ ROWS = [
+ ("Phred Phlyntstone", 32),
+ ("Bharney Rhubble", 33),
+ ]
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+
+ client.insert_rows(table, ROWS, selected_fields=schema, timeout=7.5)
+
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req.get("timeout"), 7.5)
+
+ def test_insert_rows_wo_schema(self):
+ from google.cloud.bigquery.table import Table
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ table = Table(self.TABLE_REF)
+ ROWS = [
+ ("Phred Phlyntstone", 32),
+ ("Bharney Rhubble", 33),
+ ("Wylma Phlyntstone", 29),
+ ("Bhettye Rhubble", 27),
+ ]
+
+ with self.assertRaises(ValueError) as exc:
+ client.insert_rows(table, ROWS)
+
+ self.assertIn("Could not determine schema for table", exc.exception.args[0])
+
+ def test_insert_rows_w_schema(self):
+ import datetime
+ from google.cloud._helpers import UTC
+ from google.cloud._helpers import _datetime_to_rfc3339
+ from google.cloud._helpers import _RFC3339_MICROS
+ from google.cloud.bigquery.schema import SchemaField
+
+ WHEN_TS = 1437767599.006
+ WHEN = datetime.datetime.utcfromtimestamp(WHEN_TS).replace(tzinfo=UTC)
+ PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({})
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField("joined", "TIMESTAMP", mode="NULLABLE"),
+ ]
+ ROWS = [
+ ("Phred Phlyntstone", 32, _datetime_to_rfc3339(WHEN)),
+ ("Bharney Rhubble", 33, WHEN + datetime.timedelta(seconds=1)),
+ ("Wylma Phlyntstone", 29, WHEN + datetime.timedelta(seconds=2)),
+ ("Bhettye Rhubble", 27, None),
+ ]
+
+ def _row_data(row):
+ result = {"full_name": row[0], "age": str(row[1])}
+ joined = row[2]
+ if isinstance(joined, datetime.datetime):
+ joined = joined.strftime(_RFC3339_MICROS)
+ if joined is not None:
+ result["joined"] = joined
+ return result
+
+ SENT = {
+ "rows": [
+ {"json": _row_data(row), "insertId": str(i)}
+ for i, row in enumerate(ROWS)
+ ]
+ }
+
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
+ # Test with using string IDs for the table.
+ errors = client.insert_rows(
+ "{}.{}".format(self.DS_ID, self.TABLE_ID), ROWS, selected_fields=schema
+ )
+
+ self.assertEqual(len(errors), 0)
+ conn.api_request.assert_called_once()
+ _, req = conn.api_request.call_args
+ self.assertEqual(req["method"], "POST")
+ self.assertEqual(req["path"], "/%s" % PATH)
+ self.assertEqual(req["data"], SENT)
+ self.assertEqual(req["timeout"], DEFAULT_TIMEOUT)
+
+ def test_insert_rows_w_list_of_dictionaries(self):
+ import datetime
+ from google.cloud._helpers import UTC
+ from google.cloud._helpers import _datetime_to_rfc3339
+ from google.cloud._helpers import _RFC3339_MICROS
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ WHEN_TS = 1437767599.006
+ WHEN = datetime.datetime.utcfromtimestamp(WHEN_TS).replace(tzinfo=UTC)
+ PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({})
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField("joined", "TIMESTAMP", mode="NULLABLE"),
+ ]
+ table = Table(self.TABLE_REF, schema=schema)
+ ROWS = [
+ {
+ "full_name": "Phred Phlyntstone",
+ "age": 32,
+ "joined": _datetime_to_rfc3339(WHEN),
+ },
+ {
+ "full_name": "Bharney Rhubble",
+ "age": 33,
+ "joined": WHEN + datetime.timedelta(seconds=1),
+ },
+ {
+ "full_name": "Wylma Phlyntstone",
+ "age": 29,
+ "joined": WHEN + datetime.timedelta(seconds=2),
+ },
+ {"full_name": "Bhettye Rhubble", "age": 27, "joined": None},
+ ]
+
+ def _row_data(row):
+ joined = row["joined"]
+ if joined is None:
+ row = copy.deepcopy(row)
+ del row["joined"]
+ elif isinstance(joined, datetime.datetime):
+ row["joined"] = joined.strftime(_RFC3339_MICROS)
+ row["age"] = str(row["age"])
+ return row
+
+ SENT = {
+ "rows": [
+ {"json": _row_data(row), "insertId": str(i)}
+ for i, row in enumerate(ROWS)
+ ]
+ }
+
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
+ errors = client.insert_rows(table, ROWS)
+
+ self.assertEqual(len(errors), 0)
+ conn.api_request.assert_called_once_with(
+ method="POST", path="/%s" % PATH, data=SENT, timeout=DEFAULT_TIMEOUT
+ )
+
+ def test_insert_rows_w_list_of_Rows(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+ from google.cloud.bigquery.table import Row
+
+ PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({})
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ table = Table(self.TABLE_REF, schema=schema)
+ f2i = {"full_name": 0, "age": 1}
+ ROWS = [
+ Row(("Phred Phlyntstone", 32), f2i),
+ Row(("Bharney Rhubble", 33), f2i),
+ Row(("Wylma Phlyntstone", 29), f2i),
+ Row(("Bhettye Rhubble", 27), f2i),
+ ]
+
+ def _row_data(row):
+ return {"full_name": row[0], "age": str(row[1])}
+
+ SENT = {
+ "rows": [
+ {"json": _row_data(row), "insertId": str(i)}
+ for i, row in enumerate(ROWS)
+ ]
+ }
+
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
+ errors = client.insert_rows(table, ROWS)
+
+ self.assertEqual(len(errors), 0)
+ conn.api_request.assert_called_once_with(
+ method="POST", path="/%s" % PATH, data=SENT, timeout=DEFAULT_TIMEOUT
+ )
+
+ def test_insert_rows_w_skip_invalid_and_ignore_unknown(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ RESPONSE = {
+ "insertErrors": [
+ {
+ "index": 1,
+ "errors": [
+ {
+ "reason": "REASON",
+ "location": "LOCATION",
+ "debugInfo": "INFO",
+ "message": "MESSAGE",
+ }
+ ],
+ }
+ ]
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(RESPONSE)
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField("voter", "BOOLEAN", mode="NULLABLE"),
+ ]
+ table = Table(self.TABLE_REF, schema=schema)
+ ROWS = [
+ ("Phred Phlyntstone", 32, True),
+ ("Bharney Rhubble", 33, False),
+ ("Wylma Phlyntstone", 29, True),
+ ("Bhettye Rhubble", 27, True),
+ ]
+
+ def _row_data(row):
+ return {
+ "full_name": row[0],
+ "age": str(row[1]),
+ "voter": row[2] and "true" or "false",
+ }
+
+ SENT = {
+ "skipInvalidRows": True,
+ "ignoreUnknownValues": True,
+ "templateSuffix": "20160303",
+ "rows": [
+ {"insertId": index, "json": _row_data(row)}
+ for index, row in enumerate(ROWS)
+ ],
+ }
+
+ errors = client.insert_rows(
+ table,
+ ROWS,
+ row_ids=[index for index, _ in enumerate(ROWS)],
+ skip_invalid_rows=True,
+ ignore_unknown_values=True,
+ template_suffix="20160303",
+ )
+
+ self.assertEqual(len(errors), 1)
+ self.assertEqual(errors[0]["index"], 1)
+ self.assertEqual(len(errors[0]["errors"]), 1)
+ self.assertEqual(
+ errors[0]["errors"][0], RESPONSE["insertErrors"][0]["errors"][0]
+ )
+ conn.api_request.assert_called_once_with(
+ method="POST", path="/%s" % PATH, data=SENT, timeout=DEFAULT_TIMEOUT
+ )
+
+ def test_insert_rows_w_repeated_fields(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({})
+ color = SchemaField("color", "STRING", mode="REPEATED")
+ items = SchemaField("items", "INTEGER", mode="REPEATED")
+ score = SchemaField("score", "INTEGER")
+ times = SchemaField("times", "TIMESTAMP", mode="REPEATED")
+ distances = SchemaField("distances", "FLOAT", mode="REPEATED")
+ structs = SchemaField(
+ "structs", "RECORD", mode="REPEATED", fields=[score, times, distances]
+ )
+ table = Table(self.TABLE_REF, schema=[color, items, structs])
+ ROWS = [
+ (
+ ["red", "green"],
+ [1, 2],
+ [
+ (
+ 12,
+ [
+ datetime.datetime(
+ 2018, 12, 1, 12, 0, 0, tzinfo=datetime.timezone.utc
+ ),
+ datetime.datetime(
+ 2018, 12, 1, 13, 0, 0, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ [1.25, 2.5],
+ ),
+ {
+ "score": 13,
+ "times": [
+ datetime.datetime(
+ 2018, 12, 2, 12, 0, 0, tzinfo=datetime.timezone.utc
+ ),
+ datetime.datetime(
+ 2018, 12, 2, 13, 0, 0, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ "distances": [-1.25, -2.5],
+ },
+ ],
+ ),
+ {"color": None, "items": [], "structs": [(None, [], [3.5])]},
+ ]
+
+ SENT = {
+ "rows": [
+ {
+ "json": {
+ "color": ["red", "green"],
+ "items": ["1", "2"],
+ "structs": [
+ {
+ "score": "12",
+ "times": [
+ "2018-12-01T12:00:00.000000Z",
+ "2018-12-01T13:00:00.000000Z",
+ ],
+ "distances": [1.25, 2.5],
+ },
+ {
+ "score": "13",
+ "times": [
+ "2018-12-02T12:00:00.000000Z",
+ "2018-12-02T13:00:00.000000Z",
+ ],
+ "distances": [-1.25, -2.5],
+ },
+ ],
+ },
+ "insertId": "0",
+ },
+ {
+ "json": {
+ "items": [],
+ "structs": [{"times": [], "distances": [3.5]}],
+ },
+ "insertId": "1",
+ },
+ ]
+ }
+
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
+ errors = client.insert_rows(table, ROWS)
+
+ self.assertEqual(len(errors), 0)
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % PATH,
+ data=SENT,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_insert_rows_w_record_schema(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({})
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ area_code = SchemaField("area_code", "STRING", "REQUIRED")
+ local_number = SchemaField("local_number", "STRING", "REQUIRED")
+ rank = SchemaField("rank", "INTEGER", "REQUIRED")
+ phone = SchemaField(
+ "phone", "RECORD", mode="NULLABLE", fields=[area_code, local_number, rank]
+ )
+ ROWS = [
+ (
+ "Phred Phlyntstone",
+ {"area_code": "800", "local_number": "555-1212", "rank": 1},
+ ),
+ ("Bharney Rhubble", ("877", "768-5309", 2)),
+ ("Wylma Phlyntstone", None),
+ ]
+
+ SENT = {
+ "rows": [
+ {
+ "json": {
+ "full_name": "Phred Phlyntstone",
+ "phone": {
+ "area_code": "800",
+ "local_number": "555-1212",
+ "rank": "1",
+ },
+ },
+ "insertId": "0",
+ },
+ {
+ "json": {
+ "full_name": "Bharney Rhubble",
+ "phone": {
+ "area_code": "877",
+ "local_number": "768-5309",
+ "rank": "2",
+ },
+ },
+ "insertId": "1",
+ },
+ {"json": {"full_name": "Wylma Phlyntstone"}, "insertId": "2"},
+ ]
+ }
+
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
+ errors = client.insert_rows(
+ self.TABLE_REF, ROWS, selected_fields=[full_name, phone]
+ )
+
+ self.assertEqual(len(errors), 0)
+ conn.api_request.assert_called_once_with(
+ method="POST", path="/%s" % PATH, data=SENT, timeout=DEFAULT_TIMEOUT
+ )
+
+ def test_insert_rows_w_explicit_none_insert_ids(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ PATH = "projects/{}/datasets/{}/tables/{}/insertAll".format(
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({})
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ table = Table(self.TABLE_REF, schema=schema)
+ ROWS = [
+ {"full_name": "Phred Phlyntstone", "age": 32},
+ {"full_name": "Bharney Rhubble", "age": 33},
+ ]
+
+ def _row_data(row):
+ row["age"] = str(row["age"])
+ return row
+
+ SENT = {"rows": [{"json": _row_data(row), "insertId": None} for row in ROWS]}
+
+ errors = client.insert_rows(table, ROWS, row_ids=[None] * len(ROWS))
+
+ self.assertEqual(len(errors), 0)
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/{}".format(PATH),
+ data=SENT,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_insert_rows_errors(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ ROWS = [
+ ("Phred Phlyntstone", 32, True),
+ ("Bharney Rhubble", 33, False),
+ ("Wylma Phlyntstone", 29, True),
+ ("Bhettye Rhubble", 27, True),
+ ]
+ creds = _make_credentials()
+ http = object()
+
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ # table ref with no selected fields
+ with self.assertRaises(ValueError):
+ client.insert_rows(self.TABLE_REF, ROWS)
+
+ # table with no schema
+ with self.assertRaises(ValueError):
+ client.insert_rows(Table(self.TABLE_REF), ROWS)
+
+ # neither Table nor TableReference
+ with self.assertRaises(TypeError):
+ client.insert_rows(1, ROWS)
+
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ ]
+ table = Table(self.TABLE_REF, schema=schema)
+
+ # rows is just a dict
+ with self.assertRaises(TypeError):
+ client.insert_rows(table, {"full_name": "value"})
+
+ def test_insert_rows_w_numeric(self):
+ from google.cloud.bigquery import table
+ from google.cloud.bigquery.schema import SchemaField
+
+ project = "PROJECT"
+ ds_id = "DS_ID"
+ table_id = "TABLE_ID"
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=project, credentials=creds, _http=http)
+ table_ref = DatasetReference(project, ds_id).table(table_id)
+ rows = [
+ ("Savings", decimal.Decimal("23.47")),
+ ("Checking", decimal.Decimal("1.98")),
+ ("Mortgage", decimal.Decimal("-12345678909.87654321")),
+ ]
+ schemas = [
+ [SchemaField("account", "STRING"), SchemaField("balance", "NUMERIC")],
+ [SchemaField("account", "STRING"), SchemaField("balance", "BIGNUMERIC")],
+ ]
+
+ for schema in schemas:
+ conn = client._connection = make_connection({})
+
+ insert_table = table.Table(table_ref, schema=schema)
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(rows)))):
+ errors = client.insert_rows(insert_table, rows)
+
+ self.assertEqual(len(errors), 0)
+ rows_json = [
+ {"account": "Savings", "balance": "23.47"},
+ {"account": "Checking", "balance": "1.98"},
+ {"account": "Mortgage", "balance": "-12345678909.87654321"},
+ ]
+ sent = {
+ "rows": [
+ {"json": row, "insertId": str(i)} for i, row in enumerate(rows_json)
+ ]
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/{}/datasets/{}/tables/{}/insertAll".format(
+ project, ds_id, table_id
+ ),
+ data=sent,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_insert_rows_from_dataframe(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ API_PATH = "/projects/{}/datasets/{}/tables/{}/insertAll".format(
+ self.PROJECT, self.DS_ID, self.TABLE_REF.table_id
+ )
+
+ data = [
+ {
+ "name": "Little One",
+ "age": 10,
+ "adult": False,
+ "bdate": datetime.date(2011, 1, 2),
+ "btime": datetime.time(19, 1, 10),
+ },
+ {
+ "name": "Young Gun",
+ "age": 20,
+ "adult": True,
+ "bdate": datetime.date(2001, 1, 2),
+ "btime": datetime.time(19, 1, 20),
+ },
+ {
+ "name": "Dad",
+ "age": 30,
+ "adult": True,
+ "bdate": datetime.date(1991, 1, 2),
+ "btime": datetime.time(19, 1, 30),
+ },
+ {
+ "name": "Stranger",
+ "age": 40,
+ "adult": True,
+ "bdate": datetime.date(1981, 1, 2),
+ "btime": datetime.time(19, 1, 40),
+ },
+ ]
+ dataframe = pandas.DataFrame(data)
+ dataframe["bdate"] = dataframe["bdate"].astype("dbdate")
+ dataframe["btime"] = dataframe["btime"].astype("dbtime")
+
+ # create client
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({}, {})
+
+ # create table
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField("adult", "BOOLEAN", mode="REQUIRED"),
+ SchemaField("bdata", "DATE", mode="REQUIRED"),
+ SchemaField("btime", "TIME", mode="REQUIRED"),
+ ]
+ table = Table(self.TABLE_REF, schema=schema)
+
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(dataframe)))):
+ error_info = client.insert_rows_from_dataframe(
+ table, dataframe, chunk_size=3, timeout=7.5
+ )
+
+ self.assertEqual(len(error_info), 2)
+ for chunk_errors in error_info:
+ assert chunk_errors == []
+
+ for row in data:
+ row["age"] = str(row["age"])
+ row["adult"] = str(row["adult"]).lower()
+ row["bdate"] = row["bdate"].isoformat()
+ row["btime"] = row["btime"].isoformat()
+
+ rows = [dict(insertId=str(i), json=row) for i, row in enumerate(data)]
+ EXPECTED_SENT_DATA = [dict(rows=rows[:3]), dict(rows=rows[3:])]
+
+ actual_calls = conn.api_request.call_args_list
+
+ for call, expected_data in itertools.zip_longest(
+ actual_calls, EXPECTED_SENT_DATA
+ ):
+ expected_call = mock.call(
+ method="POST", path=API_PATH, data=expected_data, timeout=7.5
+ )
+ assert call == expected_call
+
+ def test_insert_rows_from_dataframe_nan(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ API_PATH = "/projects/{}/datasets/{}/tables/{}/insertAll".format(
+ self.PROJECT, self.DS_ID, self.TABLE_REF.table_id
+ )
+
+ dataframe = pandas.DataFrame(
+ {
+ "str_col": ["abc", "def", float("NaN"), "jkl"],
+ "int_col": [1, float("NaN"), 3, 4],
+ "float_col": [float("NaN"), 0.25, 0.5, 0.125],
+ }
+ )
+
+ # create client
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({}, {})
+
+ # create table
+ schema = [
+ SchemaField("str_col", "STRING"),
+ SchemaField("int_col", "INTEGER"),
+ SchemaField("float_col", "FLOAT"),
+ ]
+ table = Table(self.TABLE_REF, schema=schema)
+
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(dataframe)))):
+ error_info = client.insert_rows_from_dataframe(
+ table, dataframe, chunk_size=3, timeout=7.5
+ )
+
+ self.assertEqual(len(error_info), 2)
+ for chunk_errors in error_info:
+ assert chunk_errors == []
+
+ EXPECTED_SENT_DATA = [
+ {
+ "rows": [
+ {"insertId": "0", "json": {"str_col": "abc", "int_col": 1}},
+ {"insertId": "1", "json": {"str_col": "def", "float_col": 0.25}},
+ {"insertId": "2", "json": {"int_col": 3, "float_col": 0.5}},
+ ]
+ },
+ {
+ "rows": [
+ {
+ "insertId": "3",
+ "json": {"str_col": "jkl", "int_col": 4, "float_col": 0.125},
+ }
+ ]
+ },
+ ]
+
+ actual_calls = conn.api_request.call_args_list
+
+ for call, expected_data in itertools.zip_longest(
+ actual_calls, EXPECTED_SENT_DATA
+ ):
+ expected_call = mock.call(
+ method="POST", path=API_PATH, data=expected_data, timeout=7.5
+ )
+ assert call == expected_call
+
+ def test_insert_rows_from_dataframe_many_columns(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ API_PATH = "/projects/{}/datasets/{}/tables/{}/insertAll".format(
+ self.PROJECT, self.DS_ID, self.TABLE_REF.table_id
+ )
+ N_COLUMNS = 256 # should be >= 256
+
+ dataframe = pandas.DataFrame(
+ [{"foo_{}".format(i): "bar_{}".format(i) for i in range(N_COLUMNS)}]
+ )
+
+ # create client
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({}, {})
+
+ # create table
+ schema = [SchemaField("foo_{}".format(i), "STRING") for i in range(N_COLUMNS)]
+ table = Table(self.TABLE_REF, schema=schema)
+
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(dataframe)))):
+ error_info = client.insert_rows_from_dataframe(
+ table, dataframe, chunk_size=3
+ )
+
+ assert len(error_info) == 1
+ assert error_info[0] == []
+
+ EXPECTED_SENT_DATA = {
+ "rows": [
+ {
+ "insertId": "0",
+ "json": {
+ "foo_{}".format(i): "bar_{}".format(i) for i in range(N_COLUMNS)
+ },
+ }
+ ]
+ }
+ expected_call = mock.call(
+ method="POST",
+ path=API_PATH,
+ data=EXPECTED_SENT_DATA,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ actual_calls = conn.api_request.call_args_list
+ assert len(actual_calls) == 1
+ assert actual_calls[0] == expected_call
+
+ def test_insert_rows_from_dataframe_w_explicit_none_insert_ids(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ API_PATH = "/projects/{}/datasets/{}/tables/{}/insertAll".format(
+ self.PROJECT, self.DS_ID, self.TABLE_REF.table_id
+ )
+
+ dataframe = pandas.DataFrame(
+ [
+ {"name": "Little One", "adult": False},
+ {"name": "Young Gun", "adult": True},
+ ]
+ )
+
+ # create client
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({}, {})
+
+ # create table
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("adult", "BOOLEAN", mode="REQUIRED"),
+ ]
+ table = Table(self.TABLE_REF, schema=schema)
+
+ error_info = client.insert_rows_from_dataframe(
+ table, dataframe, row_ids=[None] * len(dataframe)
+ )
+
+ self.assertEqual(len(error_info), 1)
+ assert error_info[0] == [] # no chunk errors
+
+ EXPECTED_SENT_DATA = {
+ "rows": [
+ {"insertId": None, "json": {"name": "Little One", "adult": "false"}},
+ {"insertId": None, "json": {"name": "Young Gun", "adult": "true"}},
+ ]
+ }
+
+ actual_calls = conn.api_request.call_args_list
+ assert len(actual_calls) == 1
+ assert actual_calls[0] == mock.call(
+ method="POST",
+ path=API_PATH,
+ data=EXPECTED_SENT_DATA,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_insert_rows_json_default_behavior(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ PROJECT = "PROJECT"
+ DS_ID = "DS_ID"
+ TABLE_ID = "TABLE_ID"
+ PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
+ PROJECT,
+ DS_ID,
+ TABLE_ID,
+ )
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection({})
+ table_ref = DatasetReference(PROJECT, DS_ID).table(TABLE_ID)
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField("joined", "TIMESTAMP", mode="NULLABLE"),
+ ]
+ table = Table(table_ref, schema=schema)
+ ROWS = [
+ {
+ "full_name": "Phred Phlyntstone",
+ "age": "32",
+ "joined": "2015-07-24T19:53:19.006000Z",
+ },
+ {"full_name": "Bharney Rhubble", "age": "33", "joined": 1437767600.006},
+ {"full_name": "Wylma Phlyntstone", "age": "29", "joined": 1437767601.006},
+ {"full_name": "Bhettye Rhubble", "age": "27", "joined": None},
+ ]
+
+ SENT = {
+ "rows": [{"json": row, "insertId": str(i)} for i, row in enumerate(ROWS)]
+ }
+
+ with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
+ errors = client.insert_rows_json(table, ROWS, timeout=7.5)
+
+ self.assertEqual(len(errors), 0)
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % PATH,
+ data=SENT,
+ timeout=7.5,
+ )
+
+ def test_insert_rows_json_w_explicitly_requested_autogenerated_insert_ids(self):
+ from google.cloud.bigquery import AutoRowIDs
+
+ rows = [{"col1": "val1"}, {"col2": "val2"}]
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project="default-project", credentials=creds, _http=http
+ )
+ conn = client._connection = make_connection({})
+
+ uuid_patcher = mock.patch("uuid.uuid4", side_effect=map(str, range(len(rows))))
+ with uuid_patcher:
+ errors = client.insert_rows_json(
+ "proj.dset.tbl", rows, row_ids=AutoRowIDs.GENERATE_UUID
+ )
+
+ self.assertEqual(len(errors), 0)
+
+ # Check row data sent to the backend.
+ expected_row_data = {
+ "rows": [
+ {"json": {"col1": "val1"}, "insertId": "0"},
+ {"json": {"col2": "val2"}, "insertId": "1"},
+ ]
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/proj/datasets/dset/tables/tbl/insertAll",
+ data=expected_row_data,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_insert_rows_json_w_explicitly_disabled_insert_ids(self):
+ from google.cloud.bigquery import AutoRowIDs
+
+ rows = [{"col1": "val1"}, {"col2": "val2"}]
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project="default-project", credentials=creds, _http=http
+ )
+ conn = client._connection = make_connection({})
+
+ errors = client.insert_rows_json(
+ "proj.dset.tbl",
+ rows,
+ row_ids=AutoRowIDs.DISABLED,
+ )
+
+ self.assertEqual(len(errors), 0)
+
+ expected_row_data = {
+ "rows": [
+ {"json": {"col1": "val1"}, "insertId": None},
+ {"json": {"col2": "val2"}, "insertId": None},
+ ]
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/proj/datasets/dset/tables/tbl/insertAll",
+ data=expected_row_data,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_insert_rows_json_with_iterator_row_ids(self):
+ rows = [{"col1": "val1"}, {"col2": "val2"}, {"col3": "val3"}]
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project="default-project", credentials=creds, _http=http
+ )
+ conn = client._connection = make_connection({})
+
+ row_ids_iter = map(str, itertools.count(42))
+ errors = client.insert_rows_json("proj.dset.tbl", rows, row_ids=row_ids_iter)
+
+ self.assertEqual(len(errors), 0)
+ expected_row_data = {
+ "rows": [
+ {"json": {"col1": "val1"}, "insertId": "42"},
+ {"json": {"col2": "val2"}, "insertId": "43"},
+ {"json": {"col3": "val3"}, "insertId": "44"},
+ ]
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/proj/datasets/dset/tables/tbl/insertAll",
+ data=expected_row_data,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_insert_rows_json_with_non_iterable_row_ids(self):
+ rows = [{"col1": "val1"}]
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project="default-project", credentials=creds, _http=http
+ )
+ client._connection = make_connection({})
+
+ with self.assertRaises(TypeError) as exc:
+ client.insert_rows_json("proj.dset.tbl", rows, row_ids=object())
+
+ err_msg = str(exc.exception)
+ self.assertIn("row_ids", err_msg)
+ self.assertIn("iterable", err_msg)
+
+ def test_insert_rows_json_with_too_few_row_ids(self):
+ rows = [{"col1": "val1"}, {"col2": "val2"}, {"col3": "val3"}]
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project="default-project", credentials=creds, _http=http
+ )
+ client._connection = make_connection({})
+
+ insert_ids = ["10", "20"]
+
+ error_msg_pattern = "row_ids did not generate enough IDs.*index 2"
+ with self.assertRaisesRegex(ValueError, error_msg_pattern):
+ client.insert_rows_json("proj.dset.tbl", rows, row_ids=insert_ids)
+
+ def test_insert_rows_json_w_explicit_none_insert_ids(self):
+ rows = [{"col1": "val1"}, {"col2": "val2"}]
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project="default-project", credentials=creds, _http=http
+ )
+ conn = client._connection = make_connection({})
+
+ errors = client.insert_rows_json(
+ "proj.dset.tbl",
+ rows,
+ row_ids=[None] * len(rows),
+ )
+
+ self.assertEqual(len(errors), 0)
+ expected = {"rows": [{"json": row, "insertId": None} for row in rows]}
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/proj/datasets/dset/tables/tbl/insertAll",
+ data=expected,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_insert_rows_json_w_none_insert_ids_sequence(self):
+ rows = [{"col1": "val1"}, {"col2": "val2"}]
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(
+ project="default-project", credentials=creds, _http=http
+ )
+ conn = client._connection = make_connection({})
+
+ uuid_patcher = mock.patch("uuid.uuid4", side_effect=map(str, range(len(rows))))
+ with warnings.catch_warnings(record=True) as warned, uuid_patcher:
+ errors = client.insert_rows_json("proj.dset.tbl", rows, row_ids=None)
+
+ self.assertEqual(len(errors), 0)
+
+ # Passing row_ids=None should have resulted in a deprecation warning.
+ matches = [
+ warning
+ for warning in warned
+ if issubclass(warning.category, DeprecationWarning)
+ and "row_ids" in str(warning)
+ and "AutoRowIDs.GENERATE_UUID" in str(warning)
+ ]
+ assert matches, "The expected deprecation warning was not raised."
+
+ # Check row data sent to the backend.
+ expected_row_data = {
+ "rows": [
+ {"json": {"col1": "val1"}, "insertId": "0"},
+ {"json": {"col2": "val2"}, "insertId": "1"},
+ ]
+ }
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/projects/proj/datasets/dset/tables/tbl/insertAll",
+ data=expected_row_data,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_insert_rows_w_wrong_arg(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ PROJECT = "PROJECT"
+ DS_ID = "DS_ID"
+ TABLE_ID = "TABLE_ID"
+ ROW = {"full_name": "Bhettye Rhubble", "age": "27", "joined": None}
+
+ creds = _make_credentials()
+ client = self._make_one(project=PROJECT, credentials=creds, _http=object())
+ client._connection = make_connection({})
+
+ table_ref = DatasetReference(PROJECT, DS_ID).table(TABLE_ID)
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField("joined", "TIMESTAMP", mode="NULLABLE"),
+ ]
+ table = Table(table_ref, schema=schema)
+
+ with self.assertRaises(TypeError):
+ client.insert_rows_json(table, ROW)
+
+ def test_list_partitions(self):
+ from google.cloud.bigquery.table import Table
+
+ rows = 3
+ meta_info = _make_list_partitons_meta_info(
+ self.PROJECT, self.DS_ID, self.TABLE_ID, rows
+ )
+
+ data = {
+ "totalRows": str(rows),
+ "rows": [
+ {"f": [{"v": "20180101"}]},
+ {"f": [{"v": "20180102"}]},
+ {"f": [{"v": "20180103"}]},
+ ],
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ client._connection = make_connection(meta_info, data)
+ table = Table(self.TABLE_REF)
+
+ partition_list = client.list_partitions(table)
+ self.assertEqual(len(partition_list), rows)
+ self.assertIn("20180102", partition_list)
+
+ def test_list_partitions_with_string_id(self):
+ meta_info = _make_list_partitons_meta_info(
+ self.PROJECT, self.DS_ID, self.TABLE_ID, 0
+ )
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ client._connection = make_connection(meta_info, {})
+
+ partition_list = client.list_partitions(
+ "{}.{}".format(self.DS_ID, self.TABLE_ID)
+ )
+
+ self.assertEqual(len(partition_list), 0)
+
+ def test_list_rows(self):
+ import datetime
+ from google.cloud._helpers import UTC
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+ from google.cloud.bigquery.table import Row
+
+ PATH = "projects/%s/datasets/%s/tables/%s/data" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ WHEN_TS = 1437767599006000
+
+ WHEN = datetime.datetime.utcfromtimestamp(WHEN_TS / 1e6).replace(tzinfo=UTC)
+ WHEN_1 = WHEN + datetime.timedelta(microseconds=1)
+ WHEN_2 = WHEN + datetime.timedelta(microseconds=2)
+ ROWS = 1234
+ TOKEN = "TOKEN"
+
+ DATA = {
+ "totalRows": str(ROWS),
+ "pageToken": TOKEN,
+ "rows": [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}, {"v": WHEN_TS}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}, {"v": WHEN_TS + 1}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}, {"v": WHEN_TS + 2}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": None}, {"v": None}]},
+ ],
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(DATA, DATA)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="NULLABLE")
+ joined = SchemaField("joined", "TIMESTAMP", mode="NULLABLE")
+ table = Table(self.TABLE_REF, schema=[full_name, age, joined])
+ table._properties["location"] = "us-central1"
+ table._properties["numRows"] = 7
+
+ iterator = client.list_rows(table, timeout=7.5)
+
+ # Check that initial RowIterator is populated from the table metadata.
+ self.assertIsNone(iterator.job_id)
+ self.assertEqual(iterator.location, "us-central1")
+ self.assertEqual(iterator.project, table.project)
+ self.assertIsNone(iterator.query_id)
+ self.assertEqual(iterator.total_rows, 7)
+ page = next(iterator.pages)
+ rows = list(page)
+
+ # Check that total_rows is updated based on API response.
+ self.assertEqual(iterator.total_rows, ROWS)
+
+ f2i = {"full_name": 0, "age": 1, "joined": 2}
+ self.assertEqual(len(rows), 4)
+ self.assertEqual(rows[0], Row(("Phred Phlyntstone", 32, WHEN), f2i))
+ self.assertEqual(rows[1], Row(("Bharney Rhubble", 33, WHEN_1), f2i))
+ self.assertEqual(rows[2], Row(("Wylma Phlyntstone", 29, WHEN_2), f2i))
+ self.assertEqual(rows[3], Row(("Bhettye Rhubble", None, None), f2i))
+ self.assertEqual(iterator.next_page_token, TOKEN)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/%s" % PATH,
+ query_params={"formatOptions.useInt64Timestamp": True},
+ timeout=7.5,
+ )
+
+ def test_list_rows_w_start_index_w_page_size(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+ from google.cloud.bigquery.table import Row
+
+ PATH = "projects/%s/datasets/%s/tables/%s/data" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+
+ page_1 = {
+ "totalRows": 4,
+ "pageToken": "some-page-token",
+ "rows": [
+ {"f": [{"v": "Phred Phlyntstone"}]},
+ {"f": [{"v": "Bharney Rhubble"}]},
+ ],
+ }
+ page_2 = {
+ "totalRows": 4,
+ "rows": [
+ {"f": [{"v": "Wylma Phlyntstone"}]},
+ {"f": [{"v": "Bhettye Rhubble"}]},
+ ],
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(page_1, page_2)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ table = Table(self.TABLE_REF, schema=[full_name])
+ iterator = client.list_rows(table, max_results=4, page_size=2, start_index=1)
+ pages = iterator.pages
+ rows = list(next(pages))
+ extra_params = iterator.extra_params
+ f2i = {"full_name": 0}
+ self.assertEqual(len(rows), 2)
+ self.assertEqual(rows[0], Row(("Phred Phlyntstone",), f2i))
+ self.assertEqual(rows[1], Row(("Bharney Rhubble",), f2i))
+
+ rows = list(next(pages))
+
+ self.assertEqual(len(rows), 2)
+ self.assertEqual(rows[0], Row(("Wylma Phlyntstone",), f2i))
+ self.assertEqual(rows[1], Row(("Bhettye Rhubble",), f2i))
+ self.assertEqual(
+ extra_params, {"startIndex": 1, "formatOptions.useInt64Timestamp": True}
+ )
+
+ conn.api_request.assert_has_calls(
+ [
+ mock.call(
+ method="GET",
+ path="/%s" % PATH,
+ query_params={
+ "startIndex": 1,
+ "maxResults": 2,
+ "formatOptions.useInt64Timestamp": True,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ ),
+ mock.call(
+ method="GET",
+ path="/%s" % PATH,
+ query_params={
+ "pageToken": "some-page-token",
+ "maxResults": 2,
+ "formatOptions.useInt64Timestamp": True,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ ),
+ ]
+ )
+
+ def test_list_rows_empty_table(self):
+ response = {"totalRows": "0", "rows": []}
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ client._connection = make_connection(response, response)
+
+ # Table that has no schema because it's an empty table.
+ rows = client.list_rows(
+ # Test with using a string for the table ID.
+ "{}.{}.{}".format(
+ self.TABLE_REF.project,
+ self.TABLE_REF.dataset_id,
+ self.TABLE_REF.table_id,
+ ),
+ selected_fields=[],
+ )
+
+ self.assertIsNone(rows.job_id)
+ self.assertIsNone(rows.location)
+ self.assertEqual(rows.project, self.TABLE_REF.project)
+ self.assertIsNone(rows.query_id)
+ # When a table reference / string and selected_fields is provided,
+ # total_rows can't be populated until iteration starts.
+ self.assertIsNone(rows.total_rows)
+ self.assertEqual(tuple(rows), ())
+ self.assertEqual(rows.total_rows, 0)
+
+ def test_list_rows_query_params(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ table = Table(
+ self.TABLE_REF, schema=[SchemaField("age", "INTEGER", mode="NULLABLE")]
+ )
+ tests = [
+ ({}, {}),
+ ({"start_index": 1}, {"startIndex": 1}),
+ ({"max_results": 2}, {"maxResults": 2}),
+ ({"start_index": 1, "max_results": 2}, {"startIndex": 1, "maxResults": 2}),
+ ]
+ conn = client._connection = make_connection(*len(tests) * [{}])
+ for i, test in enumerate(tests):
+ iterator = client.list_rows(table, **test[0])
+ next(iterator.pages)
+ req = conn.api_request.call_args_list[i]
+ test[1]["formatOptions.useInt64Timestamp"] = True
+ self.assertEqual(req[1]["query_params"], test[1], "for kwargs %s" % test[0])
+
+ def test_list_rows_w_numeric(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ resource = {
+ "totalRows": 3,
+ "rows": [
+ {"f": [{"v": "-1.23456789"}, {"v": "-123456789.987654321"}]},
+ {"f": [{"v": None}, {"v": "3.141592653589793238462643383279502884"}]},
+ {"f": [{"v": "2718281828459045235360287471.352662497"}, {"v": None}]},
+ ],
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ client._connection = make_connection(resource)
+ schema = [
+ SchemaField("num", "NUMERIC"),
+ SchemaField("bignum", "BIGNUMERIC"),
+ ]
+ table = Table(self.TABLE_REF, schema=schema)
+
+ iterator = client.list_rows(table)
+ rows = list(iterator)
+
+ self.assertEqual(len(rows), 3)
+ self.assertEqual(rows[0]["num"], decimal.Decimal("-1.23456789"))
+ self.assertEqual(rows[0]["bignum"], decimal.Decimal("-123456789.987654321"))
+ self.assertIsNone(rows[1]["num"])
+ self.assertEqual(
+ rows[1]["bignum"], decimal.Decimal("3.141592653589793238462643383279502884")
+ )
+ self.assertEqual(
+ rows[2]["num"], decimal.Decimal("2718281828459045235360287471.352662497")
+ )
+ self.assertIsNone(rows[2]["bignum"])
+
+ def test_list_rows_repeated_fields(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ PATH = "projects/%s/datasets/%s/tables/%s/data" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ ROWS = 1234
+ TOKEN = "TOKEN"
+ DATA = {
+ "totalRows": ROWS,
+ "pageToken": TOKEN,
+ "rows": [
+ {
+ "f": [
+ {"v": [{"v": "red"}, {"v": "green"}]},
+ {
+ "v": [
+ {
+ "v": {
+ "f": [
+ {"v": [{"v": "1"}, {"v": "2"}]},
+ {"v": [{"v": "3.1415"}, {"v": "1.414"}]},
+ ]
+ }
+ }
+ ]
+ },
+ ]
+ }
+ ],
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(DATA)
+ color = SchemaField("color", "STRING", mode="REPEATED")
+ index = SchemaField("index", "INTEGER", "REPEATED")
+ score = SchemaField("score", "FLOAT", "REPEATED")
+ struct = SchemaField("struct", "RECORD", mode="REPEATED", fields=[index, score])
+
+ iterator = client.list_rows(self.TABLE_REF, selected_fields=[color, struct])
+ page = next(iterator.pages)
+ rows = list(page)
+ total_rows = iterator.total_rows
+ page_token = iterator.next_page_token
+
+ self.assertEqual(len(rows), 1)
+ self.assertEqual(rows[0][0], ["red", "green"])
+ self.assertEqual(rows[0][1], [{"index": [1, 2], "score": [3.1415, 1.414]}])
+ self.assertEqual(total_rows, ROWS)
+ self.assertEqual(page_token, TOKEN)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/%s" % PATH,
+ query_params={
+ "selectedFields": "color,struct",
+ "formatOptions.useInt64Timestamp": True,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_list_rows_w_record_schema(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ PATH = "projects/%s/datasets/%s/tables/%s/data" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_ID,
+ )
+ ROWS = 1234
+ TOKEN = "TOKEN"
+ DATA = {
+ "totalRows": ROWS,
+ "pageToken": TOKEN,
+ "rows": [
+ {
+ "f": [
+ {"v": "Phred Phlyntstone"},
+ {"v": {"f": [{"v": "800"}, {"v": "555-1212"}, {"v": 1}]}},
+ ]
+ },
+ {
+ "f": [
+ {"v": "Bharney Rhubble"},
+ {"v": {"f": [{"v": "877"}, {"v": "768-5309"}, {"v": 2}]}},
+ ]
+ },
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": None}]},
+ ],
+ }
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(DATA)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ area_code = SchemaField("area_code", "STRING", "REQUIRED")
+ local_number = SchemaField("local_number", "STRING", "REQUIRED")
+ rank = SchemaField("rank", "INTEGER", "REQUIRED")
+ phone = SchemaField(
+ "phone", "RECORD", mode="NULLABLE", fields=[area_code, local_number, rank]
+ )
+ table = Table(self.TABLE_REF, schema=[full_name, phone])
+
+ iterator = client.list_rows(table)
+ page = next(iterator.pages)
+ rows = list(page)
+ total_rows = iterator.total_rows
+ page_token = iterator.next_page_token
+
+ self.assertEqual(len(rows), 3)
+ self.assertEqual(rows[0][0], "Phred Phlyntstone")
+ self.assertEqual(
+ rows[0][1], {"area_code": "800", "local_number": "555-1212", "rank": 1}
+ )
+ self.assertEqual(rows[1][0], "Bharney Rhubble")
+ self.assertEqual(
+ rows[1][1], {"area_code": "877", "local_number": "768-5309", "rank": 2}
+ )
+ self.assertEqual(rows[2][0], "Wylma Phlyntstone")
+ self.assertIsNone(rows[2][1])
+ self.assertEqual(total_rows, ROWS)
+ self.assertEqual(page_token, TOKEN)
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/%s" % PATH,
+ query_params={"formatOptions.useInt64Timestamp": True},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_list_rows_with_missing_schema(self):
+ from google.cloud.bigquery.table import Table, TableListItem
+
+ table_path = "/projects/{}/datasets/{}/tables/{}".format(
+ self.PROJECT, self.DS_ID, self.TABLE_ID
+ )
+ tabledata_path = "{}/data".format(table_path)
+
+ table_list_item_data = {
+ "id": "%s:%s:%s" % (self.PROJECT, self.DS_ID, self.TABLE_ID),
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_ID,
+ },
+ }
+ table_data = copy.deepcopy(table_list_item_data)
+ # Intentionally make wrong, since total_rows can update during iteration.
+ table_data["numRows"] = 2
+ table_data["schema"] = {
+ "fields": [
+ {"name": "name", "type": "STRING"},
+ {"name": "age", "type": "INTEGER"},
+ ]
+ }
+ rows_data = {
+ "totalRows": 3,
+ "pageToken": None,
+ "rows": [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "31"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": None}]},
+ ],
+ }
+
+ creds = _make_credentials()
+ http = object()
+
+ schemaless_tables = (
+ "{}.{}".format(self.DS_ID, self.TABLE_ID),
+ self.TABLE_REF,
+ Table(self.TABLE_REF),
+ TableListItem(table_list_item_data),
+ )
+
+ for table in schemaless_tables:
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+ conn = client._connection = make_connection(table_data, rows_data)
+
+ row_iter = client.list_rows(table)
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path=table_path, timeout=DEFAULT_TIMEOUT
+ )
+ conn.api_request.reset_mock()
+ self.assertEqual(row_iter.total_rows, 2, msg=repr(table))
+
+ rows = list(row_iter)
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path=tabledata_path,
+ query_params={"formatOptions.useInt64Timestamp": True},
+ timeout=DEFAULT_TIMEOUT,
+ )
+ self.assertEqual(row_iter.total_rows, 3, msg=repr(table))
+ self.assertEqual(rows[0].name, "Phred Phlyntstone", msg=repr(table))
+ self.assertEqual(rows[1].age, 31, msg=repr(table))
+ self.assertIsNone(rows[2].age, msg=repr(table))
+
+ def test_list_rows_error(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ # neither Table nor TableReference
+ with self.assertRaises(TypeError):
+ client.list_rows(1)
+
+ def test_context_manager_enter_returns_itself(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ with mock.patch.object(client, "close"), client as context_var:
+ pass
+
+ self.assertIs(client, context_var)
+
+ def test_context_manager_exit_closes_client(self):
+ creds = _make_credentials()
+ http = object()
+ client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
+
+ fake_close = mock.Mock()
+ with mock.patch.object(client, "close", fake_close):
+ with client:
+ pass
+
+ fake_close.assert_called_once()
+
+
+class TestClientUpload(object):
+ # NOTE: This is a "partner" to `TestClient` meant to test some of the
+ # "load_table_from_file" portions of `Client`. It also uses
+ # `pytest`-style tests rather than `unittest`-style.
+ from google.cloud.bigquery.job import SourceFormat
+
+ PROJECT = "project_id"
+ TABLE_REF = DatasetReference(PROJECT, "test_dataset").table("test_table")
+
+ LOCATION = "us-central"
+
+ @classmethod
+ def _make_client(cls, transport=None, location=None):
+ from google.cloud.bigquery import _http
+ from google.cloud.bigquery import client
+
+ cl = client.Client(
+ project=cls.PROJECT,
+ credentials=_make_credentials(),
+ _http=transport,
+ location=location,
+ )
+ cl._connection = mock.create_autospec(_http.Connection, instance=True)
+ return cl
+
+ @staticmethod
+ def _make_response(status_code, content="", headers={}):
+ """Make a mock HTTP response."""
+ import requests
+
+ response = requests.Response()
+ response.request = requests.Request("POST", "http://example.com").prepare()
+ response._content = content.encode("utf-8")
+ response.headers.update(headers)
+ response.status_code = status_code
+ return response
+
+ @classmethod
+ def _make_do_upload_patch(cls, client, method, resource={}, side_effect=None):
+ """Patches the low-level upload helpers."""
+ if side_effect is None:
+ side_effect = [
+ cls._make_response(
+ http.client.OK,
+ json.dumps(resource),
+ {"Content-Type": "application/json"},
+ )
+ ]
+ return mock.patch.object(client, method, side_effect=side_effect, autospec=True)
+
+ EXPECTED_CONFIGURATION = {
+ "jobReference": {"projectId": PROJECT, "jobId": "job_id"},
+ "configuration": {
+ "load": {
+ "sourceFormat": SourceFormat.CSV,
+ "destinationTable": {
+ "projectId": PROJECT,
+ "datasetId": "test_dataset",
+ "tableId": "test_table",
+ },
+ }
+ },
+ }
+
+ @staticmethod
+ def _make_file_obj():
+ return io.BytesIO(b"hello, is it me you're looking for?")
+
+ def _make_gzip_file_obj(self, writable):
+ if writable:
+ return gzip.GzipFile(mode="w", fileobj=io.BytesIO())
+ else:
+ return gzip.GzipFile(mode="r", fileobj=self._make_file_obj())
+
+ @staticmethod
+ def _make_config():
+ from google.cloud.bigquery.job import LoadJobConfig
+ from google.cloud.bigquery.job import SourceFormat
+
+ config = LoadJobConfig()
+ config.source_format = SourceFormat.CSV
+ return config
+
+ # High-level tests
+
+ def test_load_table_from_file_resumable(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+ job_config = self._make_config()
+ original_config_copy = copy.deepcopy(job_config)
+
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ file_obj,
+ self.TABLE_REF,
+ job_id="job_id",
+ job_config=job_config,
+ )
+
+ do_upload.assert_called_once_with(
+ file_obj,
+ self.EXPECTED_CONFIGURATION,
+ _DEFAULT_NUM_RETRIES,
+ DEFAULT_TIMEOUT,
+ project=self.EXPECTED_CONFIGURATION["jobReference"]["projectId"],
+ )
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_load_table_from_file_w_explicit_project(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ file_obj,
+ self.TABLE_REF,
+ job_id="job_id",
+ project="other-project",
+ location=self.LOCATION,
+ job_config=self._make_config(),
+ )
+
+ expected_resource = copy.deepcopy(self.EXPECTED_CONFIGURATION)
+ expected_resource["jobReference"]["location"] = self.LOCATION
+ expected_resource["jobReference"]["projectId"] = "other-project"
+ do_upload.assert_called_once_with(
+ file_obj,
+ expected_resource,
+ _DEFAULT_NUM_RETRIES,
+ DEFAULT_TIMEOUT,
+ project="other-project",
+ )
+
+ def test_load_table_from_file_w_client_location(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+
+ client = self._make_client(location=self.LOCATION)
+ file_obj = self._make_file_obj()
+
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ file_obj,
+ # Test with string for table ID.
+ "{}.{}.{}".format(
+ self.TABLE_REF.project,
+ self.TABLE_REF.dataset_id,
+ self.TABLE_REF.table_id,
+ ),
+ job_id="job_id",
+ project="other-project",
+ job_config=self._make_config(),
+ )
+
+ expected_resource = copy.deepcopy(self.EXPECTED_CONFIGURATION)
+ expected_resource["jobReference"]["location"] = self.LOCATION
+ expected_resource["jobReference"]["projectId"] = "other-project"
+ do_upload.assert_called_once_with(
+ file_obj,
+ expected_resource,
+ _DEFAULT_NUM_RETRIES,
+ DEFAULT_TIMEOUT,
+ project="other-project",
+ )
+
+ def test_load_table_from_file_resumable_metadata(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import WriteDisposition
+
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+
+ config = self._make_config()
+ config.allow_jagged_rows = False
+ config.allow_quoted_newlines = False
+ config.create_disposition = CreateDisposition.CREATE_IF_NEEDED
+ config.encoding = "utf8"
+ config.field_delimiter = ","
+ config.ignore_unknown_values = False
+ config.max_bad_records = 0
+ config.quote_character = '"'
+ config.skip_leading_rows = 1
+ config.write_disposition = WriteDisposition.WRITE_APPEND
+ config.null_marker = r"\N"
+
+ expected_config = {
+ "jobReference": {"projectId": self.PROJECT, "jobId": "job_id"},
+ "configuration": {
+ "load": {
+ "destinationTable": {
+ "projectId": self.TABLE_REF.project,
+ "datasetId": self.TABLE_REF.dataset_id,
+ "tableId": self.TABLE_REF.table_id,
+ },
+ "sourceFormat": config.source_format,
+ "allowJaggedRows": config.allow_jagged_rows,
+ "allowQuotedNewlines": config.allow_quoted_newlines,
+ "createDisposition": config.create_disposition,
+ "encoding": config.encoding,
+ "fieldDelimiter": config.field_delimiter,
+ "ignoreUnknownValues": config.ignore_unknown_values,
+ "maxBadRecords": config.max_bad_records,
+ "quote": config.quote_character,
+ "skipLeadingRows": str(config.skip_leading_rows),
+ "writeDisposition": config.write_disposition,
+ "nullMarker": config.null_marker,
+ }
+ },
+ }
+
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", expected_config
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ file_obj, self.TABLE_REF, job_id="job_id", job_config=config
+ )
+
+ do_upload.assert_called_once_with(
+ file_obj,
+ expected_config,
+ _DEFAULT_NUM_RETRIES,
+ DEFAULT_TIMEOUT,
+ project=self.EXPECTED_CONFIGURATION["jobReference"]["projectId"],
+ )
+
+ def test_load_table_from_file_multipart(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+ file_obj_size = 10
+ config = self._make_config()
+
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_multipart_upload", self.EXPECTED_CONFIGURATION
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ file_obj,
+ self.TABLE_REF,
+ job_id="job_id",
+ job_config=config,
+ size=file_obj_size,
+ )
+
+ do_upload.assert_called_once_with(
+ file_obj,
+ self.EXPECTED_CONFIGURATION,
+ file_obj_size,
+ _DEFAULT_NUM_RETRIES,
+ DEFAULT_TIMEOUT,
+ project=self.PROJECT,
+ )
+
+ def test_load_table_from_file_with_retries(self):
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+ num_retries = 20
+
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ file_obj,
+ self.TABLE_REF,
+ num_retries=num_retries,
+ job_id="job_id",
+ job_config=self._make_config(),
+ )
+
+ do_upload.assert_called_once_with(
+ file_obj,
+ self.EXPECTED_CONFIGURATION,
+ num_retries,
+ DEFAULT_TIMEOUT,
+ project=self.EXPECTED_CONFIGURATION["jobReference"]["projectId"],
+ )
+
+ def test_load_table_from_file_with_rewind(self):
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+ file_obj.seek(2)
+
+ with self._make_do_upload_patch(
+ client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
+ ):
+ client.load_table_from_file(file_obj, self.TABLE_REF, rewind=True)
+
+ assert file_obj.tell() == 0
+
+ def test_load_table_from_file_with_readable_gzip(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+
+ client = self._make_client()
+ gzip_file = self._make_gzip_file_obj(writable=False)
+
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ gzip_file,
+ self.TABLE_REF,
+ job_id="job_id",
+ job_config=self._make_config(),
+ )
+
+ do_upload.assert_called_once_with(
+ gzip_file,
+ self.EXPECTED_CONFIGURATION,
+ _DEFAULT_NUM_RETRIES,
+ DEFAULT_TIMEOUT,
+ project=self.EXPECTED_CONFIGURATION["jobReference"]["projectId"],
+ )
+
+ def test_load_table_from_file_with_writable_gzip(self):
+ client = self._make_client()
+ gzip_file = self._make_gzip_file_obj(writable=True)
+
+ with pytest.raises(ValueError):
+ client.load_table_from_file(
+ gzip_file,
+ self.TABLE_REF,
+ job_id="job_id",
+ job_config=self._make_config(),
+ )
+
+ def test_load_table_from_file_failure(self):
+ from google.resumable_media import InvalidResponse
+ from google.cloud import exceptions
+
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+
+ response = self._make_response(
+ content="Someone is already in this spot.", status_code=http.client.CONFLICT
+ )
+
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", side_effect=InvalidResponse(response)
+ )
+
+ with do_upload_patch, pytest.raises(exceptions.Conflict) as exc_info:
+ client.load_table_from_file(file_obj, self.TABLE_REF, rewind=True)
+
+ assert response.text in exc_info.value.message
+ assert exc_info.value.errors == []
+
+ def test_load_table_from_file_bad_mode(self):
+ client = self._make_client()
+ file_obj = mock.Mock(spec=["mode"])
+ file_obj.mode = "x"
+
+ with pytest.raises(ValueError):
+ client.load_table_from_file(file_obj, self.TABLE_REF)
+
+ def test_load_table_from_file_w_invalid_job_config(self):
+ from google.cloud.bigquery import job
+
+ client = self._make_client()
+ gzip_file = self._make_gzip_file_obj(writable=True)
+ config = job.QueryJobConfig()
+ with pytest.raises(TypeError) as exc:
+ client.load_table_from_file(
+ gzip_file, self.TABLE_REF, job_id="job_id", job_config=config
+ )
+ err_msg = str(exc.value)
+ assert "Expected an instance of LoadJobConfig" in err_msg
+
+ def test_load_table_from_file_w_explicit_job_config(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+
+ job_config = self._make_config()
+ job_config.create_session = True
+ job_config.encoding = "UTF-8"
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ file_obj,
+ self.TABLE_REF,
+ job_id="job_id",
+ project=self.PROJECT,
+ location=self.LOCATION,
+ job_config=job_config,
+ )
+
+ expected_resource = copy.deepcopy(self.EXPECTED_CONFIGURATION)
+ expected_resource["jobReference"]["location"] = self.LOCATION
+ expected_resource["jobReference"]["projectId"] = self.PROJECT
+ expected_resource["configuration"]["load"]["createSession"] = True
+ expected_resource["configuration"]["load"]["encoding"] = "UTF-8"
+ do_upload.assert_called_once_with(
+ file_obj,
+ expected_resource,
+ _DEFAULT_NUM_RETRIES,
+ DEFAULT_TIMEOUT,
+ project=self.PROJECT,
+ )
+
+ def test_load_table_from_file_w_explicit_job_config_override(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+
+ default_job_config = LoadJobConfig()
+ default_job_config.create_session = True
+ default_job_config.encoding = "ISO-8859-1"
+ client.default_load_job_config = default_job_config
+
+ job_config = self._make_config()
+ job_config.create_session = False
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ file_obj,
+ self.TABLE_REF,
+ job_id="job_id",
+ project=self.PROJECT,
+ location=self.LOCATION,
+ job_config=job_config,
+ )
+
+ expected_resource = copy.deepcopy(self.EXPECTED_CONFIGURATION)
+ expected_resource["jobReference"]["location"] = self.LOCATION
+ expected_resource["jobReference"]["projectId"] = self.PROJECT
+ expected_resource["configuration"]["load"]["createSession"] = False
+ expected_resource["configuration"]["load"]["encoding"] = "ISO-8859-1"
+ do_upload.assert_called_once_with(
+ file_obj,
+ expected_resource,
+ _DEFAULT_NUM_RETRIES,
+ DEFAULT_TIMEOUT,
+ project=self.PROJECT,
+ )
+
+ def test_load_table_from_file_w_default_load_config(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery.job import LoadJobConfig
+
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+
+ default_job_config = LoadJobConfig()
+ default_job_config.encoding = "ISO-8859-1"
+ client.default_load_job_config = default_job_config
+
+ job_config = self._make_config()
+ do_upload_patch = self._make_do_upload_patch(
+ client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
+ )
+ with do_upload_patch as do_upload:
+ client.load_table_from_file(
+ file_obj,
+ self.TABLE_REF,
+ job_id="job_id",
+ project=self.PROJECT,
+ location=self.LOCATION,
+ job_config=job_config,
+ )
+
+ expected_resource = copy.deepcopy(self.EXPECTED_CONFIGURATION)
+ expected_resource["jobReference"]["location"] = self.LOCATION
+ expected_resource["jobReference"]["projectId"] = self.PROJECT
+ expected_resource["configuration"]["load"]["encoding"] = "ISO-8859-1"
+ do_upload.assert_called_once_with(
+ file_obj,
+ expected_resource,
+ _DEFAULT_NUM_RETRIES,
+ DEFAULT_TIMEOUT,
+ project=self.PROJECT,
+ )
+
+ def test_load_table_from_dataframe(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import PolicyTagList, SchemaField
+
+ client = self._make_client()
+ records = [
+ {"id": 1, "age": 100, "accounts": [2, 3]},
+ {"id": 2, "age": 60, "accounts": [5]},
+ {"id": 3, "age": 40, "accounts": []},
+ ]
+ # Mixup column order so that we can verify sent schema matches the
+ # serialized order, not the table column order.
+ column_order = ["age", "accounts", "id"]
+ dataframe = pandas.DataFrame(records, columns=column_order)
+ table_fields = {
+ "id": SchemaField(
+ "id",
+ "INTEGER",
+ mode="REQUIRED",
+ description="integer column",
+ policy_tags=PolicyTagList(names=("foo", "bar")),
+ ),
+ "age": SchemaField(
+ "age",
+ "INTEGER",
+ mode="NULLABLE",
+ description="age column",
+ policy_tags=PolicyTagList(names=("baz",)),
+ ),
+ "accounts": SchemaField(
+ "accounts",
+ "INTEGER",
+ mode="REPEATED",
+ description="array column",
+ ),
+ }
+ get_table_schema = [
+ table_fields["id"],
+ table_fields["age"],
+ table_fields["accounts"],
+ ]
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(schema=get_table_schema),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(dataframe, self.TABLE_REF)
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=None,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_file = load_table_from_file.mock_calls[0][1][1]
+ assert sent_file.closed
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"].to_api_repr()[
+ "load"
+ ]
+ assert sent_config["sourceFormat"] == job.SourceFormat.PARQUET
+ for field_index, field in enumerate(sent_config["schema"]["fields"]):
+ assert field["name"] == column_order[field_index]
+ table_field = table_fields[field["name"]]
+ assert field["name"] == table_field.name
+ assert field["type"] == table_field.field_type
+ assert field["mode"] == table_field.mode
+ assert len(field.get("fields", [])) == len(table_field.fields)
+ # Avoid accidentally updating policy tags when not explicitly included.
+ # https://github.com/googleapis/python-bigquery/issues/981
+ # Also, avoid 403 if someone has permission to write to table but
+ # not update policy tags by omitting policy tags we might have
+ # received from a get table request.
+ # https://github.com/googleapis/python-bigquery/pull/557
+ assert "policyTags" not in field
+ # Omit unnecessary fields when they come from getting the table
+ # (not passed in via job_config)
+ assert "description" not in field
+
+ def test_load_table_from_dataframe_w_client_location(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client(location=self.LOCATION)
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
+ ),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(dataframe, self.TABLE_REF)
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_file = load_table_from_file.mock_calls[0][1][1]
+ assert sent_file.closed
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+
+ def test_load_table_from_dataframe_w_custom_job_config_wihtout_source_format(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+ job_config = job.LoadJobConfig(
+ write_disposition=job.WriteDisposition.WRITE_TRUNCATE,
+ )
+ original_config_copy = copy.deepcopy(job_config)
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
+ ),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch as get_table:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ # no need to fetch and inspect table schema for WRITE_TRUNCATE jobs
+ assert not get_table.called
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert sent_config.write_disposition == job.WriteDisposition.WRITE_TRUNCATE
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_load_table_from_dataframe_w_custom_job_config_w_source_format(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+ job_config = job.LoadJobConfig(
+ write_disposition=job.WriteDisposition.WRITE_TRUNCATE,
+ source_format=job.SourceFormat.PARQUET,
+ )
+ original_config_copy = copy.deepcopy(job_config)
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
+ ),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch as get_table:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ # no need to fetch and inspect table schema for WRITE_TRUNCATE jobs
+ assert not get_table.called
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert sent_config.write_disposition == job.WriteDisposition.WRITE_TRUNCATE
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_load_table_from_dataframe_w_parquet_options_none(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+
+ job_config = job.LoadJobConfig(
+ write_disposition=job.WriteDisposition.WRITE_TRUNCATE,
+ source_format=job.SourceFormat.PARQUET,
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
+ ),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch as get_table:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ # no need to fetch and inspect table schema for WRITE_TRUNCATE jobs
+ assert not get_table.called
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.parquet_options.enable_list_inference is True
+
+ def test_load_table_from_dataframe_w_list_inference_none(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+
+ parquet_options = ParquetOptions()
+
+ job_config = job.LoadJobConfig(
+ write_disposition=job.WriteDisposition.WRITE_TRUNCATE,
+ source_format=job.SourceFormat.PARQUET,
+ )
+ job_config.parquet_options = parquet_options
+
+ original_config_copy = copy.deepcopy(job_config)
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
+ ),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch as get_table:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ # no need to fetch and inspect table schema for WRITE_TRUNCATE jobs
+ assert not get_table.called
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.parquet_options.enable_list_inference is None
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_load_table_from_dataframe_w_explicit_job_config_override(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+
+ client.default_load_job_config = job.LoadJobConfig(
+ encoding="ISO-8859-1",
+ write_disposition=job.WriteDisposition.WRITE_TRUNCATE,
+ source_format=job.SourceFormat.PARQUET,
+ )
+
+ job_config = job.LoadJobConfig(
+ write_disposition=job.WriteDisposition.WRITE_APPEND,
+ source_format=job.SourceFormat.PARQUET,
+ )
+ original_config_copy = copy.deepcopy(job_config)
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
+ ),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.write_disposition == job.WriteDisposition.WRITE_APPEND
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert sent_config.encoding == "ISO-8859-1"
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_load_table_from_dataframe_w_default_load_config(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+
+ client.default_load_job_config = job.LoadJobConfig(
+ write_disposition=job.WriteDisposition.WRITE_TRUNCATE,
+ source_format=job.SourceFormat.PARQUET,
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
+ ),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.write_disposition == job.WriteDisposition.WRITE_TRUNCATE
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+
+ def test_load_table_from_dataframe_w_list_inference_false(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+
+ parquet_options = ParquetOptions()
+ parquet_options.enable_list_inference = False
+
+ job_config = job.LoadJobConfig(
+ write_disposition=job.WriteDisposition.WRITE_TRUNCATE,
+ source_format=job.SourceFormat.PARQUET,
+ )
+ job_config.parquet_options = parquet_options
+
+ original_config_copy = copy.deepcopy(job_config)
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
+ ),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch as get_table:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ # no need to fetch and inspect table schema for WRITE_TRUNCATE jobs
+ assert not get_table.called
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.parquet_options.enable_list_inference is False
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_load_table_from_dataframe_w_custom_job_config_w_wrong_source_format(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery import job
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+ job_config = job.LoadJobConfig(
+ write_disposition=job.WriteDisposition.WRITE_TRUNCATE,
+ source_format=job.SourceFormat.ORC,
+ )
+
+ with pytest.raises(ValueError) as exc:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ assert "Got unexpected source_format:" in str(exc.value)
+
+ def test_load_table_from_dataframe_w_automatic_schema(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ df_data = collections.OrderedDict(
+ [
+ ("int_col", [1, 2, 3]),
+ ("float_col", [1.0, 2.0, 3.0]),
+ ("bool_col", [True, False, True]),
+ (
+ "dt_col",
+ pandas.Series(
+ [
+ datetime.datetime(2010, 1, 2, 3, 44, 50),
+ datetime.datetime(2011, 2, 3, 14, 50, 59),
+ datetime.datetime(2012, 3, 14, 15, 16),
+ ],
+ dtype="datetime64[ns]",
+ ),
+ ),
+ (
+ "ts_col",
+ pandas.Series(
+ [
+ datetime.datetime(2010, 1, 2, 3, 44, 50),
+ datetime.datetime(2011, 2, 3, 14, 50, 59),
+ datetime.datetime(2012, 3, 14, 15, 16),
+ ],
+ dtype="datetime64[ns]",
+ ).dt.tz_localize(datetime.timezone.utc),
+ ),
+ (
+ "date_col",
+ pandas.Series(
+ [
+ datetime.date(2010, 1, 2),
+ datetime.date(2011, 2, 3),
+ datetime.date(2012, 3, 14),
+ ],
+ dtype="dbdate",
+ ),
+ ),
+ (
+ "time_col",
+ pandas.Series(
+ [
+ datetime.time(3, 44, 50),
+ datetime.time(14, 50, 59),
+ datetime.time(15, 16),
+ ],
+ dtype="dbtime",
+ ),
+ ),
+ ]
+ )
+ dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=google.api_core.exceptions.NotFound("Table not found"),
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert tuple(sent_config.schema) == (
+ SchemaField("int_col", "INTEGER"),
+ SchemaField("float_col", "FLOAT"),
+ SchemaField("bool_col", "BOOLEAN"),
+ SchemaField("dt_col", "DATETIME"),
+ SchemaField("ts_col", "TIMESTAMP"),
+ SchemaField("date_col", "DATE"),
+ SchemaField("time_col", "TIME"),
+ )
+
+ def test_load_table_from_dataframe_w_automatic_schema_detection_fails(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+
+ client = self._make_client()
+
+ df_data = [
+ [[{"name": "n1.1", "value": 1.1}, {"name": "n1.2", "value": 1.2}]],
+ [[{"name": "n2.1", "value": 2.1}, {"name": "n2.2", "value": 2.2}]],
+ ]
+ dataframe = pandas.DataFrame(df_data, columns=["col_record_list"])
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=google.api_core.exceptions.NotFound("Table not found"),
+ )
+
+ with load_patch as load_table_from_file, get_table_patch:
+ with warnings.catch_warnings(record=True) as warned:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, location=self.LOCATION
+ )
+
+ # There should be a warning that schema detection failed.
+ expected_warnings = [
+ warning
+ for warning in warned
+ if "schema could not be detected" in str(warning).lower()
+ ]
+ assert len(expected_warnings) == 1
+ assert issubclass(
+ expected_warnings[0].category,
+ (DeprecationWarning, PendingDeprecationWarning),
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert sent_config.schema is None
+
+ def test_load_table_from_dataframe_w_index_and_auto_schema(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ df_data = collections.OrderedDict(
+ [("int_col", [10, 20, 30]), ("float_col", [1.0, 2.0, 3.0])]
+ )
+ dataframe = pandas.DataFrame(
+ df_data,
+ index=pandas.Index(name="unique_name", data=["one", "two", "three"]),
+ )
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[
+ SchemaField("int_col", "INTEGER"),
+ SchemaField("float_col", "FLOAT"),
+ SchemaField("unique_name", "STRING"),
+ ]
+ ),
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+
+ sent_schema = sorted(sent_config.schema, key=operator.attrgetter("name"))
+ expected_sent_schema = [
+ SchemaField("float_col", "FLOAT"),
+ SchemaField("int_col", "INTEGER"),
+ SchemaField("unique_name", "STRING"),
+ ]
+ assert sent_schema == expected_sent_schema
+
+ def test_load_table_from_dataframe_unknown_table(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=google.api_core.exceptions.NotFound("Table not found"),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ # there should be no error
+ client.load_table_from_dataframe(dataframe, self.TABLE_REF)
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=None,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ def test_load_table_from_dataframe_w_nullable_int64_datatype(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ dataframe = pandas.DataFrame({"x": [1, 2, None, 4]}, dtype="Int64")
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(schema=[SchemaField("x", "INT64", "NULLABLE")]),
+ )
+
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert tuple(sent_config.schema) == (
+ SchemaField("x", "INT64", "NULLABLE", None),
+ )
+
+ def test_load_table_from_dataframe_w_nullable_int64_datatype_automatic_schema(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ dataframe = pandas.DataFrame({"x": [1, 2, None, 4]}, dtype="Int64")
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=google.api_core.exceptions.NotFound("Table not found"),
+ )
+
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert tuple(sent_config.schema) == (
+ SchemaField("x", "INT64", "NULLABLE", None),
+ )
+
+ def test_load_table_from_dataframe_struct_fields(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+
+ records = [(3.14, {"foo": 1, "bar": 1})]
+ dataframe = pandas.DataFrame(
+ data=records, columns=["float_column", "struct_column"]
+ )
+
+ schema = [
+ SchemaField("float_column", "FLOAT"),
+ SchemaField(
+ "struct_column",
+ "RECORD",
+ fields=[SchemaField("foo", "INTEGER"), SchemaField("bar", "INTEGER")],
+ ),
+ ]
+ job_config = job.LoadJobConfig(schema=schema)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=google.api_core.exceptions.NotFound("Table not found"),
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe,
+ self.TABLE_REF,
+ job_config=job_config,
+ location=self.LOCATION,
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert sent_config.schema == schema
+
+ def test_load_table_from_dataframe_array_fields(self):
+ """Test that a DataFrame with array columns can be uploaded correctly.
+
+ See: https://github.com/googleapis/python-bigquery/issues/19
+ """
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+
+ records = [(3.14, [1, 2])]
+ dataframe = pandas.DataFrame(
+ data=records, columns=["float_column", "array_column"]
+ )
+
+ schema = [
+ SchemaField("float_column", "FLOAT"),
+ SchemaField(
+ "array_column",
+ "INTEGER",
+ mode="REPEATED",
+ ),
+ ]
+ job_config = job.LoadJobConfig(schema=schema)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=google.api_core.exceptions.NotFound("Table not found"),
+ )
+
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe,
+ self.TABLE_REF,
+ job_config=job_config,
+ location=self.LOCATION,
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert sent_config.schema == schema
+
+ def test_load_table_from_dataframe_array_fields_w_auto_schema(self):
+ """Test that a DataFrame with array columns can be uploaded correctly.
+
+ See: https://github.com/googleapis/python-bigquery/issues/19
+ """
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+
+ records = [(3.14, [1, 2])]
+ dataframe = pandas.DataFrame(
+ data=records, columns=["float_column", "array_column"]
+ )
+
+ expected_schema = [
+ SchemaField("float_column", "FLOAT"),
+ SchemaField(
+ "array_column",
+ "INT64",
+ mode="REPEATED",
+ ),
+ ]
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=google.api_core.exceptions.NotFound("Table not found"),
+ )
+
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe,
+ self.TABLE_REF,
+ location=self.LOCATION,
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert sent_config.schema == expected_schema
+
+ def test_load_table_from_dataframe_w_partial_schema(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ df_data = collections.OrderedDict(
+ [
+ ("int_col", [1, 2, 3]),
+ ("int_as_float_col", [1.0, float("nan"), 3.0]),
+ ("float_col", [1.0, 2.0, 3.0]),
+ ("bool_col", [True, False, True]),
+ (
+ "dt_col",
+ pandas.Series(
+ [
+ datetime.datetime(2010, 1, 2, 3, 44, 50),
+ datetime.datetime(2011, 2, 3, 14, 50, 59),
+ datetime.datetime(2012, 3, 14, 15, 16),
+ ],
+ dtype="datetime64[ns]",
+ ),
+ ),
+ (
+ "ts_col",
+ pandas.Series(
+ [
+ datetime.datetime(2010, 1, 2, 3, 44, 50),
+ datetime.datetime(2011, 2, 3, 14, 50, 59),
+ datetime.datetime(2012, 3, 14, 15, 16),
+ ],
+ dtype="datetime64[ns]",
+ ).dt.tz_localize(datetime.timezone.utc),
+ ),
+ ("string_col", ["abc", None, "def"]),
+ ("bytes_col", [b"abc", b"def", None]),
+ ]
+ )
+ dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ schema = (
+ SchemaField("int_as_float_col", "INTEGER"),
+ SchemaField("string_col", "STRING"),
+ SchemaField("bytes_col", "BYTES"),
+ )
+ job_config = job.LoadJobConfig(schema=schema)
+ with load_patch as load_table_from_file:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert tuple(sent_config.schema) == (
+ SchemaField("int_col", "INTEGER"),
+ SchemaField("int_as_float_col", "INTEGER"),
+ SchemaField("float_col", "FLOAT"),
+ SchemaField("bool_col", "BOOLEAN"),
+ SchemaField("dt_col", "DATETIME"),
+ SchemaField("ts_col", "TIMESTAMP"),
+ SchemaField("string_col", "STRING"),
+ SchemaField("bytes_col", "BYTES"),
+ )
+
+ def test_load_table_from_dataframe_w_partial_schema_extra_types(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ df_data = collections.OrderedDict(
+ [
+ ("int_col", [1, 2, 3]),
+ ("int_as_float_col", [1.0, float("nan"), 3.0]),
+ ("string_col", ["abc", None, "def"]),
+ ]
+ )
+ dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ schema = (
+ SchemaField("int_as_float_col", "INTEGER"),
+ SchemaField("string_col", "STRING"),
+ SchemaField("unknown_col", "BYTES"),
+ )
+ job_config = job.LoadJobConfig(schema=schema)
+ with load_patch as load_table_from_file, pytest.raises(
+ ValueError
+ ) as exc_context:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_not_called()
+ message = str(exc_context.value)
+ assert "bq_schema contains fields not present in dataframe" in message
+ assert "unknown_col" in message
+
+ def test_load_table_from_dataframe_w_schema_arrow_custom_compression(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ records = [{"name": "Monty", "age": 100}, {"name": "Python", "age": 60}]
+ dataframe = pandas.DataFrame(records)
+ schema = (SchemaField("name", "STRING"), SchemaField("age", "INTEGER"))
+ job_config = job.LoadJobConfig(schema=schema)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ to_parquet_patch = mock.patch(
+ "google.cloud.bigquery.client._pandas_helpers.dataframe_to_parquet",
+ autospec=True,
+ )
+
+ with load_patch, to_parquet_patch as fake_to_parquet:
+ client.load_table_from_dataframe(
+ dataframe,
+ self.TABLE_REF,
+ job_config=job_config,
+ location=self.LOCATION,
+ parquet_compression="LZ4",
+ )
+
+ call_args = fake_to_parquet.call_args[1]
+ assert call_args is not None
+ assert call_args.get("parquet_compression") == "LZ4"
+
+ def test_load_table_from_dataframe_wo_pyarrow_raises_error(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=google.api_core.exceptions.NotFound("Table not found"),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ pyarrow_patch = mock.patch("google.cloud.bigquery.client.pyarrow", None)
+ to_parquet_patch = mock.patch.object(
+ dataframe, "to_parquet", wraps=dataframe.to_parquet
+ )
+
+ with load_patch, get_table_patch, pyarrow_patch, to_parquet_patch:
+ with pytest.raises(ValueError):
+ client.load_table_from_dataframe(
+ dataframe,
+ self.TABLE_REF,
+ location=self.LOCATION,
+ parquet_compression="gzip",
+ )
+
+ def test_load_table_from_dataframe_w_bad_pyarrow_issues_warning(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+
+ pyarrow_version_patch = mock.patch(
+ "google.cloud.bigquery._versions_helpers.PYARROW_VERSIONS._installed_version",
+ packaging.version.parse("2.0.0"), # A known bad version of pyarrow.
+ )
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=google.api_core.exceptions.NotFound("Table not found"),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ with load_patch, get_table_patch, pyarrow_version_patch:
+ with pytest.raises(exceptions.LegacyPyarrowError):
+ client.load_table_from_dataframe(
+ dataframe,
+ self.TABLE_REF,
+ location=self.LOCATION,
+ )
+
+ def test_load_table_from_dataframe_w_nulls(self):
+ """Test that a DataFrame with null columns can be uploaded if a
+ BigQuery schema is specified.
+
+ See: https://github.com/googleapis/google-cloud-python/issues/7370
+ """
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+
+ client = self._make_client()
+ records = [{"name": None, "age": None}, {"name": None, "age": None}]
+ dataframe = pandas.DataFrame(records, columns=["name", "age"])
+ schema = [SchemaField("name", "STRING"), SchemaField("age", "INTEGER")]
+ job_config = job.LoadJobConfig(schema=schema)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.schema == schema
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+
+ def test_load_table_from_dataframe_w_invaild_job_config(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery import job
+
+ client = self._make_client()
+
+ records = [{"float_column": 3.14, "struct_column": [{"foo": 1}, {"bar": -1}]}]
+ dataframe = pandas.DataFrame(data=records)
+ job_config = job.CopyJobConfig()
+
+ with pytest.raises(TypeError) as exc:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
+ )
+
+ err_msg = str(exc.value)
+ assert "Expected an instance of LoadJobConfig" in err_msg
+
+ def test_load_table_from_dataframe_with_csv_source_format(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+ records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
+ dataframe = pandas.DataFrame(records)
+ job_config = job.LoadJobConfig(
+ write_disposition=job.WriteDisposition.WRITE_TRUNCATE,
+ source_format=job.SourceFormat.CSV,
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(
+ schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
+ ),
+ )
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, job_config=job_config
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=None,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_file = load_table_from_file.mock_calls[0][1][1]
+ assert sent_file.closed
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.CSV
+
+ def test_load_table_from_dataframe_w_higher_scale_decimal128_datatype(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.schema import SchemaField
+ from decimal import Decimal
+
+ client = self._make_client()
+ dataframe = pandas.DataFrame({"x": [Decimal("0.1234567891")]})
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table", autospec=True
+ )
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_dataframe(
+ dataframe, self.TABLE_REF, location=self.LOCATION
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ rewind=True,
+ size=mock.ANY,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=self.LOCATION,
+ project=None,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.PARQUET
+ assert tuple(sent_config.schema) == (
+ SchemaField("x", "BIGNUMERIC", "NULLABLE", None),
+ )
+
+ # With autodetect specified, we pass the value as is. For more info, see
+ # https://github.com/googleapis/python-bigquery/issues/1228#issuecomment-1910946297
+ def test_load_table_from_json_basic_use(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+
+ client = self._make_client()
+
+ json_rows = [
+ {"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
+ {"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
+ ]
+
+ job_config = job.LoadJobConfig(autodetect=True)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ # mock: remote table already exists
+ get_table_reference = {
+ "projectId": "project_id",
+ "datasetId": "test_dataset",
+ "tableId": "test_table",
+ }
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(table_reference=get_table_reference),
+ )
+
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_json(
+ json_rows, self.TABLE_REF, job_config=job_config
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ size=mock.ANY,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=client.location,
+ project=client.project,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.NEWLINE_DELIMITED_JSON
+ assert sent_config.schema is None
+ assert sent_config.autodetect
+
+ def test_load_table_from_json_non_default_args(self):
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+
+ json_rows = [
+ {"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
+ {"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
+ ]
+
+ schema = [
+ SchemaField("name", "STRING"),
+ SchemaField("age", "INTEGER"),
+ SchemaField("adult", "BOOLEAN"),
+ ]
+ job_config = job.LoadJobConfig(schema=schema)
+ job_config._properties["load"]["unknown_field"] = "foobar"
+ original_config_copy = copy.deepcopy(job_config)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ with load_patch as load_table_from_file:
+ client.load_table_from_json(
+ json_rows,
+ self.TABLE_REF,
+ job_config=job_config,
+ project="project-x",
+ location="EU",
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ size=mock.ANY,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location="EU",
+ project="project-x",
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.NEWLINE_DELIMITED_JSON
+ assert sent_config.schema == schema
+ assert not sent_config.autodetect
+ # all properties should have been cloned and sent to the backend
+ assert sent_config._properties.get("load", {}).get("unknown_field") == "foobar"
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_load_table_from_json_w_invalid_job_config(self):
+ from google.cloud.bigquery import job
+
+ client = self._make_client()
+ json_rows = [
+ {"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
+ {"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
+ ]
+ job_config = job.CopyJobConfig()
+ with pytest.raises(TypeError) as exc:
+ client.load_table_from_json(
+ json_rows,
+ self.TABLE_REF,
+ job_config=job_config,
+ project="project-x",
+ location="EU",
+ )
+ err_msg = str(exc.value)
+ assert "Expected an instance of LoadJobConfig" in err_msg
+
+ # When all following are true:
+ # (1) no schema provided;
+ # (2) no autodetect value provided;
+ # (3) writeDisposition == WRITE_APPEND or None;
+ # (4) table already exists,
+ # client sets autodetect == False
+ # For more details, see https://github.com/googleapis/python-bigquery/issues/1228#issuecomment-1910946297
+ def test_load_table_from_json_wo_schema_wo_autodetect_write_append_w_table(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.job import WriteDisposition
+
+ client = self._make_client()
+
+ json_rows = [
+ {"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
+ {"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
+ ]
+
+ job_config = job.LoadJobConfig(write_disposition=WriteDisposition.WRITE_APPEND)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ # mock: remote table already exists
+ get_table_reference = {
+ "projectId": "project_id",
+ "datasetId": "test_dataset",
+ "tableId": "test_table",
+ }
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(table_reference=get_table_reference),
+ )
+
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_json(
+ json_rows, self.TABLE_REF, job_config=job_config
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ size=mock.ANY,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=client.location,
+ project=client.project,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.NEWLINE_DELIMITED_JSON
+ assert sent_config.schema is None
+ assert not sent_config.autodetect
+
+ # When all following are true:
+ # (1) no schema provided;
+ # (2) no autodetect value provided;
+ # (3) writeDisposition == WRITE_APPEND or None;
+ # (4) table does NOT exist,
+ # client sets autodetect == True
+ # For more details, see https://github.com/googleapis/python-bigquery/issues/1228#issuecomment-1910946297
+ def test_load_table_from_json_wo_schema_wo_autodetect_write_append_wo_table(self):
+ import google.api_core.exceptions as core_exceptions
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.job import WriteDisposition
+
+ client = self._make_client()
+
+ json_rows = [
+ {"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
+ {"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
+ ]
+
+ job_config = job.LoadJobConfig(write_disposition=WriteDisposition.WRITE_APPEND)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ # mock: remote table doesn't exist
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ side_effect=core_exceptions.NotFound(""),
+ )
+
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_json(
+ json_rows, self.TABLE_REF, job_config=job_config
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ size=mock.ANY,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=client.location,
+ project=client.project,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.NEWLINE_DELIMITED_JSON
+ assert sent_config.schema is None
+ assert sent_config.autodetect
+
+ # When all following are true:
+ # (1) no schema provided;
+ # (2) no autodetect value provided;
+ # (3) writeDisposition == WRITE_TRUNCATE or WRITE_EMPTY;
+ # client sets autodetect == True
+ # For more details, see https://github.com/googleapis/python-bigquery/issues/1228#issuecomment-1910946297
+ def test_load_table_from_json_wo_schema_wo_autodetect_others(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.job import WriteDisposition
+
+ client = self._make_client()
+
+ json_rows = [
+ {"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
+ {"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
+ ]
+
+ job_config = job.LoadJobConfig(
+ write_disposition=WriteDisposition.WRITE_TRUNCATE
+ )
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ with load_patch as load_table_from_file:
+ client.load_table_from_json(
+ json_rows, self.TABLE_REF, job_config=job_config
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ size=mock.ANY,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=client.location,
+ project=client.project,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.NEWLINE_DELIMITED_JSON
+ assert sent_config.schema is None
+ assert sent_config.autodetect
+
+ def test_load_table_from_json_w_explicit_job_config_override(self):
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+
+ json_rows = [
+ {"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
+ {"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
+ ]
+
+ schema = [
+ SchemaField("name", "STRING"),
+ SchemaField("age", "INTEGER"),
+ SchemaField("adult", "BOOLEAN"),
+ ]
+ client.default_load_job_config = job.LoadJobConfig(
+ schema=schema, encoding="ISO-8859-1"
+ )
+
+ override_schema = schema
+ override_schema[0] = SchemaField("username", "STRING")
+ job_config = job.LoadJobConfig(schema=override_schema)
+ original_config_copy = copy.deepcopy(job_config)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ with load_patch as load_table_from_file:
+ client.load_table_from_json(
+ json_rows,
+ self.TABLE_REF,
+ job_config=job_config,
+ project="project-x",
+ location="EU",
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ size=mock.ANY,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location="EU",
+ project="project-x",
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.NEWLINE_DELIMITED_JSON
+ assert sent_config.schema == override_schema
+ assert sent_config.encoding == "ISO-8859-1"
+ assert not sent_config.autodetect
+
+ # the original config object should not have been modified
+ assert job_config.to_api_repr() == original_config_copy.to_api_repr()
+
+ def test_load_table_from_json_w_default_job_config(self):
+ from google.cloud.bigquery import job
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+ from google.cloud.bigquery.schema import SchemaField
+
+ client = self._make_client()
+
+ json_rows = [
+ {"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
+ {"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
+ ]
+
+ schema = [
+ SchemaField("name", "STRING"),
+ SchemaField("age", "INTEGER"),
+ SchemaField("adult", "BOOLEAN"),
+ ]
+ client.default_load_job_config = job.LoadJobConfig(schema=schema)
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+
+ with load_patch as load_table_from_file:
+ client.load_table_from_json(
+ json_rows,
+ self.TABLE_REF,
+ job_config=None,
+ project="project-x",
+ location="EU",
+ )
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ size=mock.ANY,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location="EU",
+ project="project-x",
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
+ assert sent_config.source_format == job.SourceFormat.NEWLINE_DELIMITED_JSON
+ assert sent_config.schema == schema
+
+ def test_load_table_from_json_unicode_emoji_data_case(self):
+ from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
+
+ client = self._make_client()
+
+ emoji = "\U0001F3E6"
+ json_row = {"emoji": emoji}
+ json_rows = [json_row]
+
+ load_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
+ )
+ # mock: remote table already exists
+ get_table_reference = {
+ "projectId": "project_id",
+ "datasetId": "test_dataset",
+ "tableId": "test_table",
+ }
+ get_table_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.get_table",
+ autospec=True,
+ return_value=mock.Mock(table_reference=get_table_reference),
+ )
+
+ with load_patch as load_table_from_file, get_table_patch:
+ client.load_table_from_json(json_rows, self.TABLE_REF)
+
+ load_table_from_file.assert_called_once_with(
+ client,
+ mock.ANY,
+ self.TABLE_REF,
+ size=mock.ANY,
+ num_retries=_DEFAULT_NUM_RETRIES,
+ job_id=mock.ANY,
+ job_id_prefix=None,
+ location=client.location,
+ project=client.project,
+ job_config=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+ sent_data_file = load_table_from_file.mock_calls[0][1][1]
+
+ # make sure json_row's unicode characters are only encoded one time
+ expected_bytes = b'{"emoji": "' + emoji.encode("utf8") + b'"}'
+ assert sent_data_file.getvalue() == expected_bytes
+
+ # Low-level tests
+
+ @classmethod
+ def _make_resumable_upload_responses(cls, size):
+ """Make a series of responses for a successful resumable upload."""
+ from google import resumable_media
+
+ resumable_url = "http://test.invalid?upload_id=and-then-there-was-1"
+ initial_response = cls._make_response(
+ http.client.OK, "", {"location": resumable_url}
+ )
+ data_response = cls._make_response(
+ resumable_media.PERMANENT_REDIRECT,
+ "",
+ {"range": "bytes=0-{:d}".format(size - 1)},
+ )
+ final_response = cls._make_response(
+ http.client.OK,
+ json.dumps({"size": size}),
+ {"Content-Type": "application/json"},
+ )
+ return [initial_response, data_response, final_response]
+
+ @staticmethod
+ def _make_transport(responses=None):
+ import google.auth.transport.requests
+
+ transport = mock.create_autospec(
+ google.auth.transport.requests.AuthorizedSession, instance=True
+ )
+ transport.request.side_effect = responses
+ return transport
+
+ def test__do_resumable_upload(self):
+ file_obj = self._make_file_obj()
+ file_obj_len = len(file_obj.getvalue())
+ transport = self._make_transport(
+ self._make_resumable_upload_responses(file_obj_len)
+ )
+ client = self._make_client(transport)
+
+ result = client._do_resumable_upload(
+ file_obj, self.EXPECTED_CONFIGURATION, None, None
+ )
+
+ content = result.content.decode("utf-8")
+ assert json.loads(content) == {"size": file_obj_len}
+
+ # Verify that configuration data was passed in with the initial
+ # request.
+ transport.request.assert_any_call(
+ "POST",
+ mock.ANY,
+ data=json.dumps(self.EXPECTED_CONFIGURATION).encode("utf-8"),
+ headers=mock.ANY,
+ timeout=mock.ANY,
+ )
+
+ def test__do_resumable_upload_custom_project(self):
+ file_obj = self._make_file_obj()
+ file_obj_len = len(file_obj.getvalue())
+ transport = self._make_transport(
+ self._make_resumable_upload_responses(file_obj_len)
+ )
+ client = self._make_client(transport)
+
+ result = client._do_resumable_upload(
+ file_obj,
+ self.EXPECTED_CONFIGURATION,
+ None,
+ None,
+ project="custom-project",
+ )
+
+ content = result.content.decode("utf-8")
+ assert json.loads(content) == {"size": file_obj_len}
+
+ # Verify that configuration data was passed in with the initial
+ # request.
+ transport.request.assert_any_call(
+ "POST",
+ mock.ANY,
+ data=json.dumps(self.EXPECTED_CONFIGURATION).encode("utf-8"),
+ headers=mock.ANY,
+ timeout=mock.ANY,
+ )
+
+ initiation_url = next(
+ (
+ call[0][1]
+ for call in transport.request.call_args_list
+ if call[0][0] == "POST" and "uploadType=resumable" in call[0][1]
+ ),
+ None,
+ ) # pragma: NO COVER
+
+ assert initiation_url is not None
+ assert "projects/custom-project" in initiation_url
+
+ def test__do_resumable_upload_custom_timeout(self):
+ file_obj = self._make_file_obj()
+ file_obj_len = len(file_obj.getvalue())
+ transport = self._make_transport(
+ self._make_resumable_upload_responses(file_obj_len)
+ )
+ client = self._make_client(transport)
+
+ client._do_resumable_upload(
+ file_obj, self.EXPECTED_CONFIGURATION, num_retries=0, timeout=3.14
+ )
+
+ # The timeout should be applied to all underlying calls.
+ for call_args in transport.request.call_args_list:
+ assert call_args[1].get("timeout") == 3.14
+
+ def test__do_multipart_upload(self):
+ transport = self._make_transport([self._make_response(http.client.OK)])
+ client = self._make_client(transport)
+ file_obj = self._make_file_obj()
+ file_obj_len = len(file_obj.getvalue())
+
+ client._do_multipart_upload(
+ file_obj, self.EXPECTED_CONFIGURATION, file_obj_len, None, None
+ )
+
+ # Verify that configuration data was passed in with the initial
+ # request.
+ request_args = transport.request.mock_calls[0][2]
+ request_data = request_args["data"].decode("utf-8")
+ request_headers = request_args["headers"]
+
+ request_content = email.message_from_string(
+ "Content-Type: {}\r\n{}".format(
+ request_headers["content-type"].decode("utf-8"), request_data
+ )
+ )
+
+ # There should be two payloads: the configuration and the binary daya.
+ configuration_data = request_content.get_payload(0).get_payload()
+ binary_data = request_content.get_payload(1).get_payload()
+
+ assert json.loads(configuration_data) == self.EXPECTED_CONFIGURATION
+ assert binary_data.encode("utf-8") == file_obj.getvalue()
+
+ def test__do_multipart_upload_wrong_size(self):
+ client = self._make_client()
+ file_obj = self._make_file_obj()
+ file_obj_len = len(file_obj.getvalue())
+
+ with pytest.raises(ValueError):
+ client._do_multipart_upload(file_obj, {}, file_obj_len + 1, None, None)
+
+ def test_schema_from_json_with_file_path(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ file_content = """[
+ {
+ "description": "quarter",
+ "mode": "REQUIRED",
+ "name": "qtr",
+ "type": "STRING"
+ },
+ {
+ "description": "sales representative",
+ "mode": "NULLABLE",
+ "name": "rep",
+ "type": "STRING"
+ },
+ {
+ "description": "total sales",
+ "mode": "NULLABLE",
+ "name": "sales",
+ "type": "FLOAT"
+ }
+ ]"""
+
+ expected = [
+ SchemaField("qtr", "STRING", "REQUIRED", description="quarter"),
+ SchemaField(
+ "rep",
+ "STRING",
+ "NULLABLE",
+ description="sales representative",
+ ),
+ SchemaField(
+ "sales",
+ "FLOAT",
+ "NULLABLE",
+ description="total sales",
+ ),
+ ]
+
+ client = self._make_client()
+ mock_file_path = "/mocked/file.json"
+
+ open_patch = mock.patch(
+ "builtins.open", new=mock.mock_open(read_data=file_content)
+ )
+
+ with open_patch as _mock_file:
+ actual = client.schema_from_json(mock_file_path)
+ _mock_file.assert_called_once_with(mock_file_path)
+ # This assert is to make sure __exit__ is called in the context
+ # manager that opens the file in the function
+ _mock_file().__exit__.assert_called_once()
+
+ assert expected == actual
+
+ def test_schema_from_json_with_file_object(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ file_content = """[
+ {
+ "description": "quarter",
+ "mode": "REQUIRED",
+ "name": "qtr",
+ "type": "STRING"
+ },
+ {
+ "description": "sales representative",
+ "mode": "NULLABLE",
+ "name": "rep",
+ "type": "STRING"
+ },
+ {
+ "description": "total sales",
+ "mode": "NULLABLE",
+ "name": "sales",
+ "type": "FLOAT"
+ }
+ ]"""
+
+ expected = [
+ SchemaField("qtr", "STRING", "REQUIRED", description="quarter"),
+ SchemaField(
+ "rep", "STRING", "NULLABLE", description="sales representative"
+ ),
+ SchemaField("sales", "FLOAT", "NULLABLE", description="total sales"),
+ ]
+
+ client = self._make_client()
+ fake_file = io.StringIO(file_content)
+ actual = client.schema_from_json(fake_file)
+
+ assert expected == actual
+
+ def test_schema_to_json_with_file_path(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ file_content = [
+ {
+ "description": "quarter",
+ "mode": "REQUIRED",
+ "name": "qtr",
+ "type": "STRING",
+ },
+ {
+ "description": "sales representative",
+ "mode": "NULLABLE",
+ "name": "rep",
+ "type": "STRING",
+ },
+ {
+ "description": "total sales",
+ "mode": "NULLABLE",
+ "name": "sales",
+ "type": "FLOAT",
+ },
+ ]
+
+ schema_list = [
+ SchemaField("qtr", "STRING", "REQUIRED", description="quarter"),
+ SchemaField(
+ "rep", "STRING", "NULLABLE", description="sales representative"
+ ),
+ SchemaField("sales", "FLOAT", "NULLABLE", description="total sales"),
+ ]
+
+ client = self._make_client()
+ mock_file_path = "/mocked/file.json"
+ open_patch = mock.patch("builtins.open", mock.mock_open())
+
+ with open_patch as mock_file, mock.patch("json.dump") as mock_dump:
+ client.schema_to_json(schema_list, mock_file_path)
+ mock_file.assert_called_once_with(mock_file_path, mode="w")
+ # This assert is to make sure __exit__ is called in the context
+ # manager that opens the file in the function
+ mock_file().__exit__.assert_called_once()
+ mock_dump.assert_called_with(
+ file_content, mock_file.return_value, indent=2, sort_keys=True
+ )
+
+ def test_schema_to_json_with_file_object(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ file_content = [
+ {
+ "description": "quarter",
+ "mode": "REQUIRED",
+ "name": "qtr",
+ "type": "STRING",
+ },
+ {
+ "description": "sales representative",
+ "mode": "NULLABLE",
+ "name": "rep",
+ "type": "STRING",
+ },
+ {
+ "description": "total sales",
+ "mode": "NULLABLE",
+ "name": "sales",
+ "type": "FLOAT",
+ },
+ ]
+
+ schema_list = [
+ SchemaField("qtr", "STRING", "REQUIRED", description="quarter"),
+ SchemaField(
+ "rep", "STRING", "NULLABLE", description="sales representative"
+ ),
+ SchemaField("sales", "FLOAT", "NULLABLE", description="total sales"),
+ ]
+
+ fake_file = io.StringIO()
+
+ client = self._make_client()
+
+ client.schema_to_json(schema_list, fake_file)
+ assert file_content == json.loads(fake_file.getvalue())
+
+
+def test_upload_chunksize(client):
+ with mock.patch("google.cloud.bigquery.client.ResumableUpload") as RU:
+ upload = RU.return_value
+
+ upload.finished = False
+
+ def transmit_next_chunk(transport, *args, **kwargs):
+ upload.finished = True
+ result = mock.MagicMock()
+ result.json.return_value = {}
+ return result
+
+ upload.transmit_next_chunk = transmit_next_chunk
+ f = io.BytesIO()
+ client.load_table_from_file(f, "foo.bar")
+
+ chunk_size = RU.call_args_list[0][0][1]
+ assert chunk_size == 100 * (1 << 20)
+
+
+@pytest.mark.enable_add_server_timeout_header
+@pytest.mark.parametrize("headers", [None, {}])
+def test__call_api_add_server_timeout_w_timeout(client, headers):
+ client._connection = make_connection({})
+ client._call_api(None, method="GET", path="/", headers=headers, timeout=42)
+ client._connection.api_request.assert_called_with(
+ method="GET", path="/", timeout=42, headers={"X-Server-Timeout": "42"}
+ )
+
+
+@pytest.mark.enable_add_server_timeout_header
+def test__call_api_no_add_server_timeout_wo_timeout(client):
+ client._connection = make_connection({})
+ client._call_api(None, method="GET", path="/")
+ client._connection.api_request.assert_called_with(method="GET", path="/")
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_create_dataset.py b/testbed/googleapis__python-bigquery/tests/unit/test_create_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2491a8121ca069eeb100ec9824de9c84bc89b49
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_create_dataset.py
@@ -0,0 +1,548 @@
+# Copyright 2021 Google LLC
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# https://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+from google.cloud.bigquery.dataset import Dataset, DatasetReference
+from .helpers import make_connection, dataset_polymorphic, make_client
+import google.cloud.bigquery.dataset
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+import pytest
+
+
+@dataset_polymorphic
+def test_create_dataset_minimal(make_dataset, get_reference, client, PROJECT, DS_ID):
+ PATH = "projects/%s/datasets" % PROJECT
+ RESOURCE = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "%s:%s" % (PROJECT, DS_ID),
+ }
+ conn = client._connection = make_connection(RESOURCE)
+
+ dataset = make_dataset(PROJECT, DS_ID)
+ after = client.create_dataset(dataset, timeout=7.5)
+
+ assert after.dataset_id == DS_ID
+ assert after.project == PROJECT
+ assert after.etag == RESOURCE["etag"]
+ assert after.full_dataset_id == RESOURCE["id"]
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % PATH,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ },
+ timeout=7.5,
+ )
+
+
+def test_create_dataset_w_attrs(client, PROJECT, DS_ID):
+ from google.cloud.bigquery.dataset import AccessEntry
+
+ PATH = "projects/%s/datasets" % PROJECT
+ DESCRIPTION = "DESC"
+ FRIENDLY_NAME = "FN"
+ LOCATION = "US"
+ USER_EMAIL = "phred@example.com"
+ LABELS = {"color": "red"}
+ VIEW = {
+ "projectId": "my-proj",
+ "datasetId": "starry-skies",
+ "tableId": "northern-hemisphere",
+ }
+ DEFAULT_ROUNDING_MODE = "ROUND_HALF_EVEN"
+ RESOURCE = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "%s:%s" % (PROJECT, DS_ID),
+ "description": DESCRIPTION,
+ "friendlyName": FRIENDLY_NAME,
+ "location": LOCATION,
+ "defaultTableExpirationMs": "3600",
+ "labels": LABELS,
+ "access": [{"role": "OWNER", "userByEmail": USER_EMAIL}, {"view": VIEW}],
+ "defaultRoundingMode": DEFAULT_ROUNDING_MODE,
+ }
+ conn = client._connection = make_connection(RESOURCE)
+ entries = [
+ AccessEntry("OWNER", "userByEmail", USER_EMAIL),
+ AccessEntry(None, "view", VIEW),
+ ]
+
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ before = Dataset(ds_ref)
+ before.access_entries = entries
+ before.description = DESCRIPTION
+ before.friendly_name = FRIENDLY_NAME
+ before.default_table_expiration_ms = 3600
+ before.location = LOCATION
+ before.labels = LABELS
+ before.default_rounding_mode = DEFAULT_ROUNDING_MODE
+ after = client.create_dataset(before)
+ assert after.dataset_id == DS_ID
+ assert after.project == PROJECT
+ assert after.etag == RESOURCE["etag"]
+ assert after.full_dataset_id == RESOURCE["id"]
+ assert after.description == DESCRIPTION
+ assert after.friendly_name == FRIENDLY_NAME
+ assert after.location == LOCATION
+ assert after.default_table_expiration_ms == 3600
+ assert after.labels == LABELS
+ assert after.default_rounding_mode == DEFAULT_ROUNDING_MODE
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % PATH,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "description": DESCRIPTION,
+ "friendlyName": FRIENDLY_NAME,
+ "location": LOCATION,
+ "defaultTableExpirationMs": "3600",
+ "defaultRoundingMode": DEFAULT_ROUNDING_MODE,
+ "access": [
+ {"role": "OWNER", "userByEmail": USER_EMAIL},
+ {"view": VIEW, "role": None},
+ ],
+ "labels": LABELS,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_w_custom_property(client, PROJECT, DS_ID):
+ # The library should handle sending properties to the API that are not
+ # yet part of the library
+
+ path = "/projects/%s/datasets" % PROJECT
+ resource = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "newAlphaProperty": "unreleased property",
+ }
+ conn = client._connection = make_connection(resource)
+
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ before = Dataset(ds_ref)
+ before._properties["newAlphaProperty"] = "unreleased property"
+ after = client.create_dataset(before)
+
+ assert after.dataset_id == DS_ID
+ assert after.project == PROJECT
+ assert after._properties["newAlphaProperty"] == "unreleased property"
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "newAlphaProperty": "unreleased property",
+ "labels": {},
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_w_client_location_wo_dataset_location(PROJECT, DS_ID, LOCATION):
+ PATH = "projects/%s/datasets" % PROJECT
+ RESOURCE = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "%s:%s" % (PROJECT, DS_ID),
+ "location": LOCATION,
+ }
+ client = make_client(location=LOCATION)
+ conn = client._connection = make_connection(RESOURCE)
+
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ before = Dataset(ds_ref)
+ after = client.create_dataset(before)
+
+ assert after.dataset_id == DS_ID
+ assert after.project == PROJECT
+ assert after.etag == RESOURCE["etag"]
+ assert after.full_dataset_id == RESOURCE["id"]
+ assert after.location == LOCATION
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % PATH,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ "location": LOCATION,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_w_client_location_w_dataset_location(PROJECT, DS_ID, LOCATION):
+ PATH = "projects/%s/datasets" % PROJECT
+ OTHER_LOCATION = "EU"
+ RESOURCE = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "%s:%s" % (PROJECT, DS_ID),
+ "location": OTHER_LOCATION,
+ }
+ client = make_client(location=LOCATION)
+ conn = client._connection = make_connection(RESOURCE)
+
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ before = Dataset(ds_ref)
+ before.location = OTHER_LOCATION
+ after = client.create_dataset(before)
+
+ assert after.dataset_id == DS_ID
+ assert after.project == PROJECT
+ assert after.etag == RESOURCE["etag"]
+ assert after.full_dataset_id == RESOURCE["id"]
+ assert after.location == OTHER_LOCATION
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path="/%s" % PATH,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ "location": OTHER_LOCATION,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_w_reference(PROJECT, DS_ID, LOCATION):
+ path = "/projects/%s/datasets" % PROJECT
+ resource = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "%s:%s" % (PROJECT, DS_ID),
+ "location": LOCATION,
+ }
+ client = make_client(location=LOCATION)
+ conn = client._connection = make_connection(resource)
+ dataset = client.create_dataset(DatasetReference(PROJECT, DS_ID))
+
+ assert dataset.dataset_id == DS_ID
+ assert dataset.project == PROJECT
+ assert dataset.etag == resource["etag"]
+ assert dataset.full_dataset_id == resource["id"]
+ assert dataset.location == LOCATION
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ "location": LOCATION,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_w_fully_qualified_string(PROJECT, DS_ID, LOCATION):
+ path = "/projects/%s/datasets" % PROJECT
+ resource = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "%s:%s" % (PROJECT, DS_ID),
+ "location": LOCATION,
+ }
+ client = make_client(location=LOCATION)
+ conn = client._connection = make_connection(resource)
+ dataset = client.create_dataset("{}.{}".format(PROJECT, DS_ID))
+
+ assert dataset.dataset_id == DS_ID
+ assert dataset.project == PROJECT
+ assert dataset.etag == resource["etag"]
+ assert dataset.full_dataset_id == resource["id"]
+ assert dataset.location == LOCATION
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ "location": LOCATION,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_w_string(PROJECT, DS_ID, LOCATION):
+ path = "/projects/%s/datasets" % PROJECT
+ resource = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "%s:%s" % (PROJECT, DS_ID),
+ "location": LOCATION,
+ }
+ client = make_client(location=LOCATION)
+ conn = client._connection = make_connection(resource)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ dataset = client.create_dataset(DS_ID)
+
+ final_attributes.assert_called_once_with({"path": path}, client, None)
+
+ assert dataset.dataset_id == DS_ID
+ assert dataset.project == PROJECT
+ assert dataset.etag == resource["etag"]
+ assert dataset.full_dataset_id == resource["id"]
+ assert dataset.location == LOCATION
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ "location": LOCATION,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_alreadyexists_w_exists_ok_false(PROJECT, DS_ID, LOCATION):
+ client = make_client(location=LOCATION)
+ client._connection = make_connection(
+ google.api_core.exceptions.AlreadyExists("dataset already exists")
+ )
+
+ with pytest.raises(google.api_core.exceptions.AlreadyExists):
+ client.create_dataset(DS_ID)
+
+
+def test_create_dataset_alreadyexists_w_exists_ok_true(PROJECT, DS_ID, LOCATION):
+ post_path = "/projects/{}/datasets".format(PROJECT)
+ get_path = "/projects/{}/datasets/{}".format(PROJECT, DS_ID)
+ resource = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "{}:{}".format(PROJECT, DS_ID),
+ "location": LOCATION,
+ }
+ client = make_client(location=LOCATION)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.AlreadyExists("dataset already exists"), resource
+ )
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ dataset = client.create_dataset(DS_ID, exists_ok=True)
+
+ final_attributes.assert_called_with({"path": get_path}, client, None)
+
+ assert dataset.dataset_id == DS_ID
+ assert dataset.project == PROJECT
+ assert dataset.etag == resource["etag"]
+ assert dataset.full_dataset_id == resource["id"]
+ assert dataset.location == LOCATION
+
+ conn.api_request.assert_has_calls(
+ [
+ mock.call(
+ method="POST",
+ path=post_path,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ "location": LOCATION,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ ),
+ mock.call(method="GET", path=get_path, timeout=DEFAULT_TIMEOUT),
+ ]
+ )
+
+
+def test_create_dataset_with_default_rounding_mode_if_value_is_none(
+ PROJECT, DS_ID, LOCATION
+):
+ default_rounding_mode = None
+ path = "/projects/%s/datasets" % PROJECT
+ resource = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "{}:{}".format(PROJECT, DS_ID),
+ "location": LOCATION,
+ }
+ client = make_client(location=LOCATION)
+ conn = client._connection = make_connection(resource)
+
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ before = Dataset(ds_ref)
+ before.default_rounding_mode = default_rounding_mode
+ after = client.create_dataset(before)
+
+ assert after.dataset_id == DS_ID
+ assert after.project == PROJECT
+ assert after.default_rounding_mode is None
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ "location": LOCATION,
+ "defaultRoundingMode": "ROUNDING_MODE_UNSPECIFIED",
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_with_default_rounding_mode_if_value_is_not_string(
+ PROJECT, DS_ID, LOCATION
+):
+ default_rounding_mode = 10
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ dataset = Dataset(ds_ref)
+ with pytest.raises(ValueError) as e:
+ dataset.default_rounding_mode = default_rounding_mode
+ assert str(e.value) == "Pass a string, or None"
+
+
+def test_create_dataset_with_default_rounding_mode_if_value_is_not_in_possible_values(
+ PROJECT, DS_ID
+):
+ default_rounding_mode = "ROUND_HALF_AWAY_FROM_ZEROS"
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ dataset = Dataset(ds_ref)
+ with pytest.raises(ValueError) as e:
+ dataset.default_rounding_mode = default_rounding_mode
+ assert (
+ str(e.value)
+ == "rounding mode needs to be one of ROUNDING_MODE_UNSPECIFIED,ROUND_HALF_AWAY_FROM_ZERO,ROUND_HALF_EVEN"
+ )
+
+
+def test_create_dataset_with_default_rounding_mode_if_value_is_in_possible_values(
+ PROJECT, DS_ID, LOCATION
+):
+ default_rounding_mode = "ROUND_HALF_AWAY_FROM_ZERO"
+ path = "/projects/%s/datasets" % PROJECT
+ resource = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "{}:{}".format(PROJECT, DS_ID),
+ "location": LOCATION,
+ }
+ client = make_client(location=LOCATION)
+ conn = client._connection = make_connection(resource)
+
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ before = Dataset(ds_ref)
+ before.default_rounding_mode = default_rounding_mode
+ after = client.create_dataset(before)
+
+ assert after.dataset_id == DS_ID
+ assert after.project == PROJECT
+ assert after.default_rounding_mode is None
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ "location": LOCATION,
+ "defaultRoundingMode": default_rounding_mode,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_with_max_time_travel_hours(PROJECT, DS_ID, LOCATION):
+ path = "/projects/%s/datasets" % PROJECT
+ max_time_travel_hours = 24 * 3
+
+ resource = {
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "etag": "etag",
+ "id": "{}:{}".format(PROJECT, DS_ID),
+ "location": LOCATION,
+ "maxTimeTravelHours": max_time_travel_hours,
+ }
+ client = make_client(location=LOCATION)
+ conn = client._connection = make_connection(resource)
+
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ before = Dataset(ds_ref)
+ before.max_time_travel_hours = max_time_travel_hours
+ after = client.create_dataset(before)
+ assert after.dataset_id == DS_ID
+ assert after.project == PROJECT
+ assert after.max_time_travel_hours == max_time_travel_hours
+
+ conn.api_request.assert_called_once_with(
+ method="POST",
+ path=path,
+ data={
+ "datasetReference": {"projectId": PROJECT, "datasetId": DS_ID},
+ "labels": {},
+ "location": LOCATION,
+ "maxTimeTravelHours": max_time_travel_hours,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_create_dataset_with_max_time_travel_hours_not_multiple_of_24(
+ PROJECT, DS_ID, LOCATION
+):
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ dataset = Dataset(ds_ref)
+ with pytest.raises(ValueError) as e:
+ dataset.max_time_travel_hours = 50
+ assert str(e.value) == "Time Travel Window should be multiple of 24"
+
+
+def test_create_dataset_with_max_time_travel_hours_is_less_than_2_days(
+ PROJECT, DS_ID, LOCATION
+):
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ dataset = Dataset(ds_ref)
+ with pytest.raises(ValueError) as e:
+ dataset.max_time_travel_hours = 24
+ assert (
+ str(e.value)
+ == "Time Travel Window should be from 48 to 168 hours (2 to 7 days)"
+ )
+
+
+def test_create_dataset_with_max_time_travel_hours_is_greater_than_7_days(
+ PROJECT, DS_ID, LOCATION
+):
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ dataset = Dataset(ds_ref)
+ with pytest.raises(ValueError) as e:
+ dataset.max_time_travel_hours = 192
+ assert (
+ str(e.value)
+ == "Time Travel Window should be from 48 to 168 hours (2 to 7 days)"
+ )
+
+
+def test_create_dataset_with_max_time_travel_hours_is_not_int(PROJECT, DS_ID, LOCATION):
+ ds_ref = DatasetReference(PROJECT, DS_ID)
+ dataset = Dataset(ds_ref)
+ with pytest.raises(ValueError) as e:
+ dataset.max_time_travel_hours = "50"
+ assert str(e.value) == "max_time_travel_hours must be an integer. Got 50"
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_dataset.py b/testbed/googleapis__python-bigquery/tests/unit/test_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0164bc738a49a746d4ced6ecfa0595897a264f3
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_dataset.py
@@ -0,0 +1,1124 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from unittest import mock
+
+from google.cloud.bigquery.routine.routine import Routine, RoutineReference
+import pytest
+from google.cloud.bigquery.dataset import (
+ AccessEntry,
+ Dataset,
+ DatasetReference,
+ Table,
+ TableReference,
+)
+
+
+class TestAccessEntry(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ return AccessEntry
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_defaults(self):
+ entry = self._make_one("OWNER", "userByEmail", "phred@example.com")
+ self.assertEqual(entry.role, "OWNER")
+ self.assertEqual(entry.entity_type, "userByEmail")
+ self.assertEqual(entry.entity_id, "phred@example.com")
+
+ def test_ctor_view_success(self):
+ role = None
+ entity_type = "view"
+ entity_id = object()
+ entry = self._make_one(role, entity_type, entity_id)
+ self.assertEqual(entry.role, role)
+ self.assertEqual(entry.entity_type, entity_type)
+ self.assertEqual(entry.entity_id, entity_id)
+
+ def test_ctor_routine_success(self):
+ role = None
+ entity_type = "routine"
+ entity_id = object()
+ entry = self._make_one(role, entity_type, entity_id)
+ self.assertEqual(entry.role, role)
+ self.assertEqual(entry.entity_type, entity_type)
+ self.assertEqual(entry.entity_id, entity_id)
+
+ def test___eq___role_mismatch(self):
+ entry = self._make_one("OWNER", "userByEmail", "phred@example.com")
+ other = self._make_one("WRITER", "userByEmail", "phred@example.com")
+ self.assertNotEqual(entry, other)
+
+ def test___eq___entity_type_mismatch(self):
+ entry = self._make_one("OWNER", "userByEmail", "phred@example.com")
+ other = self._make_one("OWNER", "groupByEmail", "phred@example.com")
+ self.assertNotEqual(entry, other)
+
+ def test___eq___entity_id_mismatch(self):
+ entry = self._make_one("OWNER", "userByEmail", "phred@example.com")
+ other = self._make_one("OWNER", "userByEmail", "bharney@example.com")
+ self.assertNotEqual(entry, other)
+
+ def test___eq___hit(self):
+ entry = self._make_one("OWNER", "userByEmail", "phred@example.com")
+ other = self._make_one("OWNER", "userByEmail", "phred@example.com")
+ self.assertEqual(entry, other)
+
+ def test__eq___type_mismatch(self):
+ entry = self._make_one("OWNER", "userByEmail", "silly@example.com")
+ self.assertNotEqual(entry, object())
+ self.assertEqual(entry, mock.ANY)
+
+ def test___hash__set_equality(self):
+ entry1 = self._make_one("OWNER", "userByEmail", "silly@example.com")
+ entry2 = self._make_one("OWNER", "userByEmail", "phred@example.com")
+ set_one = {entry1, entry2}
+ set_two = {entry1, entry2}
+ self.assertEqual(set_one, set_two)
+
+ def test___hash__not_equals(self):
+ entry1 = self._make_one("OWNER", "userByEmail", "silly@example.com")
+ entry2 = self._make_one("OWNER", "userByEmail", "phred@example.com")
+ set_one = {entry1}
+ set_two = {entry2}
+ self.assertNotEqual(set_one, set_two)
+
+ def test_to_api_repr(self):
+ entry = self._make_one("OWNER", "userByEmail", "salmon@example.com")
+ resource = entry.to_api_repr()
+ exp_resource = {"role": "OWNER", "userByEmail": "salmon@example.com"}
+ self.assertEqual(resource, exp_resource)
+
+ def test_to_api_repr_view(self):
+ view = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "tableId": "my_table",
+ }
+ entry = self._make_one(None, "view", view)
+ resource = entry.to_api_repr()
+ exp_resource = {"view": view, "role": None}
+ self.assertEqual(resource, exp_resource)
+
+ def test_to_api_repr_routine(self):
+ routine = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ }
+
+ entry = self._make_one(None, "routine", routine)
+ resource = entry.to_api_repr()
+ exp_resource = {"routine": routine, "role": None}
+ self.assertEqual(resource, exp_resource)
+
+ def test_to_api_repr_dataset(self):
+ dataset = {
+ "dataset": {"projectId": "my-project", "datasetId": "my_dataset"},
+ "target_types": "VIEWS",
+ }
+ entry = self._make_one(None, "dataset", dataset)
+ resource = entry.to_api_repr()
+ exp_resource = {"dataset": dataset, "role": None}
+ self.assertEqual(resource, exp_resource)
+
+ def test_from_api_repr(self):
+ resource = {"role": "OWNER", "userByEmail": "salmon@example.com"}
+ entry = self._get_target_class().from_api_repr(resource)
+ self.assertEqual(entry.role, "OWNER")
+ self.assertEqual(entry.entity_type, "userByEmail")
+ self.assertEqual(entry.entity_id, "salmon@example.com")
+
+ def test_from_api_repr_w_unknown_entity_type(self):
+ resource = {"role": "READER", "unknown": "UNKNOWN"}
+ entry = self._get_target_class().from_api_repr(resource)
+ self.assertEqual(entry.role, "READER")
+ self.assertEqual(entry.entity_type, "unknown")
+ self.assertEqual(entry.entity_id, "UNKNOWN")
+ exp_resource = entry.to_api_repr()
+ self.assertEqual(resource, exp_resource)
+
+ def test_from_api_repr_wo_role(self):
+ resource = {
+ "view": {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "tableId": "my_table",
+ }
+ }
+ entry = self._get_target_class().from_api_repr(resource)
+ exp_entry = self._make_one(
+ role=None,
+ entity_type="view",
+ entity_id=resource["view"],
+ )
+ self.assertEqual(entry, exp_entry)
+
+ def test_to_api_repr_w_extra_properties(self):
+ resource = {
+ "role": "READER",
+ "userByEmail": "salmon@example.com",
+ }
+ entry = self._get_target_class().from_api_repr(resource)
+ entry._properties["specialGroup"] = resource["specialGroup"] = "projectReaders"
+ exp_resource = entry.to_api_repr()
+ self.assertEqual(resource, exp_resource)
+
+ def test_from_api_repr_entries_w_extra_keys(self):
+ resource = {
+ "role": "READER",
+ "specialGroup": "projectReaders",
+ "userByEmail": "salmon@example.com",
+ }
+ with self.assertRaises(ValueError):
+ self._get_target_class().from_api_repr(resource)
+
+ def test_view_getter_setter(self):
+ view = {
+ "projectId": "my_project",
+ "datasetId": "my_dataset",
+ "tableId": "my_table",
+ }
+ view_ref = TableReference.from_api_repr(view)
+ entry = self._make_one(None)
+ entry.view = view
+ resource = entry.to_api_repr()
+ exp_resource = {"view": view, "role": None}
+ self.assertEqual(entry.view, view_ref)
+ self.assertEqual(resource, exp_resource)
+
+ def test_view_getter_setter_none(self):
+ entry = self._make_one(None)
+ self.assertEqual(entry.view, None)
+
+ def test_view_getter_setter_string(self):
+ project = "my_project"
+ dataset = "my_dataset"
+ table = "my_table"
+ view = {
+ "projectId": project,
+ "datasetId": dataset,
+ "tableId": table,
+ }
+ entry = self._make_one(None)
+ entry.view = f"{project}.{dataset}.{table}"
+ resource = entry.to_api_repr()
+ exp_resource = {"view": view, "role": None}
+ self.assertEqual(resource, exp_resource)
+
+ def test_view_getter_setter_table(self):
+ project = "my_project"
+ dataset = "my_dataset"
+ table = "my_table"
+ view = {
+ "projectId": project,
+ "datasetId": dataset,
+ "tableId": table,
+ }
+ view_ref = Table.from_string(f"{project}.{dataset}.{table}")
+ entry = self._make_one(None)
+ entry.view = view_ref
+ resource = entry.to_api_repr()
+ exp_resource = {"view": view, "role": None}
+ self.assertEqual(resource, exp_resource)
+
+ def test_view_getter_setter_table_ref(self):
+ project = "my_project"
+ dataset = "my_dataset"
+ table = "my_table"
+ view = {
+ "projectId": project,
+ "datasetId": dataset,
+ "tableId": table,
+ }
+ view_ref = TableReference.from_string(f"{project}.{dataset}.{table}")
+ entry = self._make_one(None)
+ entry.view = view_ref
+ resource = entry.to_api_repr()
+ exp_resource = {"view": view, "role": None}
+ self.assertEqual(resource, exp_resource)
+
+ def test_view_getter_setter_incorrect_role(self):
+ view = {
+ "projectId": "my_project",
+ "datasetId": "my_dataset",
+ "tableId": "my_table",
+ }
+ view_ref = TableReference.from_api_repr(view)
+ entry = self._make_one("READER")
+ with self.assertRaises(ValueError):
+ entry.view = view_ref
+
+ def test_dataset_getter_setter(self):
+ dataset = {"projectId": "my-project", "datasetId": "my_dataset"}
+ entry = self._make_one(None)
+ entry.dataset = dataset
+ resource = entry.to_api_repr()
+ exp_resource = {
+ "dataset": {"dataset": dataset, "targetTypes": None},
+ "role": None,
+ }
+ dataset_ref = DatasetReference.from_api_repr(dataset)
+ prop = entry.dataset
+ self.assertEqual(resource, exp_resource)
+ self.assertEqual(prop, dataset_ref)
+
+ def test_dataset_getter_setter_none(self):
+ entry = self._make_one(None)
+ self.assertEqual(entry.dataset, None)
+
+ def test_dataset_getter_setter_string(self):
+ project = "my-project"
+ dataset_id = "my_dataset"
+ dataset = {
+ "projectId": project,
+ "datasetId": dataset_id,
+ }
+ entry = self._make_one(None)
+ string_ref = f"{project}.{dataset_id}"
+ entry.dataset = string_ref
+ resource = entry.to_api_repr()
+ exp_resource = {
+ "dataset": {"dataset": dataset, "targetTypes": None},
+ "role": None,
+ }
+ self.assertEqual(resource, exp_resource)
+
+ def test_dataset_getter_setter_dataset_ref(self):
+ project = "my-project"
+ dataset_id = "my_dataset"
+ dataset_ref = DatasetReference(project, dataset_id)
+ entry = self._make_one(None)
+ entry.dataset = dataset_ref
+ resource = entry.to_api_repr()
+ exp_resource = {
+ "dataset": {"dataset": dataset_ref, "targetTypes": None},
+ "role": None,
+ }
+ self.assertEqual(resource, exp_resource)
+
+ def test_dataset_getter_setter_dataset(self):
+ project = "my-project"
+ dataset_id = "my_dataset"
+ dataset_repr = {
+ "projectId": project,
+ "datasetId": dataset_id,
+ }
+ dataset = Dataset(f"{project}.{dataset_id}")
+ entry = self._make_one(None)
+ entry.dataset = dataset
+ resource = entry.to_api_repr()
+ exp_resource = {
+ "role": None,
+ "dataset": {"dataset": dataset_repr, "targetTypes": None},
+ }
+ self.assertEqual(resource, exp_resource)
+
+ def test_dataset_getter_setter_incorrect_role(self):
+ dataset = {"dataset": {"projectId": "my-project", "datasetId": "my_dataset"}}
+ entry = self._make_one("READER")
+ with self.assertRaises(ValueError):
+ entry.dataset = dataset
+
+ def test_routine_getter_setter(self):
+ routine = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ }
+ entry = self._make_one(None)
+ entry.routine = routine
+ resource = entry.to_api_repr()
+ exp_resource = {"routine": routine, "role": None}
+ self.assertEqual(resource, exp_resource)
+
+ def test_routine_getter_setter_none(self):
+ entry = self._make_one(None)
+ self.assertEqual(entry.routine, None)
+
+ def test_routine_getter_setter_string(self):
+ project = "my-project"
+ dataset_id = "my_dataset"
+ routine_id = "my_routine"
+ routine = {
+ "projectId": project,
+ "datasetId": dataset_id,
+ "routineId": routine_id,
+ }
+ entry = self._make_one(None)
+ entry.routine = f"{project}.{dataset_id}.{routine_id}"
+ resource = entry.to_api_repr()
+ exp_resource = {
+ "routine": routine,
+ "role": None,
+ }
+ self.assertEqual(resource, exp_resource)
+
+ def test_routine_getter_setter_routine_ref(self):
+ routine = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ }
+ entry = self._make_one(None)
+ entry.routine = RoutineReference.from_api_repr(routine)
+ resource = entry.to_api_repr()
+ exp_resource = {
+ "routine": routine,
+ "role": None,
+ }
+ self.assertEqual(resource, exp_resource)
+
+ def test_routine_getter_setter_routine(self):
+ routine = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ }
+ routine_ref = RoutineReference.from_api_repr(routine)
+ entry = self._make_one(None)
+ entry.routine = Routine(routine_ref)
+ resource = entry.to_api_repr()
+ exp_resource = {
+ "routine": routine,
+ "role": None,
+ }
+ self.assertEqual(entry.routine, routine_ref)
+ self.assertEqual(resource, exp_resource)
+
+ def test_routine_getter_setter_incorrect_role(self):
+ routine = {
+ "projectId": "my-project",
+ "datasetId": "my_dataset",
+ "routineId": "my_routine",
+ }
+ entry = self._make_one("READER")
+ with self.assertRaises(ValueError):
+ entry.routine = routine
+
+ def test_group_by_email_getter_setter(self):
+ email = "cloud-developer-relations@google.com"
+ entry = self._make_one(None)
+ entry.group_by_email = email
+ resource = entry.to_api_repr()
+ exp_resource = {"groupByEmail": email, "role": None}
+ self.assertEqual(entry.group_by_email, email)
+ self.assertEqual(resource, exp_resource)
+
+ def test_group_by_email_getter_setter_none(self):
+ entry = self._make_one(None)
+ self.assertEqual(entry.group_by_email, None)
+
+ def test_user_by_email_getter_setter(self):
+ email = "cloud-developer-relations@google.com"
+ entry = self._make_one(None)
+ entry.user_by_email = email
+ resource = entry.to_api_repr()
+ exp_resource = {"userByEmail": email, "role": None}
+ self.assertEqual(entry.user_by_email, email)
+ self.assertEqual(resource, exp_resource)
+
+ def test_user_by_email_getter_setter_none(self):
+ entry = self._make_one(None)
+ self.assertEqual(entry.user_by_email, None)
+
+ def test_domain_setter(self):
+ domain = "my_domain"
+ entry = self._make_one(None)
+ entry.domain = domain
+ resource = entry.to_api_repr()
+ exp_resource = {"domain": domain, "role": None}
+ self.assertEqual(entry.domain, domain)
+ self.assertEqual(resource, exp_resource)
+
+ def test_domain_getter_setter_none(self):
+ entry = self._make_one(None)
+ self.assertEqual(entry.domain, None)
+
+ def test_special_group_getter_setter(self):
+ special_group = "my_special_group"
+ entry = self._make_one(None)
+ entry.special_group = special_group
+ resource = entry.to_api_repr()
+ exp_resource = {"specialGroup": special_group, "role": None}
+ self.assertEqual(entry.special_group, special_group)
+ self.assertEqual(resource, exp_resource)
+
+ def test_special_group_getter_setter_none(self):
+ entry = self._make_one(None)
+ self.assertEqual(entry.special_group, None)
+
+ def test_role_getter_setter(self):
+ role = "READER"
+ entry = self._make_one(None)
+ entry.role = role
+ resource = entry.to_api_repr()
+ exp_resource = {"role": role}
+ self.assertEqual(resource, exp_resource)
+
+ def test_role_getter_setter_none(self):
+ entry = self._make_one(None)
+ self.assertEqual(entry.role, None)
+
+ def test_dataset_target_types_getter_setter(self):
+ target_types = ["VIEWS"]
+ entry = self._make_one(None)
+ entry.dataset_target_types = target_types
+ self.assertEqual(entry.dataset_target_types, target_types)
+
+ def test_dataset_target_types_getter_setter_none(self):
+ entry = self._make_one(None)
+ self.assertEqual(entry.dataset_target_types, None)
+
+ def test_dataset_target_types_getter_setter_w_dataset(self):
+ dataset = {"projectId": "my-project", "datasetId": "my_dataset"}
+ target_types = ["VIEWS"]
+ entry = self._make_one(None)
+ entry.dataset = dataset
+ entry.dataset_target_types = target_types
+ self.assertEqual(entry.dataset_target_types, target_types)
+
+
+class TestDatasetReference(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ return DatasetReference
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_defaults(self):
+ dataset_ref = self._make_one("some-project-1", "dataset_1")
+ self.assertEqual(dataset_ref.project, "some-project-1")
+ self.assertEqual(dataset_ref.dataset_id, "dataset_1")
+
+ def test_ctor_bad_args(self):
+ with self.assertRaises(ValueError):
+ self._make_one(1, "d")
+ with self.assertRaises(ValueError):
+ self._make_one("p", 2)
+
+ def test_table(self):
+ dataset_ref = self._make_one("some-project-1", "dataset_1")
+ table_ref = dataset_ref.table("table_1")
+ self.assertEqual(table_ref.dataset_id, "dataset_1")
+ self.assertEqual(table_ref.project, "some-project-1")
+ self.assertEqual(table_ref.table_id, "table_1")
+
+ def test_model(self):
+ dataset_ref = self._make_one("some-project-1", "dataset_1")
+ model_ref = dataset_ref.model("model_1")
+ self.assertEqual(model_ref.project, "some-project-1")
+ self.assertEqual(model_ref.dataset_id, "dataset_1")
+ self.assertEqual(model_ref.model_id, "model_1")
+
+ def test_routine(self):
+ dataset_ref = self._make_one("some-project-1", "dataset_1")
+ routine_ref = dataset_ref.routine("routine_1")
+ self.assertEqual(routine_ref.project, "some-project-1")
+ self.assertEqual(routine_ref.dataset_id, "dataset_1")
+ self.assertEqual(routine_ref.routine_id, "routine_1")
+
+ def test_to_api_repr(self):
+ dataset = self._make_one("project_1", "dataset_1")
+
+ resource = dataset.to_api_repr()
+
+ self.assertEqual(resource, {"projectId": "project_1", "datasetId": "dataset_1"})
+
+ def test_from_api_repr(self):
+ cls = self._get_target_class()
+ expected = self._make_one("project_1", "dataset_1")
+
+ got = cls.from_api_repr({"projectId": "project_1", "datasetId": "dataset_1"})
+
+ self.assertEqual(expected, got)
+
+ def test_from_string(self):
+ cls = self._get_target_class()
+ got = cls.from_string("string-project.string_dataset")
+ self.assertEqual(got.project, "string-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+
+ def test_from_string_w_prefix(self):
+ cls = self._get_target_class()
+ got = cls.from_string("google.com:string-project.string_dataset")
+ self.assertEqual(got.project, "google.com:string-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+
+ def test_from_string_legacy_string(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("string-project:string_dataset")
+
+ def test_from_string_w_incorrect_prefix(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("google.com.string-project.dataset_id")
+
+ def test_from_string_w_prefix_and_too_many_parts(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("google.com:string-project.dataset_id.table_id")
+
+ def test_from_string_not_fully_qualified(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("string_dataset")
+ with self.assertRaises(ValueError):
+ cls.from_string("a.b.c")
+
+ def test_from_string_with_default_project(self):
+ cls = self._get_target_class()
+ got = cls.from_string("string_dataset", default_project="default-project")
+ self.assertEqual(got.project, "default-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+
+ def test_from_string_ignores_default_project(self):
+ cls = self._get_target_class()
+ got = cls.from_string(
+ "string-project.string_dataset", default_project="default-project"
+ )
+ self.assertEqual(got.project, "string-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+
+ def test___eq___wrong_type(self):
+ dataset = self._make_one("project_1", "dataset_1")
+ other = object()
+ self.assertNotEqual(dataset, other)
+ self.assertEqual(dataset, mock.ANY)
+
+ def test___eq___project_mismatch(self):
+ dataset = self._make_one("project_1", "dataset_1")
+ other = self._make_one("project_2", "dataset_1")
+ self.assertNotEqual(dataset, other)
+
+ def test___eq___dataset_mismatch(self):
+ dataset = self._make_one("project_1", "dataset_1")
+ other = self._make_one("project_1", "dataset_2")
+ self.assertNotEqual(dataset, other)
+
+ def test___eq___equality(self):
+ dataset = self._make_one("project_1", "dataset_1")
+ other = self._make_one("project_1", "dataset_1")
+ self.assertEqual(dataset, other)
+
+ def test___hash__set_equality(self):
+ dataset1 = self._make_one("project_1", "dataset_1")
+ dataset2 = self._make_one("project_1", "dataset_2")
+ set_one = {dataset1, dataset2}
+ set_two = {dataset1, dataset2}
+ self.assertEqual(set_one, set_two)
+
+ def test___hash__not_equals(self):
+ dataset1 = self._make_one("project_1", "dataset_1")
+ dataset2 = self._make_one("project_1", "dataset_2")
+ set_one = {dataset1}
+ set_two = {dataset2}
+ self.assertNotEqual(set_one, set_two)
+
+ def test___repr__(self):
+ dataset = self._make_one("project1", "dataset1")
+ expected = "DatasetReference('project1', 'dataset1')"
+ self.assertEqual(repr(dataset), expected)
+
+ def test___str__(self):
+ dataset = self._make_one("project1", "dataset1")
+ self.assertEqual(str(dataset), "project1.dataset1")
+
+
+class TestDataset(unittest.TestCase):
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ PROJECT = "project"
+ DS_ID = "dataset-id"
+ DS_REF = DatasetReference(PROJECT, DS_ID)
+ KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.dataset import Dataset
+
+ return Dataset
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def _setUpConstants(self):
+ import datetime
+ from google.cloud._helpers import UTC
+
+ self.WHEN_TS = 1437767599.006
+ self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(tzinfo=UTC)
+ self.ETAG = "ETAG"
+ self.DS_FULL_ID = "%s:%s" % (self.PROJECT, self.DS_ID)
+ self.RESOURCE_URL = "http://example.com/path/to/resource"
+
+ def _make_resource(self):
+ self._setUpConstants()
+ USER_EMAIL = "phred@example.com"
+ GROUP_EMAIL = "group-name@lists.example.com"
+ return {
+ "creationTime": self.WHEN_TS * 1000,
+ "datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
+ "etag": self.ETAG,
+ "id": self.DS_FULL_ID,
+ "lastModifiedTime": self.WHEN_TS * 1000,
+ "location": "US",
+ "selfLink": self.RESOURCE_URL,
+ "defaultTableExpirationMs": 3600,
+ "storageBillingModel": "LOGICAL",
+ "access": [
+ {"role": "OWNER", "userByEmail": USER_EMAIL},
+ {"role": "OWNER", "groupByEmail": GROUP_EMAIL},
+ {"role": "WRITER", "specialGroup": "projectWriters"},
+ {"role": "READER", "specialGroup": "projectReaders"},
+ ],
+ "defaultEncryptionConfiguration": {"kmsKeyName": self.KMS_KEY_NAME},
+ }
+
+ def _verify_access_entry(self, access_entries, resource):
+ r_entries = []
+ for r_entry in resource["access"]:
+ role = r_entry.pop("role")
+ for entity_type, entity_id in sorted(r_entry.items()):
+ r_entries.append(
+ {"role": role, "entity_type": entity_type, "entity_id": entity_id}
+ )
+
+ self.assertEqual(len(access_entries), len(r_entries))
+ for a_entry, r_entry in zip(access_entries, r_entries):
+ self.assertEqual(a_entry.role, r_entry["role"])
+ self.assertEqual(a_entry.entity_type, r_entry["entity_type"])
+ self.assertEqual(a_entry.entity_id, r_entry["entity_id"])
+
+ def _verify_readonly_resource_properties(self, dataset, resource):
+ self.assertEqual(dataset.project, self.PROJECT)
+ self.assertEqual(dataset.dataset_id, self.DS_ID)
+ self.assertEqual(dataset.reference.project, self.PROJECT)
+ self.assertEqual(dataset.reference.dataset_id, self.DS_ID)
+
+ if "creationTime" in resource:
+ self.assertEqual(dataset.created, self.WHEN)
+ else:
+ self.assertIsNone(dataset.created)
+ if "etag" in resource:
+ self.assertEqual(dataset.etag, self.ETAG)
+ else:
+ self.assertIsNone(dataset.etag)
+ if "lastModifiedTime" in resource:
+ self.assertEqual(dataset.modified, self.WHEN)
+ else:
+ self.assertIsNone(dataset.modified)
+ if "selfLink" in resource:
+ self.assertEqual(dataset.self_link, self.RESOURCE_URL)
+ else:
+ self.assertIsNone(dataset.self_link)
+
+ def _verify_resource_properties(self, dataset, resource):
+ self._verify_readonly_resource_properties(dataset, resource)
+
+ if "defaultTableExpirationMs" in resource:
+ self.assertEqual(
+ dataset.default_table_expiration_ms,
+ int(resource.get("defaultTableExpirationMs")),
+ )
+ else:
+ self.assertIsNone(dataset.default_table_expiration_ms)
+ self.assertEqual(dataset.description, resource.get("description"))
+ self.assertEqual(dataset.friendly_name, resource.get("friendlyName"))
+ self.assertEqual(dataset.location, resource.get("location"))
+ self.assertEqual(
+ dataset.is_case_insensitive, resource.get("isCaseInsensitive") or False
+ )
+ if "defaultEncryptionConfiguration" in resource:
+ self.assertEqual(
+ dataset.default_encryption_configuration.kms_key_name,
+ resource.get("defaultEncryptionConfiguration")["kmsKeyName"],
+ )
+ else:
+ self.assertIsNone(dataset.default_encryption_configuration)
+ if "storageBillingModel" in resource:
+ self.assertEqual(
+ dataset.storage_billing_model, resource.get("storageBillingModel")
+ )
+ else:
+ self.assertIsNone(dataset.storage_billing_model)
+ if "access" in resource:
+ self._verify_access_entry(dataset.access_entries, resource)
+ else:
+ self.assertEqual(dataset.access_entries, [])
+
+ def test_ctor_defaults(self):
+ dataset = self._make_one(self.DS_REF)
+ self.assertEqual(dataset.dataset_id, self.DS_ID)
+ self.assertEqual(dataset.project, self.PROJECT)
+ self.assertEqual(
+ dataset.path, "/projects/%s/datasets/%s" % (self.PROJECT, self.DS_ID)
+ )
+ self.assertEqual(dataset.access_entries, [])
+
+ self.assertIsNone(dataset.created)
+ self.assertIsNone(dataset.full_dataset_id)
+ self.assertIsNone(dataset.etag)
+ self.assertIsNone(dataset.modified)
+ self.assertIsNone(dataset.self_link)
+
+ self.assertIsNone(dataset.default_table_expiration_ms)
+ self.assertIsNone(dataset.description)
+ self.assertIsNone(dataset.friendly_name)
+ self.assertIsNone(dataset.location)
+ self.assertEqual(dataset.is_case_insensitive, False)
+
+ def test_ctor_string(self):
+ dataset = self._make_one("some-project.some_dset")
+ self.assertEqual(dataset.project, "some-project")
+ self.assertEqual(dataset.dataset_id, "some_dset")
+
+ def test_ctor_string_wo_project_id(self):
+ with pytest.raises(ValueError):
+ # Project ID is missing.
+ self._make_one("some_dset")
+
+ def test_ctor_explicit(self):
+ from google.cloud.bigquery.dataset import DatasetReference, AccessEntry
+
+ phred = AccessEntry("OWNER", "userByEmail", "phred@example.com")
+ bharney = AccessEntry("OWNER", "userByEmail", "bharney@example.com")
+ entries = [phred, bharney]
+ OTHER_PROJECT = "foo-bar-123"
+ dataset = self._make_one(DatasetReference(OTHER_PROJECT, self.DS_ID))
+ dataset.access_entries = entries
+ self.assertEqual(dataset.dataset_id, self.DS_ID)
+ self.assertEqual(dataset.project, OTHER_PROJECT)
+ self.assertEqual(
+ dataset.path, "/projects/%s/datasets/%s" % (OTHER_PROJECT, self.DS_ID)
+ )
+ self.assertEqual(dataset.access_entries, entries)
+
+ self.assertIsNone(dataset.created)
+ self.assertIsNone(dataset.full_dataset_id)
+ self.assertIsNone(dataset.etag)
+ self.assertIsNone(dataset.modified)
+ self.assertIsNone(dataset.self_link)
+
+ self.assertIsNone(dataset.default_table_expiration_ms)
+ self.assertIsNone(dataset.description)
+ self.assertIsNone(dataset.friendly_name)
+ self.assertIsNone(dataset.location)
+ self.assertEqual(dataset.is_case_insensitive, False)
+
+ def test_access_entries_setter_non_list(self):
+ dataset = self._make_one(self.DS_REF)
+ with self.assertRaises(TypeError):
+ dataset.access_entries = object()
+
+ def test_access_entries_setter_invalid_field(self):
+ from google.cloud.bigquery.dataset import AccessEntry
+
+ dataset = self._make_one(self.DS_REF)
+ phred = AccessEntry("OWNER", "userByEmail", "phred@example.com")
+ with self.assertRaises(ValueError):
+ dataset.access_entries = [phred, object()]
+
+ def test_access_entries_setter(self):
+ from google.cloud.bigquery.dataset import AccessEntry
+
+ dataset = self._make_one(self.DS_REF)
+ phred = AccessEntry("OWNER", "userByEmail", "phred@example.com")
+ bharney = AccessEntry("OWNER", "userByEmail", "bharney@example.com")
+ dataset.access_entries = [phred, bharney]
+ self.assertEqual(dataset.access_entries, [phred, bharney])
+
+ def test_default_partition_expiration_ms(self):
+ dataset = self._make_one("proj.dset")
+ assert dataset.default_partition_expiration_ms is None
+ dataset.default_partition_expiration_ms = 12345
+ assert dataset.default_partition_expiration_ms == 12345
+ dataset.default_partition_expiration_ms = None
+ assert dataset.default_partition_expiration_ms is None
+
+ def test_default_table_expiration_ms_setter_bad_value(self):
+ dataset = self._make_one(self.DS_REF)
+ with self.assertRaises(ValueError):
+ dataset.default_table_expiration_ms = "bogus"
+
+ def test_default_table_expiration_ms_setter(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.default_table_expiration_ms = 12345
+ self.assertEqual(dataset.default_table_expiration_ms, 12345)
+
+ def test_description_setter_bad_value(self):
+ dataset = self._make_one(self.DS_REF)
+ with self.assertRaises(ValueError):
+ dataset.description = 12345
+
+ def test_description_setter(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.description = "DESCRIPTION"
+ self.assertEqual(dataset.description, "DESCRIPTION")
+
+ def test_friendly_name_setter_bad_value(self):
+ dataset = self._make_one(self.DS_REF)
+ with self.assertRaises(ValueError):
+ dataset.friendly_name = 12345
+
+ def test_friendly_name_setter(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.friendly_name = "FRIENDLY"
+ self.assertEqual(dataset.friendly_name, "FRIENDLY")
+
+ def test_location_setter_bad_value(self):
+ dataset = self._make_one(self.DS_REF)
+ with self.assertRaises(ValueError):
+ dataset.location = 12345
+
+ def test_location_setter(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.location = "LOCATION"
+ self.assertEqual(dataset.location, "LOCATION")
+
+ def test_labels_update_in_place(self):
+ dataset = self._make_one(self.DS_REF)
+ del dataset._properties["labels"] # don't start w/ existing dict
+ labels = dataset.labels
+ labels["foo"] = "bar" # update in place
+ self.assertEqual(dataset.labels, {"foo": "bar"})
+
+ def test_labels_setter(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.labels = {"color": "green"}
+ self.assertEqual(dataset.labels, {"color": "green"})
+
+ def test_labels_setter_bad_value(self):
+ dataset = self._make_one(self.DS_REF)
+ with self.assertRaises(ValueError):
+ dataset.labels = None
+
+ def test_labels_getter_missing_value(self):
+ dataset = self._make_one(self.DS_REF)
+ self.assertEqual(dataset.labels, {})
+
+ def test_is_case_insensitive_setter_bad_value(self):
+ dataset = self._make_one(self.DS_REF)
+ with self.assertRaises(ValueError):
+ dataset.is_case_insensitive = 0
+
+ def test_is_case_insensitive_setter_true(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.is_case_insensitive = True
+ self.assertEqual(dataset.is_case_insensitive, True)
+
+ def test_is_case_insensitive_setter_none(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.is_case_insensitive = None
+ self.assertEqual(dataset.is_case_insensitive, False)
+
+ def test_is_case_insensitive_setter_false(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.is_case_insensitive = False
+ self.assertEqual(dataset.is_case_insensitive, False)
+
+ def test_from_api_repr_missing_identity(self):
+ self._setUpConstants()
+ RESOURCE = {}
+ klass = self._get_target_class()
+ with self.assertRaises(KeyError):
+ klass.from_api_repr(RESOURCE)
+
+ def test_from_api_repr_bare(self):
+ self._setUpConstants()
+ RESOURCE = {
+ "id": "%s:%s" % (self.PROJECT, self.DS_ID),
+ "datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
+ }
+ klass = self._get_target_class()
+ dataset = klass.from_api_repr(RESOURCE)
+ self._verify_resource_properties(dataset, RESOURCE)
+
+ def test_from_api_repr_w_properties(self):
+ RESOURCE = self._make_resource()
+ klass = self._get_target_class()
+ dataset = klass.from_api_repr(RESOURCE)
+ self._verify_resource_properties(dataset, RESOURCE)
+
+ def test_to_api_repr_w_custom_field(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset._properties["newAlphaProperty"] = "unreleased property"
+ resource = dataset.to_api_repr()
+
+ exp_resource = {
+ "datasetReference": self.DS_REF.to_api_repr(),
+ "labels": {},
+ "newAlphaProperty": "unreleased property",
+ }
+ self.assertEqual(resource, exp_resource)
+
+ def test_default_encryption_configuration_setter(self):
+ from google.cloud.bigquery.encryption_configuration import (
+ EncryptionConfiguration,
+ )
+
+ dataset = self._make_one(self.DS_REF)
+ encryption_configuration = EncryptionConfiguration(
+ kms_key_name=self.KMS_KEY_NAME
+ )
+ dataset.default_encryption_configuration = encryption_configuration
+ self.assertEqual(
+ dataset.default_encryption_configuration.kms_key_name, self.KMS_KEY_NAME
+ )
+ dataset.default_encryption_configuration = None
+ self.assertIsNone(dataset.default_encryption_configuration)
+
+ def test_storage_billing_model_setter(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.storage_billing_model = "PHYSICAL"
+ self.assertEqual(dataset.storage_billing_model, "PHYSICAL")
+
+ def test_storage_billing_model_setter_with_none(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.storage_billing_model = None
+ self.assertIsNone(dataset.storage_billing_model)
+
+ def test_storage_billing_model_setter_with_invalid_type(self):
+ dataset = self._make_one(self.DS_REF)
+ with self.assertRaises(ValueError) as raises:
+ dataset.storage_billing_model = object()
+
+ self.assertIn("storage_billing_model", str(raises.exception))
+
+ def test_from_string(self):
+ cls = self._get_target_class()
+ got = cls.from_string("string-project.string_dataset")
+ self.assertEqual(got.project, "string-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+
+ def test_from_string_legacy_string(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("string-project:string_dataset")
+
+ def test__build_resource_w_custom_field(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset._properties["newAlphaProperty"] = "unreleased property"
+ resource = dataset._build_resource(["newAlphaProperty"])
+
+ exp_resource = {"newAlphaProperty": "unreleased property"}
+ self.assertEqual(resource, exp_resource)
+
+ def test__build_resource_w_custom_field_not_in__properties(self):
+ dataset = self._make_one(self.DS_REF)
+ dataset.bad = "value"
+ with self.assertRaises(ValueError):
+ dataset._build_resource(["bad"])
+
+ def test_table(self):
+ from google.cloud.bigquery.table import TableReference
+
+ dataset = self._make_one(self.DS_REF)
+ table = dataset.table("table_id")
+ self.assertIsInstance(table, TableReference)
+ self.assertEqual(table.table_id, "table_id")
+ self.assertEqual(table.dataset_id, self.DS_ID)
+ self.assertEqual(table.project, self.PROJECT)
+
+ def test___repr__(self):
+ from google.cloud.bigquery.dataset import DatasetReference
+
+ dataset = self._make_one(DatasetReference("project1", "dataset1"))
+ expected = "Dataset(DatasetReference('project1', 'dataset1'))"
+ self.assertEqual(repr(dataset), expected)
+
+
+class TestDatasetListItem(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.dataset import DatasetListItem
+
+ return DatasetListItem
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor(self):
+ project = "test-project"
+ dataset_id = "test_dataset"
+ resource = {
+ "kind": "bigquery#dataset",
+ "id": "{}:{}".format(project, dataset_id),
+ "datasetReference": {"projectId": project, "datasetId": dataset_id},
+ "friendlyName": "Data of the Test",
+ "labels": {"some-stuff": "this-is-a-label"},
+ }
+
+ dataset = self._make_one(resource)
+ self.assertEqual(dataset.project, project)
+ self.assertEqual(dataset.dataset_id, dataset_id)
+ self.assertEqual(dataset.full_dataset_id, "{}:{}".format(project, dataset_id))
+ self.assertEqual(dataset.reference.project, project)
+ self.assertEqual(dataset.reference.dataset_id, dataset_id)
+ self.assertEqual(dataset.friendly_name, "Data of the Test")
+ self.assertEqual(dataset.labels["some-stuff"], "this-is-a-label")
+
+ def test_ctor_missing_properties(self):
+ resource = {
+ "datasetReference": {"projectId": "testproject", "datasetId": "testdataset"}
+ }
+ dataset = self._make_one(resource)
+ self.assertEqual(dataset.project, "testproject")
+ self.assertEqual(dataset.dataset_id, "testdataset")
+ self.assertIsNone(dataset.full_dataset_id)
+ self.assertIsNone(dataset.friendly_name)
+ self.assertEqual(dataset.labels, {})
+
+ def test_ctor_wo_project(self):
+ resource = {"datasetReference": {"datasetId": "testdataset"}}
+ with self.assertRaises(ValueError):
+ self._make_one(resource)
+
+ def test_ctor_wo_dataset(self):
+ resource = {"datasetReference": {"projectId": "testproject"}}
+ with self.assertRaises(ValueError):
+ self._make_one(resource)
+
+ def test_ctor_wo_reference(self):
+ with self.assertRaises(ValueError):
+ self._make_one({})
+
+ def test_labels_update_in_place(self):
+ resource = {
+ "datasetReference": {"projectId": "testproject", "datasetId": "testdataset"}
+ }
+ dataset = self._make_one(resource)
+ labels = dataset.labels
+ labels["foo"] = "bar" # update in place
+ self.assertEqual(dataset.labels, {"foo": "bar"})
+
+ def test_table(self):
+ from google.cloud.bigquery.table import TableReference
+
+ project = "test-project"
+ dataset_id = "test_dataset"
+ resource = {"datasetReference": {"projectId": project, "datasetId": dataset_id}}
+ dataset = self._make_one(resource)
+ table = dataset.table("table_id")
+ self.assertIsInstance(table, TableReference)
+ self.assertEqual(table.table_id, "table_id")
+ self.assertEqual(table.dataset_id, dataset_id)
+ self.assertEqual(table.project, project)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_dbapi__helpers.py b/testbed/googleapis__python-bigquery/tests/unit/test_dbapi__helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e1da0034309d461ddcc018aa7fa2c81f4b7aa50
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_dbapi__helpers.py
@@ -0,0 +1,692 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import decimal
+import math
+import operator as op
+import re
+import unittest
+
+import pytest
+
+import google.cloud._helpers
+from google.cloud.bigquery import query, table
+from google.cloud.bigquery.dbapi import _helpers
+from google.cloud.bigquery.dbapi import exceptions
+
+
+class TestQueryParameters(unittest.TestCase):
+ def test_scalar_to_query_parameter(self):
+ expected_types = [
+ (True, "BOOL"),
+ (False, "BOOL"),
+ (123, "INT64"),
+ (-123456789, "INT64"),
+ (1.25, "FLOAT64"),
+ (b"I am some bytes", "BYTES"),
+ ("I am a string", "STRING"),
+ (datetime.date(2017, 4, 1), "DATE"),
+ (datetime.time(12, 34, 56), "TIME"),
+ (datetime.datetime(2012, 3, 4, 5, 6, 7), "DATETIME"),
+ (
+ datetime.datetime(
+ 2012, 3, 4, 5, 6, 7, tzinfo=google.cloud._helpers.UTC
+ ),
+ "TIMESTAMP",
+ ),
+ (decimal.Decimal("1.25"), "NUMERIC"),
+ (decimal.Decimal("9.9999999999999999999999999999999999999E+28"), "NUMERIC"),
+ (decimal.Decimal("1.0E+29"), "BIGNUMERIC"), # more than max NUMERIC value
+ (decimal.Decimal("1.123456789"), "NUMERIC"),
+ (decimal.Decimal("1.1234567891"), "BIGNUMERIC"), # scale > 9
+ (decimal.Decimal("12345678901234567890123456789.012345678"), "NUMERIC"),
+ (
+ decimal.Decimal("12345678901234567890123456789012345678"),
+ "BIGNUMERIC", # larger than max NUMERIC value, despite precision <=38
+ ),
+ ]
+
+ for value, expected_type in expected_types:
+ msg = "value: {} expected_type: {}".format(value, expected_type)
+ parameter = _helpers.scalar_to_query_parameter(value)
+ self.assertIsNone(parameter.name, msg=msg)
+ self.assertEqual(parameter.type_, expected_type, msg=msg)
+ self.assertEqual(parameter.value, value, msg=msg)
+ named_parameter = _helpers.scalar_to_query_parameter(value, name="myvar")
+ self.assertEqual(named_parameter.name, "myvar", msg=msg)
+ self.assertEqual(named_parameter.type_, expected_type, msg=msg)
+ self.assertEqual(named_parameter.value, value, msg=msg)
+
+ def test_scalar_to_query_parameter_w_unexpected_type(self):
+ with self.assertRaises(exceptions.ProgrammingError):
+ _helpers.scalar_to_query_parameter(value={"a": "dictionary"})
+
+ def test_scalar_to_query_parameter_w_special_floats(self):
+ nan_parameter = _helpers.scalar_to_query_parameter(float("nan"))
+ self.assertTrue(math.isnan(nan_parameter.value))
+ self.assertEqual(nan_parameter.type_, "FLOAT64")
+ inf_parameter = _helpers.scalar_to_query_parameter(float("inf"))
+ self.assertTrue(math.isinf(inf_parameter.value))
+ self.assertEqual(inf_parameter.type_, "FLOAT64")
+
+ def test_array_to_query_parameter_valid_argument(self):
+ expected_types = [
+ ([True, False], "BOOL"),
+ ([123, -456, 0], "INT64"),
+ ([1.25, 2.50], "FLOAT64"),
+ ([decimal.Decimal("1.25")], "NUMERIC"),
+ ([decimal.Decimal("{d38}.{d38}".format(d38="9" * 38))], "BIGNUMERIC"),
+ ([b"foo", b"bar"], "BYTES"),
+ (["foo", "bar"], "STRING"),
+ ([datetime.date(2017, 4, 1), datetime.date(2018, 4, 1)], "DATE"),
+ ([datetime.time(12, 34, 56), datetime.time(10, 20, 30)], "TIME"),
+ (
+ [
+ datetime.datetime(2012, 3, 4, 5, 6, 7),
+ datetime.datetime(2013, 1, 1, 10, 20, 30),
+ ],
+ "DATETIME",
+ ),
+ (
+ [
+ datetime.datetime(
+ 2012, 3, 4, 5, 6, 7, tzinfo=google.cloud._helpers.UTC
+ ),
+ datetime.datetime(
+ 2013, 1, 1, 10, 20, 30, tzinfo=google.cloud._helpers.UTC
+ ),
+ ],
+ "TIMESTAMP",
+ ),
+ ]
+
+ for values, expected_type in expected_types:
+ msg = "value: {} expected_type: {}".format(values, expected_type)
+ parameter = _helpers.array_to_query_parameter(values)
+ self.assertIsNone(parameter.name, msg=msg)
+ self.assertEqual(parameter.array_type, expected_type, msg=msg)
+ self.assertEqual(parameter.values, values, msg=msg)
+ named_param = _helpers.array_to_query_parameter(values, name="my_param")
+ self.assertEqual(named_param.name, "my_param", msg=msg)
+ self.assertEqual(named_param.array_type, expected_type, msg=msg)
+ self.assertEqual(named_param.values, values, msg=msg)
+
+ def test_array_to_query_parameter_empty_argument(self):
+ with self.assertRaises(exceptions.ProgrammingError):
+ _helpers.array_to_query_parameter([])
+
+ def test_array_to_query_parameter_unsupported_sequence(self):
+ unsupported_iterables = [{10, 20, 30}, "foo", b"bar", bytearray([65, 75, 85])]
+ for iterable in unsupported_iterables:
+ with self.assertRaises(exceptions.ProgrammingError):
+ _helpers.array_to_query_parameter(iterable)
+
+ def test_array_to_query_parameter_sequence_w_invalid_elements(self):
+ with self.assertRaises(exceptions.ProgrammingError):
+ _helpers.array_to_query_parameter([object(), 2, 7])
+
+ def test_to_query_parameters_w_dict(self):
+ parameters = {"somebool": True, "somestring": "a-string-value"}
+ query_parameters = _helpers.to_query_parameters(parameters, {})
+ query_parameter_tuples = []
+ for param in query_parameters:
+ query_parameter_tuples.append((param.name, param.type_, param.value))
+ self.assertSequenceEqual(
+ sorted(query_parameter_tuples),
+ sorted(
+ [
+ ("somebool", "BOOL", True),
+ ("somestring", "STRING", "a-string-value"),
+ ]
+ ),
+ )
+
+ def test_to_query_parameters_w_dict_array_param(self):
+ parameters = {"somelist": [10, 20]}
+ query_parameters = _helpers.to_query_parameters(parameters, {})
+
+ self.assertEqual(len(query_parameters), 1)
+ param = query_parameters[0]
+
+ self.assertEqual(param.name, "somelist")
+ self.assertEqual(param.array_type, "INT64")
+ self.assertEqual(param.values, [10, 20])
+
+ def test_to_query_parameters_w_dict_dict_param(self):
+ parameters = {"my_param": {"foo": "bar"}}
+
+ with self.assertRaises(NotImplementedError):
+ _helpers.to_query_parameters(parameters, {})
+
+ def test_to_query_parameters_w_list(self):
+ parameters = [True, "a-string-value"]
+ query_parameters = _helpers.to_query_parameters(parameters, [None, None])
+ query_parameter_tuples = []
+ for param in query_parameters:
+ query_parameter_tuples.append((param.name, param.type_, param.value))
+ self.assertSequenceEqual(
+ sorted(query_parameter_tuples),
+ sorted([(None, "BOOL", True), (None, "STRING", "a-string-value")]),
+ )
+
+ def test_to_query_parameters_w_list_array_param(self):
+ parameters = [[10, 20]]
+ query_parameters = _helpers.to_query_parameters(parameters, [None])
+
+ self.assertEqual(len(query_parameters), 1)
+ param = query_parameters[0]
+
+ self.assertIsNone(param.name)
+ self.assertEqual(param.array_type, "INT64")
+ self.assertEqual(param.values, [10, 20])
+
+ def test_to_query_parameters_w_list_dict_param(self):
+ parameters = [{"foo": "bar"}]
+
+ with self.assertRaises(NotImplementedError):
+ _helpers.to_query_parameters(parameters, [None])
+
+ def test_to_query_parameters_none_argument(self):
+ query_parameters = _helpers.to_query_parameters(None, None)
+ self.assertEqual(query_parameters, [])
+
+
+class TestToBqTableRows(unittest.TestCase):
+ def test_empty_iterable(self):
+ rows_iterable = iter([])
+ result = _helpers.to_bq_table_rows(rows_iterable)
+ self.assertEqual(list(result), [])
+
+ def test_non_empty_iterable(self):
+ pytest.importorskip("pyarrow")
+ from tests.unit.helpers import _to_pyarrow
+
+ rows_iterable = [
+ dict(
+ one=_to_pyarrow(1.1),
+ four=_to_pyarrow(1.4),
+ two=_to_pyarrow(1.2),
+ three=_to_pyarrow(1.3),
+ ),
+ dict(
+ one=_to_pyarrow(2.1),
+ four=_to_pyarrow(2.4),
+ two=_to_pyarrow(2.2),
+ three=_to_pyarrow(2.3),
+ ),
+ ]
+
+ result = _helpers.to_bq_table_rows(rows_iterable)
+
+ rows = list(result)
+ self.assertEqual(len(rows), 2)
+
+ row_1, row_2 = rows
+ self.assertIsInstance(row_1, table.Row)
+ self.assertIsInstance(row_2, table.Row)
+
+ field_value = op.itemgetter(1)
+
+ items = sorted(row_1.items(), key=field_value)
+ expected_items = [("one", 1.1), ("two", 1.2), ("three", 1.3), ("four", 1.4)]
+ self.assertEqual(items, expected_items)
+
+ items = sorted(row_2.items(), key=field_value)
+ expected_items = [("one", 2.1), ("two", 2.2), ("three", 2.3), ("four", 2.4)]
+ self.assertEqual(items, expected_items)
+
+
+class TestRaiseOnClosedDecorator(unittest.TestCase):
+ def _make_class(self):
+ class Foo(object):
+ class_member = "class member"
+
+ def __init__(self):
+ self._closed = False
+ self.instance_member = "instance member"
+
+ def instance_method(self):
+ return self.instance_member
+
+ @classmethod
+ def class_method(cls): # pragma: NO COVER
+ return cls.class_member
+
+ @staticmethod
+ def static_method(): # pragma: NO COVER
+ return "static return value"
+
+ def _private_method(self):
+ return self.instance_member
+
+ return Foo
+
+ def test_preserves_method_names(self):
+ klass = self._make_class()
+ decorated_class = _helpers.raise_on_closed("I'm closed!")(klass)
+ instance = decorated_class()
+
+ self.assertEqual(instance.instance_method.__name__, "instance_method")
+ self.assertEqual(instance.class_method.__name__, "class_method")
+ self.assertEqual(instance.static_method.__name__, "static_method")
+ self.assertEqual(instance._private_method.__name__, "_private_method")
+
+ def test_methods_on_not_closed_instance(self):
+ klass = self._make_class()
+ decorated_class = _helpers.raise_on_closed("I'm closed!")(klass)
+ instance = decorated_class()
+ instance._closed = False
+
+ self.assertEqual(instance.instance_method(), "instance member")
+ self.assertEqual(instance.class_method(), "class member")
+ self.assertEqual(instance.static_method(), "static return value")
+ self.assertEqual(instance._private_method(), "instance member")
+
+ def test_public_instance_methods_on_closed_instance(self):
+ klass = self._make_class()
+ decorated_class = _helpers.raise_on_closed("I'm closed!")(klass)
+ instance = decorated_class()
+ instance._closed = True
+
+ with self.assertRaisesRegex(exceptions.ProgrammingError, "I'm closed!"):
+ instance.instance_method()
+
+ def test_methods_wo_public_instance_methods_on_closed_instance(self):
+ klass = self._make_class()
+ decorated_class = _helpers.raise_on_closed("I'm closed!")(klass)
+ instance = decorated_class()
+ instance._closed = True
+
+ # no errors expected
+ self.assertEqual(instance.class_method(), "class member")
+ self.assertEqual(instance.static_method(), "static return value")
+ self.assertEqual(instance._private_method(), "instance member")
+
+ def test_custom_class_closed_attribute(self):
+ klass = self._make_class()
+ decorated_class = _helpers.raise_on_closed(
+ "I'm closed!", closed_attr_name="_really_closed"
+ )(klass)
+ instance = decorated_class()
+ instance._closed = False
+ instance._really_closed = True
+
+ with self.assertRaisesRegex(exceptions.ProgrammingError, "I'm closed!"):
+ instance.instance_method()
+
+ def test_custom_on_closed_error_type(self):
+ klass = self._make_class()
+ decorated_class = _helpers.raise_on_closed(
+ "I'm closed!", exc_class=RuntimeError
+ )(klass)
+ instance = decorated_class()
+ instance._closed = True
+
+ with self.assertRaisesRegex(RuntimeError, "I'm closed!"):
+ instance.instance_method()
+
+
+VALID_BQ_TYPES = [
+ (name, getattr(query.SqlParameterScalarTypes, name)._type)
+ for name in dir(query.SqlParameterScalarTypes)
+ if not name.startswith("_")
+]
+
+
+@pytest.mark.parametrize("alias, type_", VALID_BQ_TYPES)
+def test_scalar_to_query_parameter_honors_given_type(alias, type_):
+ from google.cloud import bigquery
+
+ assert _helpers.scalar_to_query_parameter(1.23, None, alias) == (
+ bigquery.ScalarQueryParameter(None, type_, 1.23)
+ )
+ assert _helpers.scalar_to_query_parameter(None, "foo", alias) == (
+ bigquery.ScalarQueryParameter("foo", type_, None)
+ )
+
+
+def test_scalar_to_query_parameter_honors_given_type_errors_on_invalid():
+ with pytest.raises(
+ google.cloud.bigquery.dbapi.exceptions.ProgrammingError,
+ match="The given parameter type, INT, for foo is not a valid BigQuery scalar type.",
+ ):
+ _helpers.scalar_to_query_parameter(None, "foo", "INT")
+
+
+@pytest.mark.parametrize("alias, type_", VALID_BQ_TYPES)
+def test_array_to_query_parameter_honors_given_type(alias, type_):
+ from google.cloud import bigquery
+
+ assert _helpers.array_to_query_parameter([1.23], None, alias) == (
+ bigquery.ArrayQueryParameter(None, type_, [1.23])
+ )
+ assert _helpers.array_to_query_parameter((), "foo", alias) == (
+ bigquery.ArrayQueryParameter("foo", type_, ())
+ )
+
+
+def test_array_to_query_parameter_honors_given_type_errors_on_invalid():
+ with pytest.raises(
+ google.cloud.bigquery.dbapi.exceptions.ProgrammingError,
+ match="The given parameter type, INT, for foo is not a valid BigQuery scalar type.",
+ ):
+ _helpers.array_to_query_parameter((), "foo", "INT")
+
+
+def test_to_query_parameters_dict_w_types():
+ from google.cloud import bigquery
+
+ assert sorted(
+ _helpers.to_query_parameters(
+ dict(i=1, x=1.2, y=None, q="hi", z=[]),
+ dict(x="numeric", y="string", q="string(9)", z="float64"),
+ ),
+ key=lambda p: p.name,
+ ) == [
+ bigquery.ScalarQueryParameter("i", "INT64", 1),
+ bigquery.ScalarQueryParameter("q", "STRING", "hi"),
+ bigquery.ScalarQueryParameter("x", "NUMERIC", 1.2),
+ bigquery.ScalarQueryParameter("y", "STRING", None),
+ bigquery.ArrayQueryParameter("z", "FLOAT64", []),
+ ]
+
+
+def test_to_query_parameters_list_w_types():
+ from google.cloud import bigquery
+
+ assert _helpers.to_query_parameters(
+ [1, 1.2, None, "hi", []], [None, "numeric", "string", "string(9)", "float64"]
+ ) == [
+ bigquery.ScalarQueryParameter(None, "INT64", 1),
+ bigquery.ScalarQueryParameter(None, "NUMERIC", 1.2),
+ bigquery.ScalarQueryParameter(None, "STRING", None),
+ bigquery.ScalarQueryParameter(None, "STRING", "hi"),
+ bigquery.ArrayQueryParameter(None, "FLOAT64", []),
+ ]
+
+
+@pytest.mark.parametrize(
+ "value,type_,expect",
+ [
+ (
+ [],
+ "ARRAY",
+ {
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ "parameterValue": {"arrayValues": []},
+ },
+ ),
+ (
+ [1, 2],
+ "ARRAY",
+ {
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ "parameterValue": {"arrayValues": [{"value": "1"}, {"value": "2"}]},
+ },
+ ),
+ (
+ dict(
+ name="par",
+ children=[
+ dict(name="ch1", bdate=datetime.date(2021, 1, 1)),
+ dict(name="ch2", bdate=datetime.date(2021, 1, 2)),
+ ],
+ ),
+ "struct>>",
+ {
+ "parameterType": {
+ "structTypes": [
+ {"name": "name", "type": {"type": "STRING"}},
+ {
+ "name": "children",
+ "type": {
+ "arrayType": {
+ "structTypes": [
+ {"name": "name", "type": {"type": "STRING"}},
+ {"name": "bdate", "type": {"type": "DATE"}},
+ ],
+ "type": "STRUCT",
+ },
+ "type": "ARRAY",
+ },
+ },
+ ],
+ "type": "STRUCT",
+ },
+ "parameterValue": {
+ "structValues": {
+ "children": {
+ "arrayValues": [
+ {
+ "structValues": {
+ "bdate": {"value": "2021-01-01"},
+ "name": {"value": "ch1"},
+ }
+ },
+ {
+ "structValues": {
+ "bdate": {"value": "2021-01-02"},
+ "name": {"value": "ch2"},
+ }
+ },
+ ]
+ },
+ "name": {"value": "par"},
+ }
+ },
+ },
+ ),
+ (
+ dict(
+ name="par",
+ children=[
+ dict(name="ch1", bdate=datetime.date(2021, 1, 1)),
+ dict(name="ch2", bdate=datetime.date(2021, 1, 2)),
+ ],
+ ),
+ "struct>>",
+ {
+ "parameterType": {
+ "structTypes": [
+ {"name": "name", "type": {"type": "STRING"}},
+ {
+ "name": "children",
+ "type": {
+ "arrayType": {
+ "structTypes": [
+ {"name": "name", "type": {"type": "STRING"}},
+ {"name": "bdate", "type": {"type": "DATE"}},
+ ],
+ "type": "STRUCT",
+ },
+ "type": "ARRAY",
+ },
+ },
+ ],
+ "type": "STRUCT",
+ },
+ "parameterValue": {
+ "structValues": {
+ "children": {
+ "arrayValues": [
+ {
+ "structValues": {
+ "bdate": {"value": "2021-01-01"},
+ "name": {"value": "ch1"},
+ }
+ },
+ {
+ "structValues": {
+ "bdate": {"value": "2021-01-02"},
+ "name": {"value": "ch2"},
+ }
+ },
+ ]
+ },
+ "name": {"value": "par"},
+ }
+ },
+ },
+ ),
+ (
+ ["1", "hi"],
+ "ARRAY",
+ {
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "STRING"}},
+ "parameterValue": {"arrayValues": [{"value": "1"}, {"value": "hi"}]},
+ },
+ ),
+ ],
+)
+def test_complex_query_parameter_type(type_, value, expect):
+ from google.cloud.bigquery.dbapi._helpers import complex_query_parameter
+
+ param = complex_query_parameter("test", value, type_).to_api_repr()
+ assert param.pop("name") == "test"
+ assert param == expect
+
+
+def _expected_error_match(expect):
+ return "^" + re.escape(expect) + "$"
+
+
+@pytest.mark.parametrize(
+ "value,type_,expect",
+ [
+ (
+ [],
+ "ARRAY",
+ "The given parameter type, INT,"
+ " is not a valid BigQuery scalar type, in ARRAY.",
+ ),
+ ([], "x", "Invalid parameter type, x"),
+ ({}, "struct", "Invalid struct field, int, in struct"),
+ (
+ {"x": 1},
+ "struct",
+ "The given parameter type, int,"
+ " for x is not a valid BigQuery scalar type, in struct.",
+ ),
+ ([], "x<", "Invalid parameter type, x<"),
+ (0, "ARRAY", "Array type with non-array-like value with type int"),
+ (
+ [],
+ "ARRAY>",
+ "Array can't contain an array in ARRAY>",
+ ),
+ ([], "struct", "Non-mapping value for type struct"),
+ ({}, "struct", "No field value for x in struct"),
+ ({"x": 1, "y": 1}, "struct", "Extra data keys for struct"),
+ ([], "array>", "Invalid struct field, xxx, in array>"),
+ ([], "array<<>>", "Invalid parameter type, <>"),
+ ],
+)
+def test_complex_query_parameter_type_errors(type_, value, expect):
+ from google.cloud.bigquery.dbapi._helpers import complex_query_parameter
+ from google.cloud.bigquery.dbapi import exceptions
+
+ with pytest.raises(
+ exceptions.ProgrammingError,
+ match=_expected_error_match(expect),
+ ):
+ complex_query_parameter("test", value, type_)
+
+
+@pytest.mark.parametrize(
+ "parameters,parameter_types,expect",
+ [
+ (
+ [[], dict(name="ch1", b_date=datetime.date(2021, 1, 1))],
+ ["ARRAY", "struct"],
+ [
+ {
+ "parameterType": {"arrayType": {"type": "INT64"}, "type": "ARRAY"},
+ "parameterValue": {"arrayValues": []},
+ },
+ {
+ "parameterType": {
+ "structTypes": [
+ {"name": "name", "type": {"type": "STRING"}},
+ {"name": "b_date", "type": {"type": "DATE"}},
+ ],
+ "type": "STRUCT",
+ },
+ "parameterValue": {
+ "structValues": {
+ "b_date": {"value": "2021-01-01"},
+ "name": {"value": "ch1"},
+ }
+ },
+ },
+ ],
+ ),
+ (
+ dict(ids=[], child=dict(name="ch1", bdate=datetime.date(2021, 1, 1))),
+ dict(ids="ARRAY", child="struct"),
+ [
+ {
+ "name": "ids",
+ "parameterType": {"arrayType": {"type": "INT64"}, "type": "ARRAY"},
+ "parameterValue": {"arrayValues": []},
+ },
+ {
+ "name": "child",
+ "parameterType": {
+ "structTypes": [
+ {"name": "name", "type": {"type": "STRING"}},
+ {"name": "bdate", "type": {"type": "DATE"}},
+ ],
+ "type": "STRUCT",
+ },
+ "parameterValue": {
+ "structValues": {
+ "bdate": {"value": "2021-01-01"},
+ "name": {"value": "ch1"},
+ }
+ },
+ },
+ ],
+ ),
+ ],
+)
+def test_to_query_parameters_complex_types(parameters, parameter_types, expect):
+ from google.cloud.bigquery.dbapi._helpers import to_query_parameters
+
+ result = [p.to_api_repr() for p in to_query_parameters(parameters, parameter_types)]
+ assert result == expect
+
+
+def test_to_query_parameters_struct_error():
+ from google.cloud.bigquery.dbapi._helpers import to_query_parameters
+
+ with pytest.raises(
+ NotImplementedError,
+ match=_expected_error_match(
+ "STRUCT-like parameter values are not supported, "
+ "unless an explicit type is give in the parameter placeholder "
+ "(e.g. '%(:struct<...>)s')."
+ ),
+ ):
+ to_query_parameters([dict(x=1)], [None])
+
+ with pytest.raises(
+ NotImplementedError,
+ match=_expected_error_match(
+ "STRUCT-like parameter values are not supported (parameter foo), "
+ "unless an explicit type is give in the parameter placeholder "
+ "(e.g. '%(foo:struct<...>)s')."
+ ),
+ ):
+ to_query_parameters(dict(foo=dict(x=1)), {})
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_dbapi_connection.py b/testbed/googleapis__python-bigquery/tests/unit/test_dbapi_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5c77c448eeed9b73aea5eddda389237fb130b6a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_dbapi_connection.py
@@ -0,0 +1,253 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+import pytest
+import unittest
+from unittest import mock
+
+
+class TestConnection(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.dbapi import Connection
+
+ return Connection
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def _mock_client(self):
+ from google.cloud.bigquery import client
+
+ mock_client = mock.create_autospec(client.Client)
+ return mock_client
+
+ def _mock_bqstorage_client(self):
+ # Assumption: bigquery_storage exists. It's the test's responisbility to
+ # not use this helper or skip itself if bqstorage is not installed.
+ from google.cloud import bigquery_storage
+
+ mock_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ mock_client._transport = mock.Mock(spec=["channel"])
+ mock_client._transport.grpc_channel = mock.Mock(spec=["close"])
+ return mock_client
+
+ def test_ctor_wo_bqstorage_client(self):
+ from google.cloud.bigquery.dbapi import Connection
+
+ mock_client = self._mock_client()
+ mock_client._ensure_bqstorage_client.return_value = None
+
+ connection = self._make_one(client=mock_client)
+ self.assertIsInstance(connection, Connection)
+ self.assertIs(connection._client, mock_client)
+ self.assertIs(connection._bqstorage_client, None)
+
+ def test_ctor_w_bqstorage_client(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery.dbapi import Connection
+
+ mock_client = self._mock_client()
+ mock_bqstorage_client = self._mock_bqstorage_client()
+ mock_client._ensure_bqstorage_client.return_value = mock_bqstorage_client
+
+ connection = self._make_one(
+ client=mock_client,
+ bqstorage_client=mock_bqstorage_client,
+ )
+
+ mock_client._ensure_bqstorage_client.assert_called_once_with(
+ mock_bqstorage_client
+ )
+ self.assertIsInstance(connection, Connection)
+ self.assertIs(connection._client, mock_client)
+ self.assertIs(connection._bqstorage_client, mock_bqstorage_client)
+
+ @mock.patch("google.cloud.bigquery.Client", autospec=True)
+ def test_connect_wo_client(self, mock_client):
+ from google.cloud.bigquery.dbapi import connect
+ from google.cloud.bigquery.dbapi import Connection
+
+ connection = connect()
+ self.assertIsInstance(connection, Connection)
+ self.assertIsNotNone(connection._client)
+ self.assertIsNotNone(connection._bqstorage_client)
+
+ def test_connect_w_client(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery.dbapi import connect
+ from google.cloud.bigquery.dbapi import Connection
+
+ mock_client = self._mock_client()
+ mock_bqstorage_client = self._mock_bqstorage_client()
+ mock_client._ensure_bqstorage_client.return_value = mock_bqstorage_client
+
+ connection = connect(client=mock_client)
+
+ mock_client._ensure_bqstorage_client.assert_called_once_with()
+ self.assertIsInstance(connection, Connection)
+ self.assertIs(connection._client, mock_client)
+ self.assertIs(connection._bqstorage_client, mock_bqstorage_client)
+
+ def test_connect_w_both_clients(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery.dbapi import connect
+ from google.cloud.bigquery.dbapi import Connection
+
+ mock_client = self._mock_client()
+ mock_bqstorage_client = self._mock_bqstorage_client()
+ mock_client._ensure_bqstorage_client.return_value = mock_bqstorage_client
+
+ connection = connect(
+ client=mock_client,
+ bqstorage_client=mock_bqstorage_client,
+ )
+
+ mock_client._ensure_bqstorage_client.assert_called_once_with(
+ mock_bqstorage_client
+ )
+ self.assertIsInstance(connection, Connection)
+ self.assertIs(connection._client, mock_client)
+ self.assertIs(connection._bqstorage_client, mock_bqstorage_client)
+
+ def test_connect_prefer_bqstorage_client_false(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery.dbapi import connect
+ from google.cloud.bigquery.dbapi import Connection
+
+ mock_client = self._mock_client()
+ mock_bqstorage_client = self._mock_bqstorage_client()
+ mock_client._ensure_bqstorage_client.return_value = mock_bqstorage_client
+
+ connection = connect(
+ client=mock_client,
+ bqstorage_client=mock_bqstorage_client,
+ prefer_bqstorage_client=False,
+ )
+
+ mock_client._ensure_bqstorage_client.assert_not_called()
+ self.assertIsInstance(connection, Connection)
+ self.assertIs(connection._client, mock_client)
+ self.assertIs(connection._bqstorage_client, None)
+
+ def test_raises_error_if_closed(self):
+ from google.cloud.bigquery.dbapi.exceptions import ProgrammingError
+
+ connection = self._make_one(client=self._mock_client())
+
+ connection.close()
+
+ for method in ("close", "commit", "cursor"):
+ with self.assertRaisesRegex(
+ ProgrammingError, r"Operating on a closed connection\."
+ ):
+ getattr(connection, method)()
+
+ def test_close_closes_all_created_bigquery_clients(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ client = self._mock_client()
+ bqstorage_client = self._mock_bqstorage_client()
+
+ client_patcher = mock.patch(
+ "google.cloud.bigquery.dbapi.connection.bigquery.Client",
+ return_value=client,
+ )
+ bqstorage_client_patcher = mock.patch.object(
+ client,
+ "_ensure_bqstorage_client",
+ return_value=bqstorage_client,
+ )
+
+ with client_patcher, bqstorage_client_patcher:
+ connection = self._make_one(client=None, bqstorage_client=None)
+
+ connection.close()
+
+ self.assertTrue(client.close.called)
+ self.assertTrue(bqstorage_client._transport.grpc_channel.close.called)
+
+ def test_close_does_not_close_bigquery_clients_passed_to_it(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ client = self._mock_client()
+ bqstorage_client = self._mock_bqstorage_client()
+ connection = self._make_one(client=client, bqstorage_client=bqstorage_client)
+
+ connection.close()
+
+ self.assertFalse(client.close.called)
+ self.assertFalse(bqstorage_client._transport.grpc_channel.close.called)
+
+ def test_close_closes_all_created_cursors(self):
+ connection = self._make_one(client=self._mock_client())
+ cursor_1 = connection.cursor()
+ cursor_2 = connection.cursor()
+ self.assertFalse(cursor_1._closed)
+ self.assertFalse(cursor_2._closed)
+
+ connection.close()
+
+ self.assertTrue(cursor_1._closed)
+ self.assertTrue(cursor_2._closed)
+
+ def test_close_closes_only_open_created_cursors(self):
+ connection = self._make_one(client=self._mock_client())
+ cursor_1 = connection.cursor()
+ cursor_2 = connection.cursor()
+ self.assertFalse(cursor_1._closed)
+ self.assertFalse(cursor_2._closed)
+
+ cursor_1.close()
+ self.assertTrue(cursor_1._closed)
+ cursor_1.close = mock.MagicMock()
+
+ connection.close()
+
+ self.assertFalse(cursor_1.close.called)
+ self.assertTrue(cursor_2._closed)
+
+ def test_does_not_keep_cursor_instances_alive(self):
+ from google.cloud.bigquery.dbapi import Cursor
+
+ connection = self._make_one(client=self._mock_client())
+ cursor_1 = connection.cursor() # noqa
+ cursor_2 = connection.cursor()
+ cursor_3 = connection.cursor() # noqa
+
+ del cursor_2
+
+ # Connections should not hold strong references to the Cursor instances
+ # they created, unnecessarily keeping them alive.
+ gc.collect()
+ cursor_count = 0
+ for obj in gc.get_objects():
+ try:
+ if isinstance(obj, Cursor):
+ cursor_count += 1
+ except ReferenceError: # pragma: NO COVER
+ pass
+ self.assertEqual(cursor_count, 2)
+
+ def test_commit(self):
+ connection = self._make_one(client=self._mock_client())
+ # commit() is a no-op, there is nothing to test.
+ connection.commit()
+
+ def test_cursor(self):
+ from google.cloud.bigquery.dbapi import Cursor
+
+ connection = self._make_one(client=self._mock_client())
+ cursor = connection.cursor()
+ self.assertIsInstance(cursor, Cursor)
+ self.assertIs(cursor.connection, connection)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_dbapi_cursor.py b/testbed/googleapis__python-bigquery/tests/unit/test_dbapi_cursor.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fca4cec072461d493630ea2045e3e8c809a53ec
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_dbapi_cursor.py
@@ -0,0 +1,935 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import operator as op
+import unittest
+from unittest import mock
+
+import pytest
+
+import google.cloud.bigquery.table as bq_table
+
+from google.api_core import exceptions
+
+from tests.unit.helpers import _to_pyarrow
+
+
+class TestCursor(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.dbapi import Cursor
+
+ return Cursor
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def _mock_client(
+ self,
+ rows=None,
+ schema=None,
+ num_dml_affected_rows=None,
+ dry_run_job=False,
+ total_bytes_processed=0,
+ total_rows=None,
+ destination_table="test-project.test_dataset.test_table",
+ ):
+ from google.cloud.bigquery import client
+
+ if total_rows is None:
+ total_rows = 0
+ if rows is not None:
+ total_rows = len(rows)
+
+ table = bq_table.TableReference.from_string(destination_table)
+ mock_client = mock.create_autospec(client.Client)
+ mock_job = self._mock_job(
+ total_rows=total_rows,
+ schema=schema,
+ num_dml_affected_rows=num_dml_affected_rows,
+ dry_run=dry_run_job,
+ total_bytes_processed=total_bytes_processed,
+ rows=self._mock_rows(
+ rows,
+ total_rows=total_rows,
+ schema=schema,
+ num_dml_affected_rows=num_dml_affected_rows,
+ table=table,
+ ),
+ )
+ mock_client.get_job.return_value = mock_job
+ mock_client.query.return_value = mock_job
+ mock_client.query_and_wait.return_value = self._mock_rows(
+ rows,
+ total_rows=total_rows,
+ schema=schema,
+ num_dml_affected_rows=num_dml_affected_rows,
+ # Sometimes all the results will be available in the initial
+ # response, in which case may be no job and no destination table.
+ table=table if rows is not None and total_rows > len(rows) else None,
+ )
+
+ # Assure that the REST client gets used, not the BQ Storage client.
+ mock_client._ensure_bqstorage_client.return_value = None
+
+ return mock_client
+
+ def _mock_bqstorage_client(self, rows=None, stream_count=0):
+ from google.cloud import bigquery_storage
+
+ if rows is None:
+ rows = []
+
+ mock_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ mock_read_session = mock.MagicMock(
+ streams=[
+ bigquery_storage.types.ReadStream(name="streams/stream_{}".format(i))
+ for i in range(stream_count)
+ ]
+ )
+
+ mock_client.create_read_session.return_value = mock_read_session
+
+ mock_rows_stream = mock.MagicMock()
+ mock_rows_stream.rows.return_value = iter(rows)
+ mock_client.read_rows.return_value = mock_rows_stream
+
+ return mock_client
+
+ def _mock_job(
+ self,
+ total_rows=0,
+ schema=None,
+ num_dml_affected_rows=None,
+ dry_run=False,
+ total_bytes_processed=0,
+ rows=None,
+ ):
+ from google.cloud.bigquery import job
+
+ mock_job = mock.create_autospec(job.QueryJob)
+ mock_job.error_result = None
+ mock_job.state = "DONE"
+ mock_job.dry_run = dry_run
+
+ if dry_run:
+ mock_job.result.side_effect = exceptions.NotFound
+ mock_job.total_bytes_processed = total_bytes_processed
+ else:
+ mock_job.result.return_value = rows
+ mock_job._query_results = self._mock_results(
+ total_rows=total_rows,
+ schema=schema,
+ num_dml_affected_rows=num_dml_affected_rows,
+ )
+ mock_job.destination.project = "P"
+ mock_job.destination.to_bqstorage.return_value = (
+ "projects/P/datasets/DS/tables/T"
+ )
+
+ if num_dml_affected_rows is None:
+ mock_job.statement_type = None # API sends back None for SELECT
+ else:
+ mock_job.statement_type = "UPDATE"
+
+ return mock_job
+
+ def _mock_rows(
+ self, rows, total_rows=0, schema=None, num_dml_affected_rows=None, table=None
+ ):
+ mock_rows = mock.create_autospec(bq_table.RowIterator, instance=True)
+ mock_rows.__iter__.return_value = rows
+ mock_rows._table = table
+ mock_rows._should_use_bqstorage = functools.partial(
+ bq_table.RowIterator._should_use_bqstorage,
+ mock_rows,
+ )
+ mock_rows._is_almost_completely_cached = functools.partial(
+ bq_table.RowIterator._is_almost_completely_cached,
+ mock_rows,
+ )
+ mock_rows.max_results = None
+ type(mock_rows).job_id = mock.PropertyMock(return_value="test-job-id")
+ type(mock_rows).location = mock.PropertyMock(return_value="test-location")
+ type(mock_rows).num_dml_affected_rows = mock.PropertyMock(
+ return_value=num_dml_affected_rows
+ )
+ type(mock_rows).total_rows = mock.PropertyMock(return_value=total_rows)
+ type(mock_rows).schema = mock.PropertyMock(return_value=schema)
+ return mock_rows
+
+ def _mock_results(self, total_rows=0, schema=None, num_dml_affected_rows=None):
+ from google.cloud.bigquery import query
+
+ mock_results = mock.create_autospec(query._QueryResults)
+ mock_results.schema = schema
+ mock_results.num_dml_affected_rows = num_dml_affected_rows
+ mock_results.total_rows = total_rows
+ return mock_results
+
+ def test_ctor(self):
+ from google.cloud.bigquery.dbapi import connect
+ from google.cloud.bigquery.dbapi import Cursor
+
+ connection = connect(self._mock_client())
+ cursor = self._make_one(connection)
+ self.assertIsInstance(cursor, Cursor)
+ self.assertIs(cursor.connection, connection)
+
+ def test_close(self):
+ from google.cloud.bigquery.dbapi import connect
+
+ connection = connect(self._mock_client())
+ cursor = connection.cursor()
+ # close() is a no-op, there is nothing to test.
+ cursor.close()
+
+ def test_raises_error_if_closed(self):
+ from google.cloud.bigquery.dbapi import connect
+ from google.cloud.bigquery.dbapi.exceptions import ProgrammingError
+
+ connection = connect(self._mock_client())
+ cursor = connection.cursor()
+ cursor.close()
+
+ method_names = (
+ "close",
+ "execute",
+ "executemany",
+ "fetchall",
+ "fetchmany",
+ "fetchone",
+ "setinputsizes",
+ "setoutputsize",
+ "__iter__",
+ )
+
+ for method in method_names:
+ with self.assertRaisesRegex(
+ ProgrammingError, r"Operating on a closed cursor\."
+ ):
+ getattr(cursor, method)()
+
+ def test_fetchone_wo_execute_raises_error(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(self._mock_client())
+ cursor = connection.cursor()
+ self.assertRaises(dbapi.Error, cursor.fetchone)
+
+ def test_fetchone_w_row(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(self._mock_client(rows=[(1,)]))
+ cursor = connection.cursor()
+ cursor.execute("SELECT 1;")
+ row = cursor.fetchone()
+ self.assertEqual(row, (1,))
+ self.assertIsNone(cursor.fetchone())
+
+ def test_fetchmany_wo_execute_raises_error(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(self._mock_client())
+ cursor = connection.cursor()
+ self.assertRaises(dbapi.Error, cursor.fetchmany)
+
+ def test_fetchmany_w_row(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(self._mock_client(rows=[(1,)]))
+ cursor = connection.cursor()
+ cursor.execute("SELECT 1;")
+ rows = cursor.fetchmany()
+ self.assertEqual(len(rows), 1)
+ self.assertEqual(rows[0], (1,))
+
+ def test_fetchmany_w_size(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(
+ self._mock_client(rows=[(1, 2, 3), (4, 5, 6), (7, 8, 9)])
+ )
+ cursor = connection.cursor()
+ cursor.execute("SELECT a, b, c;")
+ rows = cursor.fetchmany(size=2)
+ self.assertEqual(len(rows), 2)
+ self.assertEqual(rows[0], (1, 2, 3))
+ self.assertEqual(rows[1], (4, 5, 6))
+ second_page = cursor.fetchmany(size=2)
+ self.assertEqual(len(second_page), 1)
+ self.assertEqual(second_page[0], (7, 8, 9))
+ third_page = cursor.fetchmany(size=2)
+ self.assertEqual(third_page, [])
+
+ def test_fetchmany_w_arraysize(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(
+ self._mock_client(rows=[(1, 2, 3), (4, 5, 6), (7, 8, 9)])
+ )
+ cursor = connection.cursor()
+ cursor.execute("SELECT a, b, c;")
+ cursor.arraysize = 2
+ rows = cursor.fetchmany()
+ self.assertEqual(len(rows), 2)
+ self.assertEqual(rows[0], (1, 2, 3))
+ self.assertEqual(rows[1], (4, 5, 6))
+ second_page = cursor.fetchmany()
+ self.assertEqual(len(second_page), 1)
+ self.assertEqual(second_page[0], (7, 8, 9))
+ third_page = cursor.fetchmany()
+ self.assertEqual(third_page, [])
+
+ def test_fetchall_wo_execute_raises_error(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(self._mock_client())
+ cursor = connection.cursor()
+ self.assertRaises(dbapi.Error, cursor.fetchall)
+
+ def test_fetchall_w_row(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(self._mock_client(rows=[(1,)]))
+ cursor = connection.cursor()
+ cursor.execute("SELECT 1;")
+ self.assertIsNone(cursor.description)
+ self.assertEqual(cursor.rowcount, 1)
+ rows = cursor.fetchall()
+ self.assertEqual(len(rows), 1)
+ self.assertEqual(rows[0], (1,))
+
+ def test_fetchall_w_bqstorage_client_fetch_success(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery import dbapi
+
+ # use unordered data to also test any non-determenistic key order in dicts
+ row_data = [
+ bq_table.Row(
+ [1.4, 1.1, 1.3, 1.2], {"bar": 3, "baz": 2, "foo": 1, "quux": 0}
+ ),
+ bq_table.Row(
+ [2.4, 2.1, 2.3, 2.2], {"bar": 3, "baz": 2, "foo": 1, "quux": 0}
+ ),
+ ]
+ bqstorage_streamed_rows = [
+ {
+ "bar": _to_pyarrow(1.2),
+ "foo": _to_pyarrow(1.1),
+ "quux": _to_pyarrow(1.4),
+ "baz": _to_pyarrow(1.3),
+ },
+ {
+ "bar": _to_pyarrow(2.2),
+ "foo": _to_pyarrow(2.1),
+ "quux": _to_pyarrow(2.4),
+ "baz": _to_pyarrow(2.3),
+ },
+ ]
+
+ mock_client = self._mock_client(rows=row_data)
+ mock_bqstorage_client = self._mock_bqstorage_client(
+ stream_count=1,
+ rows=bqstorage_streamed_rows,
+ )
+ mock_client._ensure_bqstorage_client.return_value = mock_bqstorage_client
+
+ connection = dbapi.connect(
+ client=mock_client,
+ bqstorage_client=mock_bqstorage_client,
+ )
+ cursor = connection.cursor()
+ cursor.execute("SELECT foo, bar FROM some_table")
+
+ rows = cursor.fetchall()
+
+ # the default client was not used
+ mock_client.list_rows.assert_not_called()
+
+ # check the data returned
+ field_value = op.itemgetter(1)
+ sorted_row_data = [sorted(row.items(), key=field_value) for row in rows]
+ expected_row_data = [
+ [("foo", 1.1), ("bar", 1.2), ("baz", 1.3), ("quux", 1.4)],
+ [("foo", 2.1), ("bar", 2.2), ("baz", 2.3), ("quux", 2.4)],
+ ]
+
+ self.assertEqual(sorted_row_data, expected_row_data)
+
+ def test_fetchall_w_bqstorage_client_fetch_no_rows(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import dbapi
+
+ mock_client = self._mock_client(
+ rows=[],
+ # Assume there are many more pages of data to look at so that the
+ # BQ Storage API is necessary.
+ total_rows=1000,
+ )
+ mock_bqstorage_client = self._mock_bqstorage_client(stream_count=0)
+ mock_client._ensure_bqstorage_client.return_value = mock_bqstorage_client
+
+ connection = dbapi.connect(
+ client=mock_client,
+ bqstorage_client=mock_bqstorage_client,
+ )
+ cursor = connection.cursor()
+ cursor.execute("SELECT foo, bar FROM some_table")
+
+ rows = cursor.fetchall()
+
+ # # the default client was not used
+ mock_client.list_rows.assert_not_called()
+
+ # check the data returned
+ self.assertEqual(rows, [])
+
+ def test_fetchall_w_bqstorage_client_fetch_error_no_fallback(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import dbapi
+
+ row_data = [bq_table.Row([1.1, 1.2], {"foo": 0, "bar": 1})]
+
+ def fake_ensure_bqstorage_client(bqstorage_client=None, **kwargs):
+ return bqstorage_client
+
+ mock_client = self._mock_client(
+ rows=row_data,
+ # Assume there are many more pages of data to look at so that the
+ # BQ Storage API is necessary.
+ total_rows=1000,
+ )
+ mock_client._ensure_bqstorage_client.side_effect = fake_ensure_bqstorage_client
+ mock_bqstorage_client = self._mock_bqstorage_client(
+ stream_count=1,
+ rows=row_data,
+ )
+ no_access_error = exceptions.Forbidden("invalid credentials")
+ mock_bqstorage_client.create_read_session.side_effect = no_access_error
+
+ connection = dbapi.connect(
+ client=mock_client,
+ bqstorage_client=mock_bqstorage_client,
+ )
+ cursor = connection.cursor()
+ cursor.execute("SELECT foo, bar FROM some_table")
+
+ with self.assertRaisesRegex(exceptions.Forbidden, "invalid credentials"):
+ cursor.fetchall()
+
+ # the default client was not used
+ mock_client.list_rows.assert_not_called()
+
+ def test_fetchall_w_bqstorage_client_no_arrow_compression(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pyarrow")
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery import dbapi
+
+ # Use unordered data to also test any non-determenistic key order in dicts.
+ row_data = [bq_table.Row([1.2, 1.1], {"bar": 1, "foo": 0})]
+ bqstorage_streamed_rows = [{"bar": _to_pyarrow(1.2), "foo": _to_pyarrow(1.1)}]
+
+ def fake_ensure_bqstorage_client(bqstorage_client=None, **kwargs):
+ return bqstorage_client
+
+ mock_client = self._mock_client(
+ rows=row_data,
+ # Assume there are many more pages of data to look at so that the
+ # BQ Storage API is necessary.
+ total_rows=1000,
+ destination_table="P.DS.T",
+ )
+ mock_client._ensure_bqstorage_client.side_effect = fake_ensure_bqstorage_client
+ mock_bqstorage_client = self._mock_bqstorage_client(
+ stream_count=1,
+ rows=bqstorage_streamed_rows,
+ )
+
+ connection = dbapi.connect(
+ client=mock_client,
+ bqstorage_client=mock_bqstorage_client,
+ )
+ cursor = connection.cursor()
+ cursor.execute("SELECT foo, bar FROM some_table")
+
+ with mock.patch(
+ "google.cloud.bigquery.dbapi.cursor._ARROW_COMPRESSION_SUPPORT", new=False
+ ):
+ rows = cursor.fetchall()
+
+ mock_client.list_rows.assert_not_called() # The default client was not used.
+
+ # Check the BQ Storage session config.
+ expected_session = bigquery_storage.ReadSession(
+ table="projects/P/datasets/DS/tables/T",
+ data_format=bigquery_storage.DataFormat.ARROW,
+ )
+ mock_bqstorage_client.create_read_session.assert_called_once_with(
+ parent="projects/P", read_session=expected_session, max_stream_count=1
+ )
+
+ # Check the data returned.
+ field_value = op.itemgetter(1)
+ sorted_row_data = [sorted(row.items(), key=field_value) for row in rows]
+ expected_row_data = [[("foo", 1.1), ("bar", 1.2)]]
+
+ self.assertEqual(sorted_row_data, expected_row_data)
+
+ def test_execute_custom_job_id(self):
+ from google.cloud.bigquery.dbapi import connect
+
+ client = self._mock_client(rows=[], num_dml_affected_rows=0)
+ connection = connect(client)
+ cursor = connection.cursor()
+ cursor.execute("SELECT 1;", job_id="foo")
+ args, kwargs = client.query.call_args
+ self.assertEqual(args[0], "SELECT 1;")
+ self.assertEqual(kwargs["job_id"], "foo")
+
+ def test_execute_w_default_config(self):
+ from google.cloud.bigquery.dbapi import connect
+
+ client = self._mock_client(rows=[], num_dml_affected_rows=0)
+ connection = connect(client)
+ cursor = connection.cursor()
+
+ cursor.execute("SELECT 1;", job_id="foo")
+
+ _, kwargs = client.query.call_args
+ used_config = kwargs["job_config"]
+ self.assertIsNone(used_config)
+
+ def test_execute_custom_job_config_wo_default_config(self):
+ from google.cloud.bigquery.dbapi import connect
+ from google.cloud.bigquery import job
+
+ config = job.QueryJobConfig(use_legacy_sql=True)
+ client = self._mock_client(rows=[], num_dml_affected_rows=0)
+ connection = connect(client)
+ cursor = connection.cursor()
+ cursor.execute("SELECT 1;", job_id="foo", job_config=config)
+ args, kwargs = client.query.call_args
+ self.assertEqual(args[0], "SELECT 1;")
+ self.assertEqual(kwargs["job_id"], "foo")
+ self.assertEqual(kwargs["job_config"], config)
+
+ def test_execute_custom_job_config_w_default_config(self):
+ from google.cloud.bigquery.dbapi import connect
+ from google.cloud.bigquery import job
+
+ client = self._mock_client(rows=[], num_dml_affected_rows=0)
+ connection = connect(client)
+ cursor = connection.cursor()
+ config = job.QueryJobConfig(use_legacy_sql=True)
+
+ cursor.execute("SELECT 1;", job_id="foo", job_config=config)
+
+ _, kwargs = client.query.call_args
+ used_config = kwargs["job_config"]
+ expected_config = job.QueryJobConfig(
+ use_legacy_sql=True, # the config passed to execute() prevails
+ query_parameters=[],
+ )
+ self.assertEqual(used_config._properties, expected_config._properties)
+
+ def test_execute_w_dml(self):
+ from google.cloud.bigquery.dbapi import connect
+
+ connection = connect(self._mock_client(rows=[], num_dml_affected_rows=12))
+ cursor = connection.cursor()
+ cursor.execute("DELETE FROM UserSessions WHERE user_id = 'test';")
+ rows = cursor.fetchall()
+ self.assertIsNone(cursor.description)
+ self.assertEqual(cursor.rowcount, 12)
+ self.assertEqual(rows, [])
+
+ def test_execute_w_query(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(
+ self._mock_client(
+ rows=[("hello", "world", 1), ("howdy", "y'all", 2)],
+ schema=[
+ SchemaField("a", "STRING", mode="NULLABLE"),
+ SchemaField("b", "STRING", mode="REQUIRED"),
+ SchemaField("c", "INTEGER", mode="NULLABLE"),
+ ],
+ )
+ )
+ cursor = connection.cursor()
+ cursor.execute("SELECT a, b, c FROM hello_world WHERE d > 3;")
+
+ # Verify the description.
+ self.assertEqual(len(cursor.description), 3)
+ a_name, a_type, _, _, _, _, a_null_ok = cursor.description[0]
+ self.assertEqual(a_name, "a")
+ self.assertEqual(a_type, "STRING")
+ self.assertEqual(a_type, dbapi.STRING)
+ self.assertTrue(a_null_ok)
+ b_name, b_type, _, _, _, _, b_null_ok = cursor.description[1]
+ self.assertEqual(b_name, "b")
+ self.assertEqual(b_type, "STRING")
+ self.assertEqual(b_type, dbapi.STRING)
+ self.assertFalse(b_null_ok)
+ c_name, c_type, _, _, _, _, c_null_ok = cursor.description[2]
+ self.assertEqual(c_name, "c")
+ self.assertEqual(c_type, "INTEGER")
+ self.assertEqual(c_type, dbapi.NUMBER)
+ self.assertTrue(c_null_ok)
+
+ # Verify the results.
+ self.assertEqual(cursor.rowcount, 2)
+ row = cursor.fetchone()
+ self.assertEqual(row, ("hello", "world", 1))
+ row = cursor.fetchone()
+ self.assertEqual(row, ("howdy", "y'all", 2))
+ row = cursor.fetchone()
+ self.assertIsNone(row)
+
+ def test_execute_w_query_dry_run(self):
+ from google.cloud.bigquery.job import QueryJobConfig
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(
+ self._mock_client(
+ rows=[],
+ schema=[
+ SchemaField("a", "STRING", mode="NULLABLE"),
+ SchemaField("b", "STRING", mode="REQUIRED"),
+ SchemaField("c", "INTEGER", mode="NULLABLE"),
+ ],
+ dry_run_job=True,
+ total_bytes_processed=12345,
+ )
+ )
+ cursor = connection.cursor()
+
+ cursor.execute(
+ "SELECT a, b, c FROM hello_world WHERE d > 3;",
+ job_config=QueryJobConfig(dry_run=True),
+ )
+
+ self.assertEqual(cursor.rowcount, 0)
+ self.assertIsNotNone(cursor.description)
+ rows = cursor.fetchall()
+ self.assertEqual(list(rows), [])
+
+ def test_execute_raises_if_result_raises(self):
+ import google.cloud.exceptions
+
+ from google.cloud.bigquery import client
+ from google.cloud.bigquery.dbapi import connect
+ from google.cloud.bigquery.dbapi import exceptions
+
+ client = mock.create_autospec(client.Client)
+ client.query_and_wait.side_effect = google.cloud.exceptions.GoogleCloudError("")
+ connection = connect(client)
+ cursor = connection.cursor()
+
+ with self.assertRaises(exceptions.DatabaseError):
+ cursor.execute("SELECT 1")
+
+ def test_executemany_w_dml(self):
+ from google.cloud.bigquery.dbapi import connect
+
+ connection = connect(self._mock_client(rows=[], num_dml_affected_rows=12))
+ cursor = connection.cursor()
+ cursor.executemany(
+ "DELETE FROM UserSessions WHERE user_id = %s;",
+ (("test",), ("anothertest",)),
+ )
+ self.assertIsNone(cursor.description)
+ self.assertEqual(cursor.rowcount, 24) # 24 because 2 * 12 because cumulatve.
+
+ def test_executemany_empty(self):
+ from google.cloud.bigquery.dbapi import connect
+
+ connection = connect(self._mock_client(rows=[], num_dml_affected_rows=12))
+ cursor = connection.cursor()
+ cursor.executemany((), ())
+ self.assertIsNone(cursor.description)
+ self.assertEqual(cursor.rowcount, -1)
+
+ def test_is_iterable(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(
+ self._mock_client(rows=[("hello", "there", 7), ("good", "bye", -3)])
+ )
+ cursor = connection.cursor()
+ cursor.execute("SELECT foo, bar, baz FROM hello_world WHERE baz < 42;")
+
+ rows_iter = iter(cursor)
+
+ row = next(rows_iter)
+ self.assertEqual(row, ("hello", "there", 7))
+ row = next(rows_iter)
+ self.assertEqual(row, ("good", "bye", -3))
+ self.assertRaises(StopIteration, next, rows_iter)
+
+ self.assertEqual(
+ list(cursor),
+ [],
+ "Iterating again over the same results should produce no rows.",
+ )
+
+ def test_query_job_wo_execute(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(self._mock_client())
+ cursor = connection.cursor()
+ self.assertIsNone(cursor.query_job)
+
+ def test_query_job_w_execute(self):
+ from google.cloud.bigquery import dbapi, QueryJob
+
+ connection = dbapi.connect(self._mock_client())
+ cursor = connection.cursor()
+ cursor.execute("SELECT 1;")
+ self.assertIsInstance(cursor.query_job, QueryJob)
+
+ def test_query_job_w_execute_no_job(self):
+ from google.cloud.bigquery import dbapi
+
+ connection = dbapi.connect(self._mock_client())
+ cursor = connection.cursor()
+ cursor.execute("SELECT 1;")
+
+ # Simulate jobless execution.
+ type(cursor._query_rows).job_id = mock.PropertyMock(return_value=None)
+
+ self.assertIsNone(cursor.query_job)
+
+ def test_query_job_w_executemany(self):
+ from google.cloud.bigquery import dbapi, QueryJob
+
+ connection = dbapi.connect(self._mock_client())
+ cursor = connection.cursor()
+ cursor.executemany("SELECT %s;", (("1",), ("2",)))
+ self.assertIsInstance(cursor.query_job, QueryJob)
+
+ def test__format_operation_w_dict(self):
+ from google.cloud.bigquery.dbapi import cursor
+
+ parameter_types = {}
+ formatted_operation, parameter_types = cursor._format_operation(
+ "SELECT %(somevalue)s, %(a `weird` one:STRING)s;",
+ {"somevalue": "hi", "a `weird` one": "world"},
+ )
+ self.assertEqual(
+ formatted_operation, "SELECT @`somevalue`, @`a \\`weird\\` one`;"
+ )
+ self.assertEqual(parameter_types, {"a `weird` one": "STRING"})
+
+ def test__format_operation_w_wrong_dict(self):
+ from google.cloud.bigquery import dbapi
+ from google.cloud.bigquery.dbapi import cursor
+
+ self.assertRaises(
+ dbapi.ProgrammingError,
+ cursor._format_operation,
+ "SELECT %(somevalue)s, %(othervalue)s;",
+ {"somevalue-not-here": "hi", "othervalue": "world"},
+ )
+
+ def test__format_operation_w_redundant_dict_key(self):
+ from google.cloud.bigquery.dbapi import cursor
+
+ formatted_operation, _ = cursor._format_operation(
+ "SELECT %(somevalue)s;", {"somevalue": "foo", "value-not-used": "bar"}
+ )
+ self.assertEqual(formatted_operation, "SELECT @`somevalue`;")
+
+ def test__format_operation_w_sequence(self):
+ from google.cloud.bigquery.dbapi import cursor
+
+ formatted_operation, _ = cursor._format_operation(
+ "SELECT %s, %s;", ("hello", "world")
+ )
+ self.assertEqual(formatted_operation, "SELECT ?, ?;")
+
+ def test__format_operation_w_too_short_sequence(self):
+ from google.cloud.bigquery import dbapi
+ from google.cloud.bigquery.dbapi import cursor
+
+ self.assertRaises(
+ dbapi.ProgrammingError,
+ cursor._format_operation,
+ "SELECT %s, %s;",
+ ("hello",),
+ )
+
+ def test__format_operation_w_too_long_sequence(self):
+ from google.cloud.bigquery import dbapi
+ from google.cloud.bigquery.dbapi import cursor
+
+ self.assertRaises(
+ dbapi.ProgrammingError,
+ cursor._format_operation,
+ "SELECT %s, %s;",
+ ("hello", "world", "everyone"),
+ )
+
+ def test__format_operation_w_empty_dict(self):
+ from google.cloud.bigquery.dbapi import cursor
+
+ formatted_operation, _ = cursor._format_operation("SELECT '%f'", {})
+ self.assertEqual(formatted_operation, "SELECT '%f'")
+
+ def test__format_operation_wo_params_single_percent(self):
+ from google.cloud.bigquery.dbapi import cursor
+
+ formatted_operation, _ = cursor._format_operation("SELECT '%'", {})
+ self.assertEqual(formatted_operation, "SELECT '%'")
+
+ def test__format_operation_wo_params_double_percents(self):
+ from google.cloud.bigquery.dbapi import cursor
+
+ formatted_operation, _ = cursor._format_operation("SELECT '%%'", {})
+ self.assertEqual(formatted_operation, "SELECT '%'")
+
+ def test__format_operation_unescaped_percent_w_dict_param(self):
+ from google.cloud.bigquery import dbapi
+ from google.cloud.bigquery.dbapi import cursor
+
+ self.assertRaises(
+ dbapi.ProgrammingError,
+ cursor._format_operation,
+ "SELECT %(foo)s, '100 %';",
+ {"foo": "bar"},
+ )
+
+ def test__format_operation_unescaped_percent_w_list_param(self):
+ from google.cloud.bigquery import dbapi
+ from google.cloud.bigquery.dbapi import cursor
+
+ self.assertRaises(
+ dbapi.ProgrammingError,
+ cursor._format_operation,
+ "SELECT %s, %s, '100 %';",
+ ["foo", "bar"],
+ )
+
+ def test__format_operation_no_placeholders(self):
+ from google.cloud.bigquery import dbapi
+ from google.cloud.bigquery.dbapi import cursor
+
+ self.assertRaises(
+ dbapi.ProgrammingError,
+ cursor._format_operation,
+ "SELECT 42",
+ ["foo", "bar"],
+ )
+
+
+@pytest.mark.parametrize(
+ "inp,expect",
+ [
+ ("", ("", None)),
+ ("values(%(foo)s, %(bar)s)", ("values(%(foo)s, %(bar)s)", {})),
+ (
+ "values('%%(oof:INT64)s', %(foo)s, %(bar)s)",
+ ("values('%%(oof:INT64)s', %(foo)s, %(bar)s)", {}),
+ ),
+ (
+ "values(%(foo:INT64)s, %(bar)s)",
+ ("values(%(foo)s, %(bar)s)", dict(foo="INT64")),
+ ),
+ (
+ "values('%%(oof:INT64)s, %(foo:INT64)s, %(foo)s)",
+ ("values('%%(oof:INT64)s, %(foo)s, %(foo)s)", dict(foo="INT64")),
+ ),
+ (
+ "values(%(foo:INT64)s, %(foo:INT64)s)",
+ ("values(%(foo)s, %(foo)s)", dict(foo="INT64")),
+ ),
+ (
+ "values(%(foo:INT64)s, %(bar:NUMERIC)s) 100 %",
+ ("values(%(foo)s, %(bar)s) 100 %", dict(foo="INT64", bar="NUMERIC")),
+ ),
+ (" %s %()s %(:int64)s ", (" %s %s %s ", [None, None, "int64"])),
+ (" %%s %s %()s %(:int64)s ", (" %%s %s %s %s ", [None, None, "int64"])),
+ (
+ "values(%%%(foo:INT64)s, %(bar)s)",
+ ("values(%%%(foo)s, %(bar)s)", dict(foo="INT64")),
+ ),
+ (
+ "values(%%%%(foo:INT64)s, %(bar)s)",
+ ("values(%%%%(foo:INT64)s, %(bar)s)", dict()),
+ ),
+ (
+ "values(%%%%%(foo:INT64)s, %(bar)s)",
+ ("values(%%%%%(foo)s, %(bar)s)", dict(foo="INT64")),
+ ),
+ (
+ "values(%%%%%(foo:struct)s, %(bar)s)",
+ ("values(%%%%%(foo)s, %(bar)s)", dict(foo="struct")),
+ ),
+ (
+ "values(%%%%%(foo:struct)s, %(bar)s)",
+ ("values(%%%%%(foo)s, %(bar)s)", dict(foo="struct")),
+ ),
+ (
+ "values(%%%%%(foo:struct)s, %(bar)s)",
+ ("values(%%%%%(foo)s, %(bar)s)", dict(foo="struct")),
+ ),
+ (
+ "values(%(foo:struct)s, %(bar)s)",
+ (
+ "values(%(foo)s, %(bar)s)",
+ dict(foo="struct"),
+ ),
+ ),
+ (
+ "values(%(foo:struct)s, %(bar)s)",
+ (
+ "values(%(foo)s, %(bar)s)",
+ dict(foo="struct"),
+ ),
+ ),
+ (
+ "values(%(foo:string(10))s, %(bar)s)",
+ ("values(%(foo)s, %(bar)s)", dict(foo="string(10)")),
+ ),
+ ],
+)
+def test__extract_types(inp, expect):
+ from google.cloud.bigquery.dbapi.cursor import _extract_types as et
+
+ assert et(inp) == expect
+
+
+@pytest.mark.parametrize(
+ "match,inp",
+ [
+ (
+ "Conflicting types for foo: numeric and int64.",
+ " %(foo:numeric)s %(foo:int64)s ",
+ ),
+ (r"' %s %\(foo\)s ' mixes named and unamed parameters.", " %s %(foo)s "),
+ (r"' %\(foo\)s %s ' mixes named and unamed parameters.", " %(foo)s %s "),
+ ],
+)
+def test__extract_types_fail(match, inp):
+ from google.cloud.bigquery.dbapi.cursor import _extract_types as et
+ from google.cloud.bigquery.dbapi import exceptions
+
+ with pytest.raises(exceptions.ProgrammingError, match=match):
+ et(inp)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_dbapi_types.py b/testbed/googleapis__python-bigquery/tests/unit/test_dbapi_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..7319aa0161c32733260a9787ea72e85c961d24f0
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_dbapi_types.py
@@ -0,0 +1,66 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import unittest
+
+import pytest
+
+import google.cloud._helpers
+from google.cloud.bigquery.dbapi import types
+
+
+class TestTypes(unittest.TestCase):
+ def test_binary_type(self):
+ self.assertEqual("BYTES", types.BINARY)
+ self.assertEqual("RECORD", types.BINARY)
+ self.assertEqual("STRUCT", types.BINARY)
+ self.assertNotEqual("STRING", types.BINARY)
+
+ def test_timefromticks(self):
+ somedatetime = datetime.datetime(
+ 2017, 2, 18, 12, 47, 26, tzinfo=google.cloud._helpers.UTC
+ )
+ epoch = datetime.datetime(1970, 1, 1, tzinfo=google.cloud._helpers.UTC)
+ ticks = (somedatetime - epoch).total_seconds()
+ self.assertEqual(
+ types.TimeFromTicks(ticks, google.cloud._helpers.UTC),
+ datetime.time(12, 47, 26, tzinfo=google.cloud._helpers.UTC),
+ )
+
+
+class CustomBinary:
+ def __bytes__(self):
+ return b"Google"
+
+
+@pytest.mark.parametrize(
+ "raw,expected",
+ [
+ ("hello", b"hello"),
+ ("\u1f60", "\u1f60".encode("utf-8")),
+ (b"hello", b"hello"),
+ (bytearray(b"hello"), b"hello"),
+ (memoryview(b"hello"), b"hello"),
+ (CustomBinary(), b"Google"),
+ ],
+)
+def test_binary_constructor(raw, expected):
+ assert types.Binary(raw) == expected
+
+
+@pytest.mark.parametrize("bad", (42, 42.0, None))
+def test_invalid_binary_constructor(bad):
+ with pytest.raises(TypeError):
+ types.Binary(bad)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_delete_dataset.py b/testbed/googleapis__python-bigquery/tests/unit/test_delete_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..b48beb1473d51825803ecf75ff5d2c4660393c3b
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_delete_dataset.py
@@ -0,0 +1,79 @@
+# Copyright 2021 Google LLC
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# https://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .helpers import make_connection, make_client, dataset_polymorphic
+import google.api_core.exceptions
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+import pytest
+
+
+@dataset_polymorphic
+def test_delete_dataset(make_dataset, get_reference, client, PROJECT, DS_ID):
+ dataset = make_dataset(PROJECT, DS_ID)
+ PATH = "projects/%s/datasets/%s" % (PROJECT, DS_ID)
+ conn = client._connection = make_connection({})
+ client.delete_dataset(dataset, timeout=7.5)
+ conn.api_request.assert_called_with(
+ method="DELETE", path="/%s" % PATH, query_params={}, timeout=7.5
+ )
+
+
+@dataset_polymorphic
+def test_delete_dataset_delete_contents(
+ make_dataset, get_reference, client, PROJECT, DS_ID
+):
+ PATH = "projects/%s/datasets/%s" % (PROJECT, DS_ID)
+ conn = client._connection = make_connection({})
+ dataset = make_dataset(PROJECT, DS_ID)
+ client.delete_dataset(dataset, delete_contents=True)
+ conn.api_request.assert_called_with(
+ method="DELETE",
+ path="/%s" % PATH,
+ query_params={"deleteContents": "true"},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_delete_dataset_wrong_type(client):
+ with pytest.raises(TypeError):
+ client.delete_dataset(42)
+
+
+def test_delete_dataset_w_not_found_ok_false(PROJECT, DS_ID):
+ path = "/projects/{}/datasets/{}".format(PROJECT, DS_ID)
+ http = object()
+ client = make_client(_http=http)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.NotFound("dataset not found")
+ )
+
+ with pytest.raises(google.api_core.exceptions.NotFound):
+ client.delete_dataset(DS_ID)
+
+ conn.api_request.assert_called_with(
+ method="DELETE", path=path, query_params={}, timeout=DEFAULT_TIMEOUT
+ )
+
+
+def test_delete_dataset_w_not_found_ok_true(PROJECT, DS_ID):
+ path = "/projects/{}/datasets/{}".format(PROJECT, DS_ID)
+ http = object()
+ client = make_client(_http=http)
+ conn = client._connection = make_connection(
+ google.api_core.exceptions.NotFound("dataset not found")
+ )
+ client.delete_dataset(DS_ID, not_found_ok=True)
+ conn.api_request.assert_called_with(
+ method="DELETE", path=path, query_params={}, timeout=DEFAULT_TIMEOUT
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_encryption_configuration.py b/testbed/googleapis__python-bigquery/tests/unit/test_encryption_configuration.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdd944a8fe1a339165c48b3fab4af6136c3e00ad
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_encryption_configuration.py
@@ -0,0 +1,111 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from unittest import mock
+
+
+class TestEncryptionConfiguration(unittest.TestCase):
+ KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.encryption_configuration import (
+ EncryptionConfiguration,
+ )
+
+ return EncryptionConfiguration
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_defaults(self):
+ encryption_config = self._make_one()
+ self.assertIsNone(encryption_config.kms_key_name)
+
+ def test_ctor_with_key(self):
+ encryption_config = self._make_one(kms_key_name=self.KMS_KEY_NAME)
+ self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME)
+
+ def test_kms_key_name_setter(self):
+ encryption_config = self._make_one()
+ self.assertIsNone(encryption_config.kms_key_name)
+ encryption_config.kms_key_name = self.KMS_KEY_NAME
+ self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME)
+ encryption_config.kms_key_name = None
+ self.assertIsNone(encryption_config.kms_key_name)
+
+ def test_from_api_repr(self):
+ RESOURCE = {"kmsKeyName": self.KMS_KEY_NAME}
+ klass = self._get_target_class()
+ encryption_config = klass.from_api_repr(RESOURCE)
+ self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME)
+
+ def test_to_api_repr(self):
+ encryption_config = self._make_one(kms_key_name=self.KMS_KEY_NAME)
+ resource = encryption_config.to_api_repr()
+ self.assertEqual(resource, {"kmsKeyName": self.KMS_KEY_NAME})
+
+ def test___eq___wrong_type(self):
+ encryption_config = self._make_one()
+ other = object()
+ self.assertNotEqual(encryption_config, other)
+ self.assertEqual(encryption_config, mock.ANY)
+
+ def test___eq___kms_key_name_mismatch(self):
+ encryption_config = self._make_one()
+ other = self._make_one(self.KMS_KEY_NAME)
+ self.assertNotEqual(encryption_config, other)
+
+ def test___eq___hit(self):
+ encryption_config = self._make_one(self.KMS_KEY_NAME)
+ other = self._make_one(self.KMS_KEY_NAME)
+ self.assertEqual(encryption_config, other)
+
+ def test___ne___wrong_type(self):
+ encryption_config = self._make_one()
+ other = object()
+ self.assertNotEqual(encryption_config, other)
+ self.assertEqual(encryption_config, mock.ANY)
+
+ def test___ne___same_value(self):
+ encryption_config1 = self._make_one(self.KMS_KEY_NAME)
+ encryption_config2 = self._make_one(self.KMS_KEY_NAME)
+ # unittest ``assertEqual`` uses ``==`` not ``!=``.
+ comparison_val = encryption_config1 != encryption_config2
+ self.assertFalse(comparison_val)
+
+ def test___ne___different_values(self):
+ encryption_config1 = self._make_one()
+ encryption_config2 = self._make_one(self.KMS_KEY_NAME)
+ self.assertNotEqual(encryption_config1, encryption_config2)
+
+ def test___hash__set_equality(self):
+ encryption_config1 = self._make_one(self.KMS_KEY_NAME)
+ encryption_config2 = self._make_one(self.KMS_KEY_NAME)
+ set_one = {encryption_config1, encryption_config2}
+ set_two = {encryption_config1, encryption_config2}
+ self.assertEqual(set_one, set_two)
+
+ def test___hash__not_equals(self):
+ encryption_config1 = self._make_one()
+ encryption_config2 = self._make_one(self.KMS_KEY_NAME)
+ set_one = {encryption_config1}
+ set_two = {encryption_config2}
+ self.assertNotEqual(set_one, set_two)
+
+ def test___repr__(self):
+ encryption_config = self._make_one(self.KMS_KEY_NAME)
+ expected = "EncryptionConfiguration({})".format(self.KMS_KEY_NAME)
+ self.assertEqual(repr(encryption_config), expected)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_external_config.py b/testbed/googleapis__python-bigquery/tests/unit/test_external_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fd16e69967cda2d7528980ff2614836064e7065
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_external_config.py
@@ -0,0 +1,892 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import copy
+import unittest
+
+from google.cloud.bigquery import external_config
+from google.cloud.bigquery import schema
+
+
+class TestExternalConfig(unittest.TestCase):
+ SOURCE_URIS = ["gs://foo", "gs://bar"]
+
+ BASE_RESOURCE = {
+ "sourceFormat": "",
+ "sourceUris": SOURCE_URIS,
+ "maxBadRecords": 17,
+ "autodetect": True,
+ "ignoreUnknownValues": False,
+ "compression": "compression",
+ }
+
+ def test_from_api_repr_base(self):
+ resource = copy.deepcopy(self.BASE_RESOURCE)
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+ self._verify_base(ec)
+ self.assertEqual(ec.schema, [])
+ self.assertIsNone(ec.options)
+
+ got_resource = ec.to_api_repr()
+ self.assertEqual(got_resource, self.BASE_RESOURCE)
+
+ resource = _copy_and_update(
+ self.BASE_RESOURCE,
+ {
+ "schema": {
+ "fields": [
+ {
+ "name": "full_name",
+ "type": "STRING",
+ "mode": "REQUIRED",
+ "description": None,
+ }
+ ]
+ }
+ },
+ )
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+ self._verify_base(ec)
+ exp_schema = [schema.SchemaField("full_name", "STRING", mode="REQUIRED")]
+ self.assertEqual(ec.schema, exp_schema)
+ self.assertIsNone(ec.options)
+
+ got_resource = ec.to_api_repr()
+ self.assertEqual(got_resource, resource)
+
+ def test_to_api_repr_base(self):
+ ec = external_config.ExternalConfig("")
+ ec.source_uris = self.SOURCE_URIS
+ ec.max_bad_records = 17
+ ec.autodetect = True
+ ec.ignore_unknown_values = False
+ ec.compression = "compression"
+ ec.connection_id = "path/to/connection"
+ ec.schema = [schema.SchemaField("full_name", "STRING", mode="REQUIRED")]
+
+ exp_schema = {
+ "fields": [{"name": "full_name", "type": "STRING", "mode": "REQUIRED"}]
+ }
+ got_resource = ec.to_api_repr()
+ exp_resource = {
+ "sourceFormat": "",
+ "sourceUris": self.SOURCE_URIS,
+ "maxBadRecords": 17,
+ "autodetect": True,
+ "ignoreUnknownValues": False,
+ "compression": "compression",
+ "connectionId": "path/to/connection",
+ "schema": exp_schema,
+ }
+ self.assertEqual(got_resource, exp_resource)
+
+ def test_connection_id(self):
+ ec = external_config.ExternalConfig("")
+ self.assertIsNone(ec.connection_id)
+ ec.connection_id = "path/to/connection"
+ self.assertEqual(ec.connection_id, "path/to/connection")
+
+ def test_reference_file_schema_uri(self):
+ ec = external_config.ExternalConfig("")
+ self.assertIsNone(ec.reference_file_schema_uri)
+ ec.reference_file_schema_uri = "path/to/reference"
+ self.assertEqual(ec.reference_file_schema_uri, "path/to/reference")
+
+ def test_schema_None(self):
+ ec = external_config.ExternalConfig("")
+ ec.schema = None
+ got = ec.to_api_repr()
+ want = {"sourceFormat": "", "schema": None}
+ self.assertEqual(got, want)
+
+ def test_schema_empty(self):
+ ec = external_config.ExternalConfig("")
+ ec.schema = []
+ got = ec.to_api_repr()
+ want = {"sourceFormat": "", "schema": {"fields": []}}
+ self.assertEqual(got, want)
+
+ def _verify_base(self, ec):
+ self.assertEqual(ec.autodetect, True)
+ self.assertEqual(ec.compression, "compression")
+ self.assertEqual(ec.ignore_unknown_values, False)
+ self.assertEqual(ec.max_bad_records, 17)
+ self.assertEqual(ec.source_uris, self.SOURCE_URIS)
+
+ def test_to_api_repr_source_format(self):
+ ec = external_config.ExternalConfig("CSV")
+ got = ec.to_api_repr()
+ want = {"sourceFormat": "CSV"}
+ self.assertEqual(got, want)
+
+ def test_from_api_repr_sheets(self):
+ resource = _copy_and_update(
+ self.BASE_RESOURCE,
+ {
+ "sourceFormat": "GOOGLE_SHEETS",
+ "googleSheetsOptions": {
+ "skipLeadingRows": "123",
+ "range": "Sheet1!A5:B10",
+ },
+ },
+ )
+
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+
+ self._verify_base(ec)
+ self.assertEqual(ec.source_format, "GOOGLE_SHEETS")
+ self.assertIsInstance(ec.options, external_config.GoogleSheetsOptions)
+ self.assertEqual(ec.options.skip_leading_rows, 123)
+ self.assertEqual(ec.options.range, "Sheet1!A5:B10")
+
+ got_resource = ec.to_api_repr()
+
+ self.assertEqual(got_resource, resource)
+
+ del resource["googleSheetsOptions"]["skipLeadingRows"]
+ del resource["googleSheetsOptions"]["range"]
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+ self.assertIsNone(ec.options.skip_leading_rows)
+ self.assertIsNone(ec.options.range)
+ got_resource = ec.to_api_repr()
+ self.assertEqual(got_resource, resource)
+
+ def test_to_api_repr_sheets(self):
+ ec = external_config.ExternalConfig("GOOGLE_SHEETS")
+ options = external_config.GoogleSheetsOptions()
+ options.skip_leading_rows = 123
+ options.range = "Sheet1!A5:B10"
+ ec.google_sheets_options = options
+
+ exp_resource = {
+ "sourceFormat": "GOOGLE_SHEETS",
+ "googleSheetsOptions": {"skipLeadingRows": "123", "range": "Sheet1!A5:B10"},
+ }
+
+ got_resource = ec.to_api_repr()
+
+ self.assertEqual(got_resource, exp_resource)
+
+ def test_from_api_repr_hive_partitioning(self):
+ resource = _copy_and_update(
+ self.BASE_RESOURCE,
+ {
+ "sourceFormat": "FORMAT_FOO",
+ "hivePartitioningOptions": {
+ "sourceUriPrefix": "http://foo/bar",
+ "mode": "STRINGS",
+ "requirePartitionFilter": True,
+ },
+ },
+ )
+
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+
+ self._verify_base(ec)
+ self.assertEqual(ec.source_format, "FORMAT_FOO")
+ self.assertIsInstance(
+ ec.hive_partitioning, external_config.HivePartitioningOptions
+ )
+ self.assertEqual(ec.hive_partitioning.source_uri_prefix, "http://foo/bar")
+ self.assertEqual(ec.hive_partitioning.mode, "STRINGS")
+ self.assertEqual(ec.hive_partitioning.require_partition_filter, True)
+
+ # converting back to API representation should yield the same result
+ got_resource = ec.to_api_repr()
+ self.assertEqual(got_resource, resource)
+
+ del resource["hivePartitioningOptions"]
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+ self.assertIsNone(ec.hive_partitioning)
+
+ got_resource = ec.to_api_repr()
+ self.assertEqual(got_resource, resource)
+
+ def test_to_api_repr_hive_partitioning(self):
+ hive_partitioning = external_config.HivePartitioningOptions()
+ hive_partitioning.source_uri_prefix = "http://foo/bar"
+ hive_partitioning.mode = "STRINGS"
+ hive_partitioning.require_partition_filter = False
+
+ ec = external_config.ExternalConfig("FORMAT_FOO")
+ ec.hive_partitioning = hive_partitioning
+
+ got_resource = ec.to_api_repr()
+
+ expected_resource = {
+ "sourceFormat": "FORMAT_FOO",
+ "hivePartitioningOptions": {
+ "sourceUriPrefix": "http://foo/bar",
+ "mode": "STRINGS",
+ "requirePartitionFilter": False,
+ },
+ }
+ self.assertEqual(got_resource, expected_resource)
+
+ def test_from_api_repr_csv(self):
+ resource = _copy_and_update(
+ self.BASE_RESOURCE,
+ {
+ "sourceFormat": "CSV",
+ "csvOptions": {
+ "fieldDelimiter": "fieldDelimiter",
+ "skipLeadingRows": "123",
+ "quote": "quote",
+ "allowQuotedNewlines": True,
+ "allowJaggedRows": False,
+ "encoding": "encoding",
+ "preserveAsciiControlCharacters": False,
+ },
+ },
+ )
+
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+
+ self._verify_base(ec)
+ self.assertEqual(ec.source_format, "CSV")
+ self.assertIsInstance(ec.options, external_config.CSVOptions)
+ self.assertEqual(ec.options.field_delimiter, "fieldDelimiter")
+ self.assertEqual(ec.options.skip_leading_rows, 123)
+ self.assertEqual(ec.options.quote_character, "quote")
+ self.assertEqual(ec.options.allow_quoted_newlines, True)
+ self.assertEqual(ec.options.allow_jagged_rows, False)
+ self.assertEqual(ec.options.encoding, "encoding")
+ self.assertEqual(ec.options.preserve_ascii_control_characters, False)
+
+ got_resource = ec.to_api_repr()
+
+ self.assertEqual(got_resource, resource)
+
+ del resource["csvOptions"]["skipLeadingRows"]
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+ self.assertIsNone(ec.options.skip_leading_rows)
+ got_resource = ec.to_api_repr()
+ self.assertEqual(got_resource, resource)
+
+ def test_to_api_repr_csv(self):
+ ec = external_config.ExternalConfig("CSV")
+ options = external_config.CSVOptions()
+ options.allow_quoted_newlines = True
+ options.encoding = "encoding"
+ options.field_delimiter = "fieldDelimiter"
+ options.quote_character = "quote"
+ options.skip_leading_rows = 123
+ options.allow_jagged_rows = False
+ options.preserve_ascii_control_characters = False
+ ec.csv_options = options
+
+ exp_resource = {
+ "sourceFormat": "CSV",
+ "csvOptions": {
+ "fieldDelimiter": "fieldDelimiter",
+ "skipLeadingRows": "123",
+ "quote": "quote",
+ "allowQuotedNewlines": True,
+ "allowJaggedRows": False,
+ "encoding": "encoding",
+ "preserveAsciiControlCharacters": False,
+ },
+ }
+
+ got_resource = ec.to_api_repr()
+
+ self.assertEqual(got_resource, exp_resource)
+
+ def test_from_api_repr_bigtable(self):
+ qualifier_encoded = base64.standard_b64encode(b"q").decode("ascii")
+ resource = _copy_and_update(
+ self.BASE_RESOURCE,
+ {
+ "sourceFormat": "BIGTABLE",
+ "bigtableOptions": {
+ "ignoreUnspecifiedColumnFamilies": True,
+ "readRowkeyAsString": False,
+ "columnFamilies": [
+ {
+ "familyId": "familyId",
+ "type": "type",
+ "encoding": "encoding",
+ "columns": [
+ {
+ "qualifierString": "q",
+ "fieldName": "fieldName1",
+ "type": "type1",
+ "encoding": "encoding1",
+ "onlyReadLatest": True,
+ },
+ {
+ "qualifierEncoded": qualifier_encoded,
+ "fieldName": "fieldName2",
+ "type": "type2",
+ "encoding": "encoding2",
+ },
+ ],
+ "onlyReadLatest": False,
+ }
+ ],
+ },
+ },
+ )
+
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+
+ self._verify_base(ec)
+ self.assertEqual(ec.source_format, "BIGTABLE")
+ self.assertIsInstance(ec.options, external_config.BigtableOptions)
+ self.assertEqual(ec.options.ignore_unspecified_column_families, True)
+ self.assertEqual(ec.options.read_rowkey_as_string, False)
+ self.assertEqual(len(ec.options.column_families), 1)
+ fam1 = ec.options.column_families[0]
+ self.assertIsInstance(fam1, external_config.BigtableColumnFamily)
+ self.assertEqual(fam1.family_id, "familyId")
+ self.assertEqual(fam1.type_, "type")
+ self.assertEqual(fam1.encoding, "encoding")
+ self.assertEqual(len(fam1.columns), 2)
+ self.assertFalse(fam1.only_read_latest)
+ col1 = fam1.columns[0]
+ self.assertEqual(col1.qualifier_string, "q")
+ self.assertEqual(col1.field_name, "fieldName1")
+ self.assertEqual(col1.type_, "type1")
+ self.assertEqual(col1.encoding, "encoding1")
+ self.assertTrue(col1.only_read_latest)
+ self.assertIsNone(col1.qualifier_encoded)
+ col2 = ec.options.column_families[0].columns[1]
+ self.assertEqual(col2.qualifier_encoded, b"q")
+ self.assertEqual(col2.field_name, "fieldName2")
+ self.assertEqual(col2.type_, "type2")
+ self.assertEqual(col2.encoding, "encoding2")
+
+ got_resource = ec.to_api_repr()
+
+ self.assertEqual(got_resource, resource)
+
+ def test_to_api_repr_bigtable(self):
+ ec = external_config.ExternalConfig("BIGTABLE")
+ options = external_config.BigtableOptions()
+ options.ignore_unspecified_column_families = True
+ options.read_rowkey_as_string = False
+ ec.bigtable_options = options
+
+ fam1 = external_config.BigtableColumnFamily()
+ fam1.family_id = "familyId"
+ fam1.type_ = "type"
+ fam1.encoding = "encoding"
+ fam1.only_read_latest = False
+ col1 = external_config.BigtableColumn()
+ col1.qualifier_string = "q"
+ col1.field_name = "fieldName1"
+ col1.type_ = "type1"
+ col1.encoding = "encoding1"
+ col1.only_read_latest = True
+ col2 = external_config.BigtableColumn()
+ col2.qualifier_encoded = b"q"
+ col2.field_name = "fieldName2"
+ col2.type_ = "type2"
+ col2.encoding = "encoding2"
+ fam1.columns = [col1, col2]
+ options.column_families = [fam1]
+
+ qualifier_encoded = base64.standard_b64encode(b"q").decode("ascii")
+ exp_resource = {
+ "sourceFormat": "BIGTABLE",
+ "bigtableOptions": {
+ "ignoreUnspecifiedColumnFamilies": True,
+ "readRowkeyAsString": False,
+ "columnFamilies": [
+ {
+ "familyId": "familyId",
+ "type": "type",
+ "encoding": "encoding",
+ "columns": [
+ {
+ "qualifierString": "q",
+ "fieldName": "fieldName1",
+ "type": "type1",
+ "encoding": "encoding1",
+ "onlyReadLatest": True,
+ },
+ {
+ "qualifierEncoded": qualifier_encoded,
+ "fieldName": "fieldName2",
+ "type": "type2",
+ "encoding": "encoding2",
+ },
+ ],
+ "onlyReadLatest": False,
+ }
+ ],
+ },
+ }
+
+ got_resource = ec.to_api_repr()
+
+ self.assertEqual(got_resource, exp_resource)
+
+ def test_avro_options_getter_and_setter(self):
+ from google.cloud.bigquery.external_config import AvroOptions
+
+ options = AvroOptions.from_api_repr({"useAvroLogicalTypes": True})
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.AVRO)
+
+ self.assertIsNone(ec.avro_options.use_avro_logical_types)
+
+ ec.avro_options = options
+
+ self.assertTrue(ec.avro_options.use_avro_logical_types)
+ self.assertIs(
+ ec.options._properties, ec._properties[AvroOptions._RESOURCE_NAME]
+ )
+ self.assertIs(
+ ec.avro_options._properties, ec._properties[AvroOptions._RESOURCE_NAME]
+ )
+
+ def test_avro_options_getter_empty(self):
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.AVRO)
+ self.assertIsNotNone(ec.avro_options)
+
+ def test_avro_options_getter_wrong_format(self):
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+ self.assertIsNone(ec.avro_options)
+
+ def test_avro_options_setter_wrong_format(self):
+ from google.cloud.bigquery.format_options import AvroOptions
+
+ options = AvroOptions()
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+
+ with self.assertRaisesRegex(TypeError, "Cannot set.*source format is CSV"):
+ ec.avro_options = options
+
+ def test_bigtable_options_getter_and_setter(self):
+ from google.cloud.bigquery.external_config import BigtableOptions
+
+ options = BigtableOptions.from_api_repr(
+ {"ignoreUnspecifiedColumnFamilies": True, "readRowkeyAsString": False}
+ )
+ ec = external_config.ExternalConfig(
+ external_config.ExternalSourceFormat.BIGTABLE
+ )
+
+ self.assertIsNone(ec.bigtable_options.ignore_unspecified_column_families)
+ self.assertIsNone(ec.bigtable_options.read_rowkey_as_string)
+
+ ec.bigtable_options = options
+
+ self.assertTrue(ec.bigtable_options.ignore_unspecified_column_families)
+ self.assertFalse(ec.bigtable_options.read_rowkey_as_string)
+ self.assertIs(
+ ec.options._properties, ec._properties[BigtableOptions._RESOURCE_NAME]
+ )
+ self.assertIs(
+ ec.bigtable_options._properties,
+ ec._properties[BigtableOptions._RESOURCE_NAME],
+ )
+
+ def test_bigtable_options_getter_empty(self):
+ ec = external_config.ExternalConfig(
+ external_config.ExternalSourceFormat.BIGTABLE
+ )
+ self.assertIsNotNone(ec.bigtable_options)
+
+ def test_bigtable_options_getter_wrong_format(self):
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+ self.assertIsNone(ec.bigtable_options)
+
+ def test_bigtable_options_setter_wrong_format(self):
+ from google.cloud.bigquery.external_config import BigtableOptions
+
+ options = BigtableOptions()
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+
+ with self.assertRaisesRegex(TypeError, "Cannot set.*source format is CSV"):
+ ec.bigtable_options = options
+
+ def test_csv_options_getter_and_setter(self):
+ from google.cloud.bigquery.external_config import CSVOptions
+
+ options = CSVOptions.from_api_repr(
+ {
+ "allowJaggedRows": True,
+ "allowQuotedNewlines": False,
+ "preserveAsciiControlCharacters": False,
+ }
+ )
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+
+ self.assertIsNone(ec.csv_options.allow_jagged_rows)
+ self.assertIsNone(ec.csv_options.allow_quoted_newlines)
+ self.assertIsNone(ec.csv_options.preserve_ascii_control_characters)
+
+ ec.csv_options = options
+
+ self.assertTrue(ec.csv_options.allow_jagged_rows)
+ self.assertFalse(ec.csv_options.allow_quoted_newlines)
+ self.assertFalse(ec.csv_options.preserve_ascii_control_characters)
+ self.assertIs(ec.options._properties, ec._properties[CSVOptions._RESOURCE_NAME])
+ self.assertIs(
+ ec.csv_options._properties, ec._properties[CSVOptions._RESOURCE_NAME]
+ )
+
+ def test_csv_options_getter_empty(self):
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+ self.assertIsNotNone(ec.csv_options)
+
+ def test_csv_options_getter_wrong_format(self):
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.AVRO)
+ self.assertIsNone(ec.csv_options)
+
+ def test_csv_options_setter_wrong_format(self):
+ from google.cloud.bigquery.external_config import CSVOptions
+
+ options = CSVOptions()
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.AVRO)
+
+ with self.assertRaisesRegex(TypeError, "Cannot set.*source format is AVRO"):
+ ec.csv_options = options
+
+ def test_google_sheets_options_getter_and_setter(self):
+ from google.cloud.bigquery.external_config import GoogleSheetsOptions
+
+ options = GoogleSheetsOptions.from_api_repr({"skipLeadingRows": "123"})
+ ec = external_config.ExternalConfig(
+ external_config.ExternalSourceFormat.GOOGLE_SHEETS
+ )
+
+ self.assertIsNone(ec.google_sheets_options.skip_leading_rows)
+
+ ec.google_sheets_options = options
+
+ self.assertEqual(ec.google_sheets_options.skip_leading_rows, 123)
+ self.assertIs(
+ ec.options._properties, ec._properties[GoogleSheetsOptions._RESOURCE_NAME]
+ )
+ self.assertIs(
+ ec.google_sheets_options._properties,
+ ec._properties[GoogleSheetsOptions._RESOURCE_NAME],
+ )
+
+ def test_google_sheets_options_getter_empty(self):
+ ec = external_config.ExternalConfig(
+ external_config.ExternalSourceFormat.GOOGLE_SHEETS
+ )
+ self.assertIsNotNone(ec.google_sheets_options)
+
+ def test_google_sheets_options_getter_wrong_format(self):
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+ self.assertIsNone(ec.google_sheets_options)
+
+ def test_google_sheets_options_setter_wrong_format(self):
+ from google.cloud.bigquery.external_config import GoogleSheetsOptions
+
+ options = GoogleSheetsOptions()
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+
+ with self.assertRaisesRegex(TypeError, "Cannot set.*source format is CSV"):
+ ec.google_sheets_options = options
+
+ def test_parquet_options_getter_and_setter(self):
+ from google.cloud.bigquery.format_options import ParquetOptions
+
+ options = ParquetOptions.from_api_repr(
+ {"enumAsString": True, "enableListInference": False}
+ )
+ ec = external_config.ExternalConfig(
+ external_config.ExternalSourceFormat.PARQUET
+ )
+
+ self.assertIsNone(ec.parquet_options.enum_as_string)
+ self.assertIsNone(ec.parquet_options.enable_list_inference)
+
+ ec.parquet_options = options
+
+ self.assertTrue(ec.parquet_options.enum_as_string)
+ self.assertFalse(ec.parquet_options.enable_list_inference)
+ self.assertIs(
+ ec.options._properties, ec._properties[ParquetOptions._RESOURCE_NAME]
+ )
+ self.assertIs(
+ ec.parquet_options._properties,
+ ec._properties[ParquetOptions._RESOURCE_NAME],
+ )
+
+ def test_parquet_options_set_properties(self):
+ """Check that setting sub-properties works without having to create a
+ new ParquetOptions instance.
+
+ This is required for compatibility with previous
+ ExternalConfig._options implementation.
+ """
+
+ ec = external_config.ExternalConfig(
+ external_config.ExternalSourceFormat.PARQUET
+ )
+
+ self.assertIsNone(ec.parquet_options.enum_as_string)
+ self.assertIsNone(ec.parquet_options.enable_list_inference)
+
+ ec.parquet_options.enum_as_string = True
+ ec.parquet_options.enable_list_inference = False
+
+ self.assertTrue(ec.options.enum_as_string)
+ self.assertFalse(ec.options.enable_list_inference)
+ self.assertTrue(ec.parquet_options.enum_as_string)
+ self.assertFalse(ec.parquet_options.enable_list_inference)
+
+ def test_parquet_options_getter_empty(self):
+ ec = external_config.ExternalConfig(
+ external_config.ExternalSourceFormat.PARQUET
+ )
+ self.assertIsNotNone(ec.parquet_options)
+
+ def test_parquet_options_getter_non_parquet_format(self):
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+ self.assertIsNone(ec.parquet_options)
+
+ def test_parquet_options_setter_non_parquet_format(self):
+ from google.cloud.bigquery.format_options import ParquetOptions
+
+ parquet_options = ParquetOptions.from_api_repr(
+ {"enumAsString": False, "enableListInference": True}
+ )
+ ec = external_config.ExternalConfig(external_config.ExternalSourceFormat.CSV)
+
+ with self.assertRaisesRegex(TypeError, "Cannot set.*source format is CSV"):
+ ec.parquet_options = parquet_options
+
+ def test_from_api_repr_parquet(self):
+ from google.cloud.bigquery.format_options import ParquetOptions
+
+ resource = _copy_and_update(
+ self.BASE_RESOURCE,
+ {
+ "sourceFormat": "PARQUET",
+ "parquetOptions": {"enumAsString": True, "enableListInference": False},
+ },
+ )
+
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+
+ self._verify_base(ec)
+ self.assertEqual(ec.source_format, external_config.ExternalSourceFormat.PARQUET)
+ self.assertIsInstance(ec.options, ParquetOptions)
+ self.assertTrue(ec.parquet_options.enum_as_string)
+ self.assertFalse(ec.parquet_options.enable_list_inference)
+
+ got_resource = ec.to_api_repr()
+
+ self.assertEqual(got_resource, resource)
+
+ del resource["parquetOptions"]["enableListInference"]
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+ self.assertIsNone(ec.options.enable_list_inference)
+ got_resource = ec.to_api_repr()
+ self.assertEqual(got_resource, resource)
+
+ def test_to_api_repr_parquet(self):
+ from google.cloud.bigquery.format_options import ParquetOptions
+
+ ec = external_config.ExternalConfig(
+ external_config.ExternalSourceFormat.PARQUET
+ )
+ options = ParquetOptions.from_api_repr(
+ dict(enumAsString=False, enableListInference=True)
+ )
+ ec.parquet_options = options
+
+ exp_resource = {
+ "sourceFormat": external_config.ExternalSourceFormat.PARQUET,
+ "parquetOptions": {"enumAsString": False, "enableListInference": True},
+ }
+
+ got_resource = ec.to_api_repr()
+
+ self.assertEqual(got_resource, exp_resource)
+
+ def test_from_api_repr_decimal_target_types(self):
+ from google.cloud.bigquery.enums import DecimalTargetType
+
+ resource = _copy_and_update(
+ self.BASE_RESOURCE,
+ {
+ "sourceFormat": "FORMAT_FOO",
+ "decimalTargetTypes": [DecimalTargetType.NUMERIC],
+ },
+ )
+
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+
+ self._verify_base(ec)
+ self.assertEqual(ec.source_format, "FORMAT_FOO")
+ self.assertEqual(
+ ec.decimal_target_types, frozenset([DecimalTargetType.NUMERIC])
+ )
+
+ # converting back to API representation should yield the same result
+ got_resource = ec.to_api_repr()
+ self.assertEqual(got_resource, resource)
+
+ del resource["decimalTargetTypes"]
+ ec = external_config.ExternalConfig.from_api_repr(resource)
+ self.assertIsNone(ec.decimal_target_types)
+
+ got_resource = ec.to_api_repr()
+ self.assertEqual(got_resource, resource)
+
+ def test_to_api_repr_decimal_target_types(self):
+ from google.cloud.bigquery.enums import DecimalTargetType
+
+ ec = external_config.ExternalConfig("FORMAT_FOO")
+ ec.decimal_target_types = [DecimalTargetType.NUMERIC, DecimalTargetType.STRING]
+
+ got_resource = ec.to_api_repr()
+
+ expected_resource = {
+ "sourceFormat": "FORMAT_FOO",
+ "decimalTargetTypes": [DecimalTargetType.NUMERIC, DecimalTargetType.STRING],
+ }
+ self.assertEqual(got_resource, expected_resource)
+
+ def test_to_api_repr_decimal_target_types_unset(self):
+ from google.cloud.bigquery.enums import DecimalTargetType
+
+ ec = external_config.ExternalConfig("FORMAT_FOO")
+ ec._properties["decimalTargetTypes"] = [DecimalTargetType.NUMERIC]
+ ec.decimal_target_types = None
+
+ got_resource = ec.to_api_repr()
+
+ expected_resource = {"sourceFormat": "FORMAT_FOO"}
+ self.assertEqual(got_resource, expected_resource)
+
+ ec.decimal_target_types = None # No error if unsetting when already unset.
+
+
+class BigtableOptions(unittest.TestCase):
+ def test_to_api_repr(self):
+ options = external_config.BigtableOptions()
+ family1 = external_config.BigtableColumnFamily()
+ column1 = external_config.BigtableColumn()
+ column1.qualifier_string = "col1"
+ column1.field_name = "bqcol1"
+ column1.type_ = "FLOAT"
+ column1.encoding = "TEXT"
+ column1.only_read_latest = True
+ column2 = external_config.BigtableColumn()
+ column2.qualifier_encoded = b"col2"
+ column2.field_name = "bqcol2"
+ column2.type_ = "STRING"
+ column2.only_read_latest = False
+ family1.family_id = "family1"
+ family1.type_ = "INTEGER"
+ family1.encoding = "BINARY"
+ family1.columns = [column1, column2]
+ family1.only_read_latest = False
+ family2 = external_config.BigtableColumnFamily()
+ column3 = external_config.BigtableColumn()
+ column3.qualifier_string = "col3"
+ family2.family_id = "family2"
+ family2.type_ = "BYTES"
+ family2.encoding = "TEXT"
+ family2.columns = [column3]
+ family2.only_read_latest = True
+ options.column_families = [family1, family2]
+ options.ignore_unspecified_column_families = False
+ options.read_rowkey_as_string = True
+
+ resource = options.to_api_repr()
+
+ expected_column_families = [
+ {
+ "familyId": "family1",
+ "type": "INTEGER",
+ "encoding": "BINARY",
+ "columns": [
+ {
+ "qualifierString": "col1",
+ "fieldName": "bqcol1",
+ "type": "FLOAT",
+ "encoding": "TEXT",
+ "onlyReadLatest": True,
+ },
+ {
+ "qualifierEncoded": "Y29sMg==",
+ "fieldName": "bqcol2",
+ "type": "STRING",
+ "onlyReadLatest": False,
+ },
+ ],
+ "onlyReadLatest": False,
+ },
+ {
+ "familyId": "family2",
+ "type": "BYTES",
+ "encoding": "TEXT",
+ "columns": [{"qualifierString": "col3"}],
+ "onlyReadLatest": True,
+ },
+ ]
+ self.maxDiff = None
+ self.assertEqual(
+ resource,
+ {
+ "columnFamilies": expected_column_families,
+ "ignoreUnspecifiedColumnFamilies": False,
+ "readRowkeyAsString": True,
+ },
+ )
+
+
+class CSVOptions(unittest.TestCase):
+ def test_to_api_repr(self):
+ options = external_config.CSVOptions()
+ options.field_delimiter = "\t"
+ options.skip_leading_rows = 42
+ options.quote_character = '"'
+ options.allow_quoted_newlines = True
+ options.allow_jagged_rows = False
+ options.encoding = "UTF-8"
+ options.preserve_ascii_control_characters = False
+
+ resource = options.to_api_repr()
+
+ self.assertEqual(
+ resource,
+ {
+ "fieldDelimiter": "\t",
+ "skipLeadingRows": "42",
+ "quote": '"',
+ "allowQuotedNewlines": True,
+ "allowJaggedRows": False,
+ "encoding": "UTF-8",
+ "preserveAsciiControlCharacters": False,
+ },
+ )
+
+
+class TestGoogleSheetsOptions(unittest.TestCase):
+ def test_to_api_repr(self):
+ options = external_config.GoogleSheetsOptions()
+ options.range = "sheet1!A1:B20"
+ options.skip_leading_rows = 107
+
+ resource = options.to_api_repr()
+
+ self.assertEqual(resource, {"range": "sheet1!A1:B20", "skipLeadingRows": "107"})
+
+
+def _copy_and_update(d, u):
+ d = copy.deepcopy(d)
+ d.update(u)
+ return d
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_format_options.py b/testbed/googleapis__python-bigquery/tests/unit/test_format_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..94a01570fed87739a52b1a07c9a30978019c796c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_format_options.py
@@ -0,0 +1,70 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class TestAvroOptions:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.format_options import AvroOptions
+
+ return AvroOptions
+
+ def test_ctor(self):
+ config = self._get_target_class()()
+ assert config.use_avro_logical_types is None
+
+ def test_from_api_repr(self):
+ config = self._get_target_class().from_api_repr({"useAvroLogicalTypes": True})
+ assert config.use_avro_logical_types
+
+ def test_to_api_repr(self):
+ config = self._get_target_class()()
+ config.use_avro_logical_types = False
+
+ result = config.to_api_repr()
+ assert result == {"useAvroLogicalTypes": False}
+
+
+class TestParquetOptions:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.format_options import ParquetOptions
+
+ return ParquetOptions
+
+ def test_ctor(self):
+ config = self._get_target_class()()
+ assert config.enum_as_string is None
+ assert config.enable_list_inference is None
+
+ def test_from_api_repr(self):
+ config = self._get_target_class().from_api_repr(
+ {"enumAsString": False, "enableListInference": True}
+ )
+ assert not config.enum_as_string
+ assert config.enable_list_inference
+ assert config.map_target_type is None
+
+ def test_to_api_repr(self):
+ config = self._get_target_class()()
+ config.enum_as_string = True
+ config.enable_list_inference = False
+ config.map_target_type = "ARRAY_OF_STRUCT"
+
+ result = config.to_api_repr()
+ assert result == {
+ "enumAsString": True,
+ "enableListInference": False,
+ "mapTargetType": "ARRAY_OF_STRUCT",
+ }
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_job_retry.py b/testbed/googleapis__python-bigquery/tests/unit/test_job_retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..958986052adb0d6bbbef01ac0b41eb645cf98b80
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_job_retry.py
@@ -0,0 +1,607 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import re
+from unittest import mock
+
+import pytest
+
+import google.api_core.exceptions
+import google.api_core.retry
+import freezegun
+import requests.exceptions
+
+from google.cloud.bigquery import _job_helpers
+import google.cloud.bigquery.retry
+
+from .helpers import make_client, make_connection
+
+
+_RETRY_NOT_FOUND = {
+ "job_retry": google.api_core.retry.Retry(
+ predicate=google.api_core.retry.if_exception_type(
+ google.api_core.exceptions.NotFound,
+ ),
+ ),
+}
+_RETRY_BAD_REQUEST = {
+ "job_retry": google.api_core.retry.Retry(
+ predicate=google.api_core.retry.if_exception_type(
+ google.api_core.exceptions.BadRequest,
+ ),
+ ),
+}
+
+
+# Test retry of job failures, instead of API-invocation failures. 4 scenarios:
+# - No `job_retry` passed, retry on default rateLimitExceeded.
+# - Pass NotFound retry to `query`.
+# - Pass NotFound retry to `result`.
+# - Pass BadRequest retry to query, with the value passed to `result` overriding.
+@mock.patch("time.sleep")
+@pytest.mark.parametrize(
+ "reason, job_retry, result_retry",
+ [
+ pytest.param(
+ "rateLimitExceeded",
+ {},
+ {},
+ id="no job_retry",
+ ),
+ pytest.param(
+ "notFound",
+ _RETRY_NOT_FOUND,
+ {},
+ id="Query NotFound",
+ ),
+ pytest.param(
+ "notFound",
+ _RETRY_NOT_FOUND,
+ _RETRY_NOT_FOUND,
+ id="Result NotFound",
+ ),
+ pytest.param(
+ "notFound",
+ _RETRY_BAD_REQUEST,
+ _RETRY_NOT_FOUND,
+ id="BadRequest",
+ ),
+ ],
+)
+def test_retry_failed_jobs(sleep, reason, job_retry, result_retry):
+ client = make_client()
+ err = dict(reason=reason)
+ conn = client._connection = make_connection(
+ dict(
+ status=dict(state="DONE", errors=[err], errorResult=err),
+ jobReference={"jobId": "id_1"},
+ ),
+ dict(
+ status=dict(state="DONE", errors=[err], errorResult=err),
+ jobReference={"jobId": "id_1"},
+ ),
+ dict(
+ status=dict(state="DONE", errors=[err], errorResult=err),
+ jobReference={"jobId": "id_1"},
+ ),
+ dict(status=dict(state="DONE"), jobReference={"jobId": "id_2"}),
+ dict(rows=[{"f": [{"v": "1"}]}], totalRows="1"),
+ )
+
+ job = client.query("select 1", **job_retry)
+ result = job.result(**result_retry)
+
+ assert result.total_rows == 1
+
+ # We made all the calls we expected to.
+ assert conn.api_request.call_count == 5
+
+ # The job adjusts it's job id based on the id of the last attempt.
+ assert job.job_id == "id_2"
+
+ # We had to sleep three times
+ assert len(sleep.mock_calls) == 3
+
+ # Sleeps are random, however they're more than 0
+ assert min(c[1][0] for c in sleep.mock_calls) > 0
+
+ # They're at most 2 * (multiplier**(number of sleeps - 1)) * initial
+ # The default multiplier is 2
+ assert max(c[1][0] for c in sleep.mock_calls) <= 8
+
+ # We can ask for the result again:
+ conn = client._connection = make_connection(
+ dict(rows=[{"f": [{"v": "1"}]}], totalRows="1"),
+ )
+ result = job.result()
+
+ assert result.total_rows == 1
+
+ # We made all the calls we expected to.
+ assert conn.api_request.call_count == 1
+
+ # We wouldn't (and didn't) fail, because we're dealing with a successful job.
+ # So the job id hasn't changed.
+ assert job.job_id == "id_2"
+
+
+def test_retry_connection_error_with_default_retries_and_successful_first_job(
+ monkeypatch, client
+):
+ """
+ Make sure ConnectionError can be retried at `is_job_done` level, even if
+ retries are exhaused by API-level retry.
+
+ Note: Because restart_query_job is set to True only in the case of a
+ confirmed job failure, this should be safe to do even when a job is not
+ idempotent.
+
+ Regression test for issue
+ https://github.com/googleapis/python-bigquery/issues/1929
+ """
+ job_counter = 0
+
+ def make_job_id(*args, **kwargs):
+ nonlocal job_counter
+ job_counter += 1
+ return f"{job_counter}"
+
+ monkeypatch.setattr(_job_helpers, "make_job_id", make_job_id)
+ conn = client._connection = make_connection()
+ project = client.project
+ job_reference_1 = {"projectId": project, "jobId": "1", "location": "test-loc"}
+ NUM_API_RETRIES = 2
+
+ with freezegun.freeze_time(
+ "2024-01-01 00:00:00",
+ # Note: because of exponential backoff and a bit of jitter,
+ # NUM_API_RETRIES will get less accurate the greater the value.
+ # We add 1 because we know there will be at least some additional
+ # calls to fetch the time / sleep before the retry deadline is hit.
+ auto_tick_seconds=(
+ google.cloud.bigquery.retry._DEFAULT_RETRY_DEADLINE / NUM_API_RETRIES
+ )
+ + 1,
+ ):
+ conn.api_request.side_effect = [
+ # jobs.insert
+ {"jobReference": job_reference_1, "status": {"state": "PENDING"}},
+ # jobs.get
+ {"jobReference": job_reference_1, "status": {"state": "RUNNING"}},
+ # jobs.getQueryResults x2
+ requests.exceptions.ConnectionError(),
+ requests.exceptions.ConnectionError(),
+ # jobs.get
+ # Job actually succeeeded, so we shouldn't be restarting the job,
+ # even though we are retrying at the `is_job_done` level.
+ {"jobReference": job_reference_1, "status": {"state": "DONE"}},
+ # jobs.getQueryResults
+ {"jobReference": job_reference_1, "jobComplete": True},
+ ]
+
+ job = client.query("select 1")
+ rows_iter = job.result()
+
+ assert job.done() # Shouldn't make any additional API calls.
+ assert rows_iter is not None
+
+ # Should only have created one job, even though we did call job_retry.
+ assert job_counter == 1
+
+ # Double-check that we made the API calls we expected to make.
+ conn.api_request.assert_has_calls(
+ [
+ # jobs.insert
+ mock.call(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data={
+ "jobReference": {"jobId": "1", "projectId": "PROJECT"},
+ "configuration": {
+ "query": {"useLegacySql": False, "query": "select 1"}
+ },
+ },
+ timeout=None,
+ ),
+ # jobs.get
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/jobs/1",
+ query_params={"location": "test-loc", "projection": "full"},
+ timeout=google.cloud.bigquery.retry.DEFAULT_GET_JOB_TIMEOUT,
+ ),
+ # jobs.getQueryResults x2
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/queries/1",
+ query_params={"maxResults": 0, "location": "test-loc"},
+ timeout=None,
+ ),
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/queries/1",
+ query_params={"maxResults": 0, "location": "test-loc"},
+ timeout=None,
+ ),
+ # jobs.get -- is_job_done checking again
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/jobs/1",
+ query_params={"location": "test-loc", "projection": "full"},
+ timeout=google.cloud.bigquery.retry.DEFAULT_GET_JOB_TIMEOUT,
+ ),
+ # jobs.getQueryResults
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/queries/1",
+ query_params={"maxResults": 0, "location": "test-loc"},
+ timeout=None,
+ ),
+ ],
+ )
+
+
+def test_query_retry_with_default_retry_and_ambiguous_errors_only_retries_with_failed_job(
+ client, monkeypatch
+):
+ """
+ Some errors like 'rateLimitExceeded' can be ambiguous. Make sure we only
+ retry the job when we know for sure that the job has failed for a retriable
+ reason. We can only be sure after a "successful" call to jobs.get to fetch
+ the failed job status.
+ """
+ job_counter = 0
+
+ def make_job_id(*args, **kwargs):
+ nonlocal job_counter
+ job_counter += 1
+ return f"{job_counter}"
+
+ monkeypatch.setattr(_job_helpers, "make_job_id", make_job_id)
+
+ project = client.project
+ job_reference_1 = {"projectId": project, "jobId": "1", "location": "test-loc"}
+ job_reference_2 = {"projectId": project, "jobId": "2", "location": "test-loc"}
+ NUM_API_RETRIES = 2
+
+ # This error is modeled after a real customer exception in
+ # https://github.com/googleapis/python-bigquery/issues/707.
+ internal_error = google.api_core.exceptions.InternalServerError(
+ "Job failed just because...",
+ errors=[
+ {"reason": "internalError"},
+ ],
+ )
+ responses = [
+ # jobs.insert
+ {"jobReference": job_reference_1, "status": {"state": "PENDING"}},
+ # jobs.get
+ {"jobReference": job_reference_1, "status": {"state": "RUNNING"}},
+ # jobs.getQueryResults x2
+ #
+ # Note: internalError is ambiguous in jobs.getQueryResults. The
+ # problem could be at the Google Frontend level or it could be because
+ # the job has failed due to some transient issues and the BigQuery
+ # REST API is translating the job failed status into failure HTTP
+ # codes.
+ #
+ # TODO(GH#1903): We shouldn't retry nearly this many times when we get
+ # ambiguous errors from jobs.getQueryResults.
+ # See: https://github.com/googleapis/python-bigquery/issues/1903
+ internal_error,
+ internal_error,
+ # jobs.get -- the job has failed
+ {
+ "jobReference": job_reference_1,
+ "status": {"state": "DONE", "errorResult": {"reason": "internalError"}},
+ },
+ # jobs.insert
+ {"jobReference": job_reference_2, "status": {"state": "PENDING"}},
+ # jobs.get
+ {"jobReference": job_reference_2, "status": {"state": "RUNNING"}},
+ # jobs.getQueryResults
+ {"jobReference": job_reference_2, "jobComplete": True},
+ # jobs.get
+ {"jobReference": job_reference_2, "status": {"state": "DONE"}},
+ ]
+
+ conn = client._connection = make_connection(*responses)
+
+ with freezegun.freeze_time(
+ # Note: because of exponential backoff and a bit of jitter,
+ # NUM_API_RETRIES will get less accurate the greater the value.
+ # We add 1 because we know there will be at least some additional
+ # calls to fetch the time / sleep before the retry deadline is hit.
+ auto_tick_seconds=(
+ google.cloud.bigquery.retry._DEFAULT_RETRY_DEADLINE / NUM_API_RETRIES
+ )
+ + 1,
+ ):
+ job = client.query("select 1")
+ job.result()
+
+ conn.api_request.assert_has_calls(
+ [
+ # jobs.insert
+ mock.call(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data={
+ "jobReference": {"jobId": "1", "projectId": "PROJECT"},
+ "configuration": {
+ "query": {"useLegacySql": False, "query": "select 1"}
+ },
+ },
+ timeout=None,
+ ),
+ # jobs.get
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/jobs/1",
+ query_params={"location": "test-loc", "projection": "full"},
+ timeout=google.cloud.bigquery.retry.DEFAULT_GET_JOB_TIMEOUT,
+ ),
+ # jobs.getQueryResults x2
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/queries/1",
+ query_params={"maxResults": 0, "location": "test-loc"},
+ timeout=None,
+ ),
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/queries/1",
+ query_params={"maxResults": 0, "location": "test-loc"},
+ timeout=None,
+ ),
+ # jobs.get -- verify that the job has failed
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/jobs/1",
+ query_params={"location": "test-loc", "projection": "full"},
+ timeout=google.cloud.bigquery.retry.DEFAULT_GET_JOB_TIMEOUT,
+ ),
+ # jobs.insert
+ mock.call(
+ method="POST",
+ path="/projects/PROJECT/jobs",
+ data={
+ "jobReference": {
+ # Make sure that we generated a new job ID.
+ "jobId": "2",
+ "projectId": "PROJECT",
+ },
+ "configuration": {
+ "query": {"useLegacySql": False, "query": "select 1"}
+ },
+ },
+ timeout=None,
+ ),
+ # jobs.get
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/jobs/2",
+ query_params={"location": "test-loc", "projection": "full"},
+ timeout=google.cloud.bigquery.retry.DEFAULT_GET_JOB_TIMEOUT,
+ ),
+ # jobs.getQueryResults
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/queries/2",
+ query_params={"maxResults": 0, "location": "test-loc"},
+ timeout=None,
+ ),
+ # jobs.get
+ mock.call(
+ method="GET",
+ path="/projects/PROJECT/jobs/2",
+ query_params={"location": "test-loc", "projection": "full"},
+ timeout=google.cloud.bigquery.retry.DEFAULT_GET_JOB_TIMEOUT,
+ ),
+ ]
+ )
+
+
+# With job_retry_on_query, we're testing 4 scenarios:
+# - Pass None retry to `query`.
+# - Pass None retry to `result`.
+@pytest.mark.parametrize("job_retry_on_query", ["Query", "Result"])
+@mock.patch("time.sleep")
+def test_disable_retry_failed_jobs(sleep, client, job_retry_on_query):
+ """
+ Test retry of job failures, as opposed to API-invocation failures.
+ """
+ err = dict(reason="rateLimitExceeded")
+ responses = [dict(status=dict(state="DONE", errors=[err], errorResult=err))] * 3
+
+ def api_request(method, path, query_params=None, data=None, **kw):
+ response = responses.pop(0)
+ response["jobReference"] = data["jobReference"]
+ return response
+
+ conn = client._connection = make_connection()
+ conn.api_request.side_effect = api_request
+
+ if job_retry_on_query == "Query":
+ job_retry = dict(job_retry=None)
+ else:
+ job_retry = {}
+ job = client.query("select 1", **job_retry)
+
+ orig_job_id = job.job_id
+ job_retry = dict(job_retry=None) if job_retry_on_query == "Result" else {}
+ with pytest.raises(google.api_core.exceptions.TooManyRequests):
+ job.result(**job_retry)
+
+ assert job.job_id == orig_job_id
+ assert len(sleep.mock_calls) == 0
+
+
+@mock.patch("time.sleep")
+def test_retry_failed_jobs_after_retry_failed(sleep, client):
+ """
+ If at first you don't succeed, maybe you will later. :)
+ """
+ conn = client._connection = make_connection()
+
+ with freezegun.freeze_time("2024-01-01 00:00:00") as frozen_datetime:
+ err = dict(reason="rateLimitExceeded")
+
+ def api_request(method, path, query_params=None, data=None, **kw):
+ calls = sleep.mock_calls
+ if calls:
+ frozen_datetime.tick(delta=datetime.timedelta(seconds=calls[-1][1][0]))
+ response = dict(status=dict(state="DONE", errors=[err], errorResult=err))
+ response["jobReference"] = data["jobReference"]
+ return response
+
+ conn.api_request.side_effect = api_request
+
+ job = client.query("select 1")
+ orig_job_id = job.job_id
+
+ with pytest.raises(google.api_core.exceptions.RetryError):
+ job.result()
+
+ # We retried the job at least once, so we should have generated a new job ID.
+ assert job.job_id != orig_job_id
+
+ # We failed because we couldn't succeed after 120 seconds.
+ # But we can try again:
+ err2 = dict(reason="backendError") # We also retry on this
+ responses = [
+ dict(status=dict(state="DONE", errors=[err2], errorResult=err2)),
+ dict(status=dict(state="DONE", errors=[err], errorResult=err)),
+ dict(status=dict(state="DONE", errors=[err2], errorResult=err2)),
+ dict(status=dict(state="DONE")),
+ dict(rows=[{"f": [{"v": "1"}]}], totalRows="1"),
+ ]
+
+ def api_request(method, path, query_params=None, data=None, **kw):
+ calls = sleep.mock_calls
+ frozen_datetime.tick(delta=datetime.timedelta(seconds=calls[-1][1][0]))
+ response = responses.pop(0)
+ if data:
+ response["jobReference"] = data["jobReference"]
+ else:
+ response["jobReference"] = dict(
+ jobId=path.split("/")[-1], projectId="PROJECT"
+ )
+ return response
+
+ conn.api_request.side_effect = api_request
+ result = job.result()
+ assert result.total_rows == 1
+ assert not responses # We made all the calls we expected to.
+ assert job.job_id != orig_job_id
+
+
+def test_raises_on_job_retry_on_query_with_non_retryable_jobs(client):
+ with pytest.raises(
+ TypeError,
+ match=re.escape(
+ "`job_retry` was provided, but the returned job is"
+ " not retryable, because a custom `job_id` was"
+ " provided."
+ ),
+ ):
+ client.query("select 42", job_id=42, job_retry=google.api_core.retry.Retry())
+
+
+def test_raises_on_job_retry_on_result_with_non_retryable_jobs(client):
+ client._connection = make_connection({})
+ job = client.query("select 42", job_id=42)
+ with pytest.raises(
+ TypeError,
+ match=re.escape(
+ "`job_retry` was provided, but this job is"
+ " not retryable, because a custom `job_id` was"
+ " provided to the query that created this job."
+ ),
+ ):
+ job.result(job_retry=google.api_core.retry.Retry())
+
+
+def test_query_and_wait_retries_job_for_DDL_queries():
+ """
+ Specific test for retrying DDL queries with "jobRateLimitExceeded" error:
+ https://github.com/googleapis/python-bigquery/issues/1790
+ """
+ freezegun.freeze_time(auto_tick_seconds=1)
+
+ client = make_client()
+ conn = client._connection = make_connection(
+ {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "abc",
+ "location": "response-location",
+ },
+ "jobComplete": False,
+ },
+ google.api_core.exceptions.InternalServerError(
+ "job_retry me", errors=[{"reason": "jobRateLimitExceeded"}]
+ ),
+ google.api_core.exceptions.BadRequest(
+ "retry me", errors=[{"reason": "jobRateLimitExceeded"}]
+ ),
+ {
+ "jobReference": {
+ "projectId": "response-project",
+ "jobId": "abc",
+ "location": "response-location",
+ },
+ "jobComplete": True,
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INT64", "mode": "NULLABLE"},
+ ],
+ },
+ "rows": [
+ {"f": [{"v": "Whillma Phlyntstone"}, {"v": "27"}]},
+ {"f": [{"v": "Bhetty Rhubble"}, {"v": "28"}]},
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ],
+ },
+ )
+ rows = _job_helpers.query_and_wait(
+ client,
+ query="SELECT 1",
+ location="request-location",
+ project="request-project",
+ job_config=None,
+ page_size=None,
+ max_results=None,
+ retry=google.cloud.bigquery.retry.DEFAULT_RETRY,
+ job_retry=google.cloud.bigquery.retry.DEFAULT_JOB_RETRY,
+ )
+ assert len(list(rows)) == 4
+
+ # Relevant docs for the REST API path: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query
+ # and https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults
+ query_request_path = "/projects/request-project/queries"
+
+ calls = conn.api_request.call_args_list
+ _, kwargs = calls[0]
+ assert kwargs["method"] == "POST"
+ assert kwargs["path"] == query_request_path
+
+ # TODO: Add assertion statements for response paths after PR#1797 is fixed
+
+ _, kwargs = calls[3]
+ assert kwargs["method"] == "POST"
+ assert kwargs["path"] == query_request_path
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_legacy_types.py b/testbed/googleapis__python-bigquery/tests/unit/test_legacy_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..809be1855ced736edf333b1f6de0784cd2730e4a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_legacy_types.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+import warnings
+
+try:
+ import proto # type: ignore
+except ImportError:
+ proto = None
+
+
+@pytest.mark.skipif(proto is None, reason="proto is not installed")
+def test_importing_legacy_types_emits_warning():
+ with warnings.catch_warnings(record=True) as warned:
+ from google.cloud.bigquery_v2 import types # noqa: F401
+
+ assert len(warned) == 1
+ assert warned[0].category is DeprecationWarning
+ warning_msg = str(warned[0])
+ assert "bigquery_v2" in warning_msg
+ assert "not maintained" in warning_msg
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_list_datasets.py b/testbed/googleapis__python-bigquery/tests/unit/test_list_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ef99fd865a1819e37461b3693b3db9ddd5a940c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_list_datasets.py
@@ -0,0 +1,126 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+import pytest
+
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+from .helpers import make_connection
+
+
+@pytest.mark.parametrize(
+ "extra,query", [({}, {}), (dict(page_size=42), dict(maxResults=42))]
+)
+def test_list_datasets_defaults(client, PROJECT, extra, query):
+ from google.cloud.bigquery.dataset import DatasetListItem
+
+ DATASET_1 = "dataset_one"
+ DATASET_2 = "dataset_two"
+ PATH = "projects/%s/datasets" % PROJECT
+ TOKEN = "TOKEN"
+ DATA = {
+ "nextPageToken": TOKEN,
+ "datasets": [
+ {
+ "kind": "bigquery#dataset",
+ "id": "%s:%s" % (PROJECT, DATASET_1),
+ "datasetReference": {"datasetId": DATASET_1, "projectId": PROJECT},
+ "friendlyName": None,
+ },
+ {
+ "kind": "bigquery#dataset",
+ "id": "%s:%s" % (PROJECT, DATASET_2),
+ "datasetReference": {"datasetId": DATASET_2, "projectId": PROJECT},
+ "friendlyName": "Two",
+ },
+ ],
+ }
+ conn = client._connection = make_connection(DATA)
+
+ iterator = client.list_datasets(**extra)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ page = next(iterator.pages)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % PATH}, client, None)
+ datasets = list(page)
+ token = iterator.next_page_token
+
+ assert len(datasets) == len(DATA["datasets"])
+ for found, expected in zip(datasets, DATA["datasets"]):
+ assert isinstance(found, DatasetListItem)
+ assert found.full_dataset_id == expected["id"]
+ assert found.friendly_name == expected["friendlyName"]
+ assert token == TOKEN
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path="/%s" % PATH, query_params=query, timeout=DEFAULT_TIMEOUT
+ )
+
+
+def test_list_datasets_w_project_and_timeout(client, PROJECT):
+ conn = client._connection = make_connection({})
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ list(client.list_datasets(project="other-project", timeout=7.5))
+
+ final_attributes.assert_called_once_with(
+ {"path": "/projects/other-project/datasets"}, client, None
+ )
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/other-project/datasets",
+ query_params={},
+ timeout=7.5,
+ )
+
+
+def test_list_datasets_explicit_response_missing_datasets_key(client, PROJECT):
+ PATH = "projects/%s/datasets" % PROJECT
+ TOKEN = "TOKEN"
+ FILTER = "FILTER"
+ DATA = {}
+ conn = client._connection = make_connection(DATA)
+
+ iterator = client.list_datasets(
+ include_all=True, filter=FILTER, max_results=3, page_token=TOKEN
+ )
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ page = next(iterator.pages)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % PATH}, client, None)
+ datasets = list(page)
+ token = iterator.next_page_token
+
+ assert len(datasets) == 0
+ assert token is None
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/%s" % PATH,
+ query_params={
+ "all": True,
+ "filter": FILTER,
+ "maxResults": 3,
+ "pageToken": TOKEN,
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_list_jobs.py b/testbed/googleapis__python-bigquery/tests/unit/test_list_jobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..edb85af0aa9df7edd22462c2f6f2937a59a3b3d7
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_list_jobs.py
@@ -0,0 +1,292 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+from unittest import mock
+
+import pytest
+
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+from .helpers import make_connection
+
+
+@pytest.mark.parametrize(
+ "extra,query", [({}, {}), (dict(page_size=42), dict(maxResults=42))]
+)
+def test_list_jobs_defaults(client, PROJECT, DS_ID, extra, query):
+ from google.cloud.bigquery.job import CopyJob
+ from google.cloud.bigquery.job import CreateDisposition
+ from google.cloud.bigquery.job import ExtractJob
+ from google.cloud.bigquery.job import LoadJob
+ from google.cloud.bigquery.job import QueryJob
+ from google.cloud.bigquery.job import WriteDisposition
+
+ SOURCE_TABLE = "source_table"
+ DESTINATION_TABLE = "destination_table"
+ QUERY_DESTINATION_TABLE = "query_destination_table"
+ SOURCE_URI = "gs://test_bucket/src_object*"
+ DESTINATION_URI = "gs://test_bucket/dst_object*"
+ JOB_TYPES = {
+ "load_job": LoadJob,
+ "copy_job": CopyJob,
+ "extract_job": ExtractJob,
+ "query_job": QueryJob,
+ }
+ PATH = "projects/%s/jobs" % PROJECT
+ TOKEN = "TOKEN"
+ QUERY = "SELECT * from test_dataset:test_table"
+ ASYNC_QUERY_DATA = {
+ "id": "%s:%s" % (PROJECT, "query_job"),
+ "jobReference": {"projectId": PROJECT, "jobId": "query_job"},
+ "state": "DONE",
+ "configuration": {
+ "query": {
+ "query": QUERY,
+ "destinationTable": {
+ "projectId": PROJECT,
+ "datasetId": DS_ID,
+ "tableId": QUERY_DESTINATION_TABLE,
+ },
+ "createDisposition": CreateDisposition.CREATE_IF_NEEDED,
+ "writeDisposition": WriteDisposition.WRITE_TRUNCATE,
+ }
+ },
+ }
+ EXTRACT_DATA = {
+ "id": "%s:%s" % (PROJECT, "extract_job"),
+ "jobReference": {"projectId": PROJECT, "jobId": "extract_job"},
+ "state": "DONE",
+ "configuration": {
+ "extract": {
+ "sourceTable": {
+ "projectId": PROJECT,
+ "datasetId": DS_ID,
+ "tableId": SOURCE_TABLE,
+ },
+ "destinationUris": [DESTINATION_URI],
+ }
+ },
+ }
+ COPY_DATA = {
+ "id": "%s:%s" % (PROJECT, "copy_job"),
+ "jobReference": {"projectId": PROJECT, "jobId": "copy_job"},
+ "state": "DONE",
+ "configuration": {
+ "copy": {
+ "sourceTables": [
+ {"projectId": PROJECT, "datasetId": DS_ID, "tableId": SOURCE_TABLE}
+ ],
+ "destinationTable": {
+ "projectId": PROJECT,
+ "datasetId": DS_ID,
+ "tableId": DESTINATION_TABLE,
+ },
+ }
+ },
+ }
+ LOAD_DATA = {
+ "id": "%s:%s" % (PROJECT, "load_job"),
+ "jobReference": {"projectId": PROJECT, "jobId": "load_job"},
+ "state": "DONE",
+ "configuration": {
+ "load": {
+ "destinationTable": {
+ "projectId": PROJECT,
+ "datasetId": DS_ID,
+ "tableId": SOURCE_TABLE,
+ },
+ "sourceUris": [SOURCE_URI],
+ }
+ },
+ }
+ DATA = {
+ "nextPageToken": TOKEN,
+ "jobs": [ASYNC_QUERY_DATA, EXTRACT_DATA, COPY_DATA, LOAD_DATA],
+ }
+ conn = client._connection = make_connection(DATA)
+
+ iterator = client.list_jobs(**extra)
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ page = next(iterator.pages)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % PATH}, client, None)
+ jobs = list(page)
+ token = iterator.next_page_token
+
+ assert len(jobs) == len(DATA["jobs"])
+ for found, expected in zip(jobs, DATA["jobs"]):
+ name = expected["jobReference"]["jobId"]
+ assert isinstance(found, JOB_TYPES[name])
+ assert found.job_id == name
+ assert token == TOKEN
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/%s" % PATH,
+ query_params=dict({"projection": "full"}, **query),
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_list_jobs_load_job_wo_sourceUris(client, PROJECT, DS_ID):
+ from google.cloud.bigquery.job import LoadJob
+
+ SOURCE_TABLE = "source_table"
+ JOB_TYPES = {"load_job": LoadJob}
+ PATH = "projects/%s/jobs" % PROJECT
+ TOKEN = "TOKEN"
+ LOAD_DATA = {
+ "id": "%s:%s" % (PROJECT, "load_job"),
+ "jobReference": {"projectId": PROJECT, "jobId": "load_job"},
+ "state": "DONE",
+ "configuration": {
+ "load": {
+ "destinationTable": {
+ "projectId": PROJECT,
+ "datasetId": DS_ID,
+ "tableId": SOURCE_TABLE,
+ }
+ }
+ },
+ }
+ DATA = {"nextPageToken": TOKEN, "jobs": [LOAD_DATA]}
+ conn = client._connection = make_connection(DATA)
+
+ iterator = client.list_jobs()
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ page = next(iterator.pages)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % PATH}, client, None)
+ jobs = list(page)
+ token = iterator.next_page_token
+
+ assert len(jobs) == len(DATA["jobs"])
+ for found, expected in zip(jobs, DATA["jobs"]):
+ name = expected["jobReference"]["jobId"]
+ assert isinstance(found, JOB_TYPES[name])
+ assert found.job_id == name
+ assert token == TOKEN
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/%s" % PATH,
+ query_params={"projection": "full"},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_list_jobs_explicit_missing(client, PROJECT):
+ PATH = "projects/%s/jobs" % PROJECT
+ DATA = {}
+ TOKEN = "TOKEN"
+ conn = client._connection = make_connection(DATA)
+
+ iterator = client.list_jobs(
+ max_results=1000, page_token=TOKEN, all_users=True, state_filter="done"
+ )
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ page = next(iterator.pages)
+
+ final_attributes.assert_called_once_with({"path": "/%s" % PATH}, client, None)
+ jobs = list(page)
+ token = iterator.next_page_token
+
+ assert len(jobs) == 0
+ assert token is None
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/%s" % PATH,
+ query_params={
+ "projection": "full",
+ "maxResults": 1000,
+ "pageToken": TOKEN,
+ "allUsers": True,
+ "stateFilter": "done",
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_list_jobs_w_project(client, PROJECT):
+ conn = client._connection = make_connection({})
+
+ list(client.list_jobs(project="other-project"))
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/other-project/jobs",
+ query_params={"projection": "full"},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_list_jobs_w_timeout(client, PROJECT):
+ conn = client._connection = make_connection({})
+
+ list(client.list_jobs(timeout=7.5))
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/{}/jobs".format(PROJECT),
+ query_params={"projection": "full"},
+ timeout=7.5,
+ )
+
+
+def test_list_jobs_w_time_filter(client, PROJECT):
+ conn = client._connection = make_connection({})
+
+ # One millisecond after the unix epoch.
+ start_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 1000)
+ # One millisecond after the the 2038 31-bit signed int rollover
+ end_time = datetime.datetime(2038, 1, 19, 3, 14, 7, 1000)
+ end_time_millis = (((2**31) - 1) * 1000) + 1
+
+ list(client.list_jobs(min_creation_time=start_time, max_creation_time=end_time))
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/%s/jobs" % PROJECT,
+ query_params={
+ "projection": "full",
+ "minCreationTime": "1",
+ "maxCreationTime": str(end_time_millis),
+ },
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_list_jobs_w_parent_job_filter(client, PROJECT):
+ from google.cloud.bigquery import job
+
+ conn = client._connection = make_connection({}, {})
+
+ parent_job_args = ["parent-job-123", job._AsyncJob("parent-job-123", client)]
+
+ for parent_job in parent_job_args:
+ list(client.list_jobs(parent_job=parent_job))
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/%s/jobs" % PROJECT,
+ query_params={"projection": "full", "parentJobId": "parent-job-123"},
+ timeout=DEFAULT_TIMEOUT,
+ )
+ conn.api_request.reset_mock()
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_list_models.py b/testbed/googleapis__python-bigquery/tests/unit/test_list_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..04932d3572b34acca659980c3a8e997a28d07d44
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_list_models.py
@@ -0,0 +1,99 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+from .helpers import make_connection, dataset_polymorphic
+
+
+def test_list_models_empty_w_timeout(client, PROJECT, DS_ID):
+ path = "/projects/{}/datasets/{}/models".format(PROJECT, DS_ID)
+ conn = client._connection = make_connection({})
+
+ dataset_id = "{}.{}".format(PROJECT, DS_ID)
+ iterator = client.list_models(dataset_id, timeout=7.5)
+ page = next(iterator.pages)
+ models = list(page)
+ token = iterator.next_page_token
+
+ assert models == []
+ assert token is None
+ conn.api_request.assert_called_once_with(
+ method="GET", path=path, query_params={}, timeout=7.5
+ )
+
+
+@pytest.mark.parametrize(
+ "extra,query", [({}, {}), (dict(page_size=42), dict(maxResults=42))]
+)
+@dataset_polymorphic
+def test_list_models_defaults(
+ make_dataset,
+ get_reference,
+ client,
+ PROJECT,
+ DS_ID,
+ extra,
+ query,
+):
+ from google.cloud.bigquery.model import Model
+
+ MODEL_1 = "model_one"
+ MODEL_2 = "model_two"
+ PATH = "projects/%s/datasets/%s/models" % (PROJECT, DS_ID)
+ TOKEN = "TOKEN"
+ DATA = {
+ "nextPageToken": TOKEN,
+ "models": [
+ {
+ "modelReference": {
+ "modelId": MODEL_1,
+ "datasetId": DS_ID,
+ "projectId": PROJECT,
+ }
+ },
+ {
+ "modelReference": {
+ "modelId": MODEL_2,
+ "datasetId": DS_ID,
+ "projectId": PROJECT,
+ }
+ },
+ ],
+ }
+
+ conn = client._connection = make_connection(DATA)
+ dataset = make_dataset(PROJECT, DS_ID)
+
+ iterator = client.list_models(dataset, **extra)
+ assert iterator.dataset == get_reference(dataset)
+ page = next(iterator.pages)
+ models = list(page)
+ token = iterator.next_page_token
+
+ assert len(models) == len(DATA["models"])
+ for found, expected in zip(models, DATA["models"]):
+ assert isinstance(found, Model)
+ assert found.model_id == expected["modelReference"]["modelId"]
+ assert token == TOKEN
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path="/%s" % PATH, query_params=query, timeout=DEFAULT_TIMEOUT
+ )
+
+
+def test_list_models_wrong_type(client):
+ with pytest.raises(TypeError):
+ client.list_models(42)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_list_projects.py b/testbed/googleapis__python-bigquery/tests/unit/test_list_projects.py
new file mode 100644
index 0000000000000000000000000000000000000000..5260e52464740275dafccb10d57ec341db0c0c19
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_list_projects.py
@@ -0,0 +1,121 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+import pytest
+
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+from .helpers import make_connection
+
+
+@pytest.mark.parametrize(
+ "extra,query", [({}, {}), (dict(page_size=42), dict(maxResults=42))]
+)
+def test_list_projects_defaults(client, PROJECT, extra, query):
+ from google.cloud.bigquery.client import Project
+
+ PROJECT_2 = "PROJECT_TWO"
+ TOKEN = "TOKEN"
+ DATA = {
+ "nextPageToken": TOKEN,
+ "projects": [
+ {
+ "kind": "bigquery#project",
+ "id": PROJECT,
+ "numericId": 1,
+ "projectReference": {"projectId": PROJECT},
+ "friendlyName": "One",
+ },
+ {
+ "kind": "bigquery#project",
+ "id": PROJECT_2,
+ "numericId": 2,
+ "projectReference": {"projectId": PROJECT_2},
+ "friendlyName": "Two",
+ },
+ ],
+ }
+ conn = client._connection = make_connection(DATA)
+ iterator = client.list_projects(**extra)
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ page = next(iterator.pages)
+
+ final_attributes.assert_called_once_with({"path": "/projects"}, client, None)
+ projects = list(page)
+ token = iterator.next_page_token
+
+ assert len(projects) == len(DATA["projects"])
+ for found, expected in zip(projects, DATA["projects"]):
+ assert isinstance(found, Project)
+ assert found.project_id == expected["id"]
+ assert found.numeric_id == expected["numericId"]
+ assert found.friendly_name == expected["friendlyName"]
+ assert token == TOKEN
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path="/projects", query_params=query, timeout=DEFAULT_TIMEOUT
+ )
+
+
+def test_list_projects_w_timeout(client):
+ TOKEN = "TOKEN"
+ DATA = {
+ "nextPageToken": TOKEN,
+ "projects": [],
+ }
+ conn = client._connection = make_connection(DATA)
+
+ iterator = client.list_projects(timeout=7.5)
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ next(iterator.pages)
+
+ final_attributes.assert_called_once_with({"path": "/projects"}, client, None)
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path="/projects", query_params={}, timeout=7.5
+ )
+
+
+def test_list_projects_explicit_response_missing_projects_key(client):
+ TOKEN = "TOKEN"
+ DATA = {}
+ conn = client._connection = make_connection(DATA)
+
+ iterator = client.list_projects(max_results=3, page_token=TOKEN)
+
+ with mock.patch(
+ "google.cloud.bigquery.opentelemetry_tracing._get_final_span_attributes"
+ ) as final_attributes:
+ page = next(iterator.pages)
+
+ final_attributes.assert_called_once_with({"path": "/projects"}, client, None)
+ projects = list(page)
+ token = iterator.next_page_token
+
+ assert len(projects) == 0
+ assert token is None
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects",
+ query_params={"maxResults": 3, "pageToken": TOKEN},
+ timeout=DEFAULT_TIMEOUT,
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_list_routines.py b/testbed/googleapis__python-bigquery/tests/unit/test_list_routines.py
new file mode 100644
index 0000000000000000000000000000000000000000..80e62d6bdf018dd7ae13e2cb61b2fe66c5e3ae2c
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_list_routines.py
@@ -0,0 +1,96 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+from .helpers import make_connection, dataset_polymorphic
+
+
+def test_list_routines_empty_w_timeout(client):
+ conn = client._connection = make_connection({})
+
+ iterator = client.list_routines("test-routines.test_routines", timeout=7.5)
+ page = next(iterator.pages)
+ routines = list(page)
+ token = iterator.next_page_token
+
+ assert routines == []
+ assert token is None
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/projects/test-routines/datasets/test_routines/routines",
+ query_params={},
+ timeout=7.5,
+ )
+
+
+@pytest.mark.parametrize(
+ "extra,query", [({}, {}), (dict(page_size=42), dict(maxResults=42))]
+)
+@dataset_polymorphic
+def test_list_routines_defaults(
+ make_dataset, get_reference, client, PROJECT, extra, query
+):
+ from google.cloud.bigquery.routine import Routine
+
+ project_id = PROJECT
+ dataset_id = "test_routines"
+ path = f"/projects/{PROJECT}/datasets/test_routines/routines"
+ routine_1 = "routine_one"
+ routine_2 = "routine_two"
+ token = "TOKEN"
+ resource = {
+ "nextPageToken": token,
+ "routines": [
+ {
+ "routineReference": {
+ "routineId": routine_1,
+ "datasetId": dataset_id,
+ "projectId": project_id,
+ }
+ },
+ {
+ "routineReference": {
+ "routineId": routine_2,
+ "datasetId": dataset_id,
+ "projectId": project_id,
+ }
+ },
+ ],
+ }
+
+ conn = client._connection = make_connection(resource)
+ dataset = make_dataset(client.project, dataset_id)
+
+ iterator = client.list_routines(dataset, **extra)
+ assert iterator.dataset == get_reference(dataset)
+ page = next(iterator.pages)
+ routines = list(page)
+ actual_token = iterator.next_page_token
+
+ assert len(routines) == len(resource["routines"])
+ for found, expected in zip(routines, resource["routines"]):
+ assert isinstance(found, Routine)
+ assert found.routine_id == expected["routineReference"]["routineId"]
+ assert actual_token == token
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path=path, query_params=query, timeout=DEFAULT_TIMEOUT
+ )
+
+
+def test_list_routines_wrong_type(client):
+ with pytest.raises(TypeError):
+ client.list_routines(42)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_list_tables.py b/testbed/googleapis__python-bigquery/tests/unit/test_list_tables.py
new file mode 100644
index 0000000000000000000000000000000000000000..8360f6605630e522b2fe61957300a07e40de8985
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_list_tables.py
@@ -0,0 +1,180 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+import google.cloud.bigquery.dataset
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+from .helpers import make_connection, dataset_polymorphic
+
+
+@dataset_polymorphic
+def test_list_tables_empty_w_timeout(
+ make_dataset, get_reference, client, PROJECT, DS_ID
+):
+ path = "/projects/{}/datasets/{}/tables".format(PROJECT, DS_ID)
+ conn = client._connection = make_connection({})
+
+ dataset = make_dataset(PROJECT, DS_ID)
+ iterator = client.list_tables(dataset, timeout=7.5)
+ assert iterator.dataset == get_reference(dataset)
+ page = next(iterator.pages)
+ tables = list(page)
+ token = iterator.next_page_token
+
+ assert tables == []
+ assert token is None
+ conn.api_request.assert_called_once_with(
+ method="GET", path=path, query_params={}, timeout=7.5
+ )
+
+
+@dataset_polymorphic
+def test_list_tables_defaults(make_dataset, get_reference, client, PROJECT, DS_ID):
+ from google.cloud.bigquery.table import TableListItem
+
+ TABLE_1 = "table_one"
+ TABLE_2 = "table_two"
+ PATH = "projects/%s/datasets/%s/tables" % (PROJECT, DS_ID)
+ TOKEN = "TOKEN"
+ DATA = {
+ "nextPageToken": TOKEN,
+ "tables": [
+ {
+ "kind": "bigquery#table",
+ "id": "%s:%s.%s" % (PROJECT, DS_ID, TABLE_1),
+ "tableReference": {
+ "tableId": TABLE_1,
+ "datasetId": DS_ID,
+ "projectId": PROJECT,
+ },
+ "type": "TABLE",
+ },
+ {
+ "kind": "bigquery#table",
+ "id": "%s:%s.%s" % (PROJECT, DS_ID, TABLE_2),
+ "tableReference": {
+ "tableId": TABLE_2,
+ "datasetId": DS_ID,
+ "projectId": PROJECT,
+ },
+ "type": "TABLE",
+ },
+ ],
+ }
+
+ conn = client._connection = make_connection(DATA)
+ dataset = make_dataset(PROJECT, DS_ID)
+
+ iterator = client.list_tables(dataset)
+ assert iterator.dataset == get_reference(dataset)
+ page = next(iterator.pages)
+ tables = list(page)
+ token = iterator.next_page_token
+
+ assert len(tables) == len(DATA["tables"])
+ for found, expected in zip(tables, DATA["tables"]):
+ assert isinstance(found, TableListItem)
+ assert found.full_table_id == expected["id"]
+ assert found.table_type == expected["type"]
+ assert token == TOKEN
+
+ conn.api_request.assert_called_once_with(
+ method="GET", path="/%s" % PATH, query_params={}, timeout=DEFAULT_TIMEOUT
+ )
+
+
+def test_list_tables_explicit(client, PROJECT, DS_ID):
+ from google.cloud.bigquery.table import TableListItem
+
+ TABLE_1 = "table_one"
+ TABLE_2 = "table_two"
+ PATH = "projects/%s/datasets/%s/tables" % (PROJECT, DS_ID)
+ TOKEN = "TOKEN"
+ DATA = {
+ "tables": [
+ {
+ "kind": "bigquery#dataset",
+ "id": "%s:%s.%s" % (PROJECT, DS_ID, TABLE_1),
+ "tableReference": {
+ "tableId": TABLE_1,
+ "datasetId": DS_ID,
+ "projectId": PROJECT,
+ },
+ "type": "TABLE",
+ },
+ {
+ "kind": "bigquery#dataset",
+ "id": "%s:%s.%s" % (PROJECT, DS_ID, TABLE_2),
+ "tableReference": {
+ "tableId": TABLE_2,
+ "datasetId": DS_ID,
+ "projectId": PROJECT,
+ },
+ "type": "TABLE",
+ },
+ ]
+ }
+
+ conn = client._connection = make_connection(DATA)
+ dataset = google.cloud.bigquery.dataset.DatasetReference(PROJECT, DS_ID)
+
+ iterator = client.list_tables(
+ # Test with string for dataset ID.
+ DS_ID,
+ max_results=3,
+ page_token=TOKEN,
+ )
+ assert iterator.dataset == dataset
+ page = next(iterator.pages)
+ tables = list(page)
+ token = iterator.next_page_token
+
+ assert len(tables) == len(DATA["tables"])
+ for found, expected in zip(tables, DATA["tables"]):
+ assert isinstance(found, TableListItem)
+ assert found.full_table_id == expected["id"]
+ assert found.table_type == expected["type"]
+ assert token is None
+
+ conn.api_request.assert_called_once_with(
+ method="GET",
+ path="/%s" % PATH,
+ query_params={"maxResults": 3, "pageToken": TOKEN},
+ timeout=DEFAULT_TIMEOUT,
+ )
+
+
+def test_list_tables_wrong_type(client):
+ with pytest.raises(TypeError):
+ client.list_tables(42)
+
+
+@dataset_polymorphic
+def test_list_tables_page_size(make_dataset, get_reference, client, PROJECT, DS_ID):
+ path = "/projects/{}/datasets/{}/tables".format(PROJECT, DS_ID)
+ conn = client._connection = make_connection({})
+
+ dataset = make_dataset(PROJECT, DS_ID)
+ iterator = client.list_tables(dataset, timeout=7.5, page_size=42)
+ assert iterator.dataset == get_reference(dataset)
+ page = next(iterator.pages)
+ tables = list(page)
+ token = iterator.next_page_token
+
+ assert tables == []
+ assert token is None
+ conn.api_request.assert_called_once_with(
+ method="GET", path=path, query_params=dict(maxResults=42), timeout=7.5
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_magics.py b/testbed/googleapis__python-bigquery/tests/unit/test_magics.py
new file mode 100644
index 0000000000000000000000000000000000000000..73b29df6b69ebbd886ca58708c378cb57eefd98f
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_magics.py
@@ -0,0 +1,2140 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import re
+from concurrent import futures
+from unittest import mock
+import warnings
+
+from google.api_core import exceptions
+import google.auth.credentials
+import pytest
+from tests.unit.helpers import make_connection
+from test_utils.imports import maybe_fail_import
+
+from google.cloud import bigquery
+from google.cloud.bigquery import exceptions as bq_exceptions
+from google.cloud.bigquery import job
+from google.cloud.bigquery import table
+from google.cloud.bigquery.retry import DEFAULT_TIMEOUT
+
+
+try:
+ from google.cloud.bigquery.magics import magics
+except ImportError:
+ magics = None
+
+bigquery_storage = pytest.importorskip("google.cloud.bigquery_storage")
+IPython = pytest.importorskip("IPython")
+interactiveshell = pytest.importorskip("IPython.terminal.interactiveshell")
+tools = pytest.importorskip("IPython.testing.tools")
+io = pytest.importorskip("IPython.utils.io")
+pandas = pytest.importorskip("pandas")
+
+
+@pytest.fixture(scope="session")
+def ipython():
+ config = tools.default_config()
+ config.TerminalInteractiveShell.simple_prompt = True
+ shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
+ return shell
+
+
+@pytest.fixture()
+def ipython_interactive(request, ipython):
+ """Activate IPython's builtin hooks
+
+ for the duration of the test scope.
+ """
+ with ipython.builtin_trap:
+ yield ipython
+
+
+@pytest.fixture()
+def ipython_ns_cleanup():
+ """A helper to clean up user namespace after the test
+
+ for the duration of the test scope.
+ """
+ names_to_clean = [] # pairs (IPython_instance, name_to_clean)
+
+ yield names_to_clean
+
+ for ip, name in names_to_clean:
+ if name in ip.user_ns:
+ del ip.user_ns[name]
+
+
+@pytest.fixture(scope="session")
+def missing_bq_storage():
+ """Provide a patcher that can make the bigquery storage import to fail."""
+
+ def fail_if(name, globals, locals, fromlist, level):
+ # NOTE: *very* simplified, assuming a straightforward absolute import
+ return "bigquery_storage" in name or (
+ fromlist is not None and "bigquery_storage" in fromlist
+ )
+
+ return maybe_fail_import(predicate=fail_if)
+
+
+@pytest.fixture(scope="session")
+def missing_grpcio_lib():
+ """Provide a patcher that can make the gapic library import to fail."""
+
+ def fail_if(name, globals, locals, fromlist, level):
+ # NOTE: *very* simplified, assuming a straightforward absolute import
+ return "gapic_v1" in name or (fromlist is not None and "gapic_v1" in fromlist)
+
+ return maybe_fail_import(predicate=fail_if)
+
+
+PROJECT_ID = "its-a-project-eh"
+JOB_ID = "some-random-id"
+JOB_REFERENCE_RESOURCE = {"projectId": PROJECT_ID, "jobId": JOB_ID}
+DATASET_ID = "dest_dataset"
+TABLE_ID = "dest_table"
+TABLE_REFERENCE_RESOURCE = {
+ "projectId": PROJECT_ID,
+ "datasetId": DATASET_ID,
+ "tableId": TABLE_ID,
+}
+QUERY_STRING = "SELECT 42 AS the_answer FROM `life.the_universe.and_everything`;"
+QUERY_RESOURCE = {
+ "jobReference": JOB_REFERENCE_RESOURCE,
+ "configuration": {
+ "query": {
+ "destinationTable": TABLE_REFERENCE_RESOURCE,
+ "query": QUERY_STRING,
+ "queryParameters": [],
+ "useLegacySql": False,
+ }
+ },
+ "status": {"state": "DONE"},
+}
+QUERY_RESULTS_RESOURCE = {
+ "jobReference": JOB_REFERENCE_RESOURCE,
+ "totalRows": 1,
+ "jobComplete": True,
+ "schema": {"fields": [{"name": "the_answer", "type": "INTEGER"}]},
+}
+
+
+def test_context_with_default_credentials():
+ """When Application Default Credentials are set, the context credentials
+ will be created the first time it is called
+ """
+ assert magics.context._credentials is None
+ assert magics.context._project is None
+
+ project = "prahj-ekt"
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, project)
+ )
+ with default_patch as default_mock:
+ assert magics.context.credentials is credentials_mock
+ assert magics.context.project == project
+
+ assert default_mock.call_count == 2
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_context_with_default_connection(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._credentials = None
+ magics.context._project = None
+ magics.context._connection = None
+
+ default_credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ credentials_patch = mock.patch(
+ "google.auth.default", return_value=(default_credentials, "project-from-env")
+ )
+ default_conn = make_connection(QUERY_RESOURCE, QUERY_RESULTS_RESOURCE)
+ conn_patch = mock.patch("google.cloud.bigquery.client.Connection", autospec=True)
+ list_rows_patch = mock.patch(
+ "google.cloud.bigquery.client.Client._list_rows_from_query_results",
+ return_value=google.cloud.bigquery.table._EmptyRowIterator(),
+ )
+
+ with conn_patch as conn, credentials_patch, list_rows_patch as list_rows:
+ conn.return_value = default_conn
+ ip.run_cell_magic("bigquery", "", QUERY_STRING)
+
+ # Check that query actually starts the job.
+ conn.assert_called()
+ list_rows.assert_called()
+ begin_call = mock.call(
+ method="POST",
+ path="/projects/project-from-env/jobs",
+ data=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+ query_results_call = mock.call(
+ method="GET",
+ path=f"/projects/{PROJECT_ID}/queries/{JOB_ID}",
+ query_params=mock.ANY,
+ timeout=mock.ANY,
+ )
+ default_conn.api_request.assert_has_calls([begin_call, query_results_call])
+
+
+def test_context_credentials_and_project_can_be_set_explicitly():
+ project1 = "one-project-55564"
+ project2 = "other-project-52569"
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, project1)
+ )
+ with default_patch as default_mock:
+ magics.context.credentials = credentials_mock
+ magics.context.project = project2
+
+ assert magics.context.project == project2
+ assert magics.context.credentials is credentials_mock
+ # default should not be called if credentials & project are explicitly set
+ assert default_mock.call_count == 0
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_context_with_custom_connection(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+ magics.context._credentials = None
+ context_conn = magics.context._connection = make_connection(
+ QUERY_RESOURCE, QUERY_RESULTS_RESOURCE
+ )
+
+ default_credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ credentials_patch = mock.patch(
+ "google.auth.default", return_value=(default_credentials, "project-from-env")
+ )
+ default_conn = make_connection()
+ conn_patch = mock.patch("google.cloud.bigquery.client.Connection", autospec=True)
+ list_rows_patch = mock.patch(
+ "google.cloud.bigquery.client.Client._list_rows_from_query_results",
+ return_value=google.cloud.bigquery.table._EmptyRowIterator(),
+ )
+
+ with conn_patch as conn, credentials_patch, list_rows_patch as list_rows:
+ conn.return_value = default_conn
+ ip.run_cell_magic("bigquery", "", QUERY_STRING)
+
+ list_rows.assert_called()
+ default_conn.api_request.assert_not_called()
+ begin_call = mock.call(
+ method="POST",
+ path="/projects/project-from-env/jobs",
+ data=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+ query_results_call = mock.call(
+ method="GET",
+ path=f"/projects/{PROJECT_ID}/queries/{JOB_ID}",
+ query_params=mock.ANY,
+ timeout=mock.ANY,
+ )
+ context_conn.api_request.assert_has_calls([begin_call, query_results_call])
+
+
+def test__run_query():
+ magics.context._credentials = None
+
+ job_id = "job_1234"
+ sql = "SELECT 17"
+ responses = [
+ futures.TimeoutError,
+ futures.TimeoutError,
+ [table.Row((17,), {"num": 0})],
+ ]
+
+ client_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.bigquery.Client", autospec=True
+ )
+ with client_patch as client_mock, io.capture_output() as captured:
+ client_mock().query(sql).result.side_effect = responses
+ client_mock().query(sql).job_id = job_id
+
+ query_job = magics._run_query(client_mock(), sql)
+
+ lines = re.split("\n|\r", captured.stdout)
+ # Removes blanks & terminal code (result of display clearing)
+ updates = list(filter(lambda x: bool(x) and x != "\x1b[2K", lines))
+
+ assert query_job.job_id == job_id
+ expected_first_line = "Executing query with job ID: {}".format(job_id)
+ assert updates[0] == expected_first_line
+ execution_updates = updates[1:-1]
+ assert len(execution_updates) == 3 # one update per API response
+ for line in execution_updates:
+ assert re.match("Query executing: .*s", line)
+
+
+def test__run_query_dry_run_without_errors_is_silent():
+ magics.context._credentials = None
+
+ sql = "SELECT 17"
+
+ client_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.bigquery.Client", autospec=True
+ )
+
+ job_config = job.QueryJobConfig()
+ job_config.dry_run = True
+ with client_patch as client_mock, io.capture_output() as captured:
+ client_mock().query(sql).job_id = None
+ magics._run_query(client_mock(), sql, job_config=job_config)
+
+ assert len(captured.stderr) == 0
+ assert len(captured.stdout) == 0
+
+
+def test__make_bqstorage_client_false():
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ test_client = bigquery.Client(
+ project="test_project", credentials=credentials_mock, location="test_location"
+ )
+ got = magics._make_bqstorage_client(test_client, False, {})
+ assert got is None
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test__make_bqstorage_client_true():
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ test_client = bigquery.Client(
+ project="test_project", credentials=credentials_mock, location="test_location"
+ )
+ got = magics._make_bqstorage_client(test_client, True, {})
+ assert isinstance(got, bigquery_storage.BigQueryReadClient)
+
+
+def test__make_bqstorage_client_true_raises_import_error(missing_bq_storage):
+ """When package `google-cloud-bigquery-storage` is not installed, reports
+ ImportError.
+ """
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ test_client = bigquery.Client(
+ project="test_project", credentials=credentials_mock, location="test_location"
+ )
+
+ with pytest.raises(ImportError) as exc_context, missing_bq_storage:
+ magics._make_bqstorage_client(test_client, True, {})
+
+ error_msg = str(exc_context.value)
+ assert "google-cloud-bigquery-storage" in error_msg
+ assert "pyarrow" in error_msg
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test__make_bqstorage_client_true_obsolete_dependency():
+ """When package `google-cloud-bigquery-storage` is installed but has outdated
+ version, returns None, and raises a warning.
+ """
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ test_client = bigquery.Client(
+ project="test_project", credentials=credentials_mock, location="test_location"
+ )
+
+ patcher = mock.patch(
+ "google.cloud.bigquery._versions_helpers.BQ_STORAGE_VERSIONS.try_import",
+ side_effect=bq_exceptions.LegacyBigQueryStorageError(
+ "google-cloud-bigquery-storage is outdated"
+ ),
+ )
+ with patcher, warnings.catch_warnings(record=True) as warned:
+ got = magics._make_bqstorage_client(test_client, True, {})
+
+ assert got is None
+
+ matching_warnings = [
+ warning
+ for warning in warned
+ if "google-cloud-bigquery-storage is outdated" in str(warning)
+ ]
+ assert matching_warnings, "Obsolete dependency warning not raised."
+
+
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test__make_bqstorage_client_true_missing_gapic(missing_grpcio_lib):
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ with pytest.raises(ImportError) as exc_context, missing_grpcio_lib:
+ magics._make_bqstorage_client(True, credentials_mock, {})
+
+ assert "grpcio" in str(exc_context.value)
+
+
+def test__create_dataset_if_necessary_exists():
+ project = "project_id"
+ dataset_id = "dataset_id"
+ dataset_reference = bigquery.dataset.DatasetReference(project, dataset_id)
+ dataset = bigquery.Dataset(dataset_reference)
+ client_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.bigquery.Client", autospec=True
+ )
+ with client_patch as client_mock:
+ client = client_mock()
+ client.project = project
+ client.get_dataset.result_value = dataset
+ magics._create_dataset_if_necessary(client, dataset_id)
+ client.create_dataset.assert_not_called()
+
+
+def test__create_dataset_if_necessary_not_exist():
+ project = "project_id"
+ dataset_id = "dataset_id"
+ client_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.bigquery.Client", autospec=True
+ )
+ with client_patch as client_mock:
+ client = client_mock()
+ client.location = "us"
+ client.project = project
+ client.get_dataset.side_effect = exceptions.NotFound("dataset not found")
+ magics._create_dataset_if_necessary(client, dataset_id)
+ client.create_dataset.assert_called_once()
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_extension_load():
+ ip = IPython.get_ipython()
+
+ with pytest.warns(FutureWarning, match="bigquery_magics"):
+ bigquery.load_ipython_extension(ip)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_bigquery_magic_without_optional_arguments(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ mock_credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ # Set up the context with monkeypatch so that it's reset for subsequent
+ # tests.
+ monkeypatch.setattr(magics.context, "_credentials", mock_credentials)
+
+ # Mock out the BigQuery Storage API.
+ bqstorage_mock = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_instance_mock = mock.create_autospec(
+ bigquery_storage.BigQueryReadClient, instance=True
+ )
+ bqstorage_instance_mock._transport = mock.Mock()
+ bqstorage_mock.return_value = bqstorage_instance_mock
+ bqstorage_client_patch = mock.patch(
+ "google.cloud.bigquery_storage.BigQueryReadClient", bqstorage_mock
+ )
+
+ sql = "SELECT 17 AS num"
+ result = pandas.DataFrame([17], columns=["num"])
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+
+ with run_query_patch as run_query_mock, bqstorage_client_patch:
+ run_query_mock.return_value = query_job_mock
+ return_value = ip.run_cell_magic("bigquery", "", sql)
+
+ assert bqstorage_mock.called # BQ storage client was used
+ assert isinstance(return_value, pandas.DataFrame)
+ assert len(return_value) == len(result) # verify row count
+ assert list(return_value) == list(result) # verify column names
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_default_connection_user_agent(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._connection = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ conn_patch = mock.patch("google.cloud.bigquery.client.Connection", autospec=True)
+
+ with conn_patch as conn, run_query_patch, default_patch:
+ ip.run_cell_magic("bigquery", "", "SELECT 17 as num")
+
+ client_info_arg = conn.call_args[1].get("client_info")
+ assert client_info_arg is not None
+ assert client_info_arg.user_agent == "ipython-" + IPython.__version__
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_with_legacy_sql(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with run_query_patch as run_query_mock:
+ ip.run_cell_magic("bigquery", "--use_legacy_sql", "SELECT 17 AS num")
+
+ job_config_used = run_query_mock.call_args_list[0][1]["job_config"]
+ assert job_config_used.use_legacy_sql is True
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_result_saved_to_variable(ipython_ns_cleanup, monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ ipython_ns_cleanup.append((ip, "df"))
+
+ sql = "SELECT 17 AS num"
+ result = pandas.DataFrame([17], columns=["num"])
+ assert "df" not in ip.user_ns
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+ with run_query_patch as run_query_mock:
+ run_query_mock.return_value = query_job_mock
+
+ return_value = ip.run_cell_magic("bigquery", "df", sql)
+
+ assert return_value is None
+ assert "df" in ip.user_ns # verify that variable exists
+ df = ip.user_ns["df"]
+ assert len(df) == len(result) # verify row count
+ assert list(df) == list(result) # verify column names
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_does_not_clear_display_in_verbose_mode(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ clear_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.display.clear_output",
+ autospec=True,
+ )
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with clear_patch as clear_mock, run_query_patch:
+ ip.run_cell_magic("bigquery", "--verbose", "SELECT 17 as num")
+
+ assert clear_mock.call_count == 0
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_clears_display_in_non_verbose_mode(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ clear_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.display.clear_output",
+ autospec=True,
+ )
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with clear_patch as clear_mock, run_query_patch:
+ ip.run_cell_magic("bigquery", "", "SELECT 17 as num")
+
+ assert clear_mock.call_count == 1
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_bigquery_magic_with_bqstorage_from_argument(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ mock_credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ # Set up the context with monkeypatch so that it's reset for subsequent
+ # tests.
+ monkeypatch.setattr(magics.context, "_credentials", mock_credentials)
+
+ # Mock out the BigQuery Storage API.
+ bqstorage_mock = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_instance_mock = mock.create_autospec(
+ bigquery_storage.BigQueryReadClient, instance=True
+ )
+ bqstorage_instance_mock._transport = mock.Mock()
+ bqstorage_mock.return_value = bqstorage_instance_mock
+ bqstorage_client_patch = mock.patch(
+ "google.cloud.bigquery_storage.BigQueryReadClient", bqstorage_mock
+ )
+
+ sql = "SELECT 17 AS num"
+ result = pandas.DataFrame([17], columns=["num"])
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+ with run_query_patch as run_query_mock, (
+ bqstorage_client_patch
+ ), warnings.catch_warnings(record=True) as warned:
+ run_query_mock.return_value = query_job_mock
+
+ return_value = ip.run_cell_magic("bigquery", "--use_bqstorage_api", sql)
+
+ # Deprecation warning should have been issued.
+ def warning_match(warning):
+ message = str(warning).lower()
+ return "deprecated" in message and "use_bqstorage_api" in message
+
+ expected_warnings = list(filter(warning_match, warned))
+ assert len(expected_warnings) == 1
+
+ assert len(bqstorage_mock.call_args_list) == 1
+ kwargs = bqstorage_mock.call_args_list[0][1]
+ assert kwargs.get("credentials") is mock_credentials
+ client_info = kwargs.get("client_info")
+ assert client_info is not None
+ assert client_info.user_agent == "ipython-" + IPython.__version__
+
+ query_job_mock.to_dataframe.assert_called_once_with(
+ bqstorage_client=bqstorage_instance_mock,
+ create_bqstorage_client=mock.ANY,
+ progress_bar_type="tqdm_notebook",
+ )
+
+ assert isinstance(return_value, pandas.DataFrame)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+def test_bigquery_magic_with_rest_client_requested(monkeypatch):
+ pandas = pytest.importorskip("pandas")
+
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ mock_credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ # Set up the context with monkeypatch so that it's reset for subsequent
+ # tests.
+ monkeypatch.setattr(magics.context, "_credentials", mock_credentials)
+
+ # Mock out the BigQuery Storage API.
+ bqstorage_mock = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client_patch = mock.patch(
+ "google.cloud.bigquery_storage.BigQueryReadClient", bqstorage_mock
+ )
+
+ sql = "SELECT 17 AS num"
+ result = pandas.DataFrame([17], columns=["num"])
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+ with run_query_patch as run_query_mock, bqstorage_client_patch:
+ run_query_mock.return_value = query_job_mock
+
+ return_value = ip.run_cell_magic("bigquery", "--use_rest_api", sql)
+
+ bqstorage_mock.assert_not_called()
+ query_job_mock.to_dataframe.assert_called_once_with(
+ bqstorage_client=None,
+ create_bqstorage_client=False,
+ progress_bar_type="tqdm_notebook",
+ )
+
+ assert isinstance(return_value, pandas.DataFrame)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_w_max_results_invalid(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ client_query_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.query", autospec=True
+ )
+
+ sql = "SELECT 17 AS num"
+
+ with pytest.raises(ValueError), default_patch, client_query_patch:
+ ip.run_cell_magic("bigquery", "--max_results=abc", sql)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_w_max_results_valid_calls_queryjob_result(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ client_query_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.query", autospec=True
+ )
+
+ sql = "SELECT 17 AS num"
+
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+
+ with client_query_patch as client_query_mock, default_patch:
+ client_query_mock.return_value = query_job_mock
+ ip.run_cell_magic("bigquery", "--max_results=5", sql)
+
+ query_job_mock.result.assert_called_with(max_results=5)
+ query_job_mock.result.return_value.to_dataframe.assert_called_once_with(
+ bqstorage_client=None,
+ create_bqstorage_client=False,
+ progress_bar_type=mock.ANY,
+ )
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_w_max_results_query_job_results_fails(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ client_query_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.query", autospec=True
+ )
+ close_transports_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._close_transports",
+ autospec=True,
+ )
+
+ sql = "SELECT 17 AS num"
+
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.result.side_effect = [[], OSError]
+
+ with pytest.raises(
+ OSError
+ ), client_query_patch as client_query_mock, (
+ default_patch
+ ), close_transports_patch as close_transports:
+ client_query_mock.return_value = query_job_mock
+ ip.run_cell_magic("bigquery", "--max_results=5", sql)
+
+ assert close_transports.called
+
+
+def test_bigquery_magic_w_table_id_invalid(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+
+ list_rows_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.bigquery.Client.list_rows",
+ autospec=True,
+ side_effect=exceptions.BadRequest("Not a valid table ID"),
+ )
+
+ table_id = "not-a-real-table"
+
+ with list_rows_patch, default_patch, io.capture_output() as captured_io:
+ ip.run_cell_magic("bigquery", "df", table_id)
+
+ output = captured_io.stderr
+ assert "Could not save output to variable" in output
+ assert "400 Not a valid table ID" in output
+ assert "Traceback (most recent call last)" not in output
+
+
+def test_bigquery_magic_w_missing_query(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+
+ cell_body = " \n \n \t\t \n "
+
+ with io.capture_output() as captured_io, default_patch:
+ ip.run_cell_magic("bigquery", "df", cell_body)
+
+ output = captured_io.stderr
+ assert "Could not save output to variable" in output
+ assert "Query is missing" in output
+ assert "Traceback (most recent call last)" not in output
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_w_table_id_and_destination_var(ipython_ns_cleanup, monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ ipython_ns_cleanup.append((ip, "df"))
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+
+ row_iterator_mock = mock.create_autospec(
+ google.cloud.bigquery.table.RowIterator, instance=True
+ )
+
+ client_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.bigquery.Client", autospec=True
+ )
+
+ table_id = "bigquery-public-data.samples.shakespeare"
+ result = pandas.DataFrame([17], columns=["num"])
+
+ with client_patch as client_mock, default_patch:
+ client_mock().list_rows.return_value = row_iterator_mock
+ row_iterator_mock.to_dataframe.return_value = result
+
+ ip.run_cell_magic("bigquery", "df", table_id)
+
+ assert "df" in ip.user_ns
+ df = ip.user_ns["df"]
+
+ assert isinstance(df, pandas.DataFrame)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(
+ bigquery_storage is None, reason="Requires `google-cloud-bigquery-storage`"
+)
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_w_table_id_and_bqstorage_client(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+
+ row_iterator_mock = mock.create_autospec(
+ google.cloud.bigquery.table.RowIterator, instance=True
+ )
+
+ client_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.bigquery.Client", autospec=True
+ )
+
+ bqstorage_mock = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_instance_mock = mock.create_autospec(
+ bigquery_storage.BigQueryReadClient, instance=True
+ )
+ bqstorage_instance_mock._transport = mock.Mock()
+ bqstorage_mock.return_value = bqstorage_instance_mock
+ bqstorage_client_patch = mock.patch(
+ "google.cloud.bigquery_storage.BigQueryReadClient", bqstorage_mock
+ )
+
+ table_id = "bigquery-public-data.samples.shakespeare"
+
+ with default_patch, client_patch as client_mock, bqstorage_client_patch:
+ client_mock()._ensure_bqstorage_client.return_value = bqstorage_instance_mock
+ client_mock().list_rows.return_value = row_iterator_mock
+
+ ip.run_cell_magic("bigquery", "--max_results=5", table_id)
+ row_iterator_mock.to_dataframe.assert_called_once_with(
+ bqstorage_client=bqstorage_instance_mock,
+ create_bqstorage_client=mock.ANY,
+ )
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_dryrun_option_sets_job_config(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+
+ sql = "SELECT 17 AS num"
+
+ with run_query_patch as run_query_mock:
+ ip.run_cell_magic("bigquery", "--dry_run", sql)
+
+ job_config_used = run_query_mock.call_args_list[0][1]["job_config"]
+ assert job_config_used.dry_run is True
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_dryrun_option_returns_query_job(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+
+ sql = "SELECT 17 AS num"
+
+ with run_query_patch as run_query_mock, io.capture_output() as captured_io:
+ run_query_mock.return_value = query_job_mock
+ return_value = ip.run_cell_magic("bigquery", "--dry_run", sql)
+
+ assert "Query validated. This query will process" in captured_io.stdout
+ assert isinstance(return_value, job.QueryJob)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_dryrun_option_variable_error_message(
+ ipython_ns_cleanup, monkeypatch
+):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ ipython_ns_cleanup.append((ip, "q_job"))
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query",
+ autospec=True,
+ side_effect=exceptions.BadRequest("Syntax error in SQL query"),
+ )
+
+ sql = "SELECT SELECT 17 AS num"
+
+ assert "q_job" not in ip.user_ns
+
+ with run_query_patch, io.capture_output() as captured:
+ ip.run_cell_magic("bigquery", "q_job --dry_run", sql)
+
+ full_text = captured.stderr
+ assert "Could not save output to variable 'q_job'." in full_text
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_dryrun_option_saves_query_job_to_variable(
+ ipython_ns_cleanup, monkeypatch
+):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+
+ ipython_ns_cleanup.append((ip, "q_job"))
+
+ sql = "SELECT 17 AS num"
+
+ assert "q_job" not in ip.user_ns
+
+ with run_query_patch as run_query_mock:
+ run_query_mock.return_value = query_job_mock
+ return_value = ip.run_cell_magic("bigquery", "q_job --dry_run", sql)
+
+ assert return_value is None
+ assert "q_job" in ip.user_ns
+ q_job = ip.user_ns["q_job"]
+ assert isinstance(q_job, job.QueryJob)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_saves_query_job_to_variable_on_error(
+ ipython_ns_cleanup, monkeypatch
+):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ ipython_ns_cleanup.append((ip, "result"))
+
+ client_query_patch = mock.patch(
+ "google.cloud.bigquery.client.Client.query", autospec=True
+ )
+
+ query_job = mock.create_autospec(job.QueryJob, instance=True)
+ exception = Exception("Unexpected SELECT")
+ exception.query_job = query_job
+ query_job.result.side_effect = exception
+
+ sql = "SELECT SELECT 17 AS num"
+
+ assert "result" not in ip.user_ns
+
+ with client_query_patch as client_query_mock:
+ client_query_mock.return_value = query_job
+ return_value = ip.run_cell_magic("bigquery", "result", sql)
+
+ assert return_value is None
+ assert "result" in ip.user_ns
+ result = ip.user_ns["result"]
+ assert isinstance(result, job.QueryJob)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_w_maximum_bytes_billed_invalid(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ client_query_patch = mock.patch("google.cloud.bigquery.client.Client.query")
+
+ sql = "SELECT 17 AS num"
+
+ with pytest.raises(ValueError), default_patch, client_query_patch:
+ ip.run_cell_magic("bigquery", "--maximum_bytes_billed=abc", sql)
+
+
+@pytest.mark.parametrize(
+ "param_value,expected", [("987654321", "987654321"), ("None", "0")]
+)
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_w_maximum_bytes_billed_overrides_context(
+ param_value, expected, monkeypatch
+):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ # Set the default maximum bytes billed, so we know it's overridable by the param.
+ magics.context.default_query_job_config.maximum_bytes_billed = 1234567
+
+ project = "test-project"
+ job_reference = copy.deepcopy(JOB_REFERENCE_RESOURCE)
+ job_reference["projectId"] = project
+ query = "SELECT 17 AS num"
+ resource = copy.deepcopy(QUERY_RESOURCE)
+ resource["jobReference"] = job_reference
+ resource["configuration"]["query"]["query"] = query
+ query_results = {"jobReference": job_reference, "totalRows": 0, "jobComplete": True}
+ data = {"jobReference": job_reference, "totalRows": 0, "rows": []}
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ conn = magics.context._connection = make_connection(resource, query_results, data)
+ list_rows_patch = mock.patch(
+ "google.cloud.bigquery.client.Client._list_rows_from_query_results",
+ return_value=google.cloud.bigquery.table._EmptyRowIterator(),
+ )
+ with list_rows_patch, default_patch:
+ ip.run_cell_magic(
+ "bigquery", "--maximum_bytes_billed={}".format(param_value), query
+ )
+
+ _, req = conn.api_request.call_args_list[0]
+ sent_config = req["data"]["configuration"]["query"]
+ assert sent_config["maximumBytesBilled"] == expected
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_w_maximum_bytes_billed_w_context_inplace(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ magics.context.default_query_job_config.maximum_bytes_billed = 1337
+
+ project = "test-project"
+ job_reference = copy.deepcopy(JOB_REFERENCE_RESOURCE)
+ job_reference["projectId"] = project
+ query = "SELECT 17 AS num"
+ resource = copy.deepcopy(QUERY_RESOURCE)
+ resource["jobReference"] = job_reference
+ resource["configuration"]["query"]["query"] = query
+ query_results = {"jobReference": job_reference, "totalRows": 0, "jobComplete": True}
+ data = {"jobReference": job_reference, "totalRows": 0, "rows": []}
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ conn = magics.context._connection = make_connection(resource, query_results, data)
+ list_rows_patch = mock.patch(
+ "google.cloud.bigquery.client.Client._list_rows_from_query_results",
+ return_value=google.cloud.bigquery.table._EmptyRowIterator(),
+ )
+ with list_rows_patch, default_patch:
+ ip.run_cell_magic("bigquery", "", query)
+
+ _, req = conn.api_request.call_args_list[0]
+ sent_config = req["data"]["configuration"]["query"]
+ assert sent_config["maximumBytesBilled"] == "1337"
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_w_maximum_bytes_billed_w_context_setter(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ magics.context.default_query_job_config = job.QueryJobConfig(
+ maximum_bytes_billed=10203
+ )
+
+ project = "test-project"
+ job_reference = copy.deepcopy(JOB_REFERENCE_RESOURCE)
+ job_reference["projectId"] = project
+ query = "SELECT 17 AS num"
+ resource = copy.deepcopy(QUERY_RESOURCE)
+ resource["jobReference"] = job_reference
+ resource["configuration"]["query"]["query"] = query
+ query_results = {"jobReference": job_reference, "totalRows": 0, "jobComplete": True}
+ data = {"jobReference": job_reference, "totalRows": 0, "rows": []}
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ conn = magics.context._connection = make_connection(resource, query_results, data)
+ list_rows_patch = mock.patch(
+ "google.cloud.bigquery.client.Client._list_rows_from_query_results",
+ return_value=google.cloud.bigquery.table._EmptyRowIterator(),
+ )
+ with list_rows_patch, default_patch:
+ ip.run_cell_magic("bigquery", "", query)
+
+ _, req = conn.api_request.call_args_list[0]
+ sent_config = req["data"]["configuration"]["query"]
+ assert sent_config["maximumBytesBilled"] == "10203"
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_no_query_cache(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ conn = make_connection()
+ monkeypatch.setattr(magics.context, "_connection", conn)
+ monkeypatch.setattr(magics.context, "project", "project-from-context")
+
+ # --no_query_cache option should override context.
+ monkeypatch.setattr(
+ magics.context.default_query_job_config, "use_query_cache", True
+ )
+
+ ip.run_cell_magic("bigquery", "--no_query_cache", QUERY_STRING)
+
+ conn.api_request.assert_called_with(
+ method="POST",
+ path="/projects/project-from-context/jobs",
+ data=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+ jobs_insert_call = [
+ call
+ for call in conn.api_request.call_args_list
+ if call[1]["path"] == "/projects/project-from-context/jobs"
+ ][0]
+ assert not jobs_insert_call[1]["data"]["configuration"]["query"]["useQueryCache"]
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_context_with_no_query_cache_from_context(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ conn = make_connection()
+ monkeypatch.setattr(magics.context, "_connection", conn)
+ monkeypatch.setattr(magics.context, "project", "project-from-context")
+ monkeypatch.setattr(
+ magics.context.default_query_job_config, "use_query_cache", False
+ )
+
+ ip.run_cell_magic("bigquery", "", QUERY_STRING)
+
+ conn.api_request.assert_called_with(
+ method="POST",
+ path="/projects/project-from-context/jobs",
+ data=mock.ANY,
+ timeout=DEFAULT_TIMEOUT,
+ )
+ jobs_insert_call = [
+ call
+ for call in conn.api_request.call_args_list
+ if call[1]["path"] == "/projects/project-from-context/jobs"
+ ][0]
+ assert not jobs_insert_call[1]["data"]["configuration"]["query"]["useQueryCache"]
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_w_progress_bar_type_w_context_setter(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ magics.context.progress_bar_type = "tqdm_gui"
+
+ mock_credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ # Set up the context with monkeypatch so that it's reset for subsequent
+ # tests.
+ monkeypatch.setattr(magics.context, "_credentials", mock_credentials)
+
+ # Mock out the BigQuery Storage API.
+ bqstorage_mock = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client_patch = mock.patch(
+ "google.cloud.bigquery_storage.BigQueryReadClient", bqstorage_mock
+ )
+
+ sql = "SELECT 17 AS num"
+ result = pandas.DataFrame([17], columns=["num"])
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+ with run_query_patch as run_query_mock, bqstorage_client_patch:
+ run_query_mock.return_value = query_job_mock
+
+ return_value = ip.run_cell_magic("bigquery", "--use_rest_api", sql)
+
+ bqstorage_mock.assert_not_called()
+ query_job_mock.to_dataframe.assert_called_once_with(
+ bqstorage_client=None,
+ create_bqstorage_client=False,
+ progress_bar_type=magics.context.progress_bar_type,
+ )
+
+ assert isinstance(return_value, pandas.DataFrame)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_with_progress_bar_type(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.progress_bar_type = None
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with run_query_patch as run_query_mock:
+ ip.run_cell_magic(
+ "bigquery", "--progress_bar_type=tqdm_gui", "SELECT 17 as num"
+ )
+
+ progress_bar_used = run_query_mock.mock_calls[1][2]["progress_bar_type"]
+ assert progress_bar_used == "tqdm_gui"
+ # context progress bar type should not change
+ assert magics.context.progress_bar_type is None
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_with_project(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with run_query_patch as run_query_mock, default_patch:
+ ip.run_cell_magic("bigquery", "--project=specific-project", "SELECT 17 as num")
+
+ client_used = run_query_mock.call_args_list[0][0][0]
+ assert client_used.project == "specific-project"
+ # context project should not change
+ assert magics.context.project == "general-project"
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_with_bigquery_api_endpoint(ipython_ns_cleanup, monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._connection = None
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with run_query_patch as run_query_mock:
+ ip.run_cell_magic(
+ "bigquery",
+ "--bigquery_api_endpoint=https://bigquery_api.endpoint.com",
+ "SELECT 17 as num",
+ )
+
+ connection_used = run_query_mock.call_args_list[0][0][0]._connection
+ assert connection_used.API_BASE_URL == "https://bigquery_api.endpoint.com"
+ # context client options should not change
+ assert magics.context.bigquery_client_options.api_endpoint is None
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_with_bigquery_api_endpoint_context_dict(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._connection = None
+ magics.context.bigquery_client_options = {}
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with run_query_patch as run_query_mock:
+ ip.run_cell_magic(
+ "bigquery",
+ "--bigquery_api_endpoint=https://bigquery_api.endpoint.com",
+ "SELECT 17 as num",
+ )
+
+ connection_used = run_query_mock.call_args_list[0][0][0]._connection
+ assert connection_used.API_BASE_URL == "https://bigquery_api.endpoint.com"
+ # context client options should not change
+ assert magics.context.bigquery_client_options == {}
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_with_bqstorage_api_endpoint(ipython_ns_cleanup, monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._connection = None
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with run_query_patch as run_query_mock:
+ ip.run_cell_magic(
+ "bigquery",
+ "--bqstorage_api_endpoint=https://bqstorage_api.endpoint.com",
+ "SELECT 17 as num",
+ )
+
+ client_used = run_query_mock.mock_calls[1][2]["bqstorage_client"]
+ assert client_used._transport._host == "https://bqstorage_api.endpoint.com"
+ # context client options should not change
+ assert magics.context.bqstorage_client_options.api_endpoint is None
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_with_bqstorage_api_endpoint_context_dict(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._connection = None
+ magics.context.bqstorage_client_options = {}
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with run_query_patch as run_query_mock:
+ ip.run_cell_magic(
+ "bigquery",
+ "--bqstorage_api_endpoint=https://bqstorage_api.endpoint.com",
+ "SELECT 17 as num",
+ )
+
+ client_used = run_query_mock.mock_calls[1][2]["bqstorage_client"]
+ assert client_used._transport._host == "https://bqstorage_api.endpoint.com"
+ # context client options should not change
+ assert magics.context.bqstorage_client_options == {}
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_with_multiple_options(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with run_query_patch as run_query_mock, default_patch:
+ ip.run_cell_magic(
+ "bigquery",
+ "--project=specific-project --use_legacy_sql --maximum_bytes_billed 1024",
+ "SELECT 17 as num",
+ )
+
+ args, kwargs = run_query_mock.call_args
+ client_used = args[0]
+ assert client_used.project == "specific-project"
+
+ job_config_used = kwargs["job_config"]
+ assert job_config_used.use_legacy_sql
+ assert job_config_used.maximum_bytes_billed == 1024
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_string_params(ipython_ns_cleanup, monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ ipython_ns_cleanup.append((ip, "params_dict_df"))
+
+ sql = "SELECT @num AS num"
+ result = pandas.DataFrame([17], columns=["num"])
+
+ assert "params_dict_df" not in ip.user_ns
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+
+ with run_query_patch as run_query_mock:
+ run_query_mock.return_value = query_job_mock
+
+ ip.run_cell_magic("bigquery", "params_string_df --params='{\"num\":17}'", sql)
+
+ run_query_mock.assert_called_once_with(mock.ANY, sql.format(num=17), mock.ANY)
+
+ assert "params_string_df" in ip.user_ns # verify that the variable exists
+ df = ip.user_ns["params_string_df"]
+ assert len(df) == len(result) # verify row count
+ assert list(df) == list(result) # verify column names
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_dict_params(ipython_ns_cleanup, monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ ipython_ns_cleanup.append((ip, "params_dict_df"))
+
+ sql = "SELECT @num AS num, @tricky_value as tricky_value"
+ result = pandas.DataFrame(
+ [(False, '--params "value"')], columns=["valid", "tricky_value"]
+ )
+
+ assert "params_dict_df" not in ip.user_ns
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+ with run_query_patch as run_query_mock:
+ run_query_mock.return_value = query_job_mock
+
+ params = {"valid": False, "tricky_value": '--params "value"'}
+ # Insert dictionary into user namespace so that it can be expanded
+ ip.user_ns["params"] = params
+ ip.run_cell_magic("bigquery", "params_dict_df --params $params", sql)
+
+ run_query_mock.assert_called_once_with(mock.ANY, sql.format(num=17), mock.ANY)
+
+ assert "params_dict_df" in ip.user_ns # verify that the variable exists
+ df = ip.user_ns["params_dict_df"]
+ assert len(df) == len(result) # verify row count
+ assert list(df) == list(result) # verify column names
+
+ assert not df["valid"][0]
+ assert df["tricky_value"][0] == '--params "value"'
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_dict_params_nonexisting(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ sql = "SELECT @foo AS foo"
+
+ with pytest.raises(NameError, match=r".*undefined variable.*unknown_name.*"):
+ ip.run_cell_magic("bigquery", "params_dict_df --params $unknown_name", sql)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_dict_params_incorrect_syntax(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ sql = "SELECT @foo AS foo"
+
+ with pytest.raises(SyntaxError, match=r".*--params.*"):
+ cell_magic_args = "params_dict_df --params {'foo': 1; 'bar': 2}"
+ ip.run_cell_magic("bigquery", cell_magic_args, sql)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_dict_params_duplicate(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ sql = "SELECT @foo AS foo"
+
+ with pytest.raises(ValueError, match=r"Duplicate --params option\."):
+ cell_magic_args = (
+ "params_dict_df --params {'foo': 1} --verbose --params {'bar': 2} "
+ )
+ ip.run_cell_magic("bigquery", cell_magic_args, sql)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_option_value_incorrect(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ sql = "SELECT @foo AS foo"
+
+ with pytest.raises(ValueError, match=r".*invalid literal.*\[PLENTY!\].*"):
+ cell_magic_args = "params_dict_df --max_results [PLENTY!]"
+ ip.run_cell_magic("bigquery", cell_magic_args, sql)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_dict_params_negative_value(
+ ipython_ns_cleanup, monkeypatch
+):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ ipython_ns_cleanup.append((ip, "params_dict_df"))
+
+ sql = "SELECT @num AS num"
+ result = pandas.DataFrame([-17], columns=["num"])
+
+ assert "params_dict_df" not in ip.user_ns
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+ with run_query_patch as run_query_mock:
+ run_query_mock.return_value = query_job_mock
+
+ params = {"num": -17}
+ # Insert dictionary into user namespace so that it can be expanded
+ ip.user_ns["params"] = params
+ ip.run_cell_magic("bigquery", "params_dict_df --params $params", sql)
+
+ run_query_mock.assert_called_once_with(mock.ANY, sql.format(num=-17), mock.ANY)
+
+ assert "params_dict_df" in ip.user_ns # verify that the variable exists
+ df = ip.user_ns["params_dict_df"]
+ assert len(df) == len(result) # verify row count
+ assert list(df) == list(result) # verify column names
+ assert df["num"][0] == -17
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_dict_params_array_value(ipython_ns_cleanup, monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ ipython_ns_cleanup.append((ip, "params_dict_df"))
+
+ sql = "SELECT @num AS num"
+ result = pandas.DataFrame(["foo bar", "baz quux"], columns=["array_data"])
+
+ assert "params_dict_df" not in ip.user_ns
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+ with run_query_patch as run_query_mock:
+ run_query_mock.return_value = query_job_mock
+
+ params = {"array_data": ["foo bar", "baz quux"]}
+ # Insert dictionary into user namespace so that it can be expanded
+ ip.user_ns["params"] = params
+ ip.run_cell_magic("bigquery", "params_dict_df --params $params", sql)
+
+ run_query_mock.assert_called_once_with(mock.ANY, sql.format(num=-17), mock.ANY)
+
+ assert "params_dict_df" in ip.user_ns # verify that the variable exists
+ df = ip.user_ns["params_dict_df"]
+ assert len(df) == len(result) # verify row count
+ assert list(df) == list(result) # verify column names
+ assert list(df["array_data"]) == ["foo bar", "baz quux"]
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_dict_params_tuple_value(ipython_ns_cleanup, monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ ipython_ns_cleanup.append((ip, "params_dict_df"))
+
+ sql = "SELECT @num AS num"
+ result = pandas.DataFrame(["foo bar", "baz quux"], columns=["array_data"])
+
+ assert "params_dict_df" not in ip.user_ns
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ query_job_mock.to_dataframe.return_value = result
+ with run_query_patch as run_query_mock:
+ run_query_mock.return_value = query_job_mock
+
+ params = {"array_data": ("foo bar", "baz quux")}
+ # Insert dictionary into user namespace so that it can be expanded
+ ip.user_ns["params"] = params
+ ip.run_cell_magic("bigquery", "params_dict_df --params $params", sql)
+
+ run_query_mock.assert_called_once_with(mock.ANY, sql.format(num=-17), mock.ANY)
+
+ assert "params_dict_df" in ip.user_ns # verify that the variable exists
+ df = ip.user_ns["params_dict_df"]
+ assert len(df) == len(result) # verify row count
+ assert list(df) == list(result) # verify column names
+ assert list(df["array_data"]) == ["foo bar", "baz quux"]
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_improperly_formatted_params(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ sql = "SELECT @num AS num"
+
+ with pytest.raises(SyntaxError):
+ ip.run_cell_magic("bigquery", "--params {17}", sql)
+
+
+@pytest.mark.parametrize(
+ "raw_sql", ("SELECT answer AS 42", " \t SELECT answer AS 42 \t ")
+)
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_valid_query_in_existing_variable(
+ ipython_ns_cleanup, raw_sql, monkeypatch
+):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ ipython_ns_cleanup.append((ip, "custom_query"))
+ ipython_ns_cleanup.append((ip, "query_results_df"))
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ query_job_mock = mock.create_autospec(
+ google.cloud.bigquery.job.QueryJob, instance=True
+ )
+ mock_result = pandas.DataFrame([42], columns=["answer"])
+ query_job_mock.to_dataframe.return_value = mock_result
+
+ ip.user_ns["custom_query"] = raw_sql
+ cell_body = "$custom_query" # Referring to an existing variable name (custom_query)
+ assert "query_results_df" not in ip.user_ns
+
+ with run_query_patch as run_query_mock:
+ run_query_mock.return_value = query_job_mock
+
+ ip.run_cell_magic("bigquery", "query_results_df", cell_body)
+
+ run_query_mock.assert_called_once_with(mock.ANY, raw_sql, mock.ANY)
+
+ assert "query_results_df" in ip.user_ns # verify that the variable exists
+ df = ip.user_ns["query_results_df"]
+ assert len(df) == len(mock_result) # verify row count
+ assert list(df) == list(mock_result) # verify column names
+ assert list(df["answer"]) == [42]
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_nonexisting_query_variable(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+
+ ip.user_ns.pop("custom_query", None) # Make sure the variable does NOT exist.
+ cell_body = "$custom_query" # Referring to a non-existing variable name.
+
+ with pytest.raises(
+ NameError, match=r".*custom_query does not exist.*"
+ ), run_query_patch as run_query_mock:
+ ip.run_cell_magic("bigquery", "", cell_body)
+
+ run_query_mock.assert_not_called()
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_empty_query_variable_name(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ cell_body = "$" # Not referring to any variable (name omitted).
+
+ with pytest.raises(
+ NameError, match=r"(?i).*missing query variable name.*"
+ ), run_query_patch as run_query_mock:
+ ip.run_cell_magic("bigquery", "", cell_body)
+
+ run_query_mock.assert_not_called()
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_query_variable_non_string(ipython_ns_cleanup, monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+
+ ipython_ns_cleanup.append((ip, "custom_query"))
+
+ ip.user_ns["custom_query"] = object()
+ cell_body = "$custom_query" # Referring to a non-string variable.
+
+ with pytest.raises(
+ TypeError, match=r".*must be a string or a bytes-like.*"
+ ), run_query_patch as run_query_mock:
+ ip.run_cell_magic("bigquery", "", cell_body)
+
+ run_query_mock.assert_not_called()
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_query_variable_not_identifier(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ cell_body = "$123foo" # 123foo is not valid Python identifier
+
+ with io.capture_output() as captured_io:
+ ip.run_cell_magic("bigquery", "", cell_body)
+
+ # If "$" prefixes a string that is not a Python identifier, we do not treat such
+ # cell_body as a variable reference and just treat is as any other cell body input.
+ # If at the same time the cell body does not contain any whitespace, it is
+ # considered a table name, thus we expect an error that the table ID is not valid.
+ output = captured_io.stderr
+ assert "ERROR:" in output
+ assert "must be a fully-qualified ID" in output
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
+def test_bigquery_magic_with_invalid_multiple_option_values(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ sql = "SELECT @foo AS foo"
+
+ exc_pattern = r".*[Uu]nrecognized input.*option values correct\?.*567.*"
+
+ with pytest.raises(ValueError, match=exc_pattern):
+ cell_magic_args = "params_dict_df --max_results 10 567"
+ ip.run_cell_magic("bigquery", cell_magic_args, sql)
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_omits_tracebacks_from_error_message(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query",
+ autospec=True,
+ side_effect=exceptions.BadRequest("Syntax error in SQL query"),
+ )
+
+ with run_query_patch, default_patch, io.capture_output() as captured_io:
+ ip.run_cell_magic("bigquery", "", "SELECT foo FROM WHERE LIMIT bar")
+
+ output = captured_io.stderr
+ assert "400 Syntax error in SQL query" in output
+ assert "Traceback (most recent call last)" not in output
+ assert "Syntax error" not in captured_io.stdout
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_w_destination_table_invalid_format(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context._project = None
+
+ credentials_mock = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+ default_patch = mock.patch(
+ "google.auth.default", return_value=(credentials_mock, "general-project")
+ )
+
+ client_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics.bigquery.Client", autospec=True
+ )
+
+ with client_patch, default_patch, pytest.raises(ValueError) as exc_context:
+ ip.run_cell_magic(
+ "bigquery", "--destination_table dataset", "SELECT foo FROM WHERE LIMIT bar"
+ )
+ error_msg = str(exc_context.value)
+ assert (
+ "--destination_table should be in a "
+ ". format." in error_msg
+ )
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_w_destination_table(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ create_dataset_if_necessary_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._create_dataset_if_necessary",
+ autospec=True,
+ )
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+
+ with create_dataset_if_necessary_patch, run_query_patch as run_query_mock:
+ ip.run_cell_magic(
+ "bigquery",
+ "--destination_table dataset_id.table_id",
+ "SELECT foo FROM WHERE LIMIT bar",
+ )
+
+ job_config_used = run_query_mock.call_args_list[0][1]["job_config"]
+ assert job_config_used.allow_large_results is True
+ assert job_config_used.create_disposition == "CREATE_IF_NEEDED"
+ assert job_config_used.write_disposition == "WRITE_TRUNCATE"
+ assert job_config_used.destination.dataset_id == "dataset_id"
+ assert job_config_used.destination.table_id == "table_id"
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_create_dataset_fails(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ create_dataset_if_necessary_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._create_dataset_if_necessary",
+ autospec=True,
+ side_effect=OSError,
+ )
+ close_transports_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._close_transports",
+ autospec=True,
+ )
+
+ with pytest.raises(
+ OSError
+ ), create_dataset_if_necessary_patch, close_transports_patch as close_transports:
+ ip.run_cell_magic(
+ "bigquery",
+ "--destination_table dataset_id.table_id",
+ "SELECT foo FROM WHERE LIMIT bar",
+ )
+
+ assert close_transports.called
+
+
+@pytest.mark.usefixtures("ipython_interactive")
+def test_bigquery_magic_with_location(monkeypatch):
+ ip = IPython.get_ipython()
+ monkeypatch.setattr(bigquery, "bigquery_magics", None)
+ bigquery.load_ipython_extension(ip)
+ magics.context.credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ run_query_patch = mock.patch(
+ "google.cloud.bigquery.magics.magics._run_query", autospec=True
+ )
+ with run_query_patch as run_query_mock:
+ ip.run_cell_magic("bigquery", "--location=us-east1", "SELECT 17 AS num")
+
+ client_options_used = run_query_mock.call_args_list[0][0][0]
+ assert client_options_used.location == "us-east1"
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_opentelemetry_tracing.py b/testbed/googleapis__python-bigquery/tests/unit/test_opentelemetry_tracing.py
new file mode 100644
index 0000000000000000000000000000000000000000..546cc02bd75eb663c8911439c8f33b77693383b2
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_opentelemetry_tracing.py
@@ -0,0 +1,269 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import importlib
+import sys
+from unittest import mock
+
+try:
+ import opentelemetry
+except ImportError:
+ opentelemetry = None
+
+if opentelemetry is not None:
+ try:
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+ InMemorySpanExporter,
+ )
+ except (ImportError, AttributeError) as exc: # pragma: NO COVER
+ msg = "Error importing from opentelemetry, is the installed version compatible?"
+ raise ImportError(msg) from exc
+
+import pytest
+
+from google.cloud.bigquery import opentelemetry_tracing
+
+TEST_SPAN_NAME = "bar"
+TEST_SPAN_ATTRIBUTES = {"foo": "baz"}
+
+
+@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
+@pytest.fixture
+def setup():
+ importlib.reload(opentelemetry_tracing)
+ tracer_provider = TracerProvider()
+ memory_exporter = InMemorySpanExporter()
+ span_processor = SimpleSpanProcessor(memory_exporter)
+ tracer_provider.add_span_processor(span_processor)
+
+ # OpenTelemetry API >= 0.12b0 does not allow overriding the tracer once
+ # initialized, thus directly override (and then restore) the internal global var.
+ orig_trace_provider = trace._TRACER_PROVIDER
+ trace._TRACER_PROVIDER = tracer_provider
+
+ yield memory_exporter
+
+ trace._TRACER_PROVIDER = orig_trace_provider
+
+
+@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
+def test_opentelemetry_not_installed(setup, monkeypatch):
+ monkeypatch.setitem(sys.modules, "opentelemetry", None)
+ importlib.reload(opentelemetry_tracing)
+ assert not opentelemetry_tracing._warned_telemetry
+ with opentelemetry_tracing.create_span("No-op for opentelemetry") as span:
+ assert span is None
+ assert opentelemetry_tracing._warned_telemetry
+
+
+@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
+def test_opentelemetry_not_installed_doesnt_warn(setup, monkeypatch):
+ monkeypatch.setitem(sys.modules, "opentelemetry", None)
+ importlib.reload(opentelemetry_tracing)
+ opentelemetry_tracing._warned_telemetry = True
+ with opentelemetry_tracing.create_span("No-op for opentelemetry") as span:
+ assert span is None
+ assert opentelemetry_tracing._warned_telemetry
+
+
+@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
+def test_opentelemetry_success(setup):
+ expected_attributes = {"foo": "baz", "db.system": "BigQuery"}
+
+ with opentelemetry_tracing.create_span(
+ TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=None, job_ref=None
+ ) as span:
+ assert span is not None
+ assert span.name == TEST_SPAN_NAME
+ assert span.attributes == expected_attributes
+
+
+@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
+def test_default_client_attributes(setup):
+ expected_attributes = {
+ "foo": "baz",
+ "db.system": "BigQuery",
+ "db.name": "test_project",
+ "location": "test_location",
+ }
+ with mock.patch("google.cloud.bigquery.client.Client") as test_client:
+ test_client.project = "test_project"
+ test_client.location = "test_location"
+ with opentelemetry_tracing.create_span(
+ TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=test_client
+ ) as span:
+ assert span is not None
+ assert span.name == TEST_SPAN_NAME
+ assert span.attributes == expected_attributes
+
+
+@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
+def test_default_job_attributes(setup):
+ import google.cloud._helpers
+
+ time_created = datetime.datetime(
+ 2010, 5, 19, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ started_time = datetime.datetime(
+ 2011, 10, 1, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ ended_time = datetime.datetime(
+ 2011, 10, 2, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+ error_result = [
+ {"errorResult1": "some_error_result1", "errorResult2": "some_error_result2"}
+ ]
+
+ expected_attributes = {
+ "db.system": "BigQuery",
+ "db.name": "test_project_id",
+ "location": "test_location",
+ "num_child_jobs": "0",
+ "job_id": "test_job_id",
+ "foo": "baz",
+ "parent_job_id": "parent_job_id",
+ "timeCreated": time_created.isoformat(),
+ "timeStarted": started_time.isoformat(),
+ "timeEnded": ended_time.isoformat(),
+ "hasErrors": True,
+ "state": "some_job_state",
+ "total_bytes_billed": 42,
+ "total_bytes_processed": 13,
+ }
+ with mock.patch("google.cloud.bigquery.job._AsyncJob") as test_job_ref:
+ test_job_ref.job_id = "test_job_id"
+ test_job_ref.location = "test_location"
+ test_job_ref.project = "test_project_id"
+ test_job_ref.num_child_jobs = "0"
+ test_job_ref.parent_job_id = "parent_job_id"
+ test_job_ref.created = time_created
+ test_job_ref.started = started_time
+ test_job_ref.ended = ended_time
+ test_job_ref.error_result = error_result
+ test_job_ref.state = "some_job_state"
+ test_job_ref.total_bytes_billed = 42
+ test_job_ref.total_bytes_processed = 13
+
+ with opentelemetry_tracing.create_span(
+ TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, job_ref=test_job_ref
+ ) as span:
+ assert span is not None
+ assert span.name == TEST_SPAN_NAME
+ assert span.attributes == expected_attributes
+
+
+@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
+def test_optional_job_attributes(setup):
+ # This test ensures we don't propagate unset values into span attributes
+ import google.cloud._helpers
+
+ time_created = datetime.datetime(
+ 2010, 5, 19, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
+ )
+
+ with mock.patch("google.cloud.bigquery.job._AsyncJob") as test_job_ref:
+ test_job_ref.job_id = "test_job_id"
+ test_job_ref.location = None
+ test_job_ref.project = "test_project_id"
+ test_job_ref.created = time_created
+ test_job_ref.state = "some_job_state"
+ test_job_ref.num_child_jobs = None
+ test_job_ref.parent_job_id = None
+ test_job_ref.total_bytes_billed = None
+ test_job_ref.total_bytes_processed = None
+
+ with opentelemetry_tracing.create_span(
+ TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, job_ref=test_job_ref
+ ) as span:
+ assert span is not None
+ for val in span.attributes.values():
+ assert val is not None
+
+
+@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
+def test_default_no_data_leakage(setup):
+ import google.auth.credentials
+ from google.cloud.bigquery import client
+ from google.cloud.bigquery import job
+
+ mock_credentials = mock.Mock(spec=google.auth.credentials.Credentials)
+ test_client = client.Client(
+ project="test_project", credentials=mock_credentials, location="test_location"
+ )
+
+ expected_attributes = {
+ "foo": "baz",
+ "db.system": "BigQuery",
+ "db.name": "test_project",
+ "location": "test_location",
+ }
+ with opentelemetry_tracing.create_span(
+ TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=test_client
+ ) as span:
+ assert span.name == TEST_SPAN_NAME
+ assert span.attributes == expected_attributes
+
+ test_job_reference = job._JobReference(
+ job_id="test_job_id", project="test_project_id", location="test_location"
+ )
+ test_client = client.Client(
+ project="test_project", credentials=mock_credentials, location="test_location"
+ )
+ test_job = job._AsyncJob(job_id=test_job_reference, client=test_client)
+
+ expected_attributes = {
+ "db.system": "BigQuery",
+ "db.name": "test_project_id",
+ "location": "test_location",
+ "num_child_jobs": 0,
+ "job_id": "test_job_id",
+ "foo": "baz",
+ "hasErrors": False,
+ }
+
+ with opentelemetry_tracing.create_span(
+ TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, job_ref=test_job
+ ) as span:
+ assert span.name == TEST_SPAN_NAME
+ assert span.attributes == expected_attributes
+
+
+@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
+def test_span_creation_error(setup):
+ import google.auth.credentials
+ from google.cloud.bigquery import client
+ from google.api_core.exceptions import GoogleAPICallError, InvalidArgument
+
+ mock_credentials = mock.Mock(spec=google.auth.credentials.Credentials)
+ test_client = client.Client(
+ project="test_project", credentials=mock_credentials, location="test_location"
+ )
+
+ expected_attributes = {
+ "foo": "baz",
+ "db.system": "BigQuery",
+ "db.name": "test_project",
+ "location": "test_location",
+ }
+ with pytest.raises(GoogleAPICallError):
+ with opentelemetry_tracing.create_span(
+ TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=test_client
+ ) as span:
+ assert span.name == TEST_SPAN_NAME
+ assert span.attributes == expected_attributes
+ raise InvalidArgument("test_error")
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_packaging.py b/testbed/googleapis__python-bigquery/tests/unit/test_packaging.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f1b16c6675000a4edfc07599e3c4819938f51eb
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_packaging.py
@@ -0,0 +1,37 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import sys
+
+
+def test_namespace_package_compat(tmp_path):
+ # The ``google`` namespace package should not be masked
+ # by the presence of ``google-cloud-bigquery``.
+ google = tmp_path / "google"
+ google.mkdir()
+ google.joinpath("othermod.py").write_text("")
+ env = dict(os.environ, PYTHONPATH=str(tmp_path))
+ cmd = [sys.executable, "-m", "google.othermod"]
+ subprocess.check_call(cmd, env=env)
+
+ # The ``google.cloud`` namespace package should not be masked
+ # by the presence of ``google-cloud-bigquery``.
+ google_cloud = tmp_path / "google" / "cloud"
+ google_cloud.mkdir()
+ google_cloud.joinpath("othermod.py").write_text("")
+ env = dict(os.environ, PYTHONPATH=str(tmp_path))
+ cmd = [sys.executable, "-m", "google.cloud.othermod"]
+ subprocess.check_call(cmd, env=env)
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_query.py b/testbed/googleapis__python-bigquery/tests/unit/test_query.py
new file mode 100644
index 0000000000000000000000000000000000000000..40ef080f7543209537113d1d8e5260976e479fc4
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_query.py
@@ -0,0 +1,2131 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import decimal
+import unittest
+from unittest import mock
+
+
+class Test_UDFResource(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import UDFResource
+
+ return UDFResource
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor(self):
+ udf = self._make_one("resourceUri", "gs://some_bucket/some_file")
+ self.assertEqual(udf.udf_type, "resourceUri")
+ self.assertEqual(udf.value, "gs://some_bucket/some_file")
+
+ def test___eq__(self):
+ udf = self._make_one("resourceUri", "gs://some_bucket/some_file")
+ self.assertEqual(udf, udf)
+ self.assertNotEqual(udf, object())
+ wrong_val = self._make_one("resourceUri", "gs://some_bucket/other_file")
+ self.assertNotEqual(udf, wrong_val)
+ wrong_type = self._make_one("inlineCode", udf.value)
+ self.assertNotEqual(udf, wrong_type)
+
+
+class Test__AbstractQueryParameterType(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import _AbstractQueryParameterType
+
+ return _AbstractQueryParameterType
+
+ @classmethod
+ def _make_one(cls, *args, **kw):
+ return cls._get_target_class()(*args, **kw)
+
+ def test_from_api_virtual(self):
+ klass = self._get_target_class()
+ with self.assertRaises(NotImplementedError):
+ klass.from_api_repr({})
+
+ def test_to_api_virtual(self):
+ param_type = self._make_one()
+ with self.assertRaises(NotImplementedError):
+ param_type.to_api_repr()
+
+
+class Test_ScalarQueryParameterType(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+
+ return ScalarQueryParameterType
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_from_api_repr(self):
+ klass = self._get_target_class()
+ result = klass.from_api_repr({"type": "BOOLEAN"})
+ self.assertEqual(result._type, "BOOLEAN")
+ self.assertIsNone(result.name)
+ self.assertIsNone(result.description)
+
+ def test_to_api_repr(self):
+ param_type = self._make_one("BYTES", name="foo", description="bar")
+ result = param_type.to_api_repr()
+ self.assertEqual(result, {"type": "BYTES"})
+
+ def test_repr_no_optional_attrs(self):
+ param_type = self._make_one("BYTES")
+ self.assertEqual(repr(param_type), "ScalarQueryParameterType('BYTES')")
+
+ def test_repr_all_optional_attrs(self):
+ param_type = self._make_one("BYTES", name="foo", description="this is foo")
+ self.assertEqual(
+ repr(param_type),
+ "ScalarQueryParameterType('BYTES', name='foo', description='this is foo')",
+ )
+
+ def test_with_name_returns_copy_w_changed_name(self):
+ param_type = self._make_one("BOOLEAN", name=None, description="Some checkbox.")
+ modified_type = param_type.with_name("allow_emails")
+
+ self.assertIsNot(modified_type, param_type) # Result is a copy.
+ self.assertEqual(modified_type.name, "allow_emails")
+
+ # The rest of the The rest of the fields should have been preserved.
+ self.assertEqual(modified_type._type, param_type._type)
+ self.assertEqual(modified_type.description, param_type.description)
+
+ def test_with_name_clearing_the_value(self):
+ param_type = self._make_one(
+ "BOOLEAN", name="allow_emails", description="Some checkbox."
+ )
+ modified_type = param_type.with_name(None)
+
+ self.assertIsNone(modified_type.name)
+ self.assertEqual(param_type.name, "allow_emails") # original unchanged
+
+
+class Test_ArrayQueryParameterType(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import ArrayQueryParameterType
+
+ return ArrayQueryParameterType
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_from_api_repr(self):
+ from google.cloud.bigquery.query import StructQueryParameterType
+
+ api_resource = {
+ "type": "ARRAY",
+ "arrayType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {
+ "name": "weight",
+ "type": {"type": "INTEGER"},
+ "description": "in kg",
+ },
+ {"name": "last_name", "type": {"type": "STRING"}},
+ ],
+ },
+ }
+
+ klass = self._get_target_class()
+ result = klass.from_api_repr(api_resource)
+
+ self.assertIsNone(result.name)
+ self.assertIsNone(result.description)
+ item_type = result._array_type
+ self.assertIsInstance(item_type, StructQueryParameterType)
+
+ self.assertIsNone(item_type.name)
+ self.assertIsNone(item_type.description)
+
+ field = item_type.fields[0]
+ self.assertEqual(field.name, "weight")
+ self.assertEqual(field.description, "in kg")
+ self.assertEqual(field._type, "INTEGER")
+
+ field = item_type.fields[1]
+ self.assertEqual(field.name, "last_name")
+ self.assertIsNone(field.description)
+ self.assertEqual(field._type, "STRING")
+
+ def test_to_api_repr(self):
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+ from google.cloud.bigquery.query import StructQueryParameterType
+
+ array_item_type = StructQueryParameterType(
+ ScalarQueryParameterType("INTEGER", name="weight", description="in kg"),
+ ScalarQueryParameterType("STRING", name="last_name"),
+ )
+ param_type = self._make_one(array_item_type, name="foo", description="bar")
+
+ result = param_type.to_api_repr()
+
+ expected_result = {
+ "type": "ARRAY",
+ "arrayType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {
+ "name": "weight",
+ "type": {"type": "INTEGER"},
+ "description": "in kg",
+ },
+ {"name": "last_name", "type": {"type": "STRING"}},
+ ],
+ },
+ }
+ self.assertEqual(result, expected_result)
+
+ def test_repr_no_optional_attrs(self):
+ param_type = self._make_one("BOOLEAN")
+ self.assertEqual(repr(param_type), "ArrayQueryParameterType('BOOLEAN')")
+
+ def test_repr_all_optional_attrs(self):
+ param_type = self._make_one("INT64", name="bar", description="this is bar")
+ self.assertEqual(
+ repr(param_type),
+ "ArrayQueryParameterType('INT64', name='bar', description='this is bar')",
+ )
+
+
+class Test_StructQueryParameterType(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import StructQueryParameterType
+
+ return StructQueryParameterType
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_raises_error_without_any_fields(self):
+ with self.assertRaisesRegex(ValueError, ".*at least one field.*"):
+ self._make_one()
+
+ def test_from_api_repr(self):
+ from google.cloud.bigquery.query import ArrayQueryParameterType
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+
+ api_resource = {
+ "type": "STRUCT",
+ "structTypes": [
+ {
+ "name": "age",
+ "type": {"type": "INTEGER"},
+ "description": "in years",
+ },
+ {
+ "name": "aliases",
+ "type": {"type": "ARRAY", "arrayType": {"type": "STRING"}},
+ },
+ {
+ "description": "a nested struct",
+ "type": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"type": {"type": "DATE"}, "name": "nested_date"},
+ {
+ "type": {"type": "BOOLEAN"},
+ "description": "nested bool field",
+ },
+ ],
+ },
+ },
+ ],
+ }
+
+ klass = self._get_target_class()
+ result = klass.from_api_repr(api_resource)
+
+ self.assertIsNone(result.name)
+ self.assertIsNone(result.description)
+ self.assertEqual(len(result.fields), 3)
+
+ field = result.fields[0]
+ self.assertIsInstance(field, ScalarQueryParameterType)
+ self.assertEqual(field.name, "age")
+ self.assertEqual(field.description, "in years")
+
+ field = result.fields[1]
+ self.assertIsInstance(field, ArrayQueryParameterType)
+ self.assertEqual(field.name, "aliases")
+ self.assertIsNone(field.description)
+ self.assertIsInstance(field._array_type, ScalarQueryParameterType)
+ self.assertEqual(field._array_type._type, "STRING")
+
+ field = result.fields[2]
+ self.assertIsInstance(field, self._get_target_class())
+ self.assertIsNone(field.name)
+ self.assertEqual(field.description, "a nested struct")
+
+ date_field = field.fields[0]
+ self.assertEqual(date_field._type, "DATE")
+ self.assertEqual(date_field.name, "nested_date")
+ self.assertIsNone(date_field.description)
+
+ bool_field = field.fields[1]
+ self.assertEqual(bool_field._type, "BOOLEAN")
+ self.assertIsNone(bool_field.name)
+ self.assertEqual(bool_field.description, "nested bool field")
+
+ def test_to_api_repr(self):
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+
+ int_type = ScalarQueryParameterType("INTEGER", description="in years")
+ date_type = ScalarQueryParameterType("DATE", name="day_of_birth")
+ param_type = self._make_one(int_type, date_type, name="foo", description="bar")
+
+ result = param_type.to_api_repr()
+
+ expected_result = {
+ "type": "STRUCT",
+ "structTypes": [
+ {"type": {"type": "INTEGER"}, "description": "in years"},
+ {"name": "day_of_birth", "type": {"type": "DATE"}},
+ ],
+ }
+ self.assertEqual(result, expected_result)
+
+ def test_to_api_repr_nested(self):
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+
+ struct_class = self._get_target_class()
+
+ int_type = ScalarQueryParameterType("INTEGER", description="in years")
+ nested_struct_type = struct_class(
+ ScalarQueryParameterType("DATE", name="nested_date"),
+ ScalarQueryParameterType("BOOLEAN", description="nested bool field"),
+ name="nested",
+ )
+ param_type = self._make_one(
+ int_type, nested_struct_type, name="foo", description="bar"
+ )
+
+ result = param_type.to_api_repr()
+
+ expected_result = {
+ "type": "STRUCT",
+ "structTypes": [
+ {"type": {"type": "INTEGER"}, "description": "in years"},
+ {
+ "name": "nested",
+ "type": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"type": {"type": "DATE"}, "name": "nested_date"},
+ {
+ "type": {"type": "BOOLEAN"},
+ "description": "nested bool field",
+ },
+ ],
+ },
+ },
+ ],
+ }
+ self.assertEqual(result, expected_result)
+
+ def test_repr_no_optional_attrs(self):
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+
+ param_type = self._make_one(
+ ScalarQueryParameterType("BOOLEAN"), ScalarQueryParameterType("STRING")
+ )
+ expected = (
+ "StructQueryParameterType("
+ "ScalarQueryParameterType('BOOLEAN'), ScalarQueryParameterType('STRING')"
+ ")"
+ )
+ self.assertEqual(repr(param_type), expected)
+
+ def test_repr_all_optional_attrs(self):
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+
+ param_type = self._make_one(
+ ScalarQueryParameterType("BOOLEAN"),
+ ScalarQueryParameterType("STRING"),
+ name="data_record",
+ description="this is it",
+ )
+ expected = (
+ "StructQueryParameterType("
+ "ScalarQueryParameterType('BOOLEAN'), ScalarQueryParameterType('STRING'), "
+ "name='data_record', description='this is it'"
+ ")"
+ )
+ self.assertEqual(repr(param_type), expected)
+
+
+class Test_RangeQueryParameterType(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ return RangeQueryParameterType
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_str(self):
+ param_type = self._make_one("DATE", name="foo", description="bar")
+ self.assertEqual(param_type.type_._type, "DATE")
+ self.assertEqual(param_type.name, "foo")
+ self.assertEqual(param_type.description, "bar")
+
+ def test_ctor_type(self):
+ from google.cloud.bigquery import ScalarQueryParameterType
+
+ scalar_type = ScalarQueryParameterType("DATE")
+ param_type = self._make_one(scalar_type, name="foo", description="bar")
+ self.assertEqual(param_type.type_._type, "DATE")
+ self.assertEqual(param_type.name, "foo")
+ self.assertEqual(param_type.description, "bar")
+
+ def test_ctor_unsupported_type_str(self):
+ with self.assertRaises(ValueError):
+ self._make_one("TIME")
+
+ def test_ctor_unsupported_type_type(self):
+ from google.cloud.bigquery import ScalarQueryParameterType
+
+ scalar_type = ScalarQueryParameterType("TIME")
+ with self.assertRaises(ValueError):
+ self._make_one(scalar_type)
+
+ def test_ctor_wrong_type(self):
+ with self.assertRaises(ValueError):
+ self._make_one(None)
+
+ def test_from_api_repr(self):
+ RESOURCE = {
+ "type": "RANGE",
+ "rangeElementType": {"type": "DATE"},
+ }
+
+ klass = self._get_target_class()
+ result = klass.from_api_repr(RESOURCE)
+ self.assertEqual(result.type_._type, "DATE")
+ self.assertIsNone(result.name)
+ self.assertIsNone(result.description)
+
+ def test_to_api_repr(self):
+ EXPECTED = {
+ "type": "RANGE",
+ "rangeElementType": {"type": "DATE"},
+ }
+ param_type = self._make_one("DATE", name="foo", description="bar")
+ result = param_type.to_api_repr()
+ self.assertEqual(result, EXPECTED)
+
+ def test__repr__(self):
+ param_type = self._make_one("DATE", name="foo", description="bar")
+ param_repr = "RangeQueryParameterType(ScalarQueryParameterType('DATE'), name='foo', description='bar')"
+ self.assertEqual(repr(param_type), param_repr)
+
+ def test__eq__(self):
+ param_type1 = self._make_one("DATE", name="foo", description="bar")
+ self.assertEqual(param_type1, param_type1)
+ self.assertNotEqual(param_type1, object())
+
+ alias = self._make_one("DATE", name="foo", description="bar")
+ self.assertIsNot(param_type1, alias)
+ self.assertEqual(param_type1, alias)
+
+ wrong_type = self._make_one("DATETIME", name="foo", description="bar")
+ self.assertNotEqual(param_type1, wrong_type)
+
+ wrong_name = self._make_one("DATETIME", name="foo2", description="bar")
+ self.assertNotEqual(param_type1, wrong_name)
+
+ wrong_description = self._make_one("DATETIME", name="foo", description="bar2")
+ self.assertNotEqual(param_type1, wrong_description)
+
+ def test_with_name(self):
+ param_type1 = self._make_one("DATE", name="foo", description="bar")
+ param_type2 = param_type1.with_name("foo2")
+
+ self.assertIsNot(param_type1, param_type2)
+ self.assertEqual(param_type2.type_._type, "DATE")
+ self.assertEqual(param_type2.name, "foo2")
+ self.assertEqual(param_type2.description, "bar")
+
+
+class Test__AbstractQueryParameter(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import _AbstractQueryParameter
+
+ return _AbstractQueryParameter
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_from_api_virtual(self):
+ klass = self._get_target_class()
+ with self.assertRaises(NotImplementedError):
+ klass.from_api_repr({})
+
+ def test_to_api_virtual(self):
+ param = self._make_one()
+ with self.assertRaises(NotImplementedError):
+ param.to_api_repr()
+
+
+class Test_ScalarQueryParameter(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import ScalarQueryParameter
+
+ return ScalarQueryParameter
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor(self):
+ param = self._make_one(name="foo", type_="INT64", value=123)
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.type_, "INT64")
+ self.assertEqual(param.value, 123)
+
+ def test___eq__(self):
+ param = self._make_one(name="foo", type_="INT64", value=123)
+ self.assertEqual(param, param)
+ self.assertNotEqual(param, object())
+ alias = self._make_one(name="bar", type_="INT64", value=123)
+ self.assertNotEqual(param, alias)
+ wrong_type = self._make_one(name="foo", type_="FLOAT64", value=123.0)
+ self.assertNotEqual(param, wrong_type)
+ wrong_val = self._make_one(name="foo", type_="INT64", value=234)
+ self.assertNotEqual(param, wrong_val)
+
+ def test_positional(self):
+ klass = self._get_target_class()
+ param = klass.positional(type_="INT64", value=123)
+ self.assertEqual(param.name, None)
+ self.assertEqual(param.type_, "INT64")
+ self.assertEqual(param.value, 123)
+
+ def test_ctor_w_scalar_query_parameter_type(self):
+ from google.cloud.bigquery import query
+
+ param = self._make_one(
+ name="foo",
+ type_=query.SqlParameterScalarTypes.BIGNUMERIC,
+ value=decimal.Decimal("123.456"),
+ )
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.type_, "BIGNUMERIC")
+ self.assertEqual(param.value, decimal.Decimal("123.456"))
+
+ def test_from_api_repr_w_name(self):
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": 123},
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.type_, "INT64")
+ self.assertEqual(param.value, 123)
+
+ def test_from_api_repr_wo_name(self):
+ RESOURCE = {
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": "123"},
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(param.name, None)
+ self.assertEqual(param.type_, "INT64")
+ self.assertEqual(param.value, 123)
+
+ def test_from_api_repr_wo_value(self):
+ # Back-end may not send back values for None params. See #9027
+ RESOURCE = {"name": "foo", "parameterType": {"type": "INT64"}}
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.type_, "INT64")
+ self.assertIs(param.value, None)
+
+ def test_to_api_repr_w_name(self):
+ EXPECTED = {
+ "name": "foo",
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": "123"},
+ }
+ param = self._make_one(name="foo", type_="INT64", value=123)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_wo_name(self):
+ EXPECTED = {
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": "123"},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="INT64", value=123)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_float(self):
+ EXPECTED = {
+ "parameterType": {"type": "FLOAT64"},
+ "parameterValue": {"value": 12.345},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="FLOAT64", value=12.345)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_numeric(self):
+ EXPECTED = {
+ "parameterType": {"type": "NUMERIC"},
+ "parameterValue": {"value": "123456789.123456789"},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="NUMERIC", value="123456789.123456789")
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_bignumeric(self):
+ big_num_string = "{d38}.{d38}".format(d38="9" * 38)
+ EXPECTED = {
+ "parameterType": {"type": "BIGNUMERIC"},
+ "parameterValue": {"value": big_num_string},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="BIGNUMERIC", value=big_num_string)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_bool(self):
+ EXPECTED = {
+ "parameterType": {"type": "BOOL"},
+ "parameterValue": {"value": "false"},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="BOOL", value=False)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_timestamp_datetime(self):
+ from google.cloud._helpers import UTC
+
+ STAMP = "2016-12-20 15:58:27.339328+00:00"
+ when = datetime.datetime(2016, 12, 20, 15, 58, 27, 339328, tzinfo=UTC)
+ EXPECTED = {
+ "parameterType": {"type": "TIMESTAMP"},
+ "parameterValue": {"value": STAMP},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="TIMESTAMP", value=when)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_timestamp_micros(self):
+ from google.cloud._helpers import _microseconds_from_datetime
+
+ now = datetime.datetime.utcnow()
+ seconds = _microseconds_from_datetime(now) / 1.0e6
+ EXPECTED = {
+ "parameterType": {"type": "TIMESTAMP"},
+ "parameterValue": {"value": seconds},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="TIMESTAMP", value=seconds)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_datetime_datetime(self):
+ from google.cloud._helpers import _datetime_to_rfc3339
+
+ now = datetime.datetime.utcnow()
+ EXPECTED = {
+ "parameterType": {"type": "DATETIME"},
+ "parameterValue": {
+ "value": _datetime_to_rfc3339(now)[:-1] # strip trailing 'Z'
+ },
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="DATETIME", value=now)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_datetime_string(self):
+ from google.cloud._helpers import _datetime_to_rfc3339
+
+ now = datetime.datetime.utcnow()
+ now_str = _datetime_to_rfc3339(now)
+ EXPECTED = {
+ "parameterType": {"type": "DATETIME"},
+ "parameterValue": {"value": now_str},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="DATETIME", value=now_str)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_date_date(self):
+ today = datetime.date.today()
+ EXPECTED = {
+ "parameterType": {"type": "DATE"},
+ "parameterValue": {"value": today.isoformat()},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="DATE", value=today)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_date_string(self):
+ today = datetime.date.today()
+ today_str = (today.isoformat(),)
+ EXPECTED = {
+ "parameterType": {"type": "DATE"},
+ "parameterValue": {"value": today_str},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="DATE", value=today_str)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_unknown_type(self):
+ EXPECTED = {
+ "parameterType": {"type": "UNKNOWN"},
+ "parameterValue": {"value": "unknown"},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(type_="UNKNOWN", value="unknown")
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test___eq___wrong_type(self):
+ field = self._make_one("test", "STRING", "value")
+ other = object()
+ self.assertNotEqual(field, other)
+ self.assertEqual(field, mock.ANY)
+
+ def test___eq___name_mismatch(self):
+ field = self._make_one("test", "STRING", "value")
+ other = self._make_one("other", "STRING", "value")
+ self.assertNotEqual(field, other)
+
+ def test___eq___field_type_mismatch(self):
+ field = self._make_one("test", "STRING", None)
+ other = self._make_one("test", "INT64", None)
+ self.assertNotEqual(field, other)
+
+ def test___eq___value_mismatch(self):
+ field = self._make_one("test", "STRING", "hello")
+ other = self._make_one("test", "STRING", "world")
+ self.assertNotEqual(field, other)
+
+ def test___eq___hit(self):
+ field = self._make_one("test", "STRING", "gotcha")
+ other = self._make_one("test", "STRING", "gotcha")
+ self.assertEqual(field, other)
+
+ def test___ne___wrong_type(self):
+ field = self._make_one("toast", "INT64", 13)
+ other = object()
+ self.assertNotEqual(field, other)
+ self.assertEqual(field, mock.ANY)
+
+ def test___ne___same_value(self):
+ field1 = self._make_one("test", "INT64", 12)
+ field2 = self._make_one("test", "INT64", 12)
+ # unittest ``assertEqual`` uses ``==`` not ``!=``.
+ comparison_val = field1 != field2
+ self.assertFalse(comparison_val)
+
+ def test___ne___different_values(self):
+ field1 = self._make_one("test", "INT64", 11)
+ field2 = self._make_one("test", "INT64", 12)
+ self.assertNotEqual(field1, field2)
+
+ def test___repr__(self):
+ field1 = self._make_one("field1", "STRING", "value")
+ expected = "ScalarQueryParameter('field1', 'STRING', 'value')"
+ self.assertEqual(repr(field1), expected)
+
+
+class Test_RangeQueryParameter(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import RangeQueryParameter
+
+ return RangeQueryParameter
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="DATE")
+ param = self._make_one(
+ range_element_type="DATE", start="2016-08-11", name="foo"
+ )
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, "2016-08-11")
+ self.assertIs(param.end, None)
+
+ def test_ctor_w_datetime_query_parameter_type_str(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="DATETIME")
+ start_datetime = datetime.datetime(year=2020, month=12, day=31, hour=12)
+ end_datetime = datetime.datetime(year=2021, month=12, day=31, hour=12)
+ param = self._make_one(
+ range_element_type="DATETIME",
+ start=start_datetime,
+ end=end_datetime,
+ name="foo",
+ )
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, start_datetime)
+ self.assertEqual(param.end, end_datetime)
+ self.assertEqual(param.name, "foo")
+
+ def test_ctor_w_datetime_query_parameter_type_type(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="DATETIME")
+ param = self._make_one(range_element_type=range_element_type)
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, None)
+ self.assertEqual(param.end, None)
+ self.assertEqual(param.name, None)
+
+ def test_ctor_w_timestamp_query_parameter_typ_str(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="TIMESTAMP")
+ start_datetime = datetime.datetime(year=2020, month=12, day=31, hour=12)
+ end_datetime = datetime.datetime(year=2021, month=12, day=31, hour=12)
+ param = self._make_one(
+ range_element_type="TIMESTAMP",
+ start=start_datetime,
+ end=end_datetime,
+ name="foo",
+ )
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, start_datetime)
+ self.assertEqual(param.end, end_datetime)
+ self.assertEqual(param.name, "foo")
+
+ def test_ctor_w_timestamp_query_parameter_type_type(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="TIMESTAMP")
+ param = self._make_one(range_element_type=range_element_type)
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, None)
+ self.assertEqual(param.end, None)
+ self.assertEqual(param.name, None)
+
+ def test_ctor_w_date_query_parameter_type_str(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="DATE")
+ start_date = datetime.date(year=2020, month=12, day=31)
+ end_date = datetime.date(year=2021, month=12, day=31)
+ param = self._make_one(
+ range_element_type="DATE",
+ start=start_date,
+ end=end_date,
+ name="foo",
+ )
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, start_date)
+ self.assertEqual(param.end, end_date)
+ self.assertEqual(param.name, "foo")
+
+ def test_ctor_w_date_query_parameter_type_type(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="DATE")
+ param = self._make_one(range_element_type=range_element_type)
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, None)
+ self.assertEqual(param.end, None)
+ self.assertEqual(param.name, None)
+
+ def test_ctor_w_name_empty_str(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="DATE")
+ param = self._make_one(
+ range_element_type="DATE",
+ name="",
+ )
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertIs(param.start, None)
+ self.assertIs(param.end, None)
+ self.assertEqual(param.name, "")
+
+ def test_ctor_wo_value(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="DATETIME")
+ param = self._make_one(range_element_type="DATETIME", name="foo")
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertIs(param.start, None)
+ self.assertIs(param.end, None)
+ self.assertEqual(param.name, "foo")
+
+ def test_ctor_w_unsupported_query_parameter_type_str(self):
+ with self.assertRaises(ValueError):
+ self._make_one(range_element_type="TIME", name="foo")
+
+ def test_ctor_w_unsupported_query_parameter_type_type(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="DATE")
+ range_element_type.type_._type = "TIME"
+ with self.assertRaises(ValueError):
+ self._make_one(range_element_type=range_element_type, name="foo")
+
+ def test_ctor_w_unsupported_query_parameter_type_input(self):
+ with self.assertRaises(ValueError):
+ self._make_one(range_element_type=None, name="foo")
+
+ def test_positional(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ range_element_type = RangeQueryParameterType(type_="DATE")
+ klass = self._get_target_class()
+ param = klass.positional(
+ range_element_type="DATE", start="2016-08-11", end="2016-08-12"
+ )
+ self.assertIs(param.name, None)
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, "2016-08-11")
+ self.assertEqual(param.end, "2016-08-12")
+
+ def test_from_api_repr_w_name(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "DATE",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {"start": {"value": None}, "end": {"value": "2020-12-31"}}
+ },
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ range_element_type = RangeQueryParameterType(type_="DATE")
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, None)
+ self.assertEqual(param.end, "2020-12-31")
+
+ def test_from_api_repr_wo_name(self):
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ RESOURCE = {
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "DATE",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {"start": {"value": None}, "end": {"value": "2020-12-31"}}
+ },
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ range_element_type = RangeQueryParameterType(type_="DATE")
+ self.assertEqual(param.name, None)
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertEqual(param.start, None)
+ self.assertEqual(param.end, "2020-12-31")
+
+ def test_from_api_repr_wo_value(self):
+ # Back-end may not send back values for None params. See #9027
+ from google.cloud.bigquery.query import RangeQueryParameterType
+
+ RESOURCE = {
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "DATE",
+ },
+ },
+ }
+ range_element_type = RangeQueryParameterType(type_="DATE")
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertIs(param.name, None)
+ self.assertEqual(param.range_element_type, range_element_type)
+ self.assertIs(param.start, None)
+ self.assertIs(param.end, None)
+
+ def test_to_api_repr_w_name(self):
+ EXPECTED = {
+ "name": "foo",
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "DATE",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {"start": {"value": None}, "end": {"value": "2016-08-11"}}
+ },
+ }
+ param = self._make_one(range_element_type="DATE", end="2016-08-11", name="foo")
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_wo_name(self):
+ EXPECTED = {
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "DATE",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {"start": {"value": None}, "end": {"value": "2016-08-11"}}
+ },
+ }
+ klass = self._get_target_class()
+ param = klass.positional(range_element_type="DATE", end="2016-08-11")
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_date_date(self):
+ today = datetime.date.today()
+ today_str = today.strftime("%Y-%m-%d")
+ EXPECTED = {
+ "name": "foo",
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "DATE",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {"start": {"value": None}, "end": {"value": today_str}}
+ },
+ }
+ param = self._make_one(range_element_type="DATE", end=today, name="foo")
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_datetime_str(self):
+ EXPECTED = {
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "DATETIME",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {
+ "start": {"value": None},
+ "end": {"value": "2020-01-01T12:00:00.000000"},
+ }
+ },
+ }
+ klass = self._get_target_class()
+ end_datetime = datetime.datetime(year=2020, month=1, day=1, hour=12)
+ param = klass.positional(range_element_type="DATETIME", end=end_datetime)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_datetime_datetime(self):
+ from google.cloud.bigquery._helpers import _RFC3339_MICROS_NO_ZULU
+
+ now = datetime.datetime.utcnow()
+ now_str = now.strftime(_RFC3339_MICROS_NO_ZULU)
+ EXPECTED = {
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "DATETIME",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {"start": {"value": None}, "end": {"value": now_str}}
+ },
+ }
+ klass = self._get_target_class()
+ param = klass.positional(range_element_type="DATETIME", end=now)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_timestamp_str(self):
+ EXPECTED = {
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "TIMESTAMP",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {
+ "start": {"value": None},
+ "end": {"value": "2020-01-01 12:00:00+00:00"},
+ }
+ },
+ }
+ klass = self._get_target_class()
+ end_timestamp = datetime.datetime(year=2020, month=1, day=1, hour=12)
+ param = klass.positional(range_element_type="TIMESTAMP", end=end_timestamp)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_timestamp_timestamp(self):
+ from google.cloud._helpers import UTC # type: ignore
+
+ now = datetime.datetime.utcnow()
+ now = now.astimezone(UTC)
+ now_str = str(now)
+ EXPECTED = {
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "TIMESTAMP",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {"start": {"value": None}, "end": {"value": now_str}}
+ },
+ }
+ klass = self._get_target_class()
+ param = klass.positional(range_element_type="TIMESTAMP", end=now)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_wo_values(self):
+ EXPECTED = {
+ "name": "foo",
+ "parameterType": {
+ "type": "RANGE",
+ "rangeElementType": {
+ "type": "DATE",
+ },
+ },
+ "parameterValue": {
+ "rangeValue": {"start": {"value": None}, "end": {"value": None}}
+ },
+ }
+ param = self._make_one(range_element_type="DATE", name="foo")
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_unsupported_value_type(self):
+ with self.assertRaisesRegex(
+ ValueError, "Cannot convert range element value from type"
+ ):
+ range_param = self._make_one(
+ range_element_type="DATE", start=datetime.date.today()
+ )
+ range_param.range_element_type.type_._type = "LONG"
+ range_param.to_api_repr()
+
+ def test___eq__(self):
+ param = self._make_one(
+ range_element_type="DATE", start="2016-08-11", name="foo"
+ )
+ self.assertEqual(param, param)
+ self.assertNotEqual(param, object())
+ alias = self._make_one(
+ range_element_type="DATE", start="2016-08-11", name="bar"
+ )
+ self.assertNotEqual(param, alias)
+ wrong_type = self._make_one(
+ range_element_type="DATETIME",
+ start="2020-12-31 12:00:00.000000",
+ name="foo",
+ )
+ self.assertNotEqual(param, wrong_type)
+ wrong_val = self._make_one(
+ range_element_type="DATE", start="2016-08-12", name="foo"
+ )
+ self.assertNotEqual(param, wrong_val)
+
+ def test___eq___wrong_type(self):
+ param = self._make_one(
+ range_element_type="DATE", start="2016-08-11", name="foo"
+ )
+ other = object()
+ self.assertNotEqual(param, other)
+ self.assertEqual(param, mock.ANY)
+
+ def test___eq___name_mismatch(self):
+ param = self._make_one(
+ range_element_type="DATE", start="2016-08-11", name="foo"
+ )
+ other = self._make_one(
+ range_element_type="DATE", start="2016-08-11", name="bar"
+ )
+ self.assertNotEqual(param, other)
+
+ def test___eq___field_type_mismatch(self):
+ param = self._make_one(range_element_type="DATE")
+ other = self._make_one(range_element_type="DATETIME")
+ self.assertNotEqual(param, other)
+
+ def test___eq___value_mismatch(self):
+ param = self._make_one(range_element_type="DATE", start="2016-08-11")
+ other = self._make_one(range_element_type="DATE", start="2016-08-12")
+ self.assertNotEqual(param, other)
+
+ def test___eq___hit(self):
+ param = self._make_one(range_element_type="DATE", start="2016-08-12")
+ other = self._make_one(range_element_type="DATE", start="2016-08-12")
+ self.assertEqual(param, other)
+
+ def test___ne___wrong_type(self):
+ param = self._make_one(range_element_type="DATE")
+ other = object()
+ self.assertNotEqual(param, other)
+ self.assertEqual(param, mock.ANY)
+
+ def test___ne___same_value(self):
+ param1 = self._make_one(range_element_type="DATE")
+ param2 = self._make_one(range_element_type="DATE")
+ # unittest ``assertEqual`` uses ``==`` not ``!=``.
+ comparison_val = param1 != param2
+ self.assertFalse(comparison_val)
+
+ def test___ne___different_values(self):
+ param1 = self._make_one(range_element_type="DATE", start="2016-08-12")
+ param2 = self._make_one(range_element_type="DATE")
+ self.assertNotEqual(param1, param2)
+
+ def test___repr__(self):
+ param1 = self._make_one(range_element_type="DATE", start="2016-08-12")
+ expected = "RangeQueryParameter(None, {'type': 'RANGE', 'rangeElementType': {'type': 'DATE'}}, '2016-08-12', None)"
+ self.assertEqual(repr(param1), expected)
+
+
+def _make_subparam(name, type_, value):
+ from google.cloud.bigquery.query import ScalarQueryParameter
+
+ return ScalarQueryParameter(name, type_, value)
+
+
+class Test_ArrayQueryParameter(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import ArrayQueryParameter
+
+ return ArrayQueryParameter
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor(self):
+ param = self._make_one(name="foo", array_type="INT64", values=[1, 2])
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.array_type, "INT64")
+ self.assertEqual(param.values, [1, 2])
+
+ def test_ctor_empty_struct_array_wo_type_info(self):
+ with self.assertRaisesRegex(ValueError, r"(?i)missing.*struct.*type info.*"):
+ self._make_one(name="foo", array_type="STRUCT", values=[])
+
+ def test___eq__(self):
+ param = self._make_one(name="foo", array_type="INT64", values=[123])
+ self.assertEqual(param, param)
+ self.assertNotEqual(param, object())
+ alias = self._make_one(name="bar", array_type="INT64", values=[123])
+ self.assertNotEqual(param, alias)
+ wrong_type = self._make_one(name="foo", array_type="FLOAT64", values=[123.0])
+ self.assertNotEqual(param, wrong_type)
+ wrong_val = self._make_one(name="foo", array_type="INT64", values=[234])
+ self.assertNotEqual(param, wrong_val)
+
+ def test_positional(self):
+ klass = self._get_target_class()
+ param = klass.positional(array_type="INT64", values=[1, 2])
+ self.assertEqual(param.name, None)
+ self.assertEqual(param.array_type, "INT64")
+ self.assertEqual(param.values, [1, 2])
+
+ def test_from_api_repr_w_name(self):
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ "parameterValue": {"arrayValues": [{"value": "1"}, {"value": "2"}]},
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.array_type, "INT64")
+ self.assertEqual(param.values, [1, 2])
+
+ def test_from_api_repr_wo_name(self):
+ RESOURCE = {
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ "parameterValue": {"arrayValues": [{"value": "1"}, {"value": "2"}]},
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(param.name, None)
+ self.assertEqual(param.array_type, "INT64")
+ self.assertEqual(param.values, [1, 2])
+
+ def test_from_api_repr_wo_values(self):
+ # Back-end may not send back values for empty array params. See #7309
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.array_type, "INT64")
+ self.assertEqual(param.values, [])
+
+ def test_from_api_repr_w_none_values(self):
+ RESOURCE = {
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ "parameterValue": {"arrayValues": [{"value": "1"}, {"value": None}]},
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(param.array_type, "INT64")
+ self.assertEqual(param.values, [1, None])
+
+ def test_from_api_repr_w_struct_type(self):
+ from google.cloud.bigquery.query import StructQueryParameter
+
+ RESOURCE = {
+ "parameterType": {
+ "type": "ARRAY",
+ "arrayType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "name", "type": {"type": "STRING"}},
+ {"name": "age", "type": {"type": "INT64"}},
+ ],
+ },
+ },
+ "parameterValue": {
+ "arrayValues": [
+ {
+ "structValues": {
+ "name": {"value": "Phred Phlyntstone"},
+ "age": {"value": "32"},
+ }
+ },
+ {
+ "structValues": {
+ "name": {"value": "Bharney Rhubbyl"},
+ "age": {"value": "31"},
+ }
+ },
+ ]
+ },
+ }
+
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+
+ phred = StructQueryParameter.positional(
+ _make_subparam("name", "STRING", "Phred Phlyntstone"),
+ _make_subparam("age", "INT64", 32),
+ )
+ bharney = StructQueryParameter.positional(
+ _make_subparam("name", "STRING", "Bharney Rhubbyl"),
+ _make_subparam("age", "INT64", 31),
+ )
+ self.assertEqual(param.array_type, "STRUCT")
+ self.assertEqual(param.values, [phred, bharney])
+
+ def test_to_api_repr_w_name(self):
+ EXPECTED = {
+ "name": "foo",
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ "parameterValue": {"arrayValues": [{"value": "1"}, {"value": "2"}]},
+ }
+ param = self._make_one(name="foo", array_type="INT64", values=[1, 2])
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_wo_name(self):
+ EXPECTED = {
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ "parameterValue": {"arrayValues": [{"value": "1"}, {"value": "2"}]},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(array_type="INT64", values=[1, 2])
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_array_type_as_type_instance(self):
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+
+ EXPECTED = {
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "BOOLEAN"}},
+ "parameterValue": {"arrayValues": [{"value": "true"}, {"value": "false"}]},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(
+ array_type=ScalarQueryParameterType("BOOLEAN"),
+ values=[True, False],
+ )
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_unknown_type(self):
+ EXPECTED = {
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "UNKNOWN"}},
+ "parameterValue": {"arrayValues": [{"value": "unknown"}]},
+ }
+ klass = self._get_target_class()
+ param = klass.positional(array_type="UNKNOWN", values=["unknown"])
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_record_type(self):
+ from google.cloud.bigquery.query import StructQueryParameter
+
+ EXPECTED = {
+ "parameterType": {
+ "type": "ARRAY",
+ "arrayType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "foo", "type": {"type": "STRING"}},
+ {"name": "bar", "type": {"type": "INT64"}},
+ ],
+ },
+ },
+ "parameterValue": {
+ "arrayValues": [
+ {"structValues": {"foo": {"value": "Foo"}, "bar": {"value": "123"}}}
+ ]
+ },
+ }
+ one = _make_subparam("foo", "STRING", "Foo")
+ another = _make_subparam("bar", "INT64", 123)
+ struct = StructQueryParameter.positional(one, another)
+ klass = self._get_target_class()
+ param = klass.positional(array_type="RECORD", values=[struct])
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_empty_array_of_records_type(self):
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+ from google.cloud.bigquery.query import StructQueryParameterType
+
+ EXPECTED = {
+ "parameterType": {
+ "type": "ARRAY",
+ "arrayType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "foo", "type": {"type": "STRING"}},
+ {"name": "bar", "type": {"type": "INT64"}},
+ ],
+ },
+ },
+ "parameterValue": {"arrayValues": []},
+ }
+ item_type = StructQueryParameterType(
+ ScalarQueryParameterType("STRING", name="foo"),
+ ScalarQueryParameterType("INT64", name="bar"),
+ )
+ klass = self._get_target_class()
+ param = klass.positional(array_type=item_type, values=[])
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test___eq___wrong_type(self):
+ field = self._make_one("test", "STRING", ["value"])
+ other = object()
+ self.assertNotEqual(field, other)
+ self.assertEqual(field, mock.ANY)
+
+ def test___eq___name_mismatch(self):
+ field = self._make_one("field", "STRING", ["value"])
+ other = self._make_one("other", "STRING", ["value"])
+ self.assertNotEqual(field, other)
+
+ def test___eq___field_type_mismatch(self):
+ field = self._make_one("test", "STRING", [])
+ other = self._make_one("test", "INT64", [])
+ self.assertNotEqual(field, other)
+
+ def test___eq___value_mismatch(self):
+ field = self._make_one("test", "STRING", ["hello"])
+ other = self._make_one("test", "STRING", ["hello", "world"])
+ self.assertNotEqual(field, other)
+
+ def test___eq___hit(self):
+ field = self._make_one("test", "STRING", ["gotcha"])
+ other = self._make_one("test", "STRING", ["gotcha"])
+ self.assertEqual(field, other)
+
+ def test___ne___wrong_type(self):
+ field = self._make_one("toast", "INT64", [13])
+ other = object()
+ self.assertNotEqual(field, other)
+ self.assertEqual(field, mock.ANY)
+
+ def test___ne___same_value(self):
+ field1 = self._make_one("test", "INT64", [12])
+ field2 = self._make_one("test", "INT64", [12])
+ # unittest ``assertEqual`` uses ``==`` not ``!=``.
+ comparison_val = field1 != field2
+ self.assertFalse(comparison_val)
+
+ def test___ne___different_values(self):
+ field1 = self._make_one("test", "INT64", [11])
+ field2 = self._make_one("test", "INT64", [12])
+ self.assertNotEqual(field1, field2)
+
+ def test___repr__array_type_str(self):
+ field1 = self._make_one("field1", "STRING", ["value"])
+ expected = "ArrayQueryParameter('field1', 'STRING', ['value'])"
+ self.assertEqual(repr(field1), expected)
+
+ def test___repr__array_type_scalar_type_instance(self):
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+
+ int_items = self._make_one(
+ "int_items", ScalarQueryParameterType("INTEGER"), [64]
+ )
+ expected = "ArrayQueryParameter('int_items', 'INTEGER', [64])"
+ self.assertEqual(repr(int_items), expected)
+
+ def test___repr__array_type_struct_type_instance(self):
+ from google.cloud.bigquery.query import ScalarQueryParameterType
+ from google.cloud.bigquery.query import StructQueryParameterType
+
+ struct_items = self._make_one(
+ "struct_items",
+ StructQueryParameterType(
+ ScalarQueryParameterType("INTEGER", name="age"),
+ ScalarQueryParameterType("STRING", name="last_name"),
+ ),
+ [{"age": 18, "last_name": "Doe"}],
+ )
+ expected = (
+ "ArrayQueryParameter('struct_items', 'STRUCT', "
+ "[{'age': 18, 'last_name': 'Doe'}])"
+ )
+ self.assertEqual(repr(struct_items), expected)
+
+
+class Test_StructQueryParameter(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import StructQueryParameter
+
+ return StructQueryParameter
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor(self):
+ sub_1 = _make_subparam("bar", "INT64", 123)
+ sub_2 = _make_subparam("baz", "STRING", "abc")
+ param = self._make_one("foo", sub_1, sub_2)
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.struct_types, {"bar": "INT64", "baz": "STRING"})
+ self.assertEqual(param.struct_values, {"bar": 123, "baz": "abc"})
+
+ def test___eq__(self):
+ sub_1 = _make_subparam("bar", "INT64", 123)
+ sub_2 = _make_subparam("baz", "STRING", "abc")
+ sub_3 = _make_subparam("baz", "STRING", "def")
+ sub_1_float = _make_subparam("bar", "FLOAT64", 123.0)
+ param = self._make_one("foo", sub_1, sub_2)
+ self.assertEqual(param, param)
+ self.assertNotEqual(param, object())
+ alias = self._make_one("bar", sub_1, sub_2)
+ self.assertNotEqual(param, alias)
+ wrong_type = self._make_one("foo", sub_1_float, sub_2)
+ self.assertNotEqual(param, wrong_type)
+ wrong_val = self._make_one("foo", sub_2, sub_3)
+ self.assertNotEqual(param, wrong_val)
+
+ def test_positional(self):
+ sub_1 = _make_subparam("bar", "INT64", 123)
+ sub_2 = _make_subparam("baz", "STRING", "abc")
+ klass = self._get_target_class()
+ param = klass.positional(sub_1, sub_2)
+ self.assertEqual(param.name, None)
+ self.assertEqual(param.struct_types, {"bar": "INT64", "baz": "STRING"})
+ self.assertEqual(param.struct_values, {"bar": 123, "baz": "abc"})
+
+ def test_from_api_repr_w_name(self):
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "bar", "type": {"type": "INT64"}},
+ {"name": "baz", "type": {"type": "STRING"}},
+ ],
+ },
+ "parameterValue": {
+ "structValues": {"bar": {"value": 123}, "baz": {"value": "abc"}}
+ },
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.struct_types, {"bar": "INT64", "baz": "STRING"})
+ self.assertEqual(param.struct_values, {"bar": 123, "baz": "abc"})
+
+ def test_from_api_repr_wo_name(self):
+ RESOURCE = {
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "bar", "type": {"type": "INT64"}},
+ {"name": "baz", "type": {"type": "STRING"}},
+ ],
+ },
+ "parameterValue": {
+ "structValues": {"bar": {"value": 123}, "baz": {"value": "abc"}}
+ },
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(param.name, None)
+ self.assertEqual(param.struct_types, {"bar": "INT64", "baz": "STRING"})
+ self.assertEqual(param.struct_values, {"bar": 123, "baz": "abc"})
+
+ def test_from_api_repr_w_nested_array(self):
+ from google.cloud.bigquery.query import ArrayQueryParameter
+
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "bar", "type": {"type": "STRING"}},
+ {
+ "name": "baz",
+ "type": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ },
+ ],
+ },
+ "parameterValue": {
+ "structValues": {
+ "bar": {"value": "abc"},
+ "baz": {"arrayValues": [{"value": "123"}, {"value": "456"}]},
+ }
+ },
+ }
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+ self.assertEqual(
+ param,
+ self._make_one(
+ "foo",
+ _make_subparam("bar", "STRING", "abc"),
+ ArrayQueryParameter("baz", "INT64", [123, 456]),
+ ),
+ )
+
+ def test_from_api_repr_w_nested_struct(self):
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "bar", "type": {"type": "STRING"}},
+ {
+ "name": "baz",
+ "type": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "qux", "type": {"type": "INT64"}},
+ {"name": "spam", "type": {"type": "BOOL"}},
+ ],
+ },
+ },
+ ],
+ },
+ "parameterValue": {
+ "structValues": {
+ "bar": {"value": "abc"},
+ "baz": {
+ "structValues": {
+ "qux": {"value": "123"},
+ "spam": {"value": "true"},
+ }
+ },
+ }
+ },
+ }
+
+ klass = self._get_target_class()
+ param = klass.from_api_repr(RESOURCE)
+
+ expected = self._make_one(
+ "foo",
+ _make_subparam("bar", "STRING", "abc"),
+ self._make_one(
+ "baz",
+ _make_subparam("qux", "INT64", 123),
+ _make_subparam("spam", "BOOL", True),
+ ),
+ )
+ self.assertEqual(param.name, "foo")
+ self.assertEqual(param.struct_types, expected.struct_types)
+ self.assertEqual(param.struct_values, expected.struct_values)
+
+ def test_to_api_repr_w_name(self):
+ EXPECTED = {
+ "name": "foo",
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "bar", "type": {"type": "INT64"}},
+ {"name": "baz", "type": {"type": "STRING"}},
+ ],
+ },
+ "parameterValue": {
+ "structValues": {"bar": {"value": "123"}, "baz": {"value": "abc"}}
+ },
+ }
+ sub_1 = _make_subparam("bar", "INT64", 123)
+ sub_2 = _make_subparam("baz", "STRING", "abc")
+ param = self._make_one("foo", sub_1, sub_2)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_wo_name(self):
+ EXPECTED = {
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "bar", "type": {"type": "INT64"}},
+ {"name": "baz", "type": {"type": "STRING"}},
+ ],
+ },
+ "parameterValue": {
+ "structValues": {"bar": {"value": "123"}, "baz": {"value": "abc"}}
+ },
+ }
+ sub_1 = _make_subparam("bar", "INT64", 123)
+ sub_2 = _make_subparam("baz", "STRING", "abc")
+ klass = self._get_target_class()
+ param = klass.positional(sub_1, sub_2)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_nested_array(self):
+ from google.cloud.bigquery.query import ArrayQueryParameter
+
+ EXPECTED = {
+ "name": "foo",
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "bar", "type": {"type": "STRING"}},
+ {
+ "name": "baz",
+ "type": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ },
+ ],
+ },
+ "parameterValue": {
+ "structValues": {
+ "bar": {"value": "abc"},
+ "baz": {"arrayValues": [{"value": "123"}, {"value": "456"}]},
+ }
+ },
+ }
+ scalar = _make_subparam("bar", "STRING", "abc")
+ array = ArrayQueryParameter("baz", "INT64", [123, 456])
+ param = self._make_one("foo", scalar, array)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_nested_struct(self):
+ EXPECTED = {
+ "name": "foo",
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "bar", "type": {"type": "STRING"}},
+ {
+ "name": "baz",
+ "type": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "qux", "type": {"type": "INT64"}},
+ {"name": "spam", "type": {"type": "BOOL"}},
+ ],
+ },
+ },
+ ],
+ },
+ "parameterValue": {
+ "structValues": {
+ "bar": {"value": "abc"},
+ "baz": {
+ "structValues": {
+ "qux": {"value": "123"},
+ "spam": {"value": "true"},
+ }
+ },
+ }
+ },
+ }
+ scalar_1 = _make_subparam("bar", "STRING", "abc")
+ scalar_2 = _make_subparam("qux", "INT64", 123)
+ scalar_3 = _make_subparam("spam", "BOOL", True)
+ sub = self._make_one("baz", scalar_2, scalar_3)
+ param = self._make_one("foo", scalar_1, sub)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test_to_api_repr_w_unknown_type(self):
+ EXPECTED = {
+ "name": "foo",
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "bar", "type": {"type": "INT64"}},
+ {"name": "baz", "type": {"type": "UNKNOWN_TYPE"}},
+ ],
+ },
+ "parameterValue": {
+ "structValues": {"bar": {"value": "123"}, "baz": {"value": "abc"}}
+ },
+ }
+ sub_1 = _make_subparam("bar", "INT64", 123)
+ sub_2 = _make_subparam("baz", "UNKNOWN_TYPE", "abc")
+ param = self._make_one("foo", sub_1, sub_2)
+ self.assertEqual(param.to_api_repr(), EXPECTED)
+
+ def test___eq___wrong_type(self):
+ field = self._make_one("test", _make_subparam("bar", "STRING", "abc"))
+ other = object()
+ self.assertNotEqual(field, other)
+ self.assertEqual(field, mock.ANY)
+
+ def test___eq___name_mismatch(self):
+ field = self._make_one("test", _make_subparam("bar", "STRING", "abc"))
+ other = self._make_one("other ", _make_subparam("bar", "STRING", "abc"))
+ self.assertNotEqual(field, other)
+
+ def test___eq___field_type_mismatch(self):
+ field = self._make_one("test", _make_subparam("bar", "STRING", None))
+ other = self._make_one("test", _make_subparam("bar", "INT64", None))
+ self.assertNotEqual(field, other)
+
+ def test___eq___value_mismatch(self):
+ field = self._make_one("test", _make_subparam("bar", "STRING", "hello"))
+ other = self._make_one("test", _make_subparam("bar", "STRING", "world"))
+ self.assertNotEqual(field, other)
+
+ def test___eq___hit(self):
+ field = self._make_one("test", _make_subparam("bar", "STRING", "gotcha"))
+ other = self._make_one("test", _make_subparam("bar", "STRING", "gotcha"))
+ self.assertEqual(field, other)
+
+ def test___ne___wrong_type(self):
+ field = self._make_one("test", _make_subparam("bar", "STRING", "hello"))
+ other = object()
+ self.assertNotEqual(field, other)
+ self.assertEqual(field, mock.ANY)
+
+ def test___ne___same_value(self):
+ field1 = self._make_one("test", _make_subparam("bar", "STRING", "hello"))
+ field2 = self._make_one("test", _make_subparam("bar", "STRING", "hello"))
+ # unittest ``assertEqual`` uses ``==`` not ``!=``.
+ comparison_val = field1 != field2
+ self.assertFalse(comparison_val)
+
+ def test___ne___different_values(self):
+ field1 = self._make_one("test", _make_subparam("bar", "STRING", "hello"))
+ field2 = self._make_one("test", _make_subparam("bar", "STRING", "world"))
+ self.assertNotEqual(field1, field2)
+
+ def test___repr__(self):
+ field1 = self._make_one("test", _make_subparam("field1", "STRING", "hello"))
+ got = repr(field1)
+ self.assertIn("StructQueryParameter", got)
+ self.assertIn("'field1': 'hello'", got)
+
+
+class Test_QueryResults(unittest.TestCase):
+ PROJECT = "project"
+ JOB_ID = "test-synchronous-query"
+ TOKEN = "TOKEN"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.query import _QueryResults
+
+ return _QueryResults
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def _make_resource(self):
+ return {"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}}
+
+ def _verifySchema(self, query, resource):
+ from google.cloud.bigquery.schema import SchemaField
+
+ if "schema" in resource:
+ fields = resource["schema"]["fields"]
+ self.assertEqual(len(query.schema), len(fields))
+ for found, expected in zip(query.schema, fields):
+ self.assertIsInstance(found, SchemaField)
+ self.assertEqual(found.name, expected["name"])
+ self.assertEqual(found.field_type, expected["type"])
+ self.assertEqual(found.mode, expected["mode"])
+ self.assertEqual(found.description, expected.get("description"))
+ self.assertEqual(found.fields, expected.get("fields", ()))
+ else:
+ self.assertEqual(query.schema, [])
+
+ def test_ctor_defaults(self):
+ query = self._make_one(self._make_resource())
+ self.assertIsNone(query.cache_hit)
+ self.assertIsNone(query.complete)
+ self.assertIsNone(query.errors)
+ self.assertIsNone(query.page_token)
+ self.assertEqual(query.project, self.PROJECT)
+ self.assertEqual(query.rows, [])
+ self.assertEqual(query.schema, [])
+ self.assertIsNone(query.total_rows)
+ self.assertIsNone(query.total_bytes_processed)
+
+ def test_cache_hit_missing(self):
+ query = self._make_one(self._make_resource())
+ self.assertIsNone(query.cache_hit)
+
+ def test_cache_hit_present(self):
+ resource = self._make_resource()
+ resource["cacheHit"] = True
+ query = self._make_one(resource)
+ self.assertTrue(query.cache_hit)
+
+ def test_complete_missing(self):
+ query = self._make_one(self._make_resource())
+ self.assertIsNone(query.complete)
+
+ def test_complete_present(self):
+ resource = self._make_resource()
+ resource["jobComplete"] = True
+ query = self._make_one(resource)
+ self.assertTrue(query.complete)
+
+ def test_errors_missing(self):
+ query = self._make_one(self._make_resource())
+ self.assertIsNone(query.errors)
+
+ def test_errors_present(self):
+ ERRORS = [{"reason": "testing"}]
+ resource = self._make_resource()
+ resource["errors"] = ERRORS
+ query = self._make_one(resource)
+ self.assertEqual(query.errors, ERRORS)
+
+ def test_job_id_missing(self):
+ query = self._make_one({})
+ self.assertIsNone(query.job_id)
+
+ def test_job_id_broken_job_reference(self):
+ resource = {"jobReference": {"bogus": "BOGUS"}}
+ query = self._make_one(resource)
+ self.assertIsNone(query.job_id)
+
+ def test_job_id_present(self):
+ resource = self._make_resource()
+ resource["jobReference"]["jobId"] = "custom-job"
+ query = self._make_one(resource)
+ self.assertEqual(query.job_id, "custom-job")
+
+ def test_location_missing(self):
+ query = self._make_one({})
+ self.assertIsNone(query.location)
+
+ def test_location_present(self):
+ resource = self._make_resource()
+ resource["jobReference"]["location"] = "test-location"
+ query = self._make_one(resource)
+ self.assertEqual(query.location, "test-location")
+
+ def test_page_token_missing(self):
+ query = self._make_one(self._make_resource())
+ self.assertIsNone(query.page_token)
+
+ def test_page_token_present(self):
+ resource = self._make_resource()
+ resource["pageToken"] = "TOKEN"
+ query = self._make_one(resource)
+ self.assertEqual(query.page_token, "TOKEN")
+
+ def test_query_id_missing(self):
+ query = self._make_one(self._make_resource())
+ self.assertIsNone(query.query_id)
+
+ def test_query_id_present(self):
+ resource = self._make_resource()
+ resource["queryId"] = "test-query-id"
+ query = self._make_one(resource)
+ self.assertEqual(query.query_id, "test-query-id")
+
+ def test_total_rows_present_integer(self):
+ resource = self._make_resource()
+ resource["totalRows"] = 42
+ query = self._make_one(resource)
+ self.assertEqual(query.total_rows, 42)
+
+ def test_total_rows_present_string(self):
+ resource = self._make_resource()
+ resource["totalRows"] = "42"
+ query = self._make_one(resource)
+ self.assertEqual(query.total_rows, 42)
+
+ def test_total_bytes_processed_missing(self):
+ query = self._make_one(self._make_resource())
+ self.assertIsNone(query.total_bytes_processed)
+
+ def test_total_bytes_processed_present_integer(self):
+ resource = self._make_resource()
+ resource["totalBytesProcessed"] = 123456
+ query = self._make_one(resource)
+ self.assertEqual(query.total_bytes_processed, 123456)
+
+ def test_total_bytes_processed_present_string(self):
+ resource = self._make_resource()
+ resource["totalBytesProcessed"] = "123456"
+ query = self._make_one(resource)
+ self.assertEqual(query.total_bytes_processed, 123456)
+
+ def test_num_dml_affected_rows_missing(self):
+ query = self._make_one(self._make_resource())
+ self.assertIsNone(query.num_dml_affected_rows)
+
+ def test_num_dml_affected_rows_present_integer(self):
+ resource = self._make_resource()
+ resource["numDmlAffectedRows"] = 123456
+ query = self._make_one(resource)
+ self.assertEqual(query.num_dml_affected_rows, 123456)
+
+ def test_num_dml_affected_rows_present_string(self):
+ resource = self._make_resource()
+ resource["numDmlAffectedRows"] = "123456"
+ query = self._make_one(resource)
+ self.assertEqual(query.num_dml_affected_rows, 123456)
+
+ def test_schema(self):
+ query = self._make_one(self._make_resource())
+ self._verifySchema(query, self._make_resource())
+ resource = self._make_resource()
+ resource["schema"] = {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQURED"},
+ {"name": "age", "type": "INTEGER", "mode": "REQURED"},
+ ]
+ }
+ query._set_properties(resource)
+ self._verifySchema(query, resource)
+
+
+class Test__query_param_from_api_repr(unittest.TestCase):
+ @staticmethod
+ def _call_fut(resource):
+ from google.cloud.bigquery.query import _query_param_from_api_repr
+
+ return _query_param_from_api_repr(resource)
+
+ def test_w_scalar(self):
+ from google.cloud.bigquery.query import ScalarQueryParameter
+
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {"type": "INT64"},
+ "parameterValue": {"value": "123"},
+ }
+
+ parameter = self._call_fut(RESOURCE)
+
+ self.assertIsInstance(parameter, ScalarQueryParameter)
+ self.assertEqual(parameter.name, "foo")
+ self.assertEqual(parameter.type_, "INT64")
+ self.assertEqual(parameter.value, 123)
+
+ def test_w_scalar_timestamp(self):
+ from google.cloud._helpers import UTC
+ from google.cloud.bigquery.query import ScalarQueryParameter
+
+ RESOURCE = {
+ "name": "zoned",
+ "parameterType": {"type": "TIMESTAMP"},
+ "parameterValue": {"value": "2012-03-04 05:06:07+00:00"},
+ }
+
+ parameter = self._call_fut(RESOURCE)
+
+ self.assertIsInstance(parameter, ScalarQueryParameter)
+ self.assertEqual(parameter.name, "zoned")
+ self.assertEqual(parameter.type_, "TIMESTAMP")
+ self.assertEqual(
+ parameter.value, datetime.datetime(2012, 3, 4, 5, 6, 7, tzinfo=UTC)
+ )
+
+ def test_w_scalar_timestamp_micros(self):
+ from google.cloud._helpers import UTC
+ from google.cloud.bigquery.query import ScalarQueryParameter
+
+ RESOURCE = {
+ "name": "zoned",
+ "parameterType": {"type": "TIMESTAMP"},
+ "parameterValue": {"value": "2012-03-04 05:06:07.250000+00:00"},
+ }
+
+ parameter = self._call_fut(RESOURCE)
+
+ self.assertIsInstance(parameter, ScalarQueryParameter)
+ self.assertEqual(parameter.name, "zoned")
+ self.assertEqual(parameter.type_, "TIMESTAMP")
+ self.assertEqual(
+ parameter.value, datetime.datetime(2012, 3, 4, 5, 6, 7, 250000, tzinfo=UTC)
+ )
+
+ def test_w_array(self):
+ from google.cloud.bigquery.query import ArrayQueryParameter
+
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
+ "parameterValue": {"arrayValues": [{"value": "123"}]},
+ }
+
+ parameter = self._call_fut(RESOURCE)
+
+ self.assertIsInstance(parameter, ArrayQueryParameter)
+ self.assertEqual(parameter.name, "foo")
+ self.assertEqual(parameter.array_type, "INT64")
+ self.assertEqual(parameter.values, [123])
+
+ def test_w_struct(self):
+ from google.cloud.bigquery.query import StructQueryParameter
+
+ RESOURCE = {
+ "name": "foo",
+ "parameterType": {
+ "type": "STRUCT",
+ "structTypes": [
+ {"name": "foo", "type": {"type": "STRING"}},
+ {"name": "bar", "type": {"type": "INT64"}},
+ ],
+ },
+ "parameterValue": {
+ "structValues": {"foo": {"value": "Foo"}, "bar": {"value": "123"}}
+ },
+ }
+
+ parameter = self._call_fut(RESOURCE)
+
+ self.assertIsInstance(parameter, StructQueryParameter)
+ self.assertEqual(parameter.name, "foo")
+ self.assertEqual(parameter.struct_types, {"foo": "STRING", "bar": "INT64"})
+ self.assertEqual(parameter.struct_values, {"foo": "Foo", "bar": 123})
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_retry.py b/testbed/googleapis__python-bigquery/tests/unit/test_retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e533c8497cbf12072672e2b402a355469b1fe90
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_retry.py
@@ -0,0 +1,158 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from unittest import mock
+
+import requests.exceptions
+
+
+class Test_should_retry(unittest.TestCase):
+ def _call_fut(self, exc):
+ from google.cloud.bigquery.retry import _should_retry
+
+ return _should_retry(exc)
+
+ def test_wo_errors_attribute(self):
+ self.assertFalse(self._call_fut(object()))
+
+ def test_w_empty_errors(self):
+ exc = mock.Mock(errors=[], spec=["errors"])
+ self.assertFalse(self._call_fut(exc))
+
+ def test_w_non_matching_reason(self):
+ exc = mock.Mock(errors=[{"reason": "bogus"}], spec=["errors"])
+ self.assertFalse(self._call_fut(exc))
+
+ def test_w_backendError(self):
+ exc = mock.Mock(errors=[{"reason": "backendError"}], spec=["errors"])
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_rateLimitExceeded(self):
+ exc = mock.Mock(errors=[{"reason": "rateLimitExceeded"}], spec=["errors"])
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_connectionerror(self):
+ exc = ConnectionError()
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_requests_connectionerror(self):
+ exc = requests.exceptions.ConnectionError()
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_requests_chunked_encoding_error(self):
+ exc = requests.exceptions.ChunkedEncodingError()
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_requests_connecttimeout(self):
+ exc = requests.exceptions.ConnectTimeout()
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_requests_readtimeout(self):
+ exc = requests.exceptions.ReadTimeout()
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_requests_timeout(self):
+ exc = requests.exceptions.Timeout()
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_auth_transporterror(self):
+ from google.auth.exceptions import TransportError
+
+ exc = TransportError("testing")
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_too_many_requests(self):
+ from google.api_core.exceptions import TooManyRequests
+
+ exc = TooManyRequests("testing")
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_service_unavailable(self):
+ from google.api_core.exceptions import ServiceUnavailable
+
+ exc = ServiceUnavailable("testing")
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_internalError(self):
+ exc = mock.Mock(errors=[{"reason": "internalError"}], spec=["errors"])
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_internal_server_error(self):
+ from google.api_core.exceptions import InternalServerError
+
+ exc = InternalServerError("testing")
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_badGateway(self):
+ exc = mock.Mock(errors=[{"reason": "badGateway"}], spec=["errors"])
+ self.assertTrue(self._call_fut(exc))
+
+ def test_w_unstructured_bad_gateway(self):
+ from google.api_core.exceptions import BadGateway
+
+ exc = BadGateway("testing")
+ self.assertTrue(self._call_fut(exc))
+
+
+def test_DEFAULT_JOB_RETRY_predicate():
+ from google.cloud.bigquery.retry import DEFAULT_JOB_RETRY
+ from google.api_core.exceptions import ClientError
+
+ assert not DEFAULT_JOB_RETRY._predicate(TypeError())
+ assert not DEFAULT_JOB_RETRY._predicate(ClientError("fail"))
+ assert not DEFAULT_JOB_RETRY._predicate(
+ ClientError("fail", errors=[dict(reason="idk")])
+ )
+
+ assert DEFAULT_JOB_RETRY._predicate(
+ ClientError("fail", errors=[dict(reason="rateLimitExceeded")])
+ )
+ assert DEFAULT_JOB_RETRY._predicate(
+ ClientError("fail", errors=[dict(reason="backendError")])
+ )
+
+
+def test_DEFAULT_JOB_RETRY_deadline():
+ from google.cloud.bigquery.retry import DEFAULT_JOB_RETRY, DEFAULT_RETRY
+
+ # Make sure we can retry the job at least once.
+ assert DEFAULT_JOB_RETRY._deadline > DEFAULT_RETRY._deadline
+
+
+def test_DEFAULT_JOB_RETRY_job_rate_limit_exceeded_retry_predicate():
+ """Tests the retry predicate specifically for jobRateLimitExceeded."""
+ from google.cloud.bigquery.retry import DEFAULT_JOB_RETRY
+ from google.api_core.exceptions import ClientError
+
+ # Non-ClientError exceptions should never trigger a retry
+ assert not DEFAULT_JOB_RETRY._predicate(TypeError())
+
+ # ClientError without specific reason shouldn't trigger a retry
+ assert not DEFAULT_JOB_RETRY._predicate(ClientError("fail"))
+
+ # ClientError with generic reason "idk" shouldn't trigger a retry
+ assert not DEFAULT_JOB_RETRY._predicate(
+ ClientError("fail", errors=[dict(reason="idk")])
+ )
+
+ # ClientError with reason "jobRateLimitExceeded" should trigger a retry
+ assert DEFAULT_JOB_RETRY._predicate(
+ ClientError("fail", errors=[dict(reason="jobRateLimitExceeded")])
+ )
+
+ # Other retryable reasons should still work as expected
+ assert DEFAULT_JOB_RETRY._predicate(
+ ClientError("fail", errors=[dict(reason="backendError")])
+ )
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_schema.py b/testbed/googleapis__python-bigquery/tests/unit/test_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..b17cd028116d4da932ebf734ea8bd8bf23ccb9ee
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_schema.py
@@ -0,0 +1,1110 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud import bigquery
+from google.cloud.bigquery.standard_sql import StandardSqlStructType
+from google.cloud.bigquery.schema import PolicyTagList
+import unittest
+from unittest import mock
+
+import pytest
+
+
+class TestSchemaField(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.schema import SchemaField
+
+ return SchemaField
+
+ @staticmethod
+ def _get_standard_sql_data_type_class():
+ from google.cloud.bigquery import standard_sql
+
+ return standard_sql.StandardSqlDataType
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_constructor_defaults(self):
+ field = self._make_one("test", "STRING")
+ self.assertEqual(field.name, "test")
+ self.assertEqual(field.field_type, "STRING")
+ self.assertEqual(field.mode, "NULLABLE")
+ self.assertIsNone(field.description)
+ self.assertEqual(field.fields, ())
+ self.assertIsNone(field.policy_tags)
+ self.assertIsNone(field.default_value_expression)
+
+ def test_constructor_explicit(self):
+ FIELD_DEFAULT_VALUE_EXPRESSION = "This is the default value for this field"
+ field = self._make_one(
+ "test",
+ "STRING",
+ mode="REQUIRED",
+ description="Testing",
+ policy_tags=PolicyTagList(
+ names=(
+ "projects/a/locations/b/taxonomies/c/policyTags/e",
+ "projects/f/locations/g/taxonomies/h/policyTags/i",
+ )
+ ),
+ default_value_expression=FIELD_DEFAULT_VALUE_EXPRESSION,
+ )
+ self.assertEqual(field.name, "test")
+ self.assertEqual(field.field_type, "STRING")
+ self.assertEqual(field.mode, "REQUIRED")
+ self.assertEqual(field.default_value_expression, FIELD_DEFAULT_VALUE_EXPRESSION)
+ self.assertEqual(field.description, "Testing")
+ self.assertEqual(field.fields, ())
+ self.assertEqual(
+ field.policy_tags,
+ PolicyTagList(
+ names=(
+ "projects/a/locations/b/taxonomies/c/policyTags/e",
+ "projects/f/locations/g/taxonomies/h/policyTags/i",
+ )
+ ),
+ )
+
+ def test_constructor_explicit_none(self):
+ field = self._make_one("test", "STRING", description=None, policy_tags=None)
+ self.assertIsNone(field.description)
+ self.assertIsNone(field.policy_tags)
+
+ def test_constructor_subfields(self):
+ sub_field1 = self._make_one("area_code", "STRING")
+ sub_field2 = self._make_one("local_number", "STRING")
+ field = self._make_one(
+ "phone_number", "RECORD", fields=[sub_field1, sub_field2]
+ )
+ self.assertEqual(field.name, "phone_number")
+ self.assertEqual(field.field_type, "RECORD")
+ self.assertEqual(field.mode, "NULLABLE")
+ self.assertIsNone(field.description)
+ self.assertEqual(len(field.fields), 2)
+ self.assertEqual(field.fields[0], sub_field1)
+ self.assertEqual(field.fields[1], sub_field2)
+
+ def test_constructor_range(self):
+ from google.cloud.bigquery.schema import FieldElementType
+
+ field = self._make_one(
+ "test",
+ "RANGE",
+ mode="REQUIRED",
+ description="Testing",
+ range_element_type=FieldElementType("DATETIME"),
+ )
+ self.assertEqual(field.name, "test")
+ self.assertEqual(field.field_type, "RANGE")
+ self.assertEqual(field.mode, "REQUIRED")
+ self.assertEqual(field.description, "Testing")
+ self.assertEqual(field.range_element_type.element_type, "DATETIME")
+
+ def test_constructor_range_str(self):
+ field = self._make_one(
+ "test",
+ "RANGE",
+ mode="REQUIRED",
+ description="Testing",
+ range_element_type="DATETIME",
+ )
+ self.assertEqual(field.name, "test")
+ self.assertEqual(field.field_type, "RANGE")
+ self.assertEqual(field.mode, "REQUIRED")
+ self.assertEqual(field.description, "Testing")
+ self.assertEqual(field.range_element_type.element_type, "DATETIME")
+
+ def test_to_api_repr(self):
+ from google.cloud.bigquery.schema import PolicyTagList
+
+ policy = PolicyTagList(names=("foo", "bar"))
+ self.assertEqual(
+ policy.to_api_repr(),
+ {"names": ["foo", "bar"]},
+ )
+
+ field = self._make_one(
+ "foo", "INTEGER", "NULLABLE", description="hello world", policy_tags=policy
+ )
+ self.assertEqual(
+ field.to_api_repr(),
+ {
+ "mode": "NULLABLE",
+ "name": "foo",
+ "type": "INTEGER",
+ "description": "hello world",
+ "policyTags": {"names": ["foo", "bar"]},
+ },
+ )
+
+ def test_to_api_repr_omits_unset_properties(self):
+ # Prevent accidentally modifying fields that aren't explicitly set.
+ # https://github.com/googleapis/python-bigquery/issues/981
+ field = self._make_one("foo", "INTEGER")
+ resource = field.to_api_repr()
+ self.assertNotIn("description", resource)
+ self.assertNotIn("policyTags", resource)
+
+ def test_to_api_repr_with_subfield(self):
+ for record_type in ("RECORD", "STRUCT"):
+ subfield = self._make_one("bar", "INTEGER", "NULLABLE")
+ field = self._make_one("foo", record_type, "REQUIRED", fields=(subfield,))
+ self.assertEqual(
+ field.to_api_repr(),
+ {
+ "fields": [{"mode": "NULLABLE", "name": "bar", "type": "INTEGER"}],
+ "mode": "REQUIRED",
+ "name": "foo",
+ "type": record_type,
+ },
+ )
+
+ def test_from_api_repr(self):
+ field = self._get_target_class().from_api_repr(
+ {
+ "fields": [{"mode": "nullable", "name": "bar", "type": "integer"}],
+ "mode": "required",
+ "description": "test_description",
+ "name": "foo",
+ "type": "record",
+ }
+ )
+ self.assertEqual(field.name, "foo")
+ self.assertEqual(field.field_type, "RECORD")
+ self.assertEqual(field.mode, "REQUIRED")
+ self.assertEqual(field.description, "test_description")
+ self.assertEqual(len(field.fields), 1)
+ self.assertEqual(field.fields[0].name, "bar")
+ self.assertEqual(field.fields[0].field_type, "INTEGER")
+ self.assertEqual(field.fields[0].mode, "NULLABLE")
+ self.assertEqual(field.range_element_type, None)
+
+ def test_from_api_repr_policy(self):
+ field = self._get_target_class().from_api_repr(
+ {
+ "fields": [{"mode": "nullable", "name": "bar", "type": "integer"}],
+ "name": "foo",
+ "type": "record",
+ "policyTags": {"names": ["one", "two"]},
+ }
+ )
+ self.assertEqual(field.name, "foo")
+ self.assertEqual(field.field_type, "RECORD")
+ self.assertEqual(field.policy_tags.names, ("one", "two"))
+ self.assertEqual(len(field.fields), 1)
+ self.assertEqual(field.fields[0].name, "bar")
+ self.assertEqual(field.fields[0].field_type, "INTEGER")
+ self.assertEqual(field.fields[0].mode, "NULLABLE")
+
+ def test_from_api_repr_range(self):
+ field = self._get_target_class().from_api_repr(
+ {
+ "mode": "nullable",
+ "description": "test_range",
+ "name": "foo",
+ "type": "range",
+ "rangeElementType": {"type": "DATETIME"},
+ }
+ )
+ self.assertEqual(field.name, "foo")
+ self.assertEqual(field.field_type, "RANGE")
+ self.assertEqual(field.mode, "NULLABLE")
+ self.assertEqual(field.description, "test_range")
+ self.assertEqual(len(field.fields), 0)
+ self.assertEqual(field.range_element_type.element_type, "DATETIME")
+
+ def test_from_api_repr_defaults(self):
+ field = self._get_target_class().from_api_repr(
+ {"name": "foo", "type": "record"}
+ )
+ self.assertEqual(field.name, "foo")
+ self.assertEqual(field.field_type, "RECORD")
+ self.assertEqual(field.mode, "NULLABLE")
+ self.assertEqual(len(field.fields), 0)
+ self.assertEqual(field.default_value_expression, None)
+
+ # Keys not present in API representation shouldn't be included in
+ # _properties.
+ self.assertIsNone(field.description)
+ self.assertIsNone(field.policy_tags)
+ self.assertIsNone(field.range_element_type)
+ self.assertNotIn("description", field._properties)
+ self.assertNotIn("policyTags", field._properties)
+ self.assertNotIn("rangeElementType", field._properties)
+
+ def test_name_property(self):
+ name = "lemon-ness"
+ schema_field = self._make_one(name, "INTEGER")
+ self.assertEqual(schema_field.name, name)
+
+ def test_field_type_property(self):
+ field_type = "BOOLEAN"
+ schema_field = self._make_one("whether", field_type)
+ self.assertEqual(schema_field.field_type, field_type)
+
+ def test_mode_property(self):
+ mode = "REPEATED"
+ schema_field = self._make_one("again", "FLOAT", mode=mode)
+ self.assertEqual(schema_field.mode, mode)
+
+ def test_is_nullable(self):
+ mode = "NULLABLE"
+ schema_field = self._make_one("test", "FLOAT", mode=mode)
+ self.assertTrue(schema_field.is_nullable)
+
+ def test_is_not_nullable(self):
+ mode = "REPEATED"
+ schema_field = self._make_one("test", "FLOAT", mode=mode)
+ self.assertFalse(schema_field.is_nullable)
+
+ def test_description_property(self):
+ description = "It holds some data."
+ schema_field = self._make_one("do", "TIMESTAMP", description=description)
+ self.assertEqual(schema_field.description, description)
+
+ def test_fields_property(self):
+ sub_field1 = self._make_one("one", "STRING")
+ sub_field2 = self._make_one("fish", "INTEGER")
+ fields = (sub_field1, sub_field2)
+ schema_field = self._make_one("boat", "RECORD", fields=fields)
+ self.assertEqual(schema_field.fields, fields)
+
+ def test_to_standard_sql_simple_type(self):
+ examples = (
+ # a few legacy types
+ ("INTEGER", bigquery.StandardSqlTypeNames.INT64),
+ ("FLOAT", bigquery.StandardSqlTypeNames.FLOAT64),
+ ("BOOLEAN", bigquery.StandardSqlTypeNames.BOOL),
+ ("DATETIME", bigquery.StandardSqlTypeNames.DATETIME),
+ # a few standard types
+ ("INT64", bigquery.StandardSqlTypeNames.INT64),
+ ("FLOAT64", bigquery.StandardSqlTypeNames.FLOAT64),
+ ("BOOL", bigquery.StandardSqlTypeNames.BOOL),
+ ("GEOGRAPHY", bigquery.StandardSqlTypeNames.GEOGRAPHY),
+ )
+ for legacy_type, standard_type in examples:
+ field = self._make_one("some_field", legacy_type)
+ standard_field = field.to_standard_sql()
+ self.assertEqual(standard_field.name, "some_field")
+ self.assertEqual(standard_field.type.type_kind, standard_type)
+
+ def test_to_standard_sql_struct_type(self):
+ from google.cloud.bigquery import standard_sql
+
+ # Expected result object:
+ #
+ # name: "image_usage"
+ # type {
+ # type_kind: STRUCT
+ # struct_type {
+ # fields {
+ # name: "image_content"
+ # type {type_kind: BYTES}
+ # }
+ # fields {
+ # name: "last_used"
+ # type {
+ # type_kind: STRUCT
+ # struct_type {
+ # fields {
+ # name: "date_field"
+ # type {type_kind: DATE}
+ # }
+ # fields {
+ # name: "time_field"
+ # type {type_kind: TIME}
+ # }
+ # }
+ # }
+ # }
+ # }
+ # }
+
+ sql_type = self._get_standard_sql_data_type_class()
+
+ # level 2 fields
+ sub_sub_field_date = standard_sql.StandardSqlField(
+ name="date_field",
+ type=sql_type(type_kind=bigquery.StandardSqlTypeNames.DATE),
+ )
+ sub_sub_field_time = standard_sql.StandardSqlField(
+ name="time_field",
+ type=sql_type(type_kind=bigquery.StandardSqlTypeNames.TIME),
+ )
+
+ # level 1 fields
+ sub_field_struct = standard_sql.StandardSqlField(
+ name="last_used",
+ type=sql_type(
+ type_kind=bigquery.StandardSqlTypeNames.STRUCT,
+ struct_type=standard_sql.StandardSqlStructType(
+ fields=[sub_sub_field_date, sub_sub_field_time]
+ ),
+ ),
+ )
+ sub_field_bytes = standard_sql.StandardSqlField(
+ name="image_content",
+ type=sql_type(type_kind=bigquery.StandardSqlTypeNames.BYTES),
+ )
+
+ # level 0 (top level)
+ expected_result = standard_sql.StandardSqlField(
+ name="image_usage",
+ type=sql_type(
+ type_kind=bigquery.StandardSqlTypeNames.STRUCT,
+ struct_type=standard_sql.StandardSqlStructType(
+ fields=[sub_field_bytes, sub_field_struct]
+ ),
+ ),
+ )
+
+ # construct legacy SchemaField object
+ sub_sub_field1 = self._make_one("date_field", "DATE")
+ sub_sub_field2 = self._make_one("time_field", "TIME")
+ sub_field_record = self._make_one(
+ "last_used", "RECORD", fields=(sub_sub_field1, sub_sub_field2)
+ )
+ sub_field_bytes = self._make_one("image_content", "BYTES")
+
+ for type_name in ("RECORD", "STRUCT"):
+ schema_field = self._make_one(
+ "image_usage", type_name, fields=(sub_field_bytes, sub_field_record)
+ )
+ standard_field = schema_field.to_standard_sql()
+ self.assertEqual(standard_field, expected_result)
+
+ def test_to_standard_sql_array_type_simple(self):
+ from google.cloud.bigquery import standard_sql
+
+ sql_type = self._get_standard_sql_data_type_class()
+
+ # construct expected result object
+ expected_sql_type = sql_type(
+ type_kind=bigquery.StandardSqlTypeNames.ARRAY,
+ array_element_type=sql_type(type_kind=bigquery.StandardSqlTypeNames.INT64),
+ )
+ expected_result = standard_sql.StandardSqlField(
+ name="valid_numbers", type=expected_sql_type
+ )
+
+ # construct "repeated" SchemaField object and convert to standard SQL
+ schema_field = self._make_one("valid_numbers", "INT64", mode="REPEATED")
+ standard_field = schema_field.to_standard_sql()
+
+ self.assertEqual(standard_field, expected_result)
+
+ def test_to_standard_sql_array_type_struct(self):
+ from google.cloud.bigquery import standard_sql
+
+ sql_type = self._get_standard_sql_data_type_class()
+
+ # define person STRUCT
+ name_field = standard_sql.StandardSqlField(
+ name="name", type=sql_type(type_kind=bigquery.StandardSqlTypeNames.STRING)
+ )
+ age_field = standard_sql.StandardSqlField(
+ name="age", type=sql_type(type_kind=bigquery.StandardSqlTypeNames.INT64)
+ )
+ person_struct = standard_sql.StandardSqlField(
+ name="person_info",
+ type=sql_type(
+ type_kind=bigquery.StandardSqlTypeNames.STRUCT,
+ struct_type=StandardSqlStructType(fields=[name_field, age_field]),
+ ),
+ )
+
+ # define expected result - an ARRAY of person structs
+ expected_sql_type = sql_type(
+ type_kind=bigquery.StandardSqlTypeNames.ARRAY,
+ array_element_type=person_struct.type,
+ )
+ expected_result = standard_sql.StandardSqlField(
+ name="known_people", type=expected_sql_type
+ )
+
+ # construct legacy repeated SchemaField object
+ sub_field1 = self._make_one("name", "STRING")
+ sub_field2 = self._make_one("age", "INTEGER")
+ schema_field = self._make_one(
+ "known_people", "RECORD", fields=(sub_field1, sub_field2), mode="REPEATED"
+ )
+
+ standard_field = schema_field.to_standard_sql()
+ self.assertEqual(standard_field, expected_result)
+
+ def test_to_standard_sql_unknown_type(self):
+ field = self._make_one("weird_field", "TROOLEAN")
+
+ standard_field = field.to_standard_sql()
+
+ self.assertEqual(standard_field.name, "weird_field")
+ self.assertEqual(
+ standard_field.type.type_kind,
+ bigquery.StandardSqlTypeNames.TYPE_KIND_UNSPECIFIED,
+ )
+
+ def test___eq___wrong_type(self):
+ field = self._make_one("test", "STRING")
+ other = object()
+ self.assertNotEqual(field, other)
+ self.assertEqual(field, mock.ANY)
+
+ def test___eq___name_mismatch(self):
+ field = self._make_one("test", "STRING")
+ other = self._make_one("other", "STRING")
+ self.assertNotEqual(field, other)
+
+ def test___eq___field_type_mismatch(self):
+ field = self._make_one("test", "STRING")
+ other = self._make_one("test", "INTEGER")
+ self.assertNotEqual(field, other)
+
+ def test___eq___mode_mismatch(self):
+ field = self._make_one("test", "STRING", mode="REQUIRED")
+ other = self._make_one("test", "STRING", mode="NULLABLE")
+ self.assertNotEqual(field, other)
+
+ def test___eq___description_mismatch(self):
+ field = self._make_one("test", "STRING", description="Testing")
+ other = self._make_one("test", "STRING", description="Other")
+ self.assertNotEqual(field, other)
+
+ def test___eq___fields_mismatch(self):
+ sub1 = self._make_one("sub1", "STRING")
+ sub2 = self._make_one("sub2", "STRING")
+ field = self._make_one("test", "RECORD", fields=[sub1])
+ other = self._make_one("test", "RECORD", fields=[sub2])
+ self.assertNotEqual(field, other)
+
+ def test___eq___hit(self):
+ field = self._make_one("test", "STRING", mode="REQUIRED", description="Testing")
+ other = self._make_one("test", "STRING", mode="REQUIRED", description="Testing")
+ self.assertEqual(field, other)
+
+ def test___eq___hit_case_diff_on_type(self):
+ field = self._make_one("test", "STRING", mode="REQUIRED", description="Testing")
+ other = self._make_one("test", "string", mode="REQUIRED", description="Testing")
+ self.assertEqual(field, other)
+
+ def test___eq___hit_w_fields(self):
+ sub1 = self._make_one("sub1", "STRING")
+ sub2 = self._make_one("sub2", "STRING")
+ field = self._make_one("test", "RECORD", fields=[sub1, sub2])
+ other = self._make_one("test", "RECORD", fields=[sub1, sub2])
+ self.assertEqual(field, other)
+
+ def test___eq___hit_w_policy_tags(self):
+ field = self._make_one(
+ "test",
+ "STRING",
+ mode="REQUIRED",
+ description="Testing",
+ policy_tags=PolicyTagList(names=["foo", "bar"]),
+ )
+ other = self._make_one(
+ "test",
+ "STRING",
+ mode="REQUIRED",
+ description="Testing",
+ policy_tags=PolicyTagList(names=["bar", "foo"]),
+ )
+ self.assertEqual(field, other) # Policy tags order does not matter.
+
+ def test___ne___wrong_type(self):
+ field = self._make_one("toast", "INTEGER")
+ other = object()
+ self.assertNotEqual(field, other)
+ self.assertEqual(field, mock.ANY)
+
+ def test___ne___same_value(self):
+ field1 = self._make_one("test", "TIMESTAMP", mode="REPEATED")
+ field2 = self._make_one("test", "TIMESTAMP", mode="REPEATED")
+ # unittest ``assertEqual`` uses ``==`` not ``!=``.
+ comparison_val = field1 != field2
+ self.assertFalse(comparison_val)
+
+ def test___ne___different_values(self):
+ field1 = self._make_one(
+ "test1", "FLOAT", mode="REPEATED", description="Not same"
+ )
+ field2 = self._make_one(
+ "test2", "FLOAT", mode="NULLABLE", description="Knot saym"
+ )
+ self.assertNotEqual(field1, field2)
+
+ def test___ne___different_policy_tags(self):
+ field = self._make_one(
+ "test",
+ "STRING",
+ mode="REQUIRED",
+ description="Testing",
+ policy_tags=PolicyTagList(names=["foo", "bar"]),
+ )
+ other = self._make_one(
+ "test",
+ "STRING",
+ mode="REQUIRED",
+ description="Testing",
+ policy_tags=PolicyTagList(names=["foo", "baz"]),
+ )
+ self.assertNotEqual(field, other)
+
+ def test___hash__set_equality(self):
+ sub1 = self._make_one("sub1", "STRING")
+ sub2 = self._make_one("sub2", "STRING")
+ field1 = self._make_one("test", "RECORD", fields=[sub1])
+ field2 = self._make_one("test", "RECORD", fields=[sub2])
+ set_one = {field1, field2}
+ set_two = {field1, field2}
+ self.assertEqual(set_one, set_two)
+
+ def test___hash__not_equals(self):
+ sub1 = self._make_one("sub1", "STRING")
+ sub2 = self._make_one("sub2", "STRING")
+ field1 = self._make_one("test", "RECORD", fields=[sub1])
+ field2 = self._make_one("test", "RECORD", fields=[sub2])
+ set_one = {field1}
+ set_two = {field2}
+ self.assertNotEqual(set_one, set_two)
+
+ def test___repr__(self):
+ field1 = self._make_one("field1", "STRING")
+ expected = "SchemaField('field1', 'STRING', 'NULLABLE', None, None, (), None)"
+ self.assertEqual(repr(field1), expected)
+
+ def test___repr__type_not_set(self):
+ field1 = self._make_one("field1", field_type=None)
+ expected = "SchemaField('field1', None, 'NULLABLE', None, None, (), None)"
+ self.assertEqual(repr(field1), expected)
+
+ def test___repr__evaluable_no_policy_tags(self):
+ field = self._make_one("field1", "STRING", "REQUIRED", "Description")
+ field_repr = repr(field)
+ SchemaField = self._get_target_class() # needed for eval # noqa
+
+ evaled_field = eval(field_repr)
+
+ assert field == evaled_field
+
+ def test___repr__evaluable_with_policy_tags(self):
+ policy_tags = PolicyTagList(names=["foo", "bar"])
+ field = self._make_one(
+ "field1",
+ "STRING",
+ "REQUIRED",
+ "Description",
+ policy_tags=policy_tags,
+ )
+ field_repr = repr(field)
+ SchemaField = self._get_target_class() # needed for eval # noqa
+
+ evaled_field = eval(field_repr)
+
+ assert field == evaled_field
+
+
+class TestFieldElementType(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.schema import FieldElementType
+
+ return FieldElementType
+
+ def _make_one(self, *args):
+ return self._get_target_class()(*args)
+
+ def test_constructor(self):
+ element_type = self._make_one("DATETIME")
+ self.assertEqual(element_type.element_type, "DATETIME")
+ self.assertEqual(element_type._properties["type"], "DATETIME")
+
+ def test_to_api_repr(self):
+ element_type = self._make_one("DATETIME")
+ self.assertEqual(element_type.to_api_repr(), {"type": "DATETIME"})
+
+ def test_from_api_repr(self):
+ api_repr = {"type": "DATETIME"}
+ expected_element_type = self._make_one("DATETIME")
+ self.assertEqual(
+ expected_element_type.element_type,
+ self._get_target_class().from_api_repr(api_repr).element_type,
+ )
+
+ def test_from_api_repr_empty(self):
+ self.assertEqual(None, self._get_target_class().from_api_repr({}))
+
+ def test_from_api_repr_none(self):
+ self.assertEqual(None, self._get_target_class().from_api_repr(None))
+
+
+# TODO: dedup with the same class in test_table.py.
+class _SchemaBase(object):
+ def _verify_field(self, field, r_field):
+ self.assertEqual(field.name, r_field["name"])
+ self.assertEqual(field.field_type, r_field["type"])
+ self.assertEqual(field.mode, r_field.get("mode", "NULLABLE"))
+
+ def _verifySchema(self, schema, resource):
+ r_fields = resource["schema"]["fields"]
+ self.assertEqual(len(schema), len(r_fields))
+
+ for field, r_field in zip(schema, r_fields):
+ self._verify_field(field, r_field)
+
+
+class Test_parse_schema_resource(unittest.TestCase, _SchemaBase):
+ def _call_fut(self, resource):
+ from google.cloud.bigquery.schema import _parse_schema_resource
+
+ return _parse_schema_resource(resource)
+
+ def _make_resource(self):
+ return {
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INTEGER", "mode": "REQUIRED"},
+ ]
+ }
+ }
+
+ def test__parse_schema_resource_defaults(self):
+ RESOURCE = self._make_resource()
+ schema = self._call_fut(RESOURCE["schema"])
+ self._verifySchema(schema, RESOURCE)
+
+ def test__parse_schema_resource_subfields(self):
+ RESOURCE = self._make_resource()
+ RESOURCE["schema"]["fields"].append(
+ {
+ "name": "phone",
+ "type": "RECORD",
+ "mode": "REPEATED",
+ "fields": [
+ {"name": "type", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "number", "type": "STRING", "mode": "REQUIRED"},
+ ],
+ }
+ )
+ schema = self._call_fut(RESOURCE["schema"])
+ self._verifySchema(schema, RESOURCE)
+
+ def test__parse_schema_resource_fields_without_mode(self):
+ RESOURCE = self._make_resource()
+ RESOURCE["schema"]["fields"].append({"name": "phone", "type": "STRING"})
+
+ schema = self._call_fut(RESOURCE["schema"])
+ self._verifySchema(schema, RESOURCE)
+
+
+class Test_build_schema_resource(unittest.TestCase, _SchemaBase):
+ def _call_fut(self, resource):
+ from google.cloud.bigquery.schema import _build_schema_resource
+
+ return _build_schema_resource(resource)
+
+ def test_defaults(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="REQUIRED")
+ resource = self._call_fut([full_name, age])
+ self.assertEqual(len(resource), 2)
+ self.assertEqual(
+ resource[0],
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ )
+ self.assertEqual(
+ resource[1],
+ {"name": "age", "type": "INTEGER", "mode": "REQUIRED"},
+ )
+
+ def test_w_description(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ DESCRIPTION = "DESCRIPTION"
+ full_name = SchemaField(
+ "full_name", "STRING", mode="REQUIRED", description=DESCRIPTION
+ )
+ age = SchemaField(
+ "age",
+ "INTEGER",
+ mode="REQUIRED",
+ # Explicitly unset description.
+ description=None,
+ )
+ resource = self._call_fut([full_name, age])
+ self.assertEqual(len(resource), 2)
+ self.assertEqual(
+ resource[0],
+ {
+ "name": "full_name",
+ "type": "STRING",
+ "mode": "REQUIRED",
+ "description": DESCRIPTION,
+ },
+ )
+ self.assertEqual(
+ resource[1],
+ {
+ "name": "age",
+ "type": "INTEGER",
+ "mode": "REQUIRED",
+ "description": None,
+ },
+ )
+
+ def test_w_subfields(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ ph_type = SchemaField("type", "STRING", "REQUIRED")
+ ph_num = SchemaField("number", "STRING", "REQUIRED")
+ phone = SchemaField(
+ "phone", "RECORD", mode="REPEATED", fields=[ph_type, ph_num]
+ )
+ resource = self._call_fut([full_name, phone])
+ self.assertEqual(len(resource), 2)
+ self.assertEqual(
+ resource[0],
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ )
+ self.assertEqual(
+ resource[1],
+ {
+ "name": "phone",
+ "type": "RECORD",
+ "mode": "REPEATED",
+ "fields": [
+ {"name": "type", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "number", "type": "STRING", "mode": "REQUIRED"},
+ ],
+ },
+ )
+
+
+class Test_to_schema_fields(unittest.TestCase):
+ @staticmethod
+ def _call_fut(schema):
+ from google.cloud.bigquery.schema import _to_schema_fields
+
+ return _to_schema_fields(schema)
+
+ def test_invalid_type(self):
+ schema = [
+ ("full_name", "STRING", "REQUIRED"),
+ ("address", "STRING", "REQUIRED"),
+ ]
+ with self.assertRaises(ValueError):
+ self._call_fut(schema)
+
+ def test_schema_fields_sequence(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INT64", mode="NULLABLE"),
+ ]
+ result = self._call_fut(schema)
+ self.assertEqual(result, schema)
+
+ def test_invalid_mapping_representation(self):
+ schema = [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "address", "typeooo": "STRING", "mode": "REQUIRED"},
+ ]
+ with self.assertRaises(Exception):
+ self._call_fut(schema)
+
+ def test_valid_mapping_representation(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {
+ "name": "residence",
+ "type": "STRUCT",
+ "mode": "NULLABLE",
+ "fields": [
+ {"name": "foo", "type": "DATE", "mode": "NULLABLE"},
+ {"name": "bar", "type": "BYTES", "mode": "REQUIRED"},
+ ],
+ },
+ ]
+
+ expected_schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField(
+ "residence",
+ "STRUCT",
+ mode="NULLABLE",
+ fields=[
+ SchemaField("foo", "DATE", mode="NULLABLE"),
+ SchemaField("bar", "BYTES", mode="REQUIRED"),
+ ],
+ ),
+ ]
+
+ result = self._call_fut(schema)
+ self.assertEqual(result, expected_schema)
+
+
+class TestPolicyTags(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.schema import PolicyTagList
+
+ return PolicyTagList
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_constructor(self):
+ empty_policy_tags = self._make_one()
+ self.assertIsNotNone(empty_policy_tags.names)
+ self.assertEqual(len(empty_policy_tags.names), 0)
+ policy_tags = self._make_one(["foo", "bar"])
+ self.assertEqual(policy_tags.names, ("foo", "bar"))
+
+ def test_from_api_repr(self):
+ klass = self._get_target_class()
+ api_repr = {"names": ["foo"]}
+ policy_tags = klass.from_api_repr(api_repr)
+ self.assertEqual(policy_tags.to_api_repr(), api_repr)
+
+ # Ensure the None case correctly returns None, rather
+ # than an empty instance.
+ policy_tags2 = klass.from_api_repr(None)
+ self.assertIsNone(policy_tags2)
+
+ def test_to_api_repr(self):
+ taglist = self._make_one(names=["foo", "bar"])
+ self.assertEqual(
+ taglist.to_api_repr(),
+ {"names": ["foo", "bar"]},
+ )
+ taglist2 = self._make_one(names=("foo", "bar"))
+ self.assertEqual(
+ taglist2.to_api_repr(),
+ {"names": ["foo", "bar"]},
+ )
+
+ def test___eq___wrong_type(self):
+ policy = self._make_one(names=["foo"])
+ other = object()
+ self.assertNotEqual(policy, other)
+ self.assertEqual(policy, mock.ANY)
+
+ def test___eq___names_mismatch(self):
+ policy = self._make_one(names=["foo", "bar"])
+ other = self._make_one(names=["bar", "baz"])
+ self.assertNotEqual(policy, other)
+
+ def test___hash__set_equality(self):
+ policy1 = self._make_one(["foo", "bar"])
+ policy2 = self._make_one(["bar", "baz"])
+ set_one = {policy1, policy2}
+ set_two = {policy1, policy2}
+ self.assertEqual(set_one, set_two)
+
+ def test___hash__not_equals(self):
+ policy1 = self._make_one(["foo", "bar"])
+ policy2 = self._make_one(["bar", "baz"])
+ set_one = {policy1}
+ set_two = {policy2}
+ self.assertNotEqual(set_one, set_two)
+
+ def test___repr__no_tags(self):
+ policy = self._make_one()
+ assert repr(policy) == "PolicyTagList(names=())"
+
+ def test___repr__with_tags(self):
+ policy1 = self._make_one(["foo", "bar", "baz"])
+ policy2 = self._make_one(["baz", "bar", "foo"])
+ expected_repr = "PolicyTagList(names=('bar', 'baz', 'foo'))" # alphabetical
+
+ assert repr(policy1) == expected_repr
+ assert repr(policy2) == expected_repr
+
+ def test___repr__evaluable_no_tags(self):
+ policy = self._make_one(names=[])
+ policy_repr = repr(policy)
+
+ evaled_policy = eval(policy_repr)
+
+ assert policy == evaled_policy
+
+ def test___repr__evaluable_with_tags(self):
+ policy = self._make_one(names=["foo", "bar"])
+ policy_repr = repr(policy)
+
+ evaled_policy = eval(policy_repr)
+
+ assert policy == evaled_policy
+
+
+@pytest.mark.parametrize(
+ "api,expect,key2",
+ [
+ (
+ dict(name="n", type="NUMERIC"),
+ ("n", "NUMERIC", None, None, None),
+ ("n", "NUMERIC"),
+ ),
+ (
+ dict(name="n", type="NUMERIC", precision=9),
+ ("n", "NUMERIC", 9, None, None),
+ ("n", "NUMERIC(9)"),
+ ),
+ (
+ dict(name="n", type="NUMERIC", precision=9, scale=2),
+ ("n", "NUMERIC", 9, 2, None),
+ ("n", "NUMERIC(9, 2)"),
+ ),
+ (
+ dict(name="n", type="BIGNUMERIC"),
+ ("n", "BIGNUMERIC", None, None, None),
+ ("n", "BIGNUMERIC"),
+ ),
+ (
+ dict(name="n", type="BIGNUMERIC", precision=40),
+ ("n", "BIGNUMERIC", 40, None, None),
+ ("n", "BIGNUMERIC(40)"),
+ ),
+ (
+ dict(name="n", type="BIGNUMERIC", precision=40, scale=2),
+ ("n", "BIGNUMERIC", 40, 2, None),
+ ("n", "BIGNUMERIC(40, 2)"),
+ ),
+ (
+ dict(name="n", type="STRING"),
+ ("n", "STRING", None, None, None),
+ ("n", "STRING"),
+ ),
+ (
+ dict(name="n", type="STRING", maxLength=9),
+ ("n", "STRING", None, None, 9),
+ ("n", "STRING(9)"),
+ ),
+ (
+ dict(name="n", type="BYTES"),
+ ("n", "BYTES", None, None, None),
+ ("n", "BYTES"),
+ ),
+ (
+ dict(name="n", type="BYTES", maxLength=9),
+ ("n", "BYTES", None, None, 9),
+ ("n", "BYTES(9)"),
+ ),
+ ],
+)
+def test_from_api_repr_parameterized(api, expect, key2):
+ from google.cloud.bigquery.schema import SchemaField
+
+ field = SchemaField.from_api_repr(api)
+
+ assert (
+ field.name,
+ field.field_type,
+ field.precision,
+ field.scale,
+ field.max_length,
+ ) == expect
+
+ assert field._key()[:2] == key2
+
+
+@pytest.mark.parametrize(
+ "field,api",
+ [
+ (
+ dict(name="n", field_type="NUMERIC"),
+ dict(name="n", type="NUMERIC", mode="NULLABLE"),
+ ),
+ (
+ dict(name="n", field_type="NUMERIC", precision=9),
+ dict(
+ name="n",
+ type="NUMERIC",
+ mode="NULLABLE",
+ precision=9,
+ ),
+ ),
+ (
+ dict(name="n", field_type="NUMERIC", precision=9, scale=2),
+ dict(
+ name="n",
+ type="NUMERIC",
+ mode="NULLABLE",
+ precision=9,
+ scale=2,
+ ),
+ ),
+ (
+ dict(name="n", field_type="BIGNUMERIC"),
+ dict(name="n", type="BIGNUMERIC", mode="NULLABLE"),
+ ),
+ (
+ dict(name="n", field_type="BIGNUMERIC", precision=40),
+ dict(
+ name="n",
+ type="BIGNUMERIC",
+ mode="NULLABLE",
+ precision=40,
+ ),
+ ),
+ (
+ dict(name="n", field_type="BIGNUMERIC", precision=40, scale=2),
+ dict(
+ name="n",
+ type="BIGNUMERIC",
+ mode="NULLABLE",
+ precision=40,
+ scale=2,
+ ),
+ ),
+ (
+ dict(name="n", field_type="STRING"),
+ dict(name="n", type="STRING", mode="NULLABLE"),
+ ),
+ (
+ dict(name="n", field_type="STRING", max_length=9),
+ dict(
+ name="n",
+ type="STRING",
+ mode="NULLABLE",
+ maxLength=9,
+ ),
+ ),
+ (
+ dict(name="n", field_type="BYTES"),
+ dict(name="n", type="BYTES", mode="NULLABLE"),
+ ),
+ (
+ dict(name="n", field_type="BYTES", max_length=9),
+ dict(
+ name="n",
+ type="BYTES",
+ mode="NULLABLE",
+ maxLength=9,
+ ),
+ ),
+ ],
+)
+def test_to_api_repr_parameterized(field, api):
+ from google.cloud.bigquery.schema import SchemaField
+
+ assert SchemaField(**field).to_api_repr() == api
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_signature_compatibility.py b/testbed/googleapis__python-bigquery/tests/unit/test_signature_compatibility.py
new file mode 100644
index 0000000000000000000000000000000000000000..07b823e2c43856fb6c2510c1596595d4f73f45f3
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_signature_compatibility.py
@@ -0,0 +1,62 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import OrderedDict
+import inspect
+
+import pytest
+
+
+@pytest.fixture
+def query_job_class():
+ from google.cloud.bigquery.job import QueryJob
+
+ return QueryJob
+
+
+@pytest.fixture
+def row_iterator_class():
+ from google.cloud.bigquery.table import RowIterator
+
+ return RowIterator
+
+
+def test_to_arrow_method_signatures_match(query_job_class, row_iterator_class):
+ query_job_sig = inspect.signature(query_job_class.to_arrow)
+ iterator_sig = inspect.signature(row_iterator_class.to_arrow)
+
+ assert "max_results" in query_job_sig.parameters
+
+ # Compare the signatures while ignoring the max_results parameter, which is
+ # specific to the method on QueryJob.
+ params = OrderedDict(query_job_sig.parameters)
+ del params["max_results"]
+ query_job_sig = query_job_sig.replace(parameters=params.values())
+
+ assert query_job_sig == iterator_sig
+
+
+def test_to_dataframe_method_signatures_match(query_job_class, row_iterator_class):
+ query_job_sig = inspect.signature(query_job_class.to_dataframe)
+ iterator_sig = inspect.signature(row_iterator_class.to_dataframe)
+
+ assert "max_results" in query_job_sig.parameters
+
+ # Compare the signatures while ignoring the max_results parameter, which is
+ # specific to the method on QueryJob.
+ params = OrderedDict(query_job_sig.parameters)
+ del params["max_results"]
+ query_job_sig = query_job_sig.replace(parameters=params.values())
+
+ assert query_job_sig == iterator_sig
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_standard_sql_types.py b/testbed/googleapis__python-bigquery/tests/unit/test_standard_sql_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ed912b5a4a89122a9857726421e9a91512e800a
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_standard_sql_types.py
@@ -0,0 +1,646 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+import pytest
+
+from google.cloud import bigquery as bq
+
+
+class TestStandardSqlDataType:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.standard_sql import StandardSqlDataType
+
+ return StandardSqlDataType
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_default_type_kind(self):
+ instance = self._make_one()
+ assert instance.type_kind == bq.StandardSqlTypeNames.TYPE_KIND_UNSPECIFIED
+
+ def test_to_api_repr_no_type_set(self):
+ instance = self._make_one()
+ instance.type_kind = None
+
+ result = instance.to_api_repr()
+
+ assert result == {"typeKind": "TYPE_KIND_UNSPECIFIED"}
+
+ def test_to_api_repr_scalar_type(self):
+ instance = self._make_one(bq.StandardSqlTypeNames.FLOAT64)
+
+ result = instance.to_api_repr()
+
+ assert result == {"typeKind": "FLOAT64"}
+
+ def test_to_api_repr_array_type_element_type_missing(self):
+ instance = self._make_one(
+ bq.StandardSqlTypeNames.ARRAY, array_element_type=None
+ )
+
+ result = instance.to_api_repr()
+
+ expected = {"typeKind": "ARRAY"}
+ assert result == expected
+
+ def test_to_api_repr_array_type_w_element_type(self):
+ array_element_type = self._make_one(type_kind=bq.StandardSqlTypeNames.BOOL)
+ instance = self._make_one(
+ bq.StandardSqlTypeNames.ARRAY, array_element_type=array_element_type
+ )
+
+ result = instance.to_api_repr()
+
+ expected = {"typeKind": "ARRAY", "arrayElementType": {"typeKind": "BOOL"}}
+ assert result == expected
+
+ def test_to_api_repr_struct_type_field_types_missing(self):
+ instance = self._make_one(bq.StandardSqlTypeNames.STRUCT, struct_type=None)
+
+ result = instance.to_api_repr()
+
+ assert result == {"typeKind": "STRUCT"}
+
+ def test_to_api_repr_struct_type_w_field_types(self):
+ from google.cloud.bigquery.standard_sql import StandardSqlField
+ from google.cloud.bigquery.standard_sql import StandardSqlStructType
+
+ StandardSqlDataType = self._get_target_class()
+ TypeNames = bq.StandardSqlTypeNames
+
+ person_type = StandardSqlStructType(
+ fields=[
+ StandardSqlField("name", StandardSqlDataType(TypeNames.STRING)),
+ StandardSqlField("age", StandardSqlDataType(TypeNames.INT64)),
+ ]
+ )
+ employee_type = StandardSqlStructType(
+ fields=[
+ StandardSqlField("job_title", StandardSqlDataType(TypeNames.STRING)),
+ StandardSqlField("salary", StandardSqlDataType(TypeNames.FLOAT64)),
+ StandardSqlField(
+ "employee_info",
+ StandardSqlDataType(
+ type_kind=TypeNames.STRUCT,
+ struct_type=person_type,
+ ),
+ ),
+ ]
+ )
+
+ instance = self._make_one(TypeNames.STRUCT, struct_type=employee_type)
+ result = instance.to_api_repr()
+
+ expected = {
+ "typeKind": "STRUCT",
+ "structType": {
+ "fields": [
+ {"name": "job_title", "type": {"typeKind": "STRING"}},
+ {"name": "salary", "type": {"typeKind": "FLOAT64"}},
+ {
+ "name": "employee_info",
+ "type": {
+ "typeKind": "STRUCT",
+ "structType": {
+ "fields": [
+ {"name": "name", "type": {"typeKind": "STRING"}},
+ {"name": "age", "type": {"typeKind": "INT64"}},
+ ],
+ },
+ },
+ },
+ ],
+ },
+ }
+ assert result == expected
+
+ def test_to_api_repr_range_type_element_type_missing(self):
+ instance = self._make_one(
+ bq.StandardSqlTypeNames.RANGE, range_element_type=None
+ )
+
+ result = instance.to_api_repr()
+
+ assert result == {"typeKind": "RANGE"}
+
+ def test_to_api_repr_range_type_w_element_type(self):
+ range_element_type = self._make_one(type_kind=bq.StandardSqlTypeNames.DATE)
+ instance = self._make_one(
+ bq.StandardSqlTypeNames.RANGE, range_element_type=range_element_type
+ )
+
+ result = instance.to_api_repr()
+
+ assert result == {
+ "typeKind": "RANGE",
+ "rangeElementType": {"typeKind": "DATE"},
+ }
+
+ def test_from_api_repr_empty_resource(self):
+ klass = self._get_target_class()
+ result = klass.from_api_repr(resource={})
+
+ expected = klass(
+ type_kind=bq.StandardSqlTypeNames.TYPE_KIND_UNSPECIFIED,
+ array_element_type=None,
+ struct_type=None,
+ )
+ assert result == expected
+
+ def test_from_api_repr_scalar_type(self):
+ klass = self._get_target_class()
+ resource = {"typeKind": "DATE"}
+
+ result = klass.from_api_repr(resource=resource)
+
+ expected = klass(
+ type_kind=bq.StandardSqlTypeNames.DATE,
+ array_element_type=None,
+ struct_type=None,
+ )
+ assert result == expected
+
+ def test_from_api_repr_array_type_full(self):
+ klass = self._get_target_class()
+ resource = {"typeKind": "ARRAY", "arrayElementType": {"typeKind": "BYTES"}}
+
+ result = klass.from_api_repr(resource=resource)
+
+ expected = klass(
+ type_kind=bq.StandardSqlTypeNames.ARRAY,
+ array_element_type=klass(type_kind=bq.StandardSqlTypeNames.BYTES),
+ struct_type=None,
+ )
+ assert result == expected
+
+ def test_from_api_repr_array_type_missing_element_type(self):
+ klass = self._get_target_class()
+ resource = {"typeKind": "ARRAY"}
+
+ result = klass.from_api_repr(resource=resource)
+
+ expected = klass(
+ type_kind=bq.StandardSqlTypeNames.ARRAY,
+ array_element_type=None,
+ struct_type=None,
+ )
+ assert result == expected
+
+ def test_from_api_repr_struct_type_nested(self):
+ from google.cloud.bigquery.standard_sql import StandardSqlField
+ from google.cloud.bigquery.standard_sql import StandardSqlStructType
+
+ klass = self._get_target_class()
+ TypeNames = bq.StandardSqlTypeNames
+
+ resource = {
+ "typeKind": "STRUCT",
+ "structType": {
+ "fields": [
+ {"name": "job_title", "type": {"typeKind": "STRING"}},
+ {"name": "salary", "type": {"typeKind": "FLOAT64"}},
+ {
+ "name": "employee_info",
+ "type": {
+ "typeKind": "STRUCT",
+ "structType": {
+ "fields": [
+ {"name": "name", "type": {"typeKind": "STRING"}},
+ {"name": "age", "type": {"typeKind": "INT64"}},
+ ],
+ },
+ },
+ },
+ ],
+ },
+ }
+
+ result = klass.from_api_repr(resource=resource)
+
+ expected = klass(
+ type_kind=TypeNames.STRUCT,
+ struct_type=StandardSqlStructType(
+ fields=[
+ StandardSqlField("job_title", klass(TypeNames.STRING)),
+ StandardSqlField("salary", klass(TypeNames.FLOAT64)),
+ StandardSqlField(
+ "employee_info",
+ klass(
+ type_kind=TypeNames.STRUCT,
+ struct_type=StandardSqlStructType(
+ fields=[
+ StandardSqlField("name", klass(TypeNames.STRING)),
+ StandardSqlField("age", klass(TypeNames.INT64)),
+ ]
+ ),
+ ),
+ ),
+ ]
+ ),
+ )
+ assert result == expected
+
+ def test_from_api_repr_struct_type_missing_struct_info(self):
+ klass = self._get_target_class()
+ resource = {"typeKind": "STRUCT"}
+
+ result = klass.from_api_repr(resource=resource)
+
+ expected = klass(
+ type_kind=bq.StandardSqlTypeNames.STRUCT,
+ array_element_type=None,
+ struct_type=None,
+ )
+ assert result == expected
+
+ def test_from_api_repr_struct_type_incomplete_field_info(self):
+ from google.cloud.bigquery.standard_sql import StandardSqlField
+ from google.cloud.bigquery.standard_sql import StandardSqlStructType
+
+ klass = self._get_target_class()
+ TypeNames = bq.StandardSqlTypeNames
+
+ resource = {
+ "typeKind": "STRUCT",
+ "structType": {
+ "fields": [
+ {"type": {"typeKind": "STRING"}}, # missing name
+ {"name": "salary"}, # missing type
+ ],
+ },
+ }
+
+ result = klass.from_api_repr(resource=resource)
+
+ expected = klass(
+ type_kind=TypeNames.STRUCT,
+ struct_type=StandardSqlStructType(
+ fields=[
+ StandardSqlField(None, klass(TypeNames.STRING)),
+ StandardSqlField("salary", klass(TypeNames.TYPE_KIND_UNSPECIFIED)),
+ ]
+ ),
+ )
+ assert result == expected
+
+ def test_from_api_repr_range_type_full(self):
+ klass = self._get_target_class()
+ resource = {"typeKind": "RANGE", "rangeElementType": {"typeKind": "DATE"}}
+
+ result = klass.from_api_repr(resource=resource)
+
+ expected = klass(
+ type_kind=bq.StandardSqlTypeNames.RANGE,
+ range_element_type=klass(type_kind=bq.StandardSqlTypeNames.DATE),
+ )
+ assert result == expected
+
+ def test_from_api_repr_range_type_missing_element_type(self):
+ klass = self._get_target_class()
+ resource = {"typeKind": "RANGE"}
+
+ result = klass.from_api_repr(resource=resource)
+
+ expected = klass(
+ type_kind=bq.StandardSqlTypeNames.RANGE,
+ range_element_type=None,
+ struct_type=None,
+ )
+ assert result == expected
+
+ def test__eq__another_type(self):
+ instance = self._make_one()
+
+ class SqlTypeWannabe:
+ pass
+
+ not_a_type = SqlTypeWannabe()
+ not_a_type._properties = instance._properties
+
+ assert instance != not_a_type # Can't fake it.
+
+ def test__eq__delegates_comparison_to_another_type(self):
+ instance = self._make_one()
+ assert instance == mock.ANY
+
+ def test__eq__similar_instance(self):
+ kwargs = {
+ "type_kind": bq.StandardSqlTypeNames.GEOGRAPHY,
+ "array_element_type": bq.StandardSqlDataType(
+ type_kind=bq.StandardSqlTypeNames.INT64
+ ),
+ "struct_type": bq.StandardSqlStructType(fields=[]),
+ }
+ instance = self._make_one(**kwargs)
+ instance2 = self._make_one(**kwargs)
+ assert instance == instance2
+
+ @pytest.mark.parametrize(
+ ("attr_name", "value", "value2"),
+ (
+ (
+ "type_kind",
+ bq.StandardSqlTypeNames.INT64,
+ bq.StandardSqlTypeNames.FLOAT64,
+ ),
+ (
+ "array_element_type",
+ bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.STRING),
+ bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.BOOL),
+ ),
+ (
+ "struct_type",
+ bq.StandardSqlStructType(fields=[bq.StandardSqlField(name="foo")]),
+ bq.StandardSqlStructType(fields=[bq.StandardSqlField(name="bar")]),
+ ),
+ (
+ "range_element_type",
+ bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.DATE),
+ bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.DATETIME),
+ ),
+ ),
+ )
+ def test__eq__attribute_differs(self, attr_name, value, value2):
+ instance = self._make_one(**{attr_name: value})
+ instance2 = self._make_one(**{attr_name: value2})
+ assert instance != instance2
+
+ def test_str(self):
+ instance = self._make_one(type_kind=bq.StandardSqlTypeNames.BOOL)
+ bool_type_repr = repr(bq.StandardSqlTypeNames.BOOL)
+ assert str(instance) == f"StandardSqlDataType(type_kind={bool_type_repr}, ...)"
+
+
+class TestStandardSqlField:
+ # This class only contains minimum tests to cover what other tests don't
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.standard_sql import StandardSqlField
+
+ return StandardSqlField
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_name(self):
+ instance = self._make_one(name="foo")
+ assert instance.name == "foo"
+ instance.name = "bar"
+ assert instance.name == "bar"
+
+ def test_type_missing(self):
+ instance = self._make_one(type=None)
+ assert instance.type is None
+
+ def test_type_set_none(self):
+ instance = self._make_one(
+ type=bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.BOOL)
+ )
+ instance.type = None
+ assert instance.type is None
+
+ def test_type_set_not_none(self):
+ instance = self._make_one(type=bq.StandardSqlDataType(type_kind=None))
+ instance.type = bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.INT64)
+ assert instance.type == bq.StandardSqlDataType(
+ type_kind=bq.StandardSqlTypeNames.INT64
+ )
+
+ def test__eq__another_type(self):
+ instance = self._make_one(
+ name="foo",
+ type=bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.BOOL),
+ )
+
+ class FieldWannabe:
+ pass
+
+ not_a_field = FieldWannabe()
+ not_a_field._properties = instance._properties
+
+ assert instance != not_a_field # Can't fake it.
+
+ def test__eq__delegates_comparison_to_another_type(self):
+ instance = self._make_one(
+ name="foo",
+ type=bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.BOOL),
+ )
+ assert instance == mock.ANY
+
+ def test__eq__similar_instance(self):
+ kwargs = {
+ "name": "foo",
+ "type": bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.INT64),
+ }
+ instance = self._make_one(**kwargs)
+ instance2 = self._make_one(**kwargs)
+ assert instance == instance2
+
+ @pytest.mark.parametrize(
+ ("attr_name", "value", "value2"),
+ (
+ (
+ "name",
+ "foo",
+ "bar",
+ ),
+ (
+ "type",
+ bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.INTERVAL),
+ bq.StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.TIME),
+ ),
+ ),
+ )
+ def test__eq__attribute_differs(self, attr_name, value, value2):
+ instance = self._make_one(**{attr_name: value})
+ instance2 = self._make_one(**{attr_name: value2})
+ assert instance != instance2
+
+
+class TestStandardSqlStructType:
+ # This class only contains minimum tests to cover what other tests don't
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.standard_sql import StandardSqlStructType
+
+ return StandardSqlStructType
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_fields(self):
+ instance = self._make_one(fields=[])
+ assert instance.fields == []
+
+ new_fields = [bq.StandardSqlField(name="foo"), bq.StandardSqlField(name="bar")]
+ instance.fields = new_fields
+ assert instance.fields == new_fields
+
+ def test__eq__another_type(self):
+ instance = self._make_one(fields=[bq.StandardSqlField(name="foo")])
+
+ class StructTypeWannabe:
+ pass
+
+ not_a_type = StructTypeWannabe()
+ not_a_type._properties = instance._properties
+
+ assert instance != not_a_type # Can't fake it.
+
+ def test__eq__delegates_comparison_to_another_type(self):
+ instance = self._make_one(fields=[bq.StandardSqlField(name="foo")])
+ assert instance == mock.ANY
+
+ def test__eq__similar_instance(self):
+ kwargs = {
+ "fields": [bq.StandardSqlField(name="foo"), bq.StandardSqlField(name="bar")]
+ }
+ instance = self._make_one(**kwargs)
+ instance2 = self._make_one(**kwargs)
+ assert instance == instance2
+
+ def test__eq__attribute_differs(self):
+ instance = self._make_one(fields=[bq.StandardSqlField(name="foo")])
+ instance2 = self._make_one(
+ fields=[bq.StandardSqlField(name="foo"), bq.StandardSqlField(name="bar")]
+ )
+ assert instance != instance2
+
+
+class TestStandardSqlTableType:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.standard_sql import StandardSqlTableType
+
+ return StandardSqlTableType
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_columns_shallow_copy(self):
+ from google.cloud.bigquery.standard_sql import StandardSqlField
+
+ columns = [
+ StandardSqlField("foo"),
+ StandardSqlField("bar"),
+ StandardSqlField("baz"),
+ ]
+
+ instance = self._make_one(columns=columns)
+
+ assert len(instance.columns) == 3
+ columns.pop()
+ assert len(instance.columns) == 3 # Still the same.
+
+ def test_columns_setter(self):
+ from google.cloud.bigquery.standard_sql import StandardSqlField
+
+ columns = [StandardSqlField("foo")]
+ instance = self._make_one(columns=columns)
+ assert instance.columns == columns
+
+ new_columns = [StandardSqlField(name="bar")]
+ instance.columns = new_columns
+ assert instance.columns == new_columns
+
+ def test_to_api_repr_no_columns(self):
+ instance = self._make_one(columns=[])
+ result = instance.to_api_repr()
+ assert result == {"columns": []}
+
+ def test_to_api_repr_with_columns(self):
+ from google.cloud.bigquery.standard_sql import StandardSqlField
+
+ columns = [StandardSqlField("foo"), StandardSqlField("bar")]
+ instance = self._make_one(columns=columns)
+
+ result = instance.to_api_repr()
+
+ expected = {
+ "columns": [{"name": "foo", "type": None}, {"name": "bar", "type": None}]
+ }
+ assert result == expected
+
+ def test_from_api_repr_missing_columns(self):
+ resource = {}
+ result = self._get_target_class().from_api_repr(resource)
+ assert result.columns == []
+
+ def test_from_api_repr_with_incomplete_columns(self):
+ from google.cloud.bigquery.standard_sql import StandardSqlDataType
+ from google.cloud.bigquery.standard_sql import StandardSqlField
+
+ resource = {
+ "columns": [
+ {"type": {"typeKind": "BOOL"}}, # missing name
+ {"name": "bar"}, # missing type
+ ]
+ }
+
+ result = self._get_target_class().from_api_repr(resource)
+
+ assert len(result.columns) == 2
+
+ expected = StandardSqlField(
+ name=None,
+ type=StandardSqlDataType(type_kind=bq.StandardSqlTypeNames.BOOL),
+ )
+ assert result.columns[0] == expected
+
+ expected = StandardSqlField(
+ name="bar",
+ type=StandardSqlDataType(
+ type_kind=bq.StandardSqlTypeNames.TYPE_KIND_UNSPECIFIED
+ ),
+ )
+ assert result.columns[1] == expected
+
+ def test__eq__another_type(self):
+ instance = self._make_one(columns=[bq.StandardSqlField(name="foo")])
+
+ class TableTypeWannabe:
+ pass
+
+ not_a_type = TableTypeWannabe()
+ not_a_type._properties = instance._properties
+
+ assert instance != not_a_type # Can't fake it.
+
+ def test__eq__delegates_comparison_to_another_type(self):
+ instance = self._make_one(columns=[bq.StandardSqlField(name="foo")])
+ assert instance == mock.ANY
+
+ def test__eq__similar_instance(self):
+ kwargs = {
+ "columns": [
+ bq.StandardSqlField(name="foo"),
+ bq.StandardSqlField(name="bar"),
+ ]
+ }
+ instance = self._make_one(**kwargs)
+ instance2 = self._make_one(**kwargs)
+ assert instance == instance2
+
+ def test__eq__attribute_differs(self):
+ instance = self._make_one(columns=[bq.StandardSqlField(name="foo")])
+ instance2 = self._make_one(
+ columns=[bq.StandardSqlField(name="foo"), bq.StandardSqlField(name="bar")]
+ )
+ assert instance != instance2
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_table.py b/testbed/googleapis__python-bigquery/tests/unit/test_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6febcfb1753828d56ce2690fc193517c18b6dd9
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_table.py
@@ -0,0 +1,5824 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import logging
+import re
+from sys import version_info
+import time
+import types
+import unittest
+from unittest import mock
+import warnings
+
+import pytest
+
+import google.api_core.exceptions
+from test_utils.imports import maybe_fail_import
+
+from google.cloud.bigquery import _versions_helpers
+from google.cloud.bigquery import exceptions
+from google.cloud.bigquery.table import TableReference
+from google.cloud.bigquery.dataset import DatasetReference
+
+
+def _mock_client():
+ from google.cloud.bigquery import client
+
+ mock_client = mock.create_autospec(client.Client)
+ mock_client.project = "my-project"
+ return mock_client
+
+
+class _SchemaBase(object):
+ def _verify_field(self, field, r_field):
+ self.assertEqual(field.name, r_field["name"])
+ self.assertEqual(field.field_type, r_field["type"])
+ self.assertEqual(field.mode, r_field.get("mode", "NULLABLE"))
+
+ def _verifySchema(self, schema, resource):
+ r_fields = resource["schema"]["fields"]
+ self.assertEqual(len(schema), len(r_fields))
+
+ for field, r_field in zip(schema, r_fields):
+ self._verify_field(field, r_field)
+
+
+class TestEncryptionConfiguration(unittest.TestCase):
+ KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import EncryptionConfiguration
+
+ return EncryptionConfiguration
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_defaults(self):
+ encryption_config = self._make_one()
+ self.assertIsNone(encryption_config.kms_key_name)
+
+ def test_ctor_with_key(self):
+ encryption_config = self._make_one(kms_key_name=self.KMS_KEY_NAME)
+ self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME)
+
+
+class TestTableBase:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import _TableBase
+
+ return _TableBase
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_defaults(self):
+ instance = self._make_one()
+ assert instance._properties == {}
+
+ def test_project(self):
+ instance = self._make_one()
+ instance._properties = {"tableReference": {"projectId": "p_1"}}
+ assert instance.project == "p_1"
+
+ def test_dataset_id(self):
+ instance = self._make_one()
+ instance._properties = {"tableReference": {"datasetId": "ds_1"}}
+ assert instance.dataset_id == "ds_1"
+
+ def test_table_id(self):
+ instance = self._make_one()
+ instance._properties = {"tableReference": {"tableId": "tbl_1"}}
+ assert instance.table_id == "tbl_1"
+
+ def test_path(self):
+ instance = self._make_one()
+ instance._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ }
+ assert instance.path == "/projects/p_1/datasets/ds_1/tables/tbl_1"
+
+ def test___eq___wrong_type(self):
+ instance = self._make_one()
+ instance._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ }
+
+ class TableWannabe:
+ pass
+
+ wannabe_other = TableWannabe()
+ wannabe_other._properties = instance._properties
+ wannabe_other.project = "p_1"
+ wannabe_other.dataset_id = "ds_1"
+ wannabe_other.table_id = "tbl_1"
+
+ assert instance != wannabe_other # Can't fake it.
+ assert instance == mock.ANY # ...but delegation to other object works.
+
+ def test___eq___project_mismatch(self):
+ instance = self._make_one()
+ instance._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ }
+ other = self._make_one()
+ other._properties = {
+ "projectId": "p_2",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ assert instance != other
+
+ def test___eq___dataset_mismatch(self):
+ instance = self._make_one()
+ instance._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ }
+ other = self._make_one()
+ other._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_2",
+ "tableId": "tbl_1",
+ }
+ }
+ assert instance != other
+
+ def test___eq___table_mismatch(self):
+ instance = self._make_one()
+ instance._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ }
+ other = self._make_one()
+ other._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_2",
+ }
+ }
+ assert instance != other
+
+ def test___eq___equality(self):
+ instance = self._make_one()
+ instance._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ }
+ other = self._make_one()
+ other._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ }
+ assert instance == other
+
+ def test___hash__set_equality(self):
+ instance_1 = self._make_one()
+ instance_1._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ }
+
+ instance_2 = self._make_one()
+ instance_2._properties = {
+ "tableReference": {
+ "projectId": "p_2",
+ "datasetId": "ds_2",
+ "tableId": "tbl_2",
+ }
+ }
+
+ set_one = {instance_1, instance_2}
+ set_two = {instance_1, instance_2}
+ assert set_one == set_two
+
+ def test___hash__sets_not_equal(self):
+ instance_1 = self._make_one()
+ instance_1._properties = {
+ "tableReference": {
+ "projectId": "p_1",
+ "datasetId": "ds_1",
+ "tableId": "tbl_1",
+ }
+ }
+
+ instance_2 = self._make_one()
+ instance_2._properties = {
+ "tableReference": {
+ "projectId": "p_2",
+ "datasetId": "ds_2",
+ "tableId": "tbl_2",
+ }
+ }
+
+ set_one = {instance_1}
+ set_two = {instance_2}
+ assert set_one != set_two
+
+
+class TestTableReference(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import TableReference
+
+ return TableReference
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_ctor_defaults(self):
+ dataset_ref = DatasetReference("project_1", "dataset_1")
+
+ table_ref = self._make_one(dataset_ref, "table_1")
+ self.assertEqual(table_ref.dataset_id, dataset_ref.dataset_id)
+ self.assertEqual(table_ref.table_id, "table_1")
+
+ def test_to_api_repr(self):
+ dataset_ref = DatasetReference("project_1", "dataset_1")
+ table_ref = self._make_one(dataset_ref, "table_1")
+
+ resource = table_ref.to_api_repr()
+
+ self.assertEqual(
+ resource,
+ {"projectId": "project_1", "datasetId": "dataset_1", "tableId": "table_1"},
+ )
+
+ def test_from_api_repr(self):
+ from google.cloud.bigquery.table import TableReference
+
+ dataset_ref = DatasetReference("project_1", "dataset_1")
+ expected = self._make_one(dataset_ref, "table_1")
+
+ got = TableReference.from_api_repr(
+ {"projectId": "project_1", "datasetId": "dataset_1", "tableId": "table_1"}
+ )
+
+ self.assertEqual(expected, got)
+
+ def test_from_string(self):
+ cls = self._get_target_class()
+ got = cls.from_string("string-project.string_dataset.string_table")
+ self.assertEqual(got.project, "string-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+ self.assertEqual(got.table_id, "string_table")
+
+ def test_from_string_w_prefix(self):
+ cls = self._get_target_class()
+ got = cls.from_string("google.com:string-project.string_dataset.string_table")
+ self.assertEqual(got.project, "google.com:string-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+ self.assertEqual(got.table_id, "string_table")
+
+ def test_from_string_legacy_string(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("string-project:string_dataset.string_table")
+
+ def test_from_string_w_incorrect_prefix(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("google.com.string-project.string_dataset.string_table")
+
+ def test_from_string_not_fully_qualified(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("string_table")
+
+ with self.assertRaises(ValueError):
+ cls.from_string("string_dataset.string_table")
+
+ with self.assertRaises(ValueError):
+ cls.from_string("a.b.c.d")
+
+ def test_from_string_with_default_project(self):
+ cls = self._get_target_class()
+ got = cls.from_string(
+ "string_dataset.string_table", default_project="default-project"
+ )
+ self.assertEqual(got.project, "default-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+ self.assertEqual(got.table_id, "string_table")
+
+ def test_from_string_ignores_default_project(self):
+ cls = self._get_target_class()
+ got = cls.from_string(
+ "string-project.string_dataset.string_table",
+ default_project="default-project",
+ )
+ self.assertEqual(got.project, "string-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+ self.assertEqual(got.table_id, "string_table")
+
+ def test___repr__(self):
+ dataset = DatasetReference("project1", "dataset1")
+ table1 = self._make_one(dataset, "table1")
+ expected = (
+ "TableReference(DatasetReference('project1', 'dataset1'), " "'table1')"
+ )
+ self.assertEqual(repr(table1), expected)
+
+ def test___str__(self):
+ dataset = DatasetReference("project1", "dataset1")
+ table1 = self._make_one(dataset, "table1")
+ self.assertEqual(str(table1), "project1.dataset1.table1")
+
+
+class TestTable(unittest.TestCase, _SchemaBase):
+ PROJECT = "prahj-ekt"
+ DS_ID = "dataset-name"
+ TABLE_NAME = "table-name"
+ KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
+
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import Table
+
+ return Table
+
+ def _make_one(self, *args, **kw):
+ if len(args) == 0:
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ args = (table_ref,)
+
+ return self._get_target_class()(*args, **kw)
+
+ def _setUpConstants(self):
+ import datetime
+ from google.cloud._helpers import UTC
+
+ self.WHEN_TS = 1437767599.006
+ self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(tzinfo=UTC)
+ self.ETAG = "ETAG"
+ self.TABLE_FULL_ID = "%s:%s.%s" % (self.PROJECT, self.DS_ID, self.TABLE_NAME)
+ self.RESOURCE_URL = "http://example.com/path/to/resource"
+ self.NUM_BYTES = 12345
+ self.NUM_ROWS = 67
+ self.NUM_EST_BYTES = 1234
+ self.NUM_EST_ROWS = 23
+
+ def _make_resource(self):
+ self._setUpConstants()
+ return {
+ "creationTime": self.WHEN_TS * 1000,
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_NAME,
+ },
+ "schema": {
+ "fields": [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INTEGER", "mode": "REQUIRED"},
+ ]
+ },
+ "etag": "ETAG",
+ "id": self.TABLE_FULL_ID,
+ "lastModifiedTime": self.WHEN_TS * 1000,
+ "location": "US",
+ "selfLink": self.RESOURCE_URL,
+ "numRows": self.NUM_ROWS,
+ "numBytes": self.NUM_BYTES,
+ "type": "TABLE",
+ "streamingBuffer": {
+ "estimatedRows": str(self.NUM_EST_ROWS),
+ "estimatedBytes": str(self.NUM_EST_BYTES),
+ "oldestEntryTime": self.WHEN_TS * 1000,
+ },
+ "externalDataConfiguration": {
+ "sourceFormat": "CSV",
+ "csvOptions": {"allowJaggedRows": True, "encoding": "encoding"},
+ },
+ "labels": {"x": "y"},
+ }
+
+ def _verifyReadonlyResourceProperties(self, table, resource):
+ if "creationTime" in resource:
+ self.assertEqual(table.created, self.WHEN)
+ else:
+ self.assertIsNone(table.created)
+
+ if "etag" in resource:
+ self.assertEqual(table.etag, self.ETAG)
+ else:
+ self.assertIsNone(table.etag)
+
+ if "numRows" in resource:
+ self.assertEqual(table.num_rows, self.NUM_ROWS)
+ else:
+ self.assertIsNone(table.num_rows)
+
+ if "numBytes" in resource:
+ self.assertEqual(table.num_bytes, self.NUM_BYTES)
+ else:
+ self.assertIsNone(table.num_bytes)
+
+ if "selfLink" in resource:
+ self.assertEqual(table.self_link, self.RESOURCE_URL)
+ else:
+ self.assertIsNone(table.self_link)
+
+ if "streamingBuffer" in resource:
+ self.assertEqual(table.streaming_buffer.estimated_rows, self.NUM_EST_ROWS)
+ self.assertEqual(table.streaming_buffer.estimated_bytes, self.NUM_EST_BYTES)
+ self.assertEqual(table.streaming_buffer.oldest_entry_time, self.WHEN)
+ else:
+ self.assertIsNone(table.streaming_buffer)
+
+ self.assertEqual(table.full_table_id, self.TABLE_FULL_ID)
+ self.assertEqual(
+ table.table_type, "TABLE" if "view" not in resource else "VIEW"
+ )
+
+ def _verifyResourceProperties(self, table, resource):
+ self._verifyReadonlyResourceProperties(table, resource)
+
+ if "expirationTime" in resource:
+ self.assertEqual(table.expires, self.EXP_TIME)
+ else:
+ self.assertIsNone(table.expires)
+
+ self.assertEqual(table.description, resource.get("description"))
+ self.assertEqual(table.friendly_name, resource.get("friendlyName"))
+ self.assertEqual(table.location, resource.get("location"))
+
+ if "view" in resource:
+ self.assertEqual(table.view_query, resource["view"]["query"])
+ self.assertEqual(
+ table.view_use_legacy_sql, resource["view"].get("useLegacySql", True)
+ )
+ else:
+ self.assertIsNone(table.view_query)
+ self.assertIsNone(table.view_use_legacy_sql)
+
+ if "schema" in resource:
+ self._verifySchema(table.schema, resource)
+ else:
+ self.assertEqual(table.schema, [])
+
+ if "externalDataConfiguration" in resource:
+ edc = table.external_data_configuration
+ self.assertEqual(edc.source_format, "CSV")
+ self.assertEqual(edc.options.allow_jagged_rows, True)
+
+ if "labels" in resource:
+ self.assertEqual(table.labels, {"x": "y"})
+ else:
+ self.assertEqual(table.labels, {})
+
+ if "encryptionConfiguration" in resource:
+ self.assertIsNotNone(table.encryption_configuration)
+ self.assertEqual(
+ table.encryption_configuration.kms_key_name,
+ resource["encryptionConfiguration"]["kmsKeyName"],
+ )
+ else:
+ self.assertIsNone(table.encryption_configuration)
+
+ def test_ctor(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ self.assertEqual(table.table_id, self.TABLE_NAME)
+ self.assertEqual(table.project, self.PROJECT)
+ self.assertEqual(table.dataset_id, self.DS_ID)
+ self.assertEqual(table.reference.table_id, self.TABLE_NAME)
+ self.assertEqual(table.reference.project, self.PROJECT)
+ self.assertEqual(table.reference.dataset_id, self.DS_ID)
+ self.assertEqual(
+ table.path,
+ "/projects/%s/datasets/%s/tables/%s"
+ % (self.PROJECT, self.DS_ID, self.TABLE_NAME),
+ )
+ self.assertEqual(table.schema, [])
+
+ self.assertIsNone(table.created)
+ self.assertIsNone(table.etag)
+ self.assertIsNone(table.modified)
+ self.assertIsNone(table.num_bytes)
+ self.assertIsNone(table.num_rows)
+ self.assertIsNone(table.self_link)
+ self.assertIsNone(table.full_table_id)
+ self.assertIsNone(table.table_type)
+ self.assertIsNone(table.description)
+ self.assertIsNone(table.expires)
+ self.assertIsNone(table.friendly_name)
+ self.assertIsNone(table.location)
+ self.assertIsNone(table.view_query)
+ self.assertIsNone(table.view_use_legacy_sql)
+ self.assertIsNone(table.external_data_configuration)
+ self.assertEqual(table.labels, {})
+ self.assertIsNone(table.encryption_configuration)
+ self.assertIsNone(table.time_partitioning)
+ self.assertIsNone(table.clustering_fields)
+ self.assertIsNone(table.table_constraints)
+
+ def test_ctor_w_schema(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="REQUIRED")
+ table = self._make_one(table_ref, schema=[full_name, age])
+
+ self.assertEqual(table.schema, [full_name, age])
+
+ def test_ctor_string(self):
+ table = self._make_one("some-project.some_dset.some_tbl")
+ self.assertEqual(table.project, "some-project")
+ self.assertEqual(table.dataset_id, "some_dset")
+ self.assertEqual(table.table_id, "some_tbl")
+
+ def test_ctor_tablelistitem(self):
+ from google.cloud.bigquery.table import Table, TableListItem
+
+ import datetime
+ from google.cloud._helpers import _millis, UTC
+
+ self.WHEN_TS = 1437767599.125
+ self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)
+
+ project = "test-project"
+ dataset_id = "test_dataset"
+ table_id = "coffee_table"
+ resource = {
+ "creationTime": self.WHEN_TS * 1000,
+ "expirationTime": _millis(self.EXP_TIME),
+ "kind": "bigquery#table",
+ "id": "{}:{}.{}".format(project, dataset_id, table_id),
+ "tableReference": {
+ "projectId": project,
+ "datasetId": dataset_id,
+ "tableId": table_id,
+ },
+ "friendlyName": "Mahogany Coffee Table",
+ "type": "TABLE",
+ "timePartitioning": {
+ "type": "DAY",
+ "field": "mycolumn",
+ "expirationMs": "10000",
+ },
+ "labels": {"some-stuff": "this-is-a-label"},
+ "clustering": {"fields": ["string"]},
+ }
+
+ table_list_item = TableListItem(resource)
+ table = Table(table_list_item)
+
+ self.assertIsNone(table.created)
+ self.assertEqual(table.reference.project, project)
+ self.assertEqual(table.reference.dataset_id, dataset_id)
+ self.assertEqual(table.reference.table_id, table_id)
+
+ def test_ctor_string_wo_project_id(self):
+ with pytest.raises(ValueError):
+ # Project ID is missing.
+ self._make_one("some_dset.some_tbl")
+
+ def test_num_bytes_getter(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ # Check with no value set.
+ self.assertIsNone(table.num_bytes)
+
+ num_bytes = 1337
+ # Check with integer value set.
+ table._properties = {"numBytes": num_bytes}
+ self.assertEqual(table.num_bytes, num_bytes)
+
+ # Check with a string value set.
+ table._properties = {"numBytes": str(num_bytes)}
+ self.assertEqual(table.num_bytes, num_bytes)
+
+ # Check with invalid int value.
+ table._properties = {"numBytes": "x"}
+ with self.assertRaises(ValueError):
+ getattr(table, "num_bytes")
+
+ def test_num_rows_getter(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ # Check with no value set.
+ self.assertIsNone(table.num_rows)
+
+ num_rows = 42
+ # Check with integer value set.
+ table._properties = {"numRows": num_rows}
+ self.assertEqual(table.num_rows, num_rows)
+
+ # Check with a string value set.
+ table._properties = {"numRows": str(num_rows)}
+ self.assertEqual(table.num_rows, num_rows)
+
+ # Check with invalid int value.
+ table._properties = {"numRows": "x"}
+ with self.assertRaises(ValueError):
+ getattr(table, "num_rows")
+
+ def test__eq__same_table_property_different(self):
+ table_1 = self._make_one("project_foo.dataset_bar.table_baz")
+ table_1.description = "This is table baz"
+
+ table_2 = self._make_one("project_foo.dataset_bar.table_baz")
+ table_2.description = "This is also table baz"
+
+ assert table_1 == table_2 # Still equal, only table reference is important.
+
+ def test_hashable(self):
+ table_1 = self._make_one("project_foo.dataset_bar.table_baz")
+ table_1.description = "This is a table"
+
+ table_1b = self._make_one("project_foo.dataset_bar.table_baz")
+ table_1b.description = "Metadata is irrelevant for hashes"
+
+ assert hash(table_1) == hash(table_1b)
+
+ def test_schema_setter_non_sequence(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ with self.assertRaises(TypeError):
+ table.schema = object()
+
+ def test_schema_setter_invalid_field(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ with self.assertRaises(ValueError):
+ table.schema = [full_name, object()]
+
+ def test_schema_setter_valid_fields(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="REQUIRED")
+ table.schema = [full_name, age]
+ self.assertEqual(table.schema, [full_name, age])
+
+ def test_schema_setter_invalid_mapping_representation(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ full_name = {"name": "full_name", "type": "STRING", "mode": "REQUIRED"}
+ invalid_field = {"name": "full_name", "typeooo": "STRING", "mode": "REQUIRED"}
+ with self.assertRaises(Exception):
+ table.schema = [full_name, invalid_field]
+
+ def test_schema_setter_valid_mapping_representation(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ full_name = {"name": "full_name", "type": "STRING", "mode": "REQUIRED"}
+ job_status = {
+ "name": "is_employed",
+ "type": "STRUCT",
+ "mode": "NULLABLE",
+ "fields": [
+ {"name": "foo", "type": "DATE", "mode": "NULLABLE"},
+ {"name": "bar", "type": "BYTES", "mode": "REQUIRED"},
+ ],
+ }
+
+ table.schema = [full_name, job_status]
+
+ expected_schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField(
+ "is_employed",
+ "STRUCT",
+ mode="NULLABLE",
+ fields=[
+ SchemaField("foo", "DATE", mode="NULLABLE"),
+ SchemaField("bar", "BYTES", mode="REQUIRED"),
+ ],
+ ),
+ ]
+ self.assertEqual(table.schema, expected_schema)
+
+ def test_props_set_by_server(self):
+ import datetime
+ from google.cloud._helpers import UTC
+ from google.cloud._helpers import _millis
+
+ CREATED = datetime.datetime(2015, 7, 29, 12, 13, 22, tzinfo=UTC)
+ MODIFIED = datetime.datetime(2015, 7, 29, 14, 47, 15, tzinfo=UTC)
+ TABLE_FULL_ID = "%s:%s.%s" % (self.PROJECT, self.DS_ID, self.TABLE_NAME)
+ URL = "http://example.com/projects/%s/datasets/%s/tables/%s" % (
+ self.PROJECT,
+ self.DS_ID,
+ self.TABLE_NAME,
+ )
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table._properties["creationTime"] = _millis(CREATED)
+ table._properties["etag"] = "ETAG"
+ table._properties["lastModifiedTime"] = _millis(MODIFIED)
+ table._properties["numBytes"] = 12345
+ table._properties["numRows"] = 66
+ table._properties["selfLink"] = URL
+ table._properties["id"] = TABLE_FULL_ID
+ table._properties["type"] = "TABLE"
+
+ self.assertEqual(table.created, CREATED)
+ self.assertEqual(table.etag, "ETAG")
+ self.assertEqual(table.modified, MODIFIED)
+ self.assertEqual(table.num_bytes, 12345)
+ self.assertEqual(table.num_rows, 66)
+ self.assertEqual(table.self_link, URL)
+ self.assertEqual(table.full_table_id, TABLE_FULL_ID)
+ self.assertEqual(table.table_type, "TABLE")
+
+ def test_snapshot_definition_not_set(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ assert table.snapshot_definition is None
+
+ def test_snapshot_definition_set(self):
+ from google.cloud._helpers import UTC
+ from google.cloud.bigquery.table import SnapshotDefinition
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ table._properties["snapshotDefinition"] = {
+ "baseTableReference": {
+ "projectId": "project_x",
+ "datasetId": "dataset_y",
+ "tableId": "table_z",
+ },
+ "snapshotTime": "2010-09-28T10:20:30.123Z",
+ }
+
+ snapshot = table.snapshot_definition
+
+ assert isinstance(snapshot, SnapshotDefinition)
+ assert snapshot.base_table_reference.path == (
+ "/projects/project_x/datasets/dataset_y/tables/table_z"
+ )
+ assert snapshot.snapshot_time == datetime.datetime(
+ 2010, 9, 28, 10, 20, 30, 123000, tzinfo=UTC
+ )
+
+ def test_clone_definition_not_set(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ assert table.clone_definition is None
+
+ def test_clone_definition_set(self):
+ from google.cloud._helpers import UTC
+ from google.cloud.bigquery.table import CloneDefinition
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ table._properties["cloneDefinition"] = {
+ "baseTableReference": {
+ "projectId": "project_x",
+ "datasetId": "dataset_y",
+ "tableId": "table_z",
+ },
+ "cloneTime": "2010-09-28T10:20:30.123Z",
+ }
+
+ clone = table.clone_definition
+
+ assert isinstance(clone, CloneDefinition)
+ assert clone.base_table_reference.path == (
+ "/projects/project_x/datasets/dataset_y/tables/table_z"
+ )
+ assert clone.clone_time == datetime.datetime(
+ 2010, 9, 28, 10, 20, 30, 123000, tzinfo=UTC
+ )
+
+ def test_table_constraints_property_getter(self):
+ from google.cloud.bigquery.table import PrimaryKey, TableConstraints
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table._properties["tableConstraints"] = {
+ "primaryKey": {"columns": ["id"]},
+ }
+
+ table_constraints = table.table_constraints
+
+ assert isinstance(table_constraints, TableConstraints)
+ assert table_constraints.primary_key == PrimaryKey(columns=["id"])
+
+ def test_description_setter_bad_value(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ with self.assertRaises(ValueError):
+ table.description = 12345
+
+ def test_description_setter(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table.description = "DESCRIPTION"
+ self.assertEqual(table.description, "DESCRIPTION")
+
+ def test_expires_setter_bad_value(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ with self.assertRaises(ValueError):
+ table.expires = object()
+
+ def test_expires_setter(self):
+ import datetime
+ from google.cloud._helpers import UTC
+
+ WHEN = datetime.datetime(2015, 7, 28, 16, 39, tzinfo=UTC)
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table.expires = WHEN
+ self.assertEqual(table.expires, WHEN)
+
+ def test_friendly_name_setter_bad_value(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ with self.assertRaises(ValueError):
+ table.friendly_name = 12345
+
+ def test_friendly_name_setter(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table.friendly_name = "FRIENDLY"
+ self.assertEqual(table.friendly_name, "FRIENDLY")
+
+ def test_view_query_setter_bad_value(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ with self.assertRaises(ValueError):
+ table.view_query = 12345
+
+ def test_view_query_setter(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table.view_query = "select * from foo"
+ self.assertEqual(table.view_query, "select * from foo")
+ self.assertEqual(table.view_use_legacy_sql, False)
+
+ table.view_use_legacy_sql = True
+ self.assertEqual(table.view_use_legacy_sql, True)
+
+ def test_view_query_deleter(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table.view_query = "select * from foo"
+ del table.view_query
+ self.assertIsNone(table.view_query)
+ self.assertIsNone(table.view_use_legacy_sql)
+
+ def test_view_use_legacy_sql_setter_bad_value(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ with self.assertRaises(ValueError):
+ table.view_use_legacy_sql = 12345
+
+ def test_view_use_legacy_sql_setter(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table.view_use_legacy_sql = True
+ table.view_query = "select * from foo"
+ self.assertEqual(table.view_use_legacy_sql, True)
+ self.assertEqual(table.view_query, "select * from foo")
+
+ def test_external_data_configuration_setter(self):
+ from google.cloud.bigquery.external_config import ExternalConfig
+
+ external_config = ExternalConfig("CSV")
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ table.external_data_configuration = external_config
+
+ self.assertEqual(
+ table.external_data_configuration.source_format,
+ external_config.source_format,
+ )
+
+ def test_external_data_configuration_setter_none(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ table.external_data_configuration = None
+
+ self.assertIsNone(table.external_data_configuration)
+
+ def test_external_data_configuration_setter_bad_value(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ with self.assertRaises(ValueError):
+ table.external_data_configuration = 12345
+
+ def test_labels_update_in_place(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ del table._properties["labels"] # don't start w/ existing dict
+ labels = table.labels
+ labels["foo"] = "bar" # update in place
+ self.assertEqual(table.labels, {"foo": "bar"})
+
+ def test_labels_setter_bad_value(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ with self.assertRaises(ValueError):
+ table.labels = 12345
+
+ def test_mview_query(self):
+ table = self._make_one()
+ self.assertIsNone(table.mview_query)
+ table.mview_query = "SELECT name, SUM(number) FROM dset.tbl GROUP BY 1"
+ self.assertEqual(
+ table.mview_query, "SELECT name, SUM(number) FROM dset.tbl GROUP BY 1"
+ )
+ del table.mview_query
+ self.assertIsNone(table.mview_query)
+
+ def test_mview_last_refresh_time(self):
+ table = self._make_one()
+ self.assertIsNone(table.mview_last_refresh_time)
+ table._properties["materializedView"] = {
+ "lastRefreshTime": "1606751842496",
+ }
+ self.assertEqual(
+ table.mview_last_refresh_time,
+ datetime.datetime(
+ 2020, 11, 30, 15, 57, 22, 496000, tzinfo=datetime.timezone.utc
+ ),
+ )
+
+ def test_mview_enable_refresh(self):
+ table = self._make_one()
+ self.assertIsNone(table.mview_enable_refresh)
+ table.mview_enable_refresh = True
+ self.assertTrue(table.mview_enable_refresh)
+ table.mview_enable_refresh = False
+ self.assertFalse(table.mview_enable_refresh)
+ table.mview_enable_refresh = None
+ self.assertIsNone(table.mview_enable_refresh)
+
+ def test_mview_refresh_interval(self):
+ table = self._make_one()
+ self.assertIsNone(table.mview_refresh_interval)
+ table.mview_refresh_interval = datetime.timedelta(minutes=30)
+ self.assertEqual(table.mview_refresh_interval, datetime.timedelta(minutes=30))
+ self.assertEqual(
+ table._properties["materializedView"]["refreshIntervalMs"], "1800000"
+ )
+ table.mview_refresh_interval = None
+ self.assertIsNone(table.mview_refresh_interval)
+
+ def test_from_string(self):
+ cls = self._get_target_class()
+ got = cls.from_string("string-project.string_dataset.string_table")
+ self.assertEqual(got.project, "string-project")
+ self.assertEqual(got.dataset_id, "string_dataset")
+ self.assertEqual(got.table_id, "string_table")
+ self.assertEqual(
+ str(got.reference), "string-project.string_dataset.string_table"
+ )
+
+ def test_from_string_legacy_string(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("string-project:string_dataset.string_table")
+
+ def test_from_string_not_fully_qualified(self):
+ cls = self._get_target_class()
+ with self.assertRaises(ValueError):
+ cls.from_string("string_dataset.string_table")
+
+ def test_from_api_repr_missing_identity(self):
+ self._setUpConstants()
+ RESOURCE = {}
+ klass = self._get_target_class()
+ with self.assertRaises(KeyError):
+ klass.from_api_repr(RESOURCE)
+
+ def test_from_api_repr_bare(self):
+ self._setUpConstants()
+ RESOURCE = {
+ "id": "%s:%s.%s" % (self.PROJECT, self.DS_ID, self.TABLE_NAME),
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_NAME,
+ },
+ "type": "TABLE",
+ }
+ klass = self._get_target_class()
+ table = klass.from_api_repr(RESOURCE)
+ self.assertEqual(table.table_id, self.TABLE_NAME)
+ self._verifyResourceProperties(table, RESOURCE)
+
+ def test_from_api_repr_w_properties(self):
+ import datetime
+ from google.cloud._helpers import UTC
+ from google.cloud._helpers import _millis
+
+ RESOURCE = self._make_resource()
+ RESOURCE["view"] = {"query": "select fullname, age from person_ages"}
+ RESOURCE["type"] = "VIEW"
+ RESOURCE["location"] = "EU"
+ self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)
+ RESOURCE["expirationTime"] = _millis(self.EXP_TIME)
+ klass = self._get_target_class()
+ table = klass.from_api_repr(RESOURCE)
+ self._verifyResourceProperties(table, RESOURCE)
+
+ def test_from_api_repr_w_partial_streamingbuffer(self):
+ import datetime
+ from google.cloud._helpers import UTC
+ from google.cloud._helpers import _millis
+
+ RESOURCE = self._make_resource()
+ self.OLDEST_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)
+ RESOURCE["streamingBuffer"] = {"oldestEntryTime": _millis(self.OLDEST_TIME)}
+ klass = self._get_target_class()
+ table = klass.from_api_repr(RESOURCE)
+ self.assertIsNotNone(table.streaming_buffer)
+ self.assertIsNone(table.streaming_buffer.estimated_rows)
+ self.assertIsNone(table.streaming_buffer.estimated_bytes)
+ self.assertEqual(table.streaming_buffer.oldest_entry_time, self.OLDEST_TIME)
+ # Another partial construction
+ RESOURCE["streamingBuffer"] = {"estimatedRows": 1}
+ klass = self._get_target_class()
+ table = klass.from_api_repr(RESOURCE)
+ self.assertIsNotNone(table.streaming_buffer)
+ self.assertEqual(table.streaming_buffer.estimated_rows, 1)
+ self.assertIsNone(table.streaming_buffer.estimated_bytes)
+ self.assertIsNone(table.streaming_buffer.oldest_entry_time)
+
+ def test_from_api_with_encryption(self):
+ self._setUpConstants()
+ RESOURCE = {
+ "id": "%s:%s.%s" % (self.PROJECT, self.DS_ID, self.TABLE_NAME),
+ "tableReference": {
+ "projectId": self.PROJECT,
+ "datasetId": self.DS_ID,
+ "tableId": self.TABLE_NAME,
+ },
+ "encryptionConfiguration": {"kmsKeyName": self.KMS_KEY_NAME},
+ "type": "TABLE",
+ }
+ klass = self._get_target_class()
+ table = klass.from_api_repr(RESOURCE)
+ self._verifyResourceProperties(table, RESOURCE)
+
+ def test_to_api_repr_w_custom_field(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table._properties["newAlphaProperty"] = "unreleased property"
+ resource = table.to_api_repr()
+
+ exp_resource = {
+ "tableReference": table_ref.to_api_repr(),
+ "labels": {},
+ "newAlphaProperty": "unreleased property",
+ }
+ self.assertEqual(resource, exp_resource)
+
+ def test_to_api_repr_w_unsetting_expiration(self):
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table.partition_expiration = None
+ resource = table.to_api_repr()
+
+ exp_resource = {
+ "tableReference": table_ref.to_api_repr(),
+ "labels": {},
+ "timePartitioning": {
+ "expirationMs": None,
+ "type": TimePartitioningType.DAY,
+ },
+ }
+ self.assertEqual(resource, exp_resource)
+
+ def test__build_resource_w_custom_field(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table._properties["newAlphaProperty"] = "unreleased property"
+ resource = table._build_resource(["newAlphaProperty"])
+
+ exp_resource = {"newAlphaProperty": "unreleased property"}
+ self.assertEqual(resource, exp_resource)
+
+ def test__build_resource_w_custom_field_not_in__properties(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table = self._make_one(dataset.table(self.TABLE_NAME))
+ table.bad = "value"
+ with self.assertRaises(ValueError):
+ table._build_resource(["bad"])
+
+ def test_range_partitioning(self):
+ from google.cloud.bigquery.table import RangePartitioning
+ from google.cloud.bigquery.table import PartitionRange
+
+ table = self._make_one("proj.dset.tbl")
+ assert table.range_partitioning is None
+
+ table.range_partitioning = RangePartitioning(
+ field="col1", range_=PartitionRange(start=-512, end=1024, interval=128)
+ )
+ assert table.range_partitioning.field == "col1"
+ assert table.range_partitioning.range_.start == -512
+ assert table.range_partitioning.range_.end == 1024
+ assert table.range_partitioning.range_.interval == 128
+
+ table.range_partitioning = None
+ assert table.range_partitioning is None
+
+ def test_range_partitioning_w_wrong_type(self):
+ object_under_test = self._make_one("proj.dset.tbl")
+ with pytest.raises(ValueError, match="RangePartitioning"):
+ object_under_test.range_partitioning = object()
+
+ def test_require_partitioning_filter(self):
+ table = self._make_one("proj.dset.tbl")
+ assert table.require_partition_filter is None
+ table.require_partition_filter = True
+ assert table.require_partition_filter
+ table.require_partition_filter = False
+ assert table.require_partition_filter is not None
+ assert not table.require_partition_filter
+ table.require_partition_filter = None
+ assert table.require_partition_filter is None
+
+ def test_time_partitioning_getter(self):
+ from google.cloud.bigquery.table import TimePartitioning
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ table._properties["timePartitioning"] = {
+ "type": "DAY",
+ "field": "col1",
+ "expirationMs": "123456",
+ "requirePartitionFilter": False,
+ }
+ self.assertIsInstance(table.time_partitioning, TimePartitioning)
+ self.assertEqual(table.time_partitioning.type_, TimePartitioningType.DAY)
+ self.assertEqual(table.time_partitioning.field, "col1")
+ self.assertEqual(table.time_partitioning.expiration_ms, 123456)
+
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertFalse(table.time_partitioning.require_partition_filter)
+
+ assert len(warned) == 1
+ self.assertIs(warned[0].category, PendingDeprecationWarning)
+
+ def test_time_partitioning_getter_w_none(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ table._properties["timePartitioning"] = None
+ self.assertIsNone(table.time_partitioning)
+
+ del table._properties["timePartitioning"]
+ self.assertIsNone(table.time_partitioning)
+
+ def test_time_partitioning_getter_w_empty(self):
+ from google.cloud.bigquery.table import TimePartitioning
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ # Even though there are required properties according to the API
+ # specification, sometimes time partitioning is populated as an empty
+ # object. See internal bug 131167013.
+ table._properties["timePartitioning"] = {}
+ self.assertIsInstance(table.time_partitioning, TimePartitioning)
+ self.assertIsNone(table.time_partitioning.type_)
+ self.assertIsNone(table.time_partitioning.field)
+ self.assertIsNone(table.time_partitioning.expiration_ms)
+
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertIsNone(table.time_partitioning.require_partition_filter)
+
+ for warning in warned:
+ self.assertIs(warning.category, PendingDeprecationWarning)
+
+ def test_time_partitioning_setter(self):
+ from google.cloud.bigquery.table import TimePartitioning
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ time_partitioning = TimePartitioning(type_=TimePartitioningType.HOUR)
+
+ table.time_partitioning = time_partitioning
+
+ self.assertEqual(table.time_partitioning.type_, TimePartitioningType.HOUR)
+ # Both objects point to the same properties dict
+ self.assertIs(
+ table._properties["timePartitioning"], time_partitioning._properties
+ )
+
+ time_partitioning.expiration_ms = 10000
+
+ # Changes to TimePartitioning object are reflected in Table properties
+ self.assertEqual(
+ table.time_partitioning.expiration_ms, time_partitioning.expiration_ms
+ )
+
+ def test_time_partitioning_setter_bad_type(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ with self.assertRaises(ValueError):
+ table.time_partitioning = {"timePartitioning": {"type": "DAY"}}
+
+ def test_time_partitioning_setter_none(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ table.time_partitioning = None
+
+ self.assertIsNone(table.time_partitioning)
+
+ def test_partitioning_type_setter(self):
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertIsNone(table.partitioning_type)
+
+ table.partitioning_type = TimePartitioningType.DAY
+
+ self.assertEqual(table.partitioning_type, "DAY")
+
+ self.assertEqual(len(warned), 3)
+ for warning in warned:
+ self.assertIs(warning.category, PendingDeprecationWarning)
+
+ def test_partitioning_type_setter_w_time_partitioning_set(self):
+ from google.cloud.bigquery.table import TimePartitioning
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table.time_partitioning = TimePartitioning()
+
+ with warnings.catch_warnings(record=True) as warned:
+ table.partitioning_type = "NEW_FAKE_TYPE"
+
+ self.assertEqual(table.partitioning_type, "NEW_FAKE_TYPE")
+
+ self.assertEqual(len(warned), 2)
+ for warning in warned:
+ self.assertIs(warning.category, PendingDeprecationWarning)
+
+ def test_partitioning_expiration_setter_w_time_partitioning_set(self):
+ from google.cloud.bigquery.table import TimePartitioning
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ table.time_partitioning = TimePartitioning()
+
+ with warnings.catch_warnings(record=True) as warned:
+ table.partition_expiration = 100000
+
+ self.assertEqual(table.partition_expiration, 100000)
+
+ self.assertEqual(len(warned), 2)
+ for warning in warned:
+ self.assertIs(warning.category, PendingDeprecationWarning)
+
+ def test_partition_expiration_setter(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertIsNone(table.partition_expiration)
+
+ table.partition_expiration = 100
+
+ self.assertEqual(table.partition_expiration, 100)
+ # defaults to 'DAY' when expiration is set and type is not set
+ self.assertEqual(table.partitioning_type, "DAY")
+
+ self.assertEqual(len(warned), 4)
+ for warning in warned:
+ self.assertIs(warning.category, PendingDeprecationWarning)
+
+ def test_clustering_fields_setter_w_fields(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ fields = ["email", "phone"]
+
+ table.clustering_fields = fields
+ self.assertEqual(table.clustering_fields, fields)
+ self.assertEqual(table._properties["clustering"], {"fields": fields})
+
+ def test_clustering_fields_setter_w_none(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ fields = ["email", "phone"]
+
+ table._properties["clustering"] = {"fields": fields}
+ table.clustering_fields = None
+ self.assertIsNone(table.clustering_fields)
+ self.assertTrue("clustering" in table._properties) # None stored explicitly
+
+ def test_clustering_fields_setter_w_none_noop(self):
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+
+ table.clustering_fields = None
+ self.assertIsNone(table.clustering_fields)
+ self.assertTrue("clustering" in table._properties) # None stored explicitly
+
+ def test_encryption_configuration_setter(self):
+ # Previously, the EncryptionConfiguration class was in the table module, not the
+ # encryption_configuration module. It was moved to support models encryption.
+ # This test import from the table module to ensure that the previous location
+ # continues to function as an alias.
+ from google.cloud.bigquery.table import EncryptionConfiguration
+
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = self._make_one(table_ref)
+ encryption_configuration = EncryptionConfiguration(
+ kms_key_name=self.KMS_KEY_NAME
+ )
+ table.encryption_configuration = encryption_configuration
+ self.assertEqual(table.encryption_configuration.kms_key_name, self.KMS_KEY_NAME)
+ table.encryption_configuration = None
+ self.assertIsNone(table.encryption_configuration)
+
+ def test___repr__(self):
+ from google.cloud.bigquery.table import TableReference
+
+ dataset = DatasetReference("project1", "dataset1")
+ table1 = self._make_one(TableReference(dataset, "table1"))
+ expected = (
+ "Table(TableReference("
+ "DatasetReference('project1', 'dataset1'), "
+ "'table1'))"
+ )
+ self.assertEqual(repr(table1), expected)
+
+ def test___str__(self):
+ dataset = DatasetReference("project1", "dataset1")
+ table1 = self._make_one(TableReference(dataset, "table1"))
+ self.assertEqual(str(table1), "project1.dataset1.table1")
+
+
+class Test_row_from_mapping(unittest.TestCase, _SchemaBase):
+ PROJECT = "prahj-ekt"
+ DS_ID = "dataset-name"
+ TABLE_NAME = "table-name"
+
+ def _call_fut(self, mapping, schema):
+ from google.cloud.bigquery.table import _row_from_mapping
+
+ return _row_from_mapping(mapping, schema)
+
+ def test__row_from_mapping_wo_schema(self):
+ from google.cloud.bigquery.table import Table, _TABLE_HAS_NO_SCHEMA
+
+ MAPPING = {"full_name": "Phred Phlyntstone", "age": 32}
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ table = Table(table_ref)
+
+ with self.assertRaises(ValueError) as exc:
+ self._call_fut(MAPPING, table.schema)
+
+ self.assertEqual(exc.exception.args, (_TABLE_HAS_NO_SCHEMA,))
+
+ def test__row_from_mapping_w_invalid_schema(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ MAPPING = {
+ "full_name": "Phred Phlyntstone",
+ "age": 32,
+ "colors": ["red", "green"],
+ "bogus": "WHATEVER",
+ }
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="REQUIRED")
+ colors = SchemaField("colors", "DATETIME", mode="REPEATED")
+ bogus = SchemaField("joined", "STRING", mode="BOGUS")
+ table = Table(table_ref, schema=[full_name, age, colors, bogus])
+
+ with self.assertRaises(ValueError) as exc:
+ self._call_fut(MAPPING, table.schema)
+
+ self.assertIn("Unknown field mode: BOGUS", str(exc.exception))
+
+ def test__row_from_mapping_w_schema(self):
+ from google.cloud.bigquery.schema import SchemaField
+ from google.cloud.bigquery.table import Table
+
+ MAPPING = {
+ "full_name": "Phred Phlyntstone",
+ "age": 32,
+ "colors": ["red", "green"],
+ "extra": "IGNORED",
+ }
+ dataset = DatasetReference(self.PROJECT, self.DS_ID)
+ table_ref = dataset.table(self.TABLE_NAME)
+ full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
+ age = SchemaField("age", "INTEGER", mode="REQUIRED")
+ colors = SchemaField("colors", "DATETIME", mode="REPEATED")
+ joined = SchemaField("joined", "STRING", mode="NULLABLE")
+ table = Table(table_ref, schema=[full_name, age, colors, joined])
+
+ self.assertEqual(
+ self._call_fut(MAPPING, table.schema),
+ ("Phred Phlyntstone", 32, ["red", "green"], None),
+ )
+
+
+class TestTableListItem(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import TableListItem
+
+ return TableListItem
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def _setUpConstants(self):
+ from google.cloud._helpers import UTC
+
+ self.WHEN_TS = 1437767599.125
+ self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(tzinfo=UTC)
+ self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)
+
+ def test_ctor(self):
+ from google.cloud._helpers import _millis
+
+ self._setUpConstants()
+ project = "test-project"
+ dataset_id = "test_dataset"
+ table_id = "coffee_table"
+ resource = {
+ "creationTime": self.WHEN_TS * 1000,
+ "expirationTime": _millis(self.EXP_TIME),
+ "kind": "bigquery#table",
+ "id": "{}:{}.{}".format(project, dataset_id, table_id),
+ "tableReference": {
+ "projectId": project,
+ "datasetId": dataset_id,
+ "tableId": table_id,
+ },
+ "friendlyName": "Mahogany Coffee Table",
+ "type": "TABLE",
+ "timePartitioning": {
+ "type": "DAY",
+ "field": "mycolumn",
+ "expirationMs": "10000",
+ },
+ "labels": {"some-stuff": "this-is-a-label"},
+ "clustering": {"fields": ["string"]},
+ }
+
+ table = self._make_one(resource)
+
+ self.assertEqual(table.created, self.WHEN)
+ self.assertEqual(table.expires, self.EXP_TIME)
+ self.assertEqual(table.project, project)
+ self.assertEqual(table.dataset_id, dataset_id)
+ self.assertEqual(table.table_id, table_id)
+ self.assertEqual(
+ table.full_table_id, "{}:{}.{}".format(project, dataset_id, table_id)
+ )
+ self.assertEqual(table.reference.project, project)
+ self.assertEqual(table.reference.dataset_id, dataset_id)
+ self.assertEqual(table.reference.table_id, table_id)
+ self.assertEqual(table.friendly_name, "Mahogany Coffee Table")
+ self.assertEqual(table.table_type, "TABLE")
+ self.assertEqual(table.time_partitioning.type_, "DAY")
+ self.assertEqual(table.time_partitioning.expiration_ms, 10000)
+ self.assertEqual(table.time_partitioning.field, "mycolumn")
+ self.assertEqual(table.labels["some-stuff"], "this-is-a-label")
+ self.assertIsNone(table.view_use_legacy_sql)
+ self.assertEqual(table.clustering_fields, ["string"])
+
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertEqual(table.partitioning_type, "DAY")
+ self.assertEqual(table.partition_expiration, 10000)
+
+ self.assertEqual(len(warned), 2)
+ for warning in warned:
+ self.assertIs(warning.category, PendingDeprecationWarning)
+
+ def test_ctor_view(self):
+ project = "test-project"
+ dataset_id = "test_dataset"
+ table_id = "just_looking"
+ resource = {
+ "kind": "bigquery#table",
+ "id": "{}:{}.{}".format(project, dataset_id, table_id),
+ "tableReference": {
+ "projectId": project,
+ "datasetId": dataset_id,
+ "tableId": table_id,
+ },
+ "type": "VIEW",
+ }
+
+ table = self._make_one(resource)
+ self.assertEqual(table.project, project)
+ self.assertEqual(table.dataset_id, dataset_id)
+ self.assertEqual(table.table_id, table_id)
+ self.assertEqual(
+ table.full_table_id, "{}:{}.{}".format(project, dataset_id, table_id)
+ )
+ self.assertEqual(table.reference.project, project)
+ self.assertEqual(table.reference.dataset_id, dataset_id)
+ self.assertEqual(table.reference.table_id, table_id)
+ self.assertEqual(table.table_type, "VIEW")
+ # Server default for useLegacySql is True.
+ self.assertTrue(table.view_use_legacy_sql)
+
+ def test_ctor_missing_properties(self):
+ resource = {
+ "tableReference": {
+ "projectId": "testproject",
+ "datasetId": "testdataset",
+ "tableId": "testtable",
+ }
+ }
+ table = self._make_one(resource)
+ self.assertEqual(table.project, "testproject")
+ self.assertEqual(table.dataset_id, "testdataset")
+ self.assertEqual(table.table_id, "testtable")
+ self.assertIsNone(table.created)
+ self.assertIsNone(table.expires)
+ self.assertIsNone(table.clustering_fields)
+ self.assertIsNone(table.full_table_id)
+ self.assertIsNone(table.friendly_name)
+ self.assertIsNone(table.table_type)
+ self.assertIsNone(table.time_partitioning)
+ self.assertEqual(table.labels, {})
+ self.assertIsNone(table.view_use_legacy_sql)
+
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertIsNone(table.partitioning_type)
+ self.assertIsNone(table.partition_expiration)
+
+ self.assertEqual(len(warned), 2)
+ for warning in warned:
+ self.assertIs(warning.category, PendingDeprecationWarning)
+
+ def test_ctor_wo_project(self):
+ resource = {
+ "tableReference": {"datasetId": "testdataset", "tableId": "testtable"}
+ }
+ with self.assertRaises(ValueError):
+ self._make_one(resource)
+
+ def test_ctor_wo_dataset(self):
+ resource = {
+ "tableReference": {"projectId": "testproject", "tableId": "testtable"}
+ }
+ with self.assertRaises(ValueError):
+ self._make_one(resource)
+
+ def test_ctor_wo_table(self):
+ resource = {
+ "tableReference": {"projectId": "testproject", "datasetId": "testdataset"}
+ }
+ with self.assertRaises(ValueError):
+ self._make_one(resource)
+
+ def test_ctor_wo_reference(self):
+ with self.assertRaises(ValueError):
+ self._make_one({})
+
+ def test_labels_update_in_place(self):
+ resource = {
+ "tableReference": {
+ "projectId": "testproject",
+ "datasetId": "testdataset",
+ "tableId": "testtable",
+ }
+ }
+ table = self._make_one(resource)
+ labels = table.labels
+ labels["foo"] = "bar" # update in place
+ self.assertEqual(table.labels, {"foo": "bar"})
+
+ def test_to_api_repr(self):
+ resource = {
+ "tableReference": {
+ "projectId": "testproject",
+ "datasetId": "testdataset",
+ "tableId": "testtable",
+ }
+ }
+ table = self._make_one(resource)
+ self.assertEqual(table.to_api_repr(), resource)
+
+ def test__eq__same_table_property_different(self):
+ table_ref_resource = {
+ "projectId": "project_foo",
+ "datasetId": "dataset_bar",
+ "tableId": "table_baz",
+ }
+
+ resource_1 = {"tableReference": table_ref_resource, "friendlyName": "Table One"}
+ table_1 = self._make_one(resource_1)
+
+ resource_2 = {"tableReference": table_ref_resource, "friendlyName": "Table Two"}
+ table_2 = self._make_one(resource_2)
+
+ assert table_1 == table_2 # Still equal, only table reference is important.
+
+
+class TestTableClassesInterchangeability:
+ @staticmethod
+ def _make_table(*args, **kwargs):
+ from google.cloud.bigquery.table import Table
+
+ return Table(*args, **kwargs)
+
+ @staticmethod
+ def _make_table_ref(*args, **kwargs):
+ from google.cloud.bigquery.table import TableReference
+
+ return TableReference(*args, **kwargs)
+
+ @staticmethod
+ def _make_table_list_item(*args, **kwargs):
+ from google.cloud.bigquery.table import TableListItem
+
+ return TableListItem(*args, **kwargs)
+
+ def test_table_eq_table_ref(self):
+ table = self._make_table("project_foo.dataset_bar.table_baz")
+ dataset_ref = DatasetReference("project_foo", "dataset_bar")
+ table_ref = self._make_table_ref(dataset_ref, "table_baz")
+
+ assert table == table_ref
+ assert table_ref == table
+
+ def test_table_eq_table_list_item(self):
+ table = self._make_table("project_foo.dataset_bar.table_baz")
+ table_list_item = self._make_table_list_item(
+ {
+ "tableReference": {
+ "projectId": "project_foo",
+ "datasetId": "dataset_bar",
+ "tableId": "table_baz",
+ }
+ }
+ )
+
+ assert table == table_list_item
+ assert table_list_item == table
+
+ def test_table_ref_eq_table_list_item(self):
+ dataset_ref = DatasetReference("project_foo", "dataset_bar")
+ table_ref = self._make_table_ref(dataset_ref, "table_baz")
+ table_list_item = self._make_table_list_item(
+ {
+ "tableReference": {
+ "projectId": "project_foo",
+ "datasetId": "dataset_bar",
+ "tableId": "table_baz",
+ }
+ }
+ )
+
+ assert table_ref == table_list_item
+ assert table_list_item == table_ref
+
+
+class TestSnapshotDefinition:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import SnapshotDefinition
+
+ return SnapshotDefinition
+
+ @classmethod
+ def _make_one(cls, *args, **kwargs):
+ klass = cls._get_target_class()
+ return klass(*args, **kwargs)
+
+ def test_ctor_empty_resource(self):
+ instance = self._make_one(resource={})
+ assert instance.base_table_reference is None
+ assert instance.snapshot_time is None
+
+ def test_ctor_full_resource(self):
+ from google.cloud._helpers import UTC
+ from google.cloud.bigquery.table import TableReference
+
+ resource = {
+ "baseTableReference": {
+ "projectId": "my-project",
+ "datasetId": "your-dataset",
+ "tableId": "our-table",
+ },
+ "snapshotTime": "2005-06-07T19:35:02.123Z",
+ }
+ instance = self._make_one(resource)
+
+ expected_table_ref = TableReference.from_string(
+ "my-project.your-dataset.our-table"
+ )
+ assert instance.base_table_reference == expected_table_ref
+
+ expected_time = datetime.datetime(2005, 6, 7, 19, 35, 2, 123000, tzinfo=UTC)
+ assert instance.snapshot_time == expected_time
+
+
+class TestCloneDefinition:
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import CloneDefinition
+
+ return CloneDefinition
+
+ @classmethod
+ def _make_one(cls, *args, **kwargs):
+ klass = cls._get_target_class()
+ return klass(*args, **kwargs)
+
+ def test_ctor_empty_resource(self):
+ instance = self._make_one(resource={})
+ assert instance.base_table_reference is None
+ assert instance.clone_time is None
+
+ def test_ctor_full_resource(self):
+ from google.cloud._helpers import UTC
+ from google.cloud.bigquery.table import TableReference
+
+ resource = {
+ "baseTableReference": {
+ "projectId": "my-project",
+ "datasetId": "your-dataset",
+ "tableId": "our-table",
+ },
+ "cloneTime": "2005-06-07T19:35:02.123Z",
+ }
+ instance = self._make_one(resource)
+
+ expected_table_ref = TableReference.from_string(
+ "my-project.your-dataset.our-table"
+ )
+ assert instance.base_table_reference == expected_table_ref
+
+ expected_time = datetime.datetime(2005, 6, 7, 19, 35, 2, 123000, tzinfo=UTC)
+ assert instance.clone_time == expected_time
+
+
+class TestRow(unittest.TestCase):
+ def test_row(self):
+ from google.cloud.bigquery.table import Row
+
+ VALUES = (1, 2, 3)
+ row = Row(VALUES, {"a": 0, "b": 1, "c": 2})
+ self.assertEqual(row.a, 1)
+ self.assertEqual(row[1], 2)
+ self.assertEqual(row["c"], 3)
+ self.assertEqual(len(row), 3)
+ self.assertEqual(row.values(), VALUES)
+ self.assertEqual(set(row.keys()), set({"a": 1, "b": 2, "c": 3}.keys()))
+ self.assertEqual(set(row.items()), set({"a": 1, "b": 2, "c": 3}.items()))
+ self.assertEqual(row.get("a"), 1)
+ self.assertEqual(row.get("d"), None)
+ self.assertEqual(row.get("d", ""), "")
+ self.assertEqual(row.get("d", default=""), "")
+ self.assertEqual(repr(row), "Row((1, 2, 3), {'a': 0, 'b': 1, 'c': 2})")
+ self.assertFalse(row != row)
+ self.assertFalse(row == 3)
+ with self.assertRaises(AttributeError):
+ row.z
+ with self.assertRaises(KeyError):
+ row["z"]
+
+
+class Test_EmptyRowIterator(unittest.TestCase):
+ PYARROW_MINIMUM_VERSION = str(_versions_helpers._MIN_PYARROW_VERSION)
+
+ def _make_one(self):
+ from google.cloud.bigquery.table import _EmptyRowIterator
+
+ return _EmptyRowIterator()
+
+ def test_total_rows_eq_zero(self):
+ row_iterator = self._make_one()
+ self.assertEqual(row_iterator.total_rows, 0)
+
+ @mock.patch("google.cloud.bigquery.table.pyarrow", new=None)
+ def test_to_arrow_error_if_pyarrow_is_none(self):
+ row_iterator = self._make_one()
+ with self.assertRaises(ValueError):
+ row_iterator.to_arrow()
+
+ def test_to_arrow(self):
+ pyarrow = pytest.importorskip("pyarrow")
+ row_iterator = self._make_one()
+ tbl = row_iterator.to_arrow()
+ self.assertIsInstance(tbl, pyarrow.Table)
+ self.assertEqual(tbl.num_rows, 0)
+
+ def test_to_arrow_iterable(self):
+ pyarrow = pytest.importorskip(
+ "pyarrow", minversion=self.PYARROW_MINIMUM_VERSION
+ )
+ row_iterator = self._make_one()
+ arrow_iter = row_iterator.to_arrow_iterable()
+
+ result = list(arrow_iter)
+
+ self.assertEqual(len(result), 1)
+ record_batch = result[0]
+ self.assertIsInstance(record_batch, pyarrow.RecordBatch)
+ self.assertEqual(record_batch.num_rows, 0)
+ self.assertEqual(record_batch.num_columns, 0)
+
+ @mock.patch("google.cloud.bigquery._pandas_helpers.pandas", new=None)
+ def test_to_dataframe_error_if_pandas_is_none(self):
+ row_iterator = self._make_one()
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe()
+
+ def test_to_dataframe(self):
+ pandas = pytest.importorskip("pandas")
+ row_iterator = self._make_one()
+ df = row_iterator.to_dataframe(create_bqstorage_client=False)
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 0) # verify the number of rows
+
+ @mock.patch("google.cloud.bigquery._pandas_helpers.pandas", new=None)
+ def test_to_dataframe_iterable_error_if_pandas_is_none(self):
+ row_iterator = self._make_one()
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe_iterable()
+
+ def test_to_dataframe_iterable(self):
+ pandas = pytest.importorskip("pandas")
+ row_iterator = self._make_one()
+ df_iter = row_iterator.to_dataframe_iterable()
+
+ result = list(df_iter)
+
+ self.assertEqual(len(result), 1)
+ df = result[0]
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 0) # Verify the number of rows.
+ self.assertEqual(len(df.columns), 0)
+
+ @mock.patch("google.cloud.bigquery.table.geopandas", new=None)
+ def test_to_geodataframe_if_geopandas_is_none(self):
+ row_iterator = self._make_one()
+ with self.assertRaisesRegex(
+ ValueError,
+ re.escape(
+ "The geopandas library is not installed, please install "
+ "geopandas to use the to_geodataframe() function."
+ ),
+ ):
+ row_iterator.to_geodataframe(create_bqstorage_client=False)
+
+ def test_to_geodataframe(self):
+ geopandas = pytest.importorskip("geopandas")
+ row_iterator = self._make_one()
+ df = row_iterator.to_geodataframe(create_bqstorage_client=False)
+ self.assertIsInstance(df, geopandas.GeoDataFrame)
+ self.assertEqual(len(df), 0) # verify the number of rows
+ if version_info.major == 3 and version_info.minor > 7:
+ assert not hasattr(df, "crs") # used with Python > 3.7
+ else:
+ self.assertIsNone(df.crs) # used with Python == 3.7
+
+
+class TestRowIterator(unittest.TestCase):
+ PYARROW_MINIMUM_VERSION = str(_versions_helpers._MIN_PYARROW_VERSION)
+
+ def _class_under_test(self):
+ from google.cloud.bigquery.table import RowIterator
+
+ return RowIterator
+
+ def _make_one(
+ self,
+ client=None,
+ api_request=None,
+ path=None,
+ schema=None,
+ table=None,
+ **kwargs
+ ):
+ from google.cloud.bigquery.table import TableReference
+
+ if client is None:
+ client = _mock_client()
+
+ if api_request is None:
+ api_request = mock.sentinel.api_request
+
+ if path is None:
+ path = "/foo"
+
+ if schema is None:
+ schema = []
+
+ if table is None:
+ table = TableReference.from_string("my-project.my_dataset.my_table")
+
+ return self._class_under_test()(
+ client, api_request, path, schema, table=table, **kwargs
+ )
+
+ def _make_one_from_data(self, schema=(), rows=()):
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [SchemaField(*a) for a in schema]
+ rows = [{"f": [{"v": v} for v in row]} for row in rows]
+
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ return self._make_one(_mock_client(), api_request, path, schema)
+
+ def test_constructor(self):
+ from google.cloud.bigquery.table import _item_to_row
+ from google.cloud.bigquery.table import _rows_page_start
+
+ client = _mock_client()
+ path = "/some/path"
+ iterator = self._make_one(client=client, path=path)
+
+ # Objects are set without copying.
+ self.assertIs(iterator.client, client)
+ self.assertIs(iterator.item_to_value, _item_to_row)
+ self.assertIs(iterator._page_start, _rows_page_start)
+ # Properties have the expect value.
+ self.assertEqual(iterator.extra_params, {})
+ self.assertEqual(iterator._items_key, "rows")
+ self.assertIsNone(iterator.max_results)
+ self.assertEqual(iterator.path, path)
+ self.assertFalse(iterator._started)
+ self.assertIsNone(iterator.total_rows)
+ # Changing attributes.
+ self.assertEqual(iterator.page_number, 0)
+ self.assertIsNone(iterator.next_page_token)
+ self.assertEqual(iterator.num_results, 0)
+
+ def test_constructor_with_table(self):
+ from google.cloud.bigquery.table import Table
+
+ table = Table("proj.dset.tbl")
+ iterator = self._make_one(table=table, total_rows=100)
+ self.assertIs(iterator._table, table)
+ self.assertEqual(iterator.total_rows, 100)
+
+ def test_constructor_with_dict_schema(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
+ {"name": "age", "type": "INT64", "mode": "NULLABLE"},
+ ]
+
+ iterator = self._make_one(schema=schema)
+
+ expected_schema = [
+ SchemaField("full_name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INT64", mode="NULLABLE"),
+ ]
+ self.assertEqual(iterator.schema, expected_schema)
+
+ def test_job_id_missing(self):
+ rows = self._make_one()
+ self.assertIsNone(rows.job_id)
+
+ def test_job_id_present(self):
+ rows = self._make_one(job_id="abc-123")
+ self.assertEqual(rows.job_id, "abc-123")
+
+ def test_location_missing(self):
+ rows = self._make_one()
+ self.assertIsNone(rows.location)
+
+ def test_location_present(self):
+ rows = self._make_one(location="asia-northeast1")
+ self.assertEqual(rows.location, "asia-northeast1")
+
+ def test_num_dml_affected_rows_missing(self):
+ rows = self._make_one()
+ self.assertIsNone(rows.num_dml_affected_rows)
+
+ def test_num_dml_affected_rows_present(self):
+ rows = self._make_one(num_dml_affected_rows=1234)
+ self.assertEqual(rows.num_dml_affected_rows, 1234)
+
+ def test_project_missing(self):
+ rows = self._make_one()
+ self.assertIsNone(rows.project)
+
+ def test_project_present(self):
+ rows = self._make_one(project="test-project")
+ self.assertEqual(rows.project, "test-project")
+
+ def test_query_id_missing(self):
+ rows = self._make_one()
+ self.assertIsNone(rows.query_id)
+
+ def test_query_id_present(self):
+ rows = self._make_one(query_id="xyz-987")
+ self.assertEqual(rows.query_id, "xyz-987")
+
+ def test_iterate(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+ self.assertEqual(row_iterator.num_results, 0)
+
+ rows_iter = iter(row_iterator)
+
+ val1 = next(rows_iter)
+ self.assertEqual(val1.name, "Phred Phlyntstone")
+ self.assertEqual(row_iterator.num_results, 1)
+
+ val2 = next(rows_iter)
+ self.assertEqual(val2.name, "Bharney Rhubble")
+ self.assertEqual(row_iterator.num_results, 2)
+
+ with self.assertRaises(StopIteration):
+ next(rows_iter)
+
+ api_request.assert_called_once_with(method="GET", path=path, query_params={})
+
+ def test_iterate_with_cached_first_page(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ first_page = {
+ "rows": [
+ {"f": [{"v": "Whillma Phlyntstone"}, {"v": "27"}]},
+ {"f": [{"v": "Bhetty Rhubble"}, {"v": "28"}]},
+ ],
+ "pageToken": "next-page",
+ }
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(
+ _mock_client(),
+ api_request,
+ path,
+ schema,
+ first_page_response=first_page,
+ total_rows=4,
+ )
+ self.assertEqual(row_iterator.total_rows, 4)
+ rows = list(row_iterator)
+ # Total rows should be maintained, even though subsequent API calls
+ # don't include it.
+ self.assertEqual(row_iterator.total_rows, 4)
+ self.assertEqual(len(rows), 4)
+ self.assertEqual(rows[0].age, 27)
+ self.assertEqual(rows[1].age, 28)
+ self.assertEqual(rows[2].age, 32)
+ self.assertEqual(rows[3].age, 33)
+
+ api_request.assert_called_once_with(
+ method="GET", path=path, query_params={"pageToken": "next-page"}
+ )
+
+ def test_iterate_with_cached_first_page_max_results(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ first_page = {
+ "rows": [
+ {"f": [{"v": "Whillma Phlyntstone"}, {"v": "27"}]},
+ {"f": [{"v": "Bhetty Rhubble"}, {"v": "28"}]},
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ],
+ "pageToken": "next-page",
+ }
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value=first_page)
+ row_iterator = self._make_one(
+ _mock_client(),
+ api_request,
+ path,
+ schema,
+ max_results=3,
+ first_page_response=first_page,
+ )
+ rows = list(row_iterator)
+ self.assertEqual(len(rows), 3)
+ self.assertEqual(rows[0].age, 27)
+ self.assertEqual(rows[1].age, 28)
+ self.assertEqual(rows[2].age, 32)
+ api_request.assert_not_called()
+
+ def test_page_size(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+
+ row_iterator = self._make_one(
+ _mock_client(), api_request, path, schema, page_size=4
+ )
+ row_iterator._get_next_page_response()
+
+ api_request.assert_called_once_with(
+ method="GET",
+ path=path,
+ query_params={"maxResults": row_iterator._page_size},
+ )
+
+ def test__is_almost_completely_cached_returns_false_without_first_page(self):
+ iterator = self._make_one(first_page_response=None)
+ self.assertFalse(iterator._is_almost_completely_cached())
+
+ def test__is_almost_completely_cached_returns_true_with_more_rows_than_max_results(
+ self,
+ ):
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Whillma Phlyntstone"}, {"v": "27"}]},
+ {"f": [{"v": "Bhetty Rhubble"}, {"v": "28"}]},
+ ]
+ first_page = {"pageToken": "next-page", "rows": rows}
+ iterator = self._make_one(max_results=4, first_page_response=first_page)
+ self.assertTrue(iterator._is_almost_completely_cached())
+
+ def test__is_almost_completely_cached_returns_false_with_too_many_rows_remaining(
+ self,
+ ):
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ first_page = {"pageToken": "next-page", "rows": rows}
+ iterator = self._make_one(first_page_response=first_page, total_rows=100)
+ self.assertFalse(iterator._is_almost_completely_cached())
+
+ def test__is_almost_completely_cached_returns_false_with_rows_remaining_and_no_total_rows(
+ self,
+ ):
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ first_page = {"pageToken": "next-page", "rows": rows}
+ iterator = self._make_one(first_page_response=first_page)
+ self.assertFalse(iterator._is_almost_completely_cached())
+
+ def test__is_almost_completely_cached_returns_true_with_some_rows_remaining(self):
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Whillma Phlyntstone"}, {"v": "27"}]},
+ {"f": [{"v": "Bhetty Rhubble"}, {"v": "28"}]},
+ {"f": [{"v": "Pebbles Phlyntstone"}, {"v": "4"}]},
+ {"f": [{"v": "Bamm-Bamm Rhubble"}, {"v": "5"}]},
+ {"f": [{"v": "Joseph Rockhead"}, {"v": "32"}]},
+ {"f": [{"v": "Perry Masonry"}, {"v": "33"}]},
+ ]
+ first_page = {"pageToken": "next-page", "rows": rows}
+ iterator = self._make_one(
+ first_page_response=first_page, total_rows=len(rows) + 1
+ )
+ self.assertTrue(iterator._is_almost_completely_cached())
+
+ def test__is_almost_completely_cached_returns_true_with_no_rows_remaining(self):
+ first_page = {"rows": []}
+ iterator = self._make_one(first_page_response=first_page)
+ self.assertTrue(iterator._is_almost_completely_cached())
+
+ def test__should_use_bqstorage_returns_false_when_completely_cached(self):
+ first_page = {"rows": []}
+ iterator = self._make_one(first_page_response=first_page)
+ self.assertFalse(
+ iterator._should_use_bqstorage(
+ bqstorage_client=None, create_bqstorage_client=True
+ )
+ )
+
+ def test__should_use_bqstorage_returns_true_if_no_cached_results(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ iterator = self._make_one(first_page_response=None) # not cached
+ result = iterator._should_use_bqstorage(
+ bqstorage_client=None, create_bqstorage_client=True
+ )
+ self.assertTrue(result)
+
+ def test__should_use_bqstorage_returns_false_if_page_token_set(self):
+ iterator = self._make_one(
+ page_token="abc", first_page_response=None # not cached
+ )
+ result = iterator._should_use_bqstorage(
+ bqstorage_client=None, create_bqstorage_client=True
+ )
+ self.assertFalse(result)
+
+ def test__should_use_bqstorage_returns_false_if_max_results_set(self):
+ iterator = self._make_one(
+ max_results=10, first_page_response=None # not cached
+ )
+ result = iterator._should_use_bqstorage(
+ bqstorage_client=None, create_bqstorage_client=True
+ )
+ self.assertFalse(result)
+
+ def test__should_use_bqstorage_returns_false_w_warning_if_missing_dependency(self):
+ iterator = self._make_one(first_page_response=None) # not cached
+
+ def fail_bqstorage_import(name, globals, locals, fromlist, level):
+ """Returns True if bigquery_storage has been imported."""
+ # NOTE: *very* simplified, assuming a straightforward absolute import
+ return "bigquery_storage" in name or (
+ fromlist is not None and "bigquery_storage" in fromlist
+ )
+
+ # maybe_fail_import() returns ImportError if the predicate is True
+ no_bqstorage = maybe_fail_import(predicate=fail_bqstorage_import)
+
+ with no_bqstorage, warnings.catch_warnings(record=True) as warned:
+ result = iterator._should_use_bqstorage(
+ bqstorage_client=None, create_bqstorage_client=True
+ )
+
+ self.assertFalse(result)
+
+ matching_warnings = [
+ warning for warning in warned if "Storage module not found" in str(warning)
+ ]
+ assert matching_warnings, "Dependency not found warning not raised."
+
+ def test__should_use_bqstorage_returns_false_w_warning_if_obsolete_version(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ iterator = self._make_one(first_page_response=None) # not cached
+
+ patcher = mock.patch(
+ "google.cloud.bigquery.table._versions_helpers.BQ_STORAGE_VERSIONS.try_import",
+ side_effect=exceptions.LegacyBigQueryStorageError("BQ Storage too old"),
+ )
+ with patcher, warnings.catch_warnings(record=True) as warned:
+ result = iterator._should_use_bqstorage(
+ bqstorage_client=None, create_bqstorage_client=True
+ )
+
+ self.assertFalse(result)
+
+ matching_warnings = [
+ warning for warning in warned if "BQ Storage too old" in str(warning)
+ ]
+ assert matching_warnings, "Obsolete dependency warning not raised."
+
+ def test_to_arrow_iterable(self):
+ pyarrow = pytest.importorskip(
+ "pyarrow", minversion=self.PYARROW_MINIMUM_VERSION
+ )
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField(
+ "child",
+ "RECORD",
+ mode="REPEATED",
+ fields=[
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ],
+ ),
+ ]
+ rows = [
+ {
+ "f": [
+ {"v": "Bharney Rhubble"},
+ {"v": "33"},
+ {
+ "v": [
+ {"v": {"f": [{"v": "Whamm-Whamm Rhubble"}, {"v": "3"}]}},
+ {"v": {"f": [{"v": "Hoppy"}, {"v": "1"}]}},
+ ]
+ },
+ ]
+ },
+ {
+ "f": [
+ {"v": "Wylma Phlyntstone"},
+ {"v": "29"},
+ {
+ "v": [
+ {"v": {"f": [{"v": "Bepples Phlyntstone"}, {"v": "0"}]}},
+ {"v": {"f": [{"v": "Dino"}, {"v": "4"}]}},
+ ]
+ },
+ ]
+ },
+ ]
+ path = "/foo"
+ api_request = mock.Mock(
+ side_effect=[
+ {"rows": [rows[0]], "pageToken": "NEXTPAGE"},
+ {"rows": [rows[1]]},
+ ]
+ )
+ row_iterator = self._make_one(
+ _mock_client(), api_request, path, schema, page_size=1, max_results=5
+ )
+
+ record_batches = row_iterator.to_arrow_iterable()
+ self.assertIsInstance(record_batches, types.GeneratorType)
+ record_batches = list(record_batches)
+ self.assertEqual(len(record_batches), 2)
+
+ # Check the schema.
+ for record_batch in record_batches:
+ self.assertIsInstance(record_batch, pyarrow.RecordBatch)
+ self.assertEqual(record_batch.schema[0].name, "name")
+ self.assertTrue(pyarrow.types.is_string(record_batch.schema[0].type))
+ self.assertEqual(record_batch.schema[1].name, "age")
+ self.assertTrue(pyarrow.types.is_int64(record_batch.schema[1].type))
+ child_field = record_batch.schema[2]
+ self.assertEqual(child_field.name, "child")
+ self.assertTrue(pyarrow.types.is_list(child_field.type))
+ self.assertTrue(pyarrow.types.is_struct(child_field.type.value_type))
+ self.assertEqual(child_field.type.value_type[0].name, "name")
+ self.assertEqual(child_field.type.value_type[1].name, "age")
+
+ # Check the data.
+ record_batch_1 = record_batches[0].to_pydict()
+ names = record_batch_1["name"]
+ ages = record_batch_1["age"]
+ children = record_batch_1["child"]
+ self.assertEqual(names, ["Bharney Rhubble"])
+ self.assertEqual(ages, [33])
+ self.assertEqual(
+ children,
+ [
+ [
+ {"name": "Whamm-Whamm Rhubble", "age": 3},
+ {"name": "Hoppy", "age": 1},
+ ],
+ ],
+ )
+
+ record_batch_2 = record_batches[1].to_pydict()
+ names = record_batch_2["name"]
+ ages = record_batch_2["age"]
+ children = record_batch_2["child"]
+ self.assertEqual(names, ["Wylma Phlyntstone"])
+ self.assertEqual(ages, [29])
+ self.assertEqual(
+ children,
+ [[{"name": "Bepples Phlyntstone", "age": 0}, {"name": "Dino", "age": 4}]],
+ )
+
+ def test_to_arrow_iterable_w_bqstorage(self):
+ pyarrow = pytest.importorskip("pyarrow")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery_storage_v1 import reader
+ from google.cloud.bigquery_storage_v1.services.big_query_read.transports import (
+ grpc as big_query_read_grpc_transport,
+ )
+
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client._transport = mock.create_autospec(
+ big_query_read_grpc_transport.BigQueryReadGrpcTransport
+ )
+ streams = [
+ # Use two streams we want to check frames are read from each stream.
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
+ ]
+ session = bigquery_storage.types.ReadSession(streams=streams)
+ arrow_schema = pyarrow.schema(
+ [
+ pyarrow.field("colA", pyarrow.int64()),
+ # Not alphabetical to test column order.
+ pyarrow.field("colC", pyarrow.float64()),
+ pyarrow.field("colB", pyarrow.string()),
+ ]
+ )
+ session.arrow_schema.serialized_schema = arrow_schema.serialize().to_pybytes()
+ bqstorage_client.create_read_session.return_value = session
+
+ mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ bqstorage_client.read_rows.return_value = mock_rowstream
+
+ mock_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_rowstream.rows.return_value = mock_rows
+ page_items = [
+ pyarrow.array([1, -1]),
+ pyarrow.array([2.0, 4.0]),
+ pyarrow.array(["abc", "def"]),
+ ]
+
+ expected_record_batch = pyarrow.RecordBatch.from_arrays(
+ page_items, schema=arrow_schema
+ )
+ expected_num_record_batches = 3
+
+ mock_page = mock.create_autospec(reader.ReadRowsPage)
+ mock_page.to_arrow.return_value = expected_record_batch
+ mock_pages = (mock_page,) * expected_num_record_batches
+ type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
+
+ schema = [
+ schema.SchemaField("colA", "INTEGER"),
+ schema.SchemaField("colC", "FLOAT"),
+ schema.SchemaField("colB", "STRING"),
+ ]
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ schema,
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ selected_fields=schema,
+ )
+
+ record_batches = list(
+ row_iterator.to_arrow_iterable(bqstorage_client=bqstorage_client)
+ )
+ total_record_batches = len(streams) * len(mock_pages)
+ self.assertEqual(len(record_batches), total_record_batches)
+
+ for record_batch in record_batches:
+ # Are the record batches return as expected?
+ self.assertEqual(record_batch, expected_record_batch)
+
+ # Don't close the client if it was passed in.
+ bqstorage_client._transport.grpc_channel.close.assert_not_called()
+
+ def test_to_arrow(self):
+ pyarrow = pytest.importorskip(
+ "pyarrow", minversion=self.PYARROW_MINIMUM_VERSION
+ )
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField(
+ "child",
+ "RECORD",
+ mode="REPEATED",
+ fields=[
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ],
+ ),
+ ]
+ rows = [
+ {
+ "f": [
+ {"v": "Bharney Rhubble"},
+ {"v": "33"},
+ {
+ "v": [
+ {"v": {"f": [{"v": "Whamm-Whamm Rhubble"}, {"v": "3"}]}},
+ {"v": {"f": [{"v": "Hoppy"}, {"v": "1"}]}},
+ ]
+ },
+ ]
+ },
+ {
+ "f": [
+ {"v": "Wylma Phlyntstone"},
+ {"v": "29"},
+ {
+ "v": [
+ {"v": {"f": [{"v": "Bepples Phlyntstone"}, {"v": "0"}]}},
+ {"v": {"f": [{"v": "Dino"}, {"v": "4"}]}},
+ ]
+ },
+ ]
+ },
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ tbl = row_iterator.to_arrow(create_bqstorage_client=False)
+
+ self.assertIsInstance(tbl, pyarrow.Table)
+ self.assertEqual(tbl.num_rows, 2)
+
+ # Check the schema.
+ self.assertEqual(tbl.schema[0].name, "name")
+ self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))
+ self.assertEqual(tbl.schema[1].name, "age")
+ self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))
+ child_field = tbl.schema[2]
+ self.assertEqual(child_field.name, "child")
+ self.assertTrue(pyarrow.types.is_list(child_field.type))
+ self.assertTrue(pyarrow.types.is_struct(child_field.type.value_type))
+ self.assertEqual(child_field.type.value_type[0].name, "name")
+ self.assertEqual(child_field.type.value_type[1].name, "age")
+
+ # Check the data.
+ tbl_data = tbl.to_pydict()
+ names = tbl_data["name"]
+ ages = tbl_data["age"]
+ children = tbl_data["child"]
+ self.assertEqual(names, ["Bharney Rhubble", "Wylma Phlyntstone"])
+ self.assertEqual(ages, [33, 29])
+ self.assertEqual(
+ children,
+ [
+ [
+ {"name": "Whamm-Whamm Rhubble", "age": 3},
+ {"name": "Hoppy", "age": 1},
+ ],
+ [{"name": "Bepples Phlyntstone", "age": 0}, {"name": "Dino", "age": 4}],
+ ],
+ )
+
+ def test_to_arrow_w_nulls(self):
+ pyarrow = pytest.importorskip(
+ "pyarrow", minversion=self.PYARROW_MINIMUM_VERSION
+ )
+ import pyarrow.types
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [SchemaField("name", "STRING"), SchemaField("age", "INTEGER")]
+ rows = [
+ {"f": [{"v": "Donkey"}, {"v": 32}]},
+ {"f": [{"v": "Diddy"}, {"v": 29}]},
+ {"f": [{"v": "Dixie"}, {"v": None}]},
+ {"f": [{"v": None}, {"v": 111}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ tbl = row_iterator.to_arrow(create_bqstorage_client=False)
+
+ self.assertIsInstance(tbl, pyarrow.Table)
+ self.assertEqual(tbl.num_rows, 4)
+
+ # Check the schema.
+ self.assertEqual(tbl.schema[0].name, "name")
+ self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))
+ self.assertEqual(tbl.schema[1].name, "age")
+ self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))
+
+ # Check the data.
+ tbl_data = tbl.to_pydict()
+ names = tbl_data["name"]
+ ages = tbl_data["age"]
+ self.assertEqual(names, ["Donkey", "Diddy", "Dixie", None])
+ self.assertEqual(ages, [32, 29, None, 111])
+
+ def test_to_arrow_w_unknown_type(self):
+ pyarrow = pytest.importorskip(
+ "pyarrow", minversion=self.PYARROW_MINIMUM_VERSION
+ )
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField("sport", "UNKNOWN_TYPE", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}, {"v": "volleyball"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}, {"v": "basketball"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ with warnings.catch_warnings(record=True) as warned:
+ tbl = row_iterator.to_arrow(create_bqstorage_client=False)
+
+ self.assertIsInstance(tbl, pyarrow.Table)
+ self.assertEqual(tbl.num_rows, 2)
+
+ # Check the schema.
+ self.assertEqual(tbl.schema[0].name, "name")
+ self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))
+ self.assertEqual(tbl.schema[1].name, "age")
+ self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))
+ self.assertEqual(tbl.schema[2].name, "sport")
+
+ # Check the data.
+ tbl_data = tbl.to_pydict()
+ names = tbl_data["name"]
+ ages = tbl_data["age"]
+ sports = tbl_data["sport"]
+ self.assertEqual(names, ["Bharney Rhubble", "Wylma Phlyntstone"])
+ self.assertEqual(ages, [33, 29])
+ self.assertEqual(sports, ["volleyball", "basketball"])
+
+ # Expect warning from both the arrow conversion, and the json deserialization.
+ self.assertEqual(len(warned), 2)
+ self.assertTrue(all("sport" in str(warning) for warning in warned))
+
+ def test_to_arrow_w_empty_table(self):
+ pyarrow = pytest.importorskip(
+ "pyarrow", minversion=self.PYARROW_MINIMUM_VERSION
+ )
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ SchemaField(
+ "child",
+ "RECORD",
+ mode="REPEATED",
+ fields=[
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ],
+ ),
+ ]
+ rows = []
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ tbl = row_iterator.to_arrow(create_bqstorage_client=False)
+
+ self.assertIsInstance(tbl, pyarrow.Table)
+ self.assertEqual(tbl.num_rows, 0)
+
+ # Check the schema.
+ self.assertEqual(tbl.schema[0].name, "name")
+ self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))
+ self.assertEqual(tbl.schema[1].name, "age")
+ self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))
+ child_field = tbl.schema[2]
+ self.assertEqual(child_field.name, "child")
+ self.assertTrue(pyarrow.types.is_list(child_field.type))
+ self.assertTrue(pyarrow.types.is_struct(child_field.type.value_type))
+ self.assertEqual(child_field.type.value_type[0].name, "name")
+ self.assertEqual(child_field.type.value_type[1].name, "age")
+
+ def test_to_arrow_max_results_w_explicit_bqstorage_client_warning(self):
+ pytest.importorskip("pyarrow")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ mock_client = _mock_client()
+ mock_bqstorage_client = mock.sentinel.bq_storage_client
+
+ row_iterator = self._make_one(
+ client=mock_client,
+ api_request=api_request,
+ path=path,
+ schema=schema,
+ max_results=42,
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ row_iterator.to_arrow(bqstorage_client=mock_bqstorage_client)
+
+ matches = [
+ warning
+ for warning in warned
+ if warning.category is UserWarning
+ and "cannot use bqstorage_client" in str(warning).lower()
+ and "REST" in str(warning)
+ ]
+ self.assertEqual(len(matches), 1, msg="User warning was not emitted.")
+ self.assertIn(
+ __file__, str(matches[0]), msg="Warning emitted with incorrect stacklevel"
+ )
+ mock_client._ensure_bqstorage_client.assert_not_called()
+
+ def test_to_arrow_max_results_w_create_bqstorage_client_no_warning(self):
+ pytest.importorskip("pyarrow")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ mock_client = _mock_client()
+
+ row_iterator = self._make_one(
+ client=mock_client,
+ api_request=api_request,
+ path=path,
+ schema=schema,
+ max_results=42,
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ row_iterator.to_arrow(create_bqstorage_client=True)
+
+ matches = [
+ warning
+ for warning in warned
+ if warning.category is UserWarning
+ and "cannot use bqstorage_client" in str(warning).lower()
+ and "REST" in str(warning)
+ ]
+ self.assertFalse(matches)
+ mock_client._ensure_bqstorage_client.assert_not_called()
+
+ def test_to_arrow_w_bqstorage(self):
+ pyarrow = pytest.importorskip("pyarrow")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery_storage_v1 import reader
+ from google.cloud.bigquery_storage_v1.services.big_query_read.transports import (
+ grpc as big_query_read_grpc_transport,
+ )
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client._transport = mock.create_autospec(
+ big_query_read_grpc_transport.BigQueryReadGrpcTransport
+ )
+ streams = [
+ # Use two streams we want to check frames are read from each stream.
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
+ ]
+ session = bigquery_storage.types.ReadSession(streams=streams)
+ arrow_schema = pyarrow.schema(
+ [
+ pyarrow.field("colA", pyarrow.int64()),
+ # Not alphabetical to test column order.
+ pyarrow.field("colC", pyarrow.float64()),
+ pyarrow.field("colB", pyarrow.string()),
+ ]
+ )
+ session.arrow_schema.serialized_schema = arrow_schema.serialize().to_pybytes()
+ bqstorage_client.create_read_session.return_value = session
+
+ mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ bqstorage_client.read_rows.return_value = mock_rowstream
+
+ mock_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_rowstream.rows.return_value = mock_rows
+ expected_num_rows = 2
+ expected_num_columns = 3
+ page_items = [
+ pyarrow.array([1, -1]),
+ pyarrow.array([2.0, 4.0]),
+ pyarrow.array(["abc", "def"]),
+ ]
+
+ mock_page = mock.create_autospec(reader.ReadRowsPage)
+ mock_page.to_arrow.return_value = pyarrow.RecordBatch.from_arrays(
+ page_items, schema=arrow_schema
+ )
+ mock_pages = (mock_page, mock_page, mock_page)
+ type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
+
+ schema = [
+ schema.SchemaField("colA", "INTEGER"),
+ schema.SchemaField("colC", "FLOAT"),
+ schema.SchemaField("colB", "STRING"),
+ ]
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ schema,
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ selected_fields=schema,
+ )
+
+ actual_tbl = row_iterator.to_arrow(bqstorage_client=bqstorage_client)
+
+ # Are the columns in the expected order?
+ self.assertEqual(actual_tbl.num_columns, expected_num_columns)
+ self.assertEqual(actual_tbl.schema[0].name, "colA")
+ self.assertEqual(actual_tbl.schema[1].name, "colC")
+ self.assertEqual(actual_tbl.schema[2].name, "colB")
+
+ # Have expected number of rows?
+ total_pages = len(streams) * len(mock_pages)
+ total_rows = expected_num_rows * total_pages
+ self.assertEqual(actual_tbl.num_rows, total_rows)
+
+ # Don't close the client if it was passed in.
+ bqstorage_client._transport.grpc_channel.close.assert_not_called()
+
+ def test_to_arrow_w_bqstorage_creates_client(self):
+ pytest.importorskip("pyarrow")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery_storage_v1.services.big_query_read.transports import (
+ grpc as big_query_read_grpc_transport,
+ )
+
+ mock_client = _mock_client()
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client._transport = mock.create_autospec(
+ big_query_read_grpc_transport.BigQueryReadGrpcTransport
+ )
+ mock_client._ensure_bqstorage_client.return_value = bqstorage_client
+ session = bigquery_storage.types.ReadSession()
+ bqstorage_client.create_read_session.return_value = session
+ row_iterator = mut.RowIterator(
+ mock_client,
+ None, # api_request: ignored
+ None, # path: ignored
+ [
+ schema.SchemaField("colA", "STRING"),
+ schema.SchemaField("colC", "STRING"),
+ schema.SchemaField("colB", "STRING"),
+ ],
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ )
+ row_iterator.to_arrow(create_bqstorage_client=True)
+ mock_client._ensure_bqstorage_client.assert_called_once()
+ bqstorage_client._transport.grpc_channel.close.assert_called_once()
+
+ def test_to_arrow_ensure_bqstorage_client_wo_bqstorage(self):
+ pyarrow = pytest.importorskip(
+ "pyarrow", minversion=self.PYARROW_MINIMUM_VERSION
+ )
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Alice"}, {"v": "98"}]},
+ {"f": [{"v": "Bob"}, {"v": "99"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+
+ mock_client = _mock_client()
+ row_iterator = self._make_one(mock_client, api_request, path, schema)
+
+ def mock_verify_version(raise_if_error: bool = False):
+ raise exceptions.LegacyBigQueryStorageError("no bqstorage")
+
+ with mock.patch(
+ "google.cloud.bigquery._versions_helpers.BQ_STORAGE_VERSIONS.try_import",
+ mock_verify_version,
+ ):
+ tbl = row_iterator.to_arrow(create_bqstorage_client=True)
+
+ mock_client._ensure_bqstorage_client.assert_not_called()
+ self.assertIsInstance(tbl, pyarrow.Table)
+ self.assertEqual(tbl.num_rows, 2)
+
+ def test_to_arrow_w_bqstorage_no_streams(self):
+ pyarrow = pytest.importorskip("pyarrow")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud import bigquery_storage
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ session = bigquery_storage.types.ReadSession()
+ arrow_schema = pyarrow.schema(
+ [
+ pyarrow.field("colA", pyarrow.string()),
+ # Not alphabetical to test column order.
+ pyarrow.field("colC", pyarrow.string()),
+ pyarrow.field("colB", pyarrow.string()),
+ ]
+ )
+ session.arrow_schema.serialized_schema = arrow_schema.serialize().to_pybytes()
+ bqstorage_client.create_read_session.return_value = session
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ [
+ schema.SchemaField("colA", "STRING"),
+ schema.SchemaField("colC", "STRING"),
+ schema.SchemaField("colB", "STRING"),
+ ],
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ )
+
+ actual_table = row_iterator.to_arrow(bqstorage_client=bqstorage_client)
+ self.assertEqual(actual_table.num_columns, 3)
+ self.assertEqual(actual_table.num_rows, 0)
+ self.assertEqual(actual_table.schema[0].name, "colA")
+ self.assertEqual(actual_table.schema[1].name, "colC")
+ self.assertEqual(actual_table.schema[2].name, "colB")
+
+ def test_to_arrow_progress_bar(self):
+ pytest.importorskip("pyarrow")
+ pytest.importorskip("tqdm")
+ pytest.importorskip("tqdm.notebook")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+
+ progress_bars = (
+ ("tqdm", mock.patch("tqdm.tqdm")),
+ ("tqdm_notebook", mock.patch("tqdm.notebook.tqdm")),
+ ("tqdm_gui", mock.patch("tqdm.tqdm_gui")),
+ )
+
+ for progress_bar_type, bar_patch in progress_bars:
+ progress_bar_mock = bar_patch.start()
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+ tbl = row_iterator.to_arrow(
+ progress_bar_type=progress_bar_type,
+ create_bqstorage_client=False,
+ )
+
+ progress_bar_mock.assert_called()
+ progress_bar_mock().update.assert_called()
+ progress_bar_mock().close.assert_called_once()
+ self.assertEqual(tbl.num_rows, 4)
+
+ @mock.patch("google.cloud.bigquery.table.pyarrow", new=None)
+ def test_to_arrow_w_pyarrow_none(self):
+ schema = []
+ rows = []
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ with self.assertRaises(ValueError):
+ row_iterator.to_arrow()
+
+ def test_to_dataframe_iterable(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+
+ path = "/foo"
+ api_request = mock.Mock(
+ side_effect=[
+ {
+ "rows": [{"f": [{"v": "Bengt"}, {"v": "32"}]}],
+ "pageToken": "NEXTPAGE",
+ },
+ {"rows": [{"f": [{"v": "Sven"}, {"v": "33"}]}]},
+ ]
+ )
+
+ row_iterator = self._make_one(
+ _mock_client(), api_request, path, schema, page_size=1, max_results=5
+ )
+ dfs = row_iterator.to_dataframe_iterable()
+
+ self.assertIsInstance(dfs, types.GeneratorType)
+
+ df_1 = next(dfs)
+ self.assertIsInstance(df_1, pandas.DataFrame)
+ self.assertEqual(df_1.name.dtype.name, "object")
+ self.assertEqual(df_1.age.dtype.name, "int64")
+ self.assertEqual(len(df_1), 1) # verify the number of rows
+ self.assertEqual(
+ df_1["name"][0], "Bengt"
+ ) # verify the first value of 'name' column
+ self.assertEqual(df_1["age"][0], 32) # verify the first value of 'age' column
+
+ df_2 = next(dfs)
+ self.assertEqual(len(df_2), 1) # verify the number of rows
+ self.assertEqual(df_2["name"][0], "Sven")
+ self.assertEqual(df_2["age"][0], 33)
+
+ def test_to_dataframe_iterable_with_dtypes(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+
+ path = "/foo"
+ api_request = mock.Mock(
+ side_effect=[
+ {
+ "rows": [{"f": [{"v": "Bengt"}, {"v": "32"}]}],
+ "pageToken": "NEXTPAGE",
+ },
+ {"rows": [{"f": [{"v": "Sven"}, {"v": "33"}]}]},
+ ]
+ )
+
+ row_iterator = self._make_one(
+ _mock_client(), api_request, path, schema, page_size=1, max_results=5
+ )
+ dfs = row_iterator.to_dataframe_iterable(dtypes={"age": "int32"})
+
+ self.assertIsInstance(dfs, types.GeneratorType)
+
+ df_1 = next(dfs)
+ self.assertIsInstance(df_1, pandas.DataFrame)
+ self.assertEqual(df_1.name.dtype.name, "object")
+ self.assertEqual(df_1.age.dtype.name, "int32")
+ self.assertEqual(len(df_1), 1) # verify the number of rows
+ self.assertEqual(
+ df_1["name"][0], "Bengt"
+ ) # verify the first value of 'name' column
+ self.assertEqual(df_1["age"][0], 32) # verify the first value of 'age' column
+
+ df_2 = next(dfs)
+ self.assertEqual(len(df_2), 1) # verify the number of rows
+ self.assertEqual(df_2["name"][0], "Sven")
+ self.assertEqual(df_2["age"][0], 33)
+
+ def test_to_dataframe_iterable_w_bqstorage(self):
+ pandas = pytest.importorskip("pandas")
+ pyarrow = pytest.importorskip("pyarrow")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery_storage_v1 import reader
+ from google.cloud.bigquery_storage_v1.services.big_query_read.transports import (
+ grpc as big_query_read_grpc_transport,
+ )
+
+ arrow_fields = [
+ pyarrow.field("colA", pyarrow.int64()),
+ # Not alphabetical to test column order.
+ pyarrow.field("colC", pyarrow.float64()),
+ pyarrow.field("colB", pyarrow.utf8()),
+ ]
+ arrow_schema = pyarrow.schema(arrow_fields)
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client._transport = mock.create_autospec(
+ big_query_read_grpc_transport.BigQueryReadGrpcTransport
+ )
+ streams = [
+ # Use two streams we want to check frames are read from each stream.
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
+ ]
+ session = bigquery_storage.types.ReadSession(
+ streams=streams,
+ arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
+ )
+ bqstorage_client.create_read_session.return_value = session
+
+ mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ bqstorage_client.read_rows.return_value = mock_rowstream
+
+ mock_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_rowstream.rows.return_value = mock_rows
+ page_dataframe = pandas.DataFrame(
+ {"colA": [1, -1], "colC": [2.0, 4.0], "colB": ["abc", "def"]},
+ )
+ mock_page = mock.create_autospec(reader.ReadRowsPage)
+ mock_page.to_dataframe.return_value = page_dataframe
+ mock_pages = (mock_page, mock_page, mock_page)
+ type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
+
+ schema = [
+ schema.SchemaField("colA", "IGNORED"),
+ schema.SchemaField("colC", "IGNORED"),
+ schema.SchemaField("colB", "IGNORED"),
+ ]
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ schema,
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ selected_fields=schema,
+ )
+
+ got = list(
+ row_iterator.to_dataframe_iterable(bqstorage_client=bqstorage_client)
+ )
+
+ # Have expected number of rows?
+ total_pages = len(streams) * len(mock_pages)
+ self.assertEqual(len(got), total_pages)
+
+ # Don't close the client if it was passed in.
+ bqstorage_client._transport.grpc_channel.close.assert_not_called()
+
+ def test_to_dataframe_iterable_w_bqstorage_max_results_warning(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud import bigquery_storage
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+
+ iterator_schema = [
+ schema.SchemaField("name", "STRING", mode="REQUIRED"),
+ schema.SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ path = "/foo"
+ api_request = mock.Mock(
+ side_effect=[
+ {
+ "rows": [{"f": [{"v": "Bengt"}, {"v": "32"}]}],
+ "pageToken": "NEXTPAGE",
+ },
+ {"rows": [{"f": [{"v": "Sven"}, {"v": "33"}]}]},
+ ]
+ )
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ api_request,
+ path,
+ iterator_schema,
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ selected_fields=iterator_schema,
+ max_results=25,
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ dfs = row_iterator.to_dataframe_iterable(bqstorage_client=bqstorage_client)
+
+ # Was a warning emitted?
+ matches = [
+ warning
+ for warning in warned
+ if warning.category is UserWarning
+ and "cannot use bqstorage_client" in str(warning).lower()
+ and "REST" in str(warning)
+ ]
+ assert len(matches) == 1, "User warning was not emitted."
+ assert __file__ in str(matches[0]), "Warning emitted with incorrect stacklevel"
+
+ # Basic check of what we got as a result.
+ dataframes = list(dfs)
+ assert len(dataframes) == 2
+ assert isinstance(dataframes[0], pandas.DataFrame)
+ assert isinstance(dataframes[1], pandas.DataFrame)
+
+ @mock.patch("google.cloud.bigquery._pandas_helpers.pandas", new=None)
+ def test_to_dataframe_iterable_error_if_pandas_is_none(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ with pytest.raises(ValueError, match="pandas"):
+ row_iterator.to_dataframe_iterable()
+
+ def test_to_dataframe(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ df = row_iterator.to_dataframe(create_bqstorage_client=False)
+
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 4) # verify the number of rows
+ self.assertEqual(list(df), ["name", "age"]) # verify the column names
+ self.assertEqual(df.name.dtype.name, "object")
+ self.assertEqual(df.age.dtype.name, "Int64")
+
+ def test_to_dataframe_timestamp_out_of_pyarrow_bounds(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [SchemaField("some_timestamp", "TIMESTAMP")]
+ rows = [
+ {"f": [{"v": "81953424000000000"}]}, # 4567-01-01 00:00:00 UTC
+ {"f": [{"v": "253402214400000000"}]}, # 9999-12-31 00:00:00 UTC
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ df = row_iterator.to_dataframe(create_bqstorage_client=False)
+
+ tzinfo = datetime.timezone.utc
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 2) # verify the number of rows
+ self.assertEqual(list(df.columns), ["some_timestamp"])
+ self.assertEqual(
+ list(df["some_timestamp"]),
+ [
+ datetime.datetime(4567, 1, 1, tzinfo=tzinfo),
+ datetime.datetime(9999, 12, 31, tzinfo=tzinfo),
+ ],
+ )
+
+ def test_to_dataframe_datetime_out_of_pyarrow_bounds(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [SchemaField("some_datetime", "DATETIME")]
+ rows = [
+ {"f": [{"v": "4567-01-01T00:00:00"}]},
+ {"f": [{"v": "9999-12-31T00:00:00"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ df = row_iterator.to_dataframe(create_bqstorage_client=False)
+
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 2) # verify the number of rows
+ self.assertEqual(list(df.columns), ["some_datetime"])
+ self.assertEqual(
+ list(df["some_datetime"]),
+ [datetime.datetime(4567, 1, 1), datetime.datetime(9999, 12, 31)],
+ )
+
+ def test_to_dataframe_progress_bar(self):
+ pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ pytest.importorskip("tqdm")
+
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+
+ progress_bars = (
+ ("tqdm", mock.patch("tqdm.tqdm")),
+ ("tqdm_gui", mock.patch("tqdm.tqdm_gui")),
+ )
+
+ for progress_bar_type, bar_patch in progress_bars:
+ progress_bar_mock = bar_patch.start()
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+ df = row_iterator.to_dataframe(
+ progress_bar_type=progress_bar_type,
+ create_bqstorage_client=False,
+ )
+
+ progress_bar_mock.assert_called()
+ progress_bar_mock().update.assert_called()
+ progress_bar_mock().close.assert_called_once()
+ self.assertEqual(len(df), 4)
+
+ def test_to_dataframe_progress_bar_notebook(self):
+ pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ pytest.importorskip("tqdm")
+ pytest.importorskip("tqdm.notebook")
+
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+
+ with mock.patch("tqdm.notebook.tqdm") as progress_bar_mock:
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+ df = row_iterator.to_dataframe(
+ progress_bar_type="tqdm_notebook",
+ create_bqstorage_client=False,
+ )
+
+ progress_bar_mock.assert_called()
+ progress_bar_mock().update.assert_called()
+ progress_bar_mock().close.assert_called_once()
+ self.assertEqual(len(df), 4)
+
+ @mock.patch("google.cloud.bigquery._tqdm_helpers.tqdm", new=None)
+ def test_to_dataframe_no_tqdm_no_progress_bar(self):
+ pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ with warnings.catch_warnings(record=True) as warned:
+ df = row_iterator.to_dataframe(create_bqstorage_client=False)
+
+ user_warnings = [
+ warning for warning in warned if warning.category is UserWarning
+ ]
+ # With Python 3.7 and 3.8, len(user_warnings) = 3. With pandas < 1.5,
+ # pandas.ArrowDtype is not supported. We raise warnings because
+ # range columns have to be converted to object.
+ # With higher Python versions and noextra tests, len(user_warnings) = 0
+ self.assertIn(len(user_warnings), [0, 3])
+ self.assertEqual(len(df), 4)
+
+ @mock.patch("google.cloud.bigquery._tqdm_helpers.tqdm", new=None)
+ def test_to_dataframe_no_tqdm(self):
+ pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ with warnings.catch_warnings(record=True) as warned:
+ df = row_iterator.to_dataframe(
+ progress_bar_type="tqdm",
+ create_bqstorage_client=False,
+ )
+
+ user_warnings = [
+ warning for warning in warned if warning.category is UserWarning
+ ]
+ # With Python 3.7 and 3.8, len(user_warnings) = 4. With pandas < 1.5,
+ # pandas.ArrowDtype is not supported. We raise warnings because
+ # range columns have to be converted to object.
+ # With higher Python versions and noextra tests, len(user_warnings) = 1
+ self.assertIn(len(user_warnings), [1, 4])
+
+ # Even though the progress bar won't show, downloading the dataframe
+ # should still work.
+ self.assertEqual(len(df), 4)
+
+ def test_to_dataframe_tqdm_error(self):
+ pytest.importorskip("pandas")
+ pytest.importorskip("tqdm")
+ mock.patch("tqdm.tqdm_gui", new=None)
+ mock.patch("tqdm.notebook.tqdm", new=None)
+ mock.patch("tqdm.tqdm", new=None)
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
+ {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
+ ]
+ path = "/foo"
+
+ for progress_bar_type in ("tqdm", "tqdm_notebook", "tqdm_gui"):
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ with warnings.catch_warnings(record=True) as warned:
+ df = row_iterator.to_dataframe(
+ progress_bar_type=progress_bar_type,
+ create_bqstorage_client=False,
+ )
+
+ self.assertEqual(len(df), 4) # all should be well
+
+ # Warn that a progress bar was requested, but creating the tqdm
+ # progress bar failed.
+ for warning in warned: # pragma: NO COVER
+ self.assertIn(
+ warning.category,
+ [UserWarning, DeprecationWarning],
+ )
+
+ def test_to_dataframe_w_empty_results(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ api_request = mock.Mock(return_value={"rows": []})
+ row_iterator = self._make_one(_mock_client(), api_request, schema=schema)
+
+ df = row_iterator.to_dataframe(create_bqstorage_client=False)
+
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 0) # verify the number of rows
+ self.assertEqual(list(df), ["name", "age"]) # verify the column names
+
+ def test_to_dataframe_w_various_types_nullable(self):
+ pandas = pytest.importorskip("pandas")
+ import datetime
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("start_timestamp", "TIMESTAMP"),
+ SchemaField("seconds", "INT64"),
+ SchemaField("miles", "FLOAT64"),
+ SchemaField("payment_type", "STRING"),
+ SchemaField("complete", "BOOL"),
+ SchemaField("date", "DATE"),
+ ]
+ row_data = [
+ [None, None, None, None, None, None],
+ ["1433836800000000", "420", "1.1", "Cash", "true", "1999-12-01"],
+ ["1387811700000000", "2580", "17.7", "Cash", "false", "1953-06-14"],
+ ["1385565300000000", "2280", "4.4", "Credit", "true", "1981-11-04"],
+ ]
+ rows = [{"f": [{"v": field} for field in row]} for row in row_data]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ df = row_iterator.to_dataframe(create_bqstorage_client=False)
+
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 4) # verify the number of rows
+ exp_columns = [field.name for field in schema]
+ self.assertEqual(list(df), exp_columns) # verify the column names
+
+ for index, row in df.iterrows():
+ if index == 0:
+ self.assertTrue(row.isnull().all())
+ else:
+ self.assertIsInstance(row.start_timestamp, pandas.Timestamp)
+ self.assertIsInstance(row.seconds, int)
+ self.assertIsInstance(row.payment_type, str)
+ self.assertIsInstance(row.complete, bool)
+ self.assertIsInstance(row.date, datetime.date)
+
+ def test_to_dataframe_w_dtypes_mapper(self):
+ pandas = pytest.importorskip("pandas")
+ pyarrow = pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING"),
+ SchemaField("complete", "BOOL"),
+ SchemaField("age", "INTEGER"),
+ SchemaField("seconds", "INT64"),
+ SchemaField("miles", "FLOAT64"),
+ SchemaField("date", "DATE"),
+ SchemaField("datetime", "DATETIME"),
+ SchemaField("time", "TIME"),
+ SchemaField("timestamp", "TIMESTAMP"),
+ SchemaField("range_timestamp", "RANGE", range_element_type="TIMESTAMP"),
+ SchemaField("range_datetime", "RANGE", range_element_type="DATETIME"),
+ SchemaField("range_date", "RANGE", range_element_type="DATE"),
+ ]
+ row_data = [
+ [
+ "Phred Phlyntstone",
+ "true",
+ "32",
+ "23000",
+ "1.77",
+ "1999-12-01",
+ "1999-12-31T00:00:00.000000",
+ "00:00:00.000000",
+ "1433836800000000",
+ "[1433836800000000, 1433999900000000)",
+ "[2009-06-17T13:45:30, 2019-07-17T13:45:30)",
+ "[2020-10-01, 2021-10-02)",
+ ],
+ [
+ "Bharney Rhubble",
+ "false",
+ "33",
+ "454000",
+ "6.66",
+ "4567-06-14",
+ "4567-12-31T00:00:00.000000",
+ "12:00:00.232413",
+ "81953424000000000",
+ "[1433836800000000, UNBOUNDED)",
+ "[2009-06-17T13:45:30, UNBOUNDED)",
+ "[2020-10-01, UNBOUNDED)",
+ ],
+ [
+ "Wylma Phlyntstone",
+ "true",
+ "29",
+ "341000",
+ "2.0",
+ "9999-12-31",
+ "9999-12-31T23:59:59.999999",
+ "23:59:59.999999",
+ "253402261199999999",
+ "[UNBOUNDED, UNBOUNDED)",
+ "[UNBOUNDED, UNBOUNDED)",
+ "[UNBOUNDED, UNBOUNDED)",
+ ],
+ ]
+ rows = [{"f": [{"v": field} for field in row]} for row in row_data]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ df = row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ bool_dtype=pandas.BooleanDtype(),
+ int_dtype=pandas.Int32Dtype(),
+ float_dtype=(
+ pandas.Float64Dtype()
+ if hasattr(pandas, "Float64Dtype")
+ else pandas.StringDtype()
+ ),
+ string_dtype=pandas.StringDtype(),
+ date_dtype=(
+ pandas.ArrowDtype(pyarrow.date32())
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ datetime_dtype=(
+ pandas.ArrowDtype(pyarrow.timestamp("us"))
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ time_dtype=(
+ pandas.ArrowDtype(pyarrow.time64("us"))
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ timestamp_dtype=(
+ pandas.ArrowDtype(pyarrow.timestamp("us", tz="UTC"))
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ range_date_dtype=(
+ pandas.ArrowDtype(
+ pyarrow.struct(
+ [("start", pyarrow.date32()), ("end", pyarrow.date32())]
+ )
+ )
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ range_datetime_dtype=(
+ pandas.ArrowDtype(
+ pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us")),
+ ("end", pyarrow.timestamp("us")),
+ ]
+ )
+ )
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ range_timestamp_dtype=(
+ pandas.ArrowDtype(
+ pyarrow.struct(
+ [
+ ("start", pyarrow.timestamp("us", tz="UTC")),
+ ("end", pyarrow.timestamp("us", tz="UTC")),
+ ]
+ )
+ )
+ if hasattr(pandas, "ArrowDtype")
+ else None
+ ),
+ )
+
+ self.assertIsInstance(df, pandas.DataFrame)
+
+ self.assertEqual(list(df.complete), [True, False, True])
+ self.assertEqual(df.complete.dtype.name, "boolean")
+
+ self.assertEqual(list(df.age), [32, 33, 29])
+ self.assertEqual(df.age.dtype.name, "Int32")
+
+ self.assertEqual(list(df.seconds), [23000, 454000, 341000])
+ self.assertEqual(df.seconds.dtype.name, "Int32")
+
+ self.assertEqual(
+ list(df.name), ["Phred Phlyntstone", "Bharney Rhubble", "Wylma Phlyntstone"]
+ )
+ self.assertEqual(df.name.dtype.name, "string")
+
+ if hasattr(pandas, "Float64Dtype"):
+ self.assertEqual(list(df.miles), [1.77, 6.66, 2.0])
+ self.assertEqual(df.miles.dtype.name, "Float64")
+ else:
+ self.assertEqual(list(df.miles), ["1.77", "6.66", "2.0"])
+ self.assertEqual(df.miles.dtype.name, "string")
+
+ if hasattr(pandas, "ArrowDtype"):
+ self.assertEqual(
+ list(df.date),
+ [
+ datetime.date(1999, 12, 1),
+ datetime.date(4567, 6, 14),
+ datetime.date(9999, 12, 31),
+ ],
+ )
+ self.assertEqual(df.date.dtype.name, "date32[day][pyarrow]")
+
+ self.assertEqual(
+ list(df.datetime),
+ [
+ datetime.datetime(1999, 12, 31, 0, 0),
+ datetime.datetime(4567, 12, 31, 0, 0),
+ datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),
+ ],
+ )
+ self.assertEqual(df.datetime.dtype.name, "timestamp[us][pyarrow]")
+
+ self.assertEqual(
+ list(df.time),
+ [
+ datetime.time(0, 0),
+ datetime.time(12, 0, 0, 232413),
+ datetime.time(23, 59, 59, 999999),
+ ],
+ )
+ self.assertEqual(df.time.dtype.name, "time64[us][pyarrow]")
+
+ self.assertEqual(
+ list(df.timestamp),
+ [
+ datetime.datetime(2015, 6, 9, 8, 0, tzinfo=datetime.timezone.utc),
+ datetime.datetime(4567, 1, 1, 0, 0, tzinfo=datetime.timezone.utc),
+ datetime.datetime(
+ 9999, 12, 31, 12, 59, 59, 999999, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ )
+ self.assertEqual(df.timestamp.dtype.name, "timestamp[us, tz=UTC][pyarrow]")
+
+ self.assertEqual(
+ list(df.range_timestamp),
+ [
+ {
+ "start": datetime.datetime(
+ 2015, 6, 9, 8, 0, 0, tzinfo=datetime.timezone.utc
+ ),
+ "end": datetime.datetime(
+ 2015, 6, 11, 5, 18, 20, tzinfo=datetime.timezone.utc
+ ),
+ },
+ {
+ "start": datetime.datetime(
+ 2015, 6, 9, 8, 0, 0, tzinfo=datetime.timezone.utc
+ ),
+ "end": None,
+ },
+ {"start": None, "end": None},
+ ],
+ )
+
+ self.assertEqual(
+ list(df.range_datetime),
+ [
+ {
+ "start": datetime.datetime(2009, 6, 17, 13, 45, 30),
+ "end": datetime.datetime(2019, 7, 17, 13, 45, 30),
+ },
+ {"start": datetime.datetime(2009, 6, 17, 13, 45, 30), "end": None},
+ {"start": None, "end": None},
+ ],
+ )
+
+ self.assertEqual(
+ list(df.range_date),
+ [
+ {
+ "start": datetime.date(2020, 10, 1),
+ "end": datetime.date(2021, 10, 2),
+ },
+ {"start": datetime.date(2020, 10, 1), "end": None},
+ {"start": None, "end": None},
+ ],
+ )
+
+ else:
+ self.assertEqual(
+ list(df.date),
+ [
+ pandas.Timestamp("1999-12-01 00:00:00"),
+ pandas.Timestamp("2229-03-27 01:41:45.161793536"),
+ pandas.Timestamp("1816-03-29 05:56:08.066277376"),
+ ],
+ )
+ self.assertEqual(df.date.dtype.name, "datetime64[ns]")
+
+ self.assertEqual(
+ list(df.datetime),
+ [
+ datetime.datetime(1999, 12, 31, 0, 0),
+ datetime.datetime(4567, 12, 31, 0, 0),
+ datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),
+ ],
+ )
+ self.assertEqual(df.datetime.dtype.name, "object")
+
+ self.assertEqual(
+ list(df.time),
+ [
+ datetime.time(0, 0),
+ datetime.time(12, 0, 0, 232413),
+ datetime.time(23, 59, 59, 999999),
+ ],
+ )
+ self.assertEqual(df.time.dtype.name, "object")
+
+ self.assertEqual(
+ list(df.timestamp),
+ [
+ datetime.datetime(2015, 6, 9, 8, 0, tzinfo=datetime.timezone.utc),
+ datetime.datetime(4567, 1, 1, 0, 0, tzinfo=datetime.timezone.utc),
+ datetime.datetime(
+ 9999, 12, 31, 12, 59, 59, 999999, tzinfo=datetime.timezone.utc
+ ),
+ ],
+ )
+ self.assertEqual(df.timestamp.dtype.name, "object")
+
+ def test_to_dataframe_w_none_dtypes_mapper(self):
+ pandas = pytest.importorskip("pandas")
+ pandas_major_version = pandas.__version__[0:2]
+ if pandas_major_version not in ["0.", "1."]:
+ pytest.skip(reason="Requires a version of pandas less than 2.0")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING"),
+ SchemaField("complete", "BOOL"),
+ SchemaField("age", "INTEGER"),
+ SchemaField("seconds", "INT64"),
+ SchemaField("miles", "FLOAT64"),
+ SchemaField("date", "DATE"),
+ SchemaField("datetime", "DATETIME"),
+ SchemaField("time", "TIME"),
+ SchemaField("timestamp", "TIMESTAMP"),
+ SchemaField("range_timestamp", "RANGE", range_element_type="TIMESTAMP"),
+ SchemaField("range_datetime", "RANGE", range_element_type="DATETIME"),
+ SchemaField("range_date", "RANGE", range_element_type="DATE"),
+ ]
+ row_data = [
+ [
+ "Phred Phlyntstone",
+ "true",
+ "32",
+ "23000",
+ "1.77",
+ "1999-12-01",
+ "1999-12-31T00:00:00.000000",
+ "23:59:59.999999",
+ "1433836800000000",
+ "[1433836800000000, 1433999900000000)",
+ "[2009-06-17T13:45:30, 2019-07-17T13:45:30)",
+ "[2020-10-01, 2021-10-02)",
+ ],
+ ]
+ rows = [{"f": [{"v": field} for field in row]} for row in row_data]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ df = row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ bool_dtype=None,
+ int_dtype=None,
+ float_dtype=None,
+ string_dtype=None,
+ date_dtype=None,
+ datetime_dtype=None,
+ time_dtype=None,
+ timestamp_dtype=None,
+ range_timestamp_dtype=None,
+ range_datetime_dtype=None,
+ range_date_dtype=None,
+ )
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(df.complete.dtype.name, "bool")
+ self.assertEqual(df.age.dtype.name, "int64")
+ self.assertEqual(df.seconds.dtype.name, "int64")
+ self.assertEqual(df.miles.dtype.name, "float64")
+ self.assertEqual(df.name.dtype.name, "object")
+ self.assertEqual(df.date.dtype.name, "datetime64[ns]")
+ self.assertEqual(df.datetime.dtype.name, "datetime64[ns]")
+ self.assertEqual(df.time.dtype.name, "object")
+ self.assertEqual(df.timestamp.dtype.name, "datetime64[ns, UTC]")
+ self.assertEqual(df.range_timestamp.dtype.name, "object")
+ self.assertEqual(df.range_datetime.dtype.name, "object")
+ self.assertEqual(df.range_date.dtype.name, "object")
+
+ def test_to_dataframe_w_unsupported_dtypes_mapper(self):
+ pytest.importorskip("pandas")
+ import numpy
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING"),
+ ]
+ row_data = [
+ ["Phred Phlyntstone"],
+ ]
+ rows = [{"f": [{"v": field} for field in row]} for row in row_data]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ bool_dtype=numpy.dtype("bool"),
+ )
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ int_dtype=numpy.dtype("int64"),
+ )
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ float_dtype=numpy.dtype("float64"),
+ )
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ string_dtype=numpy.dtype("object"),
+ )
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ date_dtype=numpy.dtype("object"),
+ )
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ datetime_dtype=numpy.dtype("datetime64[us]"),
+ )
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ time_dtype=numpy.dtype("datetime64[us]"),
+ )
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ timestamp_dtype=numpy.dtype("datetime64[us]"),
+ )
+
+ def test_to_dataframe_column_dtypes(self):
+ pandas = pytest.importorskip("pandas")
+ pandas_major_version = pandas.__version__[0:2]
+ if pandas_major_version not in ["0.", "1."]:
+ pytest.skip("Requires a version of pandas less than 2.0")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("start_timestamp", "TIMESTAMP"),
+ SchemaField("seconds", "INT64"),
+ SchemaField("miles", "FLOAT64"),
+ SchemaField("km", "FLOAT64"),
+ SchemaField("payment_type", "STRING"),
+ SchemaField("complete", "BOOL"),
+ SchemaField("date", "DATE"),
+ ]
+ row_data = [
+ ["1433836800000", "420", "1.1", "1.77", "Cash", "true", "1999-12-01"],
+ [
+ "1387811700000",
+ "2580",
+ "17.7",
+ "28.5",
+ "Cash",
+ "false",
+ "1953-06-14",
+ ],
+ ["1385565300000", "2280", "4.4", "7.1", "Credit", "true", "1981-11-04"],
+ ]
+ rows = [{"f": [{"v": field} for field in row]} for row in row_data]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ df = row_iterator.to_dataframe(
+ dtypes={"km": "float16"},
+ create_bqstorage_client=False,
+ )
+
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 3) # verify the number of rows
+ exp_columns = [field.name for field in schema]
+ self.assertEqual(list(df), exp_columns) # verify the column names
+
+ self.assertEqual(df.start_timestamp.dtype.name, "datetime64[ns, UTC]")
+ self.assertEqual(df.seconds.dtype.name, "Int64")
+ self.assertEqual(df.miles.dtype.name, "float64")
+ self.assertEqual(df.km.dtype.name, "float16")
+ self.assertEqual(df.payment_type.dtype.name, "object")
+ self.assertEqual(df.complete.dtype.name, "boolean")
+ self.assertEqual(df.date.dtype.name, "dbdate")
+
+ def test_to_dataframe_datetime_objects(self):
+ # When converting date or timestamp values to nanosecond
+ # precision, the result can be out of pyarrow bounds. To avoid
+ # the error when converting to Pandas, we use object type if
+ # necessary.
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("ts", "TIMESTAMP"),
+ SchemaField("date", "DATE"),
+ ]
+ row_data = [
+ ["-20000000000000000", "1111-01-01"],
+ ]
+ rows = [{"f": [{"v": field} for field in row]} for row in row_data]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ df = row_iterator.to_dataframe(create_bqstorage_client=False)
+
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 1) # verify the number of rows
+ self.assertEqual(df["ts"].dtype.name, "object")
+ self.assertEqual(df["date"].dtype.name, "object")
+ self.assertEqual(df["ts"][0].date(), datetime.date(1336, 3, 23))
+ self.assertEqual(df["date"][0], datetime.date(1111, 1, 1))
+
+ @mock.patch("google.cloud.bigquery._pandas_helpers.pandas", new=None)
+ def test_to_dataframe_error_if_pandas_is_none(self):
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ row_iterator = self._make_one(_mock_client(), api_request, path, schema)
+
+ with self.assertRaises(ValueError):
+ row_iterator.to_dataframe()
+
+ @mock.patch("google.cloud.bigquery.table.shapely", new=None)
+ def test_to_dataframe_error_if_shapely_is_none(self):
+ pytest.importorskip("pandas")
+
+ with self.assertRaisesRegex(
+ ValueError,
+ re.escape(
+ "The shapely library is not installed, please install "
+ "shapely to use the geography_as_object option."
+ ),
+ ):
+ self._make_one_from_data().to_dataframe(geography_as_object=True)
+
+ def test_to_dataframe_max_results_w_bqstorage_warning(self):
+ pytest.importorskip("pandas")
+
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ bqstorage_client = mock.Mock()
+
+ row_iterator = self._make_one(
+ client=_mock_client(),
+ api_request=api_request,
+ path=path,
+ schema=schema,
+ max_results=42,
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
+
+ matches = [
+ warning
+ for warning in warned
+ if warning.category is UserWarning
+ and "cannot use bqstorage_client" in str(warning).lower()
+ and "REST" in str(warning)
+ ]
+ self.assertEqual(len(matches), 1, msg="User warning was not emitted.")
+
+ def test_to_dataframe_max_results_w_explicit_bqstorage_client_warning(self):
+ pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ mock_client = _mock_client()
+ mock_bqstorage_client = mock.sentinel.bq_storage_client
+
+ row_iterator = self._make_one(
+ client=mock_client,
+ api_request=api_request,
+ path=path,
+ schema=schema,
+ max_results=42,
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ row_iterator.to_dataframe(bqstorage_client=mock_bqstorage_client)
+
+ matches = [
+ warning
+ for warning in warned
+ if warning.category is UserWarning
+ and "cannot use bqstorage_client" in str(warning).lower()
+ and "REST" in str(warning)
+ ]
+ self.assertEqual(len(matches), 1, msg="User warning was not emitted.")
+ self.assertIn(
+ __file__, str(matches[0]), msg="Warning emitted with incorrect stacklevel"
+ )
+ mock_client._ensure_bqstorage_client.assert_not_called()
+
+ def test_to_dataframe_max_results_w_create_bqstorage_client_no_warning(self):
+ pytest.importorskip("pandas")
+ from google.cloud.bigquery.schema import SchemaField
+
+ schema = [
+ SchemaField("name", "STRING", mode="REQUIRED"),
+ SchemaField("age", "INTEGER", mode="REQUIRED"),
+ ]
+ rows = [
+ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
+ {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
+ ]
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": rows})
+ mock_client = _mock_client()
+
+ row_iterator = self._make_one(
+ client=mock_client,
+ api_request=api_request,
+ path=path,
+ schema=schema,
+ max_results=42,
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ row_iterator.to_dataframe(create_bqstorage_client=True)
+
+ matches = [
+ warning
+ for warning in warned
+ if warning.category is UserWarning
+ and "cannot use bqstorage_client" in str(warning).lower()
+ and "REST" in str(warning)
+ ]
+ self.assertFalse(matches)
+ mock_client._ensure_bqstorage_client.assert_not_called()
+
+ def test_to_dataframe_w_bqstorage_creates_client(self):
+ pytest.importorskip("pandas")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery_storage_v1.services.big_query_read.transports import (
+ grpc as big_query_read_grpc_transport,
+ )
+
+ mock_client = _mock_client()
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client._transport = mock.create_autospec(
+ big_query_read_grpc_transport.BigQueryReadGrpcTransport
+ )
+ mock_client._ensure_bqstorage_client.return_value = bqstorage_client
+ session = bigquery_storage.types.ReadSession()
+ bqstorage_client.create_read_session.return_value = session
+ row_iterator = mut.RowIterator(
+ mock_client,
+ None, # api_request: ignored
+ None, # path: ignored
+ [
+ schema.SchemaField("colA", "STRING"),
+ schema.SchemaField("colC", "STRING"),
+ schema.SchemaField("colB", "STRING"),
+ ],
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ )
+ row_iterator.to_dataframe(create_bqstorage_client=True)
+ mock_client._ensure_bqstorage_client.assert_called_once()
+ bqstorage_client._transport.grpc_channel.close.assert_called_once()
+
+ def test_to_dataframe_w_bqstorage_no_streams(self):
+ pytest.importorskip("pandas")
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud import bigquery_storage
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ session = bigquery_storage.types.ReadSession()
+ bqstorage_client.create_read_session.return_value = session
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ api_request=None,
+ path=None,
+ schema=[
+ schema.SchemaField("colA", "INTEGER"),
+ schema.SchemaField("colC", "FLOAT"),
+ schema.SchemaField("colB", "STRING"),
+ ],
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ )
+
+ got = row_iterator.to_dataframe(bqstorage_client)
+ column_names = ["colA", "colC", "colB"]
+ self.assertEqual(list(got), column_names)
+ self.assertTrue(got.empty)
+
+ def test_to_dataframe_w_bqstorage_logs_session(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pandas")
+ pytest.importorskip("pyarrow")
+ from google.cloud.bigquery.table import Table
+ from google.cloud import bigquery_storage
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ session = bigquery_storage.types.ReadSession()
+ session.name = "projects/test-proj/locations/us/sessions/SOMESESSION"
+ bqstorage_client.create_read_session.return_value = session
+ mock_logger = mock.create_autospec(logging.Logger)
+ row_iterator = self._make_one(
+ _mock_client(), table=Table("debug-proj.debug_dset.debug_tbl")
+ )
+
+ with mock.patch("google.cloud.bigquery._pandas_helpers._LOGGER", mock_logger):
+ row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
+
+ mock_logger.debug.assert_any_call(
+ "Started reading table 'debug-proj.debug_dset.debug_tbl' "
+ "with BQ Storage API session 'projects/test-proj/locations/us/sessions/SOMESESSION'."
+ )
+
+ def test_to_dataframe_w_bqstorage_empty_streams(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pandas")
+ pyarrow = pytest.importorskip("pyarrow")
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud.bigquery_storage_v1 import reader
+
+ arrow_fields = [
+ pyarrow.field("colA", pyarrow.int64()),
+ # Not alphabetical to test column order.
+ pyarrow.field("colC", pyarrow.float64()),
+ pyarrow.field("colB", pyarrow.utf8()),
+ ]
+ arrow_schema = pyarrow.schema(arrow_fields)
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ session = bigquery_storage.types.ReadSession(
+ streams=[{"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"}],
+ arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
+ )
+ bqstorage_client.create_read_session.return_value = session
+
+ mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ bqstorage_client.read_rows.return_value = mock_rowstream
+
+ mock_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_rowstream.rows.return_value = mock_rows
+ mock_pages = mock.PropertyMock(return_value=())
+ type(mock_rows).pages = mock_pages
+
+ # Schema is required when there are no record batches in the stream.
+ schema = [
+ schema.SchemaField("colA", "INTEGER"),
+ schema.SchemaField("colC", "FLOAT"),
+ schema.SchemaField("colB", "STRING"),
+ ]
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ schema,
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ selected_fields=schema,
+ )
+
+ got = row_iterator.to_dataframe(bqstorage_client)
+
+ column_names = ["colA", "colC", "colB"]
+ self.assertEqual(list(got), column_names)
+ self.assertTrue(got.empty)
+
+ def test_to_dataframe_w_bqstorage_nonempty(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pandas")
+ pyarrow = pytest.importorskip("pyarrow")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery_storage_v1 import reader
+ from google.cloud.bigquery_storage_v1.services.big_query_read.transports import (
+ grpc as big_query_read_grpc_transport,
+ )
+
+ arrow_fields = [
+ pyarrow.field("colA", pyarrow.int64()),
+ # Not alphabetical to test column order.
+ pyarrow.field("colC", pyarrow.float64()),
+ pyarrow.field("colB", pyarrow.utf8()),
+ ]
+ arrow_schema = pyarrow.schema(arrow_fields)
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client._transport = mock.create_autospec(
+ big_query_read_grpc_transport.BigQueryReadGrpcTransport
+ )
+ streams = [
+ # Use two streams we want to check frames are read from each stream.
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
+ ]
+ session = bigquery_storage.types.ReadSession(
+ streams=streams,
+ arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
+ )
+ bqstorage_client.create_read_session.return_value = session
+
+ mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ bqstorage_client.read_rows.return_value = mock_rowstream
+
+ mock_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_rowstream.rows.return_value = mock_rows
+ page_items = [
+ pyarrow.array([1, -1]),
+ pyarrow.array([2.0, 4.0]),
+ pyarrow.array(["abc", "def"]),
+ ]
+ page_record_batch = pyarrow.RecordBatch.from_arrays(
+ page_items, schema=arrow_schema
+ )
+ mock_page = mock.create_autospec(reader.ReadRowsPage)
+ mock_page.to_arrow.return_value = page_record_batch
+ mock_pages = (mock_page, mock_page, mock_page)
+ type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
+
+ schema = [
+ schema.SchemaField("colA", "IGNORED"),
+ schema.SchemaField("colC", "IGNORED"),
+ schema.SchemaField("colB", "IGNORED"),
+ ]
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ schema,
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ selected_fields=schema,
+ )
+
+ got = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
+
+ # Are the columns in the expected order?
+ column_names = ["colA", "colC", "colB"]
+ self.assertEqual(list(got), column_names)
+
+ # Have expected number of rows?
+ total_pages = len(streams) * len(mock_pages)
+ total_rows = len(page_items[0]) * total_pages
+ self.assertEqual(len(got.index), total_rows)
+
+ # Don't close the client if it was passed in.
+ bqstorage_client._transport.grpc_channel.close.assert_not_called()
+
+ def test_to_dataframe_w_bqstorage_multiple_streams_return_unique_index(self):
+ bigquery_storage = pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pandas")
+ pyarrow = pytest.importorskip("pyarrow")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud.bigquery_storage_v1 import reader
+
+ arrow_fields = [pyarrow.field("colA", pyarrow.int64())]
+ arrow_schema = pyarrow.schema(arrow_fields)
+
+ streams = [
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
+ ]
+ session = bigquery_storage.types.ReadSession(
+ streams=streams,
+ arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
+ )
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client.create_read_session.return_value = session
+
+ mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ bqstorage_client.read_rows.return_value = mock_rowstream
+
+ mock_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_rowstream.rows.return_value = mock_rows
+
+ page_items = [
+ pyarrow.array([1, -1]),
+ ]
+ page_record_batch = pyarrow.RecordBatch.from_arrays(
+ page_items, schema=arrow_schema
+ )
+ mock_page = mock.create_autospec(reader.ReadRowsPage)
+ mock_page.to_arrow.return_value = page_record_batch
+ mock_pages = (mock_page, mock_page, mock_page)
+ type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
+
+ row_iterator = self._make_one(
+ schema=[schema.SchemaField("colA", "IGNORED")],
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ )
+ got = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
+
+ self.assertEqual(list(got), ["colA"])
+ total_pages = len(streams) * len(mock_pages)
+ total_rows = len(page_items[0]) * total_pages
+ self.assertEqual(len(got.index), total_rows)
+ self.assertTrue(got.index.is_unique)
+
+ def test_to_dataframe_w_bqstorage_updates_progress_bar(self):
+ bigquery_storage = pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pandas")
+ pyarrow = pytest.importorskip("pyarrow")
+ pytest.importorskip("tqdm")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud.bigquery_storage_v1 import reader
+
+ # Speed up testing.
+ mut._PROGRESS_INTERVAL = 0.01
+
+ arrow_fields = [pyarrow.field("testcol", pyarrow.int64())]
+ arrow_schema = pyarrow.schema(arrow_fields)
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ streams = [
+ # Use two streams we want to check that progress bar updates are
+ # sent from each stream.
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
+ ]
+ session = bigquery_storage.types.ReadSession(
+ streams=streams,
+ arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
+ )
+ bqstorage_client.create_read_session.return_value = session
+
+ mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ bqstorage_client.read_rows.return_value = mock_rowstream
+
+ mock_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_rowstream.rows.return_value = mock_rows
+ mock_page = mock.create_autospec(reader.ReadRowsPage)
+ page_items = [-1, 0, 1]
+ type(mock_page).num_items = mock.PropertyMock(return_value=len(page_items))
+
+ def blocking_to_arrow(*args, **kwargs):
+ # Sleep for longer than the waiting interval so that we know we're
+ # only reading one page per loop at most.
+ time.sleep(2 * mut._PROGRESS_INTERVAL)
+ return pyarrow.RecordBatch.from_arrays(
+ [pyarrow.array(page_items)], schema=arrow_schema
+ )
+
+ mock_page.to_arrow.side_effect = blocking_to_arrow
+ mock_pages = (mock_page, mock_page, mock_page, mock_page, mock_page)
+ type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
+
+ schema = [schema.SchemaField("testcol", "IGNORED")]
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ schema,
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ selected_fields=schema,
+ )
+
+ with mock.patch("tqdm.tqdm") as tqdm_mock:
+ row_iterator.to_dataframe(
+ bqstorage_client=bqstorage_client, progress_bar_type="tqdm"
+ )
+
+ # Make sure that this test updated the progress bar once per page from
+ # each stream.
+ total_pages = len(streams) * len(mock_pages)
+ expected_total_rows = total_pages * len(page_items)
+ progress_updates = [
+ args[0] for args, kwargs in tqdm_mock().update.call_args_list
+ ]
+ # Should have sent >1 update due to delay in blocking_to_arrow.
+ self.assertGreater(len(progress_updates), 1)
+ self.assertEqual(sum(progress_updates), expected_total_rows)
+ tqdm_mock().close.assert_called_once()
+
+ def test_to_dataframe_w_bqstorage_exits_on_keyboardinterrupt(self):
+ bigquery_storage = pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pandas")
+ pyarrow = pytest.importorskip("pyarrow")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud.bigquery_storage_v1 import reader
+
+ # Speed up testing.
+ mut._PROGRESS_INTERVAL = 0.01
+
+ arrow_fields = [
+ pyarrow.field("colA", pyarrow.int64()),
+ # Not alphabetical to test column order.
+ pyarrow.field("colC", pyarrow.float64()),
+ pyarrow.field("colB", pyarrow.utf8()),
+ ]
+ arrow_schema = pyarrow.schema(arrow_fields)
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ session = bigquery_storage.types.ReadSession(
+ streams=[
+ # Use multiple streams because one will fail with a
+ # KeyboardInterrupt, and we want to check that the other streams
+ # ends early.
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
+ {"name": "/projects/proj/dataset/dset/tables/tbl/streams/9999"},
+ ],
+ arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
+ )
+ bqstorage_client.create_read_session.return_value = session
+ page_items = [
+ pyarrow.array([1, -1]),
+ pyarrow.array([2.0, 4.0]),
+ pyarrow.array(["abc", "def"]),
+ ]
+
+ def blocking_to_arrow(*args, **kwargs):
+ # Sleep for longer than the waiting interval so that we know we're
+ # only reading one page per loop at most.
+ time.sleep(2 * mut._PROGRESS_INTERVAL)
+ return pyarrow.RecordBatch.from_arrays(page_items, schema=arrow_schema)
+
+ mock_page = mock.create_autospec(reader.ReadRowsPage)
+ mock_page.to_arrow.side_effect = blocking_to_arrow
+ mock_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_pages = mock.PropertyMock(return_value=(mock_page, mock_page, mock_page))
+ type(mock_rows).pages = mock_pages
+ mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ mock_rowstream.rows.return_value = mock_rows
+
+ mock_cancelled_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_cancelled_pages = mock.PropertyMock(side_effect=KeyboardInterrupt)
+ type(mock_cancelled_rows).pages = mock_cancelled_pages
+ mock_cancelled_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ mock_cancelled_rowstream.rows.return_value = mock_cancelled_rows
+
+ bqstorage_client.read_rows.side_effect = (
+ mock_rowstream,
+ mock_cancelled_rowstream,
+ mock_rowstream,
+ )
+
+ schema = [
+ schema.SchemaField("colA", "IGNORED"),
+ schema.SchemaField("colB", "IGNORED"),
+ schema.SchemaField("colC", "IGNORED"),
+ ]
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ schema,
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ selected_fields=schema,
+ )
+
+ with pytest.raises(KeyboardInterrupt):
+ row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
+
+ # Should not have fetched the third page of results because exit_early
+ # should have been set.
+ self.assertLessEqual(mock_page.to_dataframe.call_count, 2)
+
+ def test_to_dataframe_tabledata_list_w_multiple_pages_return_unique_index(self):
+ pandas = pytest.importorskip("pandas")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+
+ iterator_schema = [schema.SchemaField("name", "STRING", mode="REQUIRED")]
+ path = "/foo"
+ api_request = mock.Mock(
+ side_effect=[
+ {"rows": [{"f": [{"v": "Bengt"}]}], "pageToken": "NEXTPAGE"},
+ {"rows": [{"f": [{"v": "Sven"}]}]},
+ ]
+ )
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ api_request,
+ path,
+ iterator_schema,
+ table=mut.Table("proj.dset.tbl"),
+ )
+
+ df = row_iterator.to_dataframe(
+ bqstorage_client=None,
+ create_bqstorage_client=False,
+ )
+
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 2)
+ self.assertEqual(list(df), ["name"])
+ self.assertEqual(df.name.dtype.name, "object")
+ self.assertTrue(df.index.is_unique)
+
+ def test_to_dataframe_w_bqstorage_raises_auth_error(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ pytest.importorskip("pandas")
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery import table as mut
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client.create_read_session.side_effect = (
+ google.api_core.exceptions.Forbidden(
+ "TEST BigQuery Storage API not enabled. TEST"
+ )
+ )
+ path = "/foo"
+ api_request = mock.Mock(return_value={"rows": []})
+ row_iterator = mut.RowIterator(
+ _mock_client(), api_request, path, [], table=mut.Table("proj.dset.tbl")
+ )
+
+ with pytest.raises(google.api_core.exceptions.Forbidden):
+ row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
+
+ def test_to_dataframe_w_bqstorage_partition(self):
+ bigquery_storage = pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ [schema.SchemaField("colA", "IGNORED")],
+ table=mut.TableReference.from_string("proj.dset.tbl$20181225"),
+ )
+
+ with pytest.raises(ValueError):
+ row_iterator.to_dataframe(bqstorage_client)
+
+ def test_to_dataframe_w_bqstorage_snapshot(self):
+ bigquery_storage = pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ [schema.SchemaField("colA", "IGNORED")],
+ table=mut.TableReference.from_string("proj.dset.tbl@1234567890000"),
+ )
+
+ with pytest.raises(ValueError):
+ row_iterator.to_dataframe(bqstorage_client)
+
+ def test_to_dataframe_concat_categorical_dtype_w_pyarrow(self):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ pandas = pytest.importorskip("pandas")
+ pyarrow = pytest.importorskip("pyarrow")
+ from google.cloud import bigquery_storage
+ from google.cloud.bigquery import schema
+ from google.cloud.bigquery import table as mut
+ from google.cloud.bigquery_storage_v1 import reader
+ from google.cloud.bigquery_storage_v1.services.big_query_read.transports import (
+ grpc as big_query_read_grpc_transport,
+ )
+
+ arrow_fields = [
+ # Not alphabetical to test column order.
+ pyarrow.field("col_str", pyarrow.utf8()),
+ # The backend returns strings, and without other info, pyarrow contains
+ # string data in categorical columns, too (and not maybe the Dictionary
+ # type that corresponds to pandas.Categorical).
+ pyarrow.field("col_category", pyarrow.utf8()),
+ ]
+ arrow_schema = pyarrow.schema(arrow_fields)
+
+ # create a mock BQ storage client
+ bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)
+ bqstorage_client._transport = mock.create_autospec(
+ big_query_read_grpc_transport.BigQueryReadGrpcTransport
+ )
+ session = bigquery_storage.types.ReadSession(
+ streams=[{"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"}],
+ arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
+ )
+ bqstorage_client.create_read_session.return_value = session
+
+ mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
+ bqstorage_client.read_rows.return_value = mock_rowstream
+
+ # prepare the iterator over mocked rows
+ mock_rows = mock.create_autospec(reader.ReadRowsIterable)
+ mock_rowstream.rows.return_value = mock_rows
+ page_items = [
+ [
+ pyarrow.array(["foo", "bar", "baz"]), # col_str
+ pyarrow.array(["low", "medium", "low"]), # col_category
+ ],
+ [
+ pyarrow.array(["foo_page2", "bar_page2", "baz_page2"]), # col_str
+ pyarrow.array(["medium", "high", "low"]), # col_category
+ ],
+ ]
+
+ mock_pages = []
+
+ for record_list in page_items:
+ page_record_batch = pyarrow.RecordBatch.from_arrays(
+ record_list, schema=arrow_schema
+ )
+ mock_page = mock.create_autospec(reader.ReadRowsPage)
+ mock_page.to_arrow.return_value = page_record_batch
+ mock_pages.append(mock_page)
+
+ type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
+
+ schema = [
+ schema.SchemaField("col_str", "IGNORED"),
+ schema.SchemaField("col_category", "IGNORED"),
+ ]
+
+ row_iterator = mut.RowIterator(
+ _mock_client(),
+ None, # api_request: ignored
+ None, # path: ignored
+ schema,
+ table=mut.TableReference.from_string("proj.dset.tbl"),
+ selected_fields=schema,
+ )
+
+ # run the method under test
+ got = row_iterator.to_dataframe(
+ bqstorage_client=bqstorage_client,
+ dtypes={
+ "col_category": pandas.core.dtypes.dtypes.CategoricalDtype(
+ categories=["low", "medium", "high"],
+ ordered=False,
+ ),
+ },
+ )
+
+ # Are the columns in the expected order?
+ column_names = ["col_str", "col_category"]
+ self.assertEqual(list(got), column_names)
+
+ # Have expected number of rows?
+ total_pages = len(mock_pages) # we have a single stream, thus these two equal
+ total_rows = len(page_items[0][0]) * total_pages
+ self.assertEqual(len(got.index), total_rows)
+
+ # Are column types correct?
+ expected_dtypes = [
+ pandas.core.dtypes.dtypes.np.dtype("O"), # the default for string data
+ pandas.core.dtypes.dtypes.CategoricalDtype(
+ categories=["low", "medium", "high"],
+ ordered=False,
+ ),
+ ]
+ self.assertEqual(list(got.dtypes), expected_dtypes)
+
+ # And the data in the categorical column?
+ self.assertEqual(
+ list(got["col_category"]),
+ ["low", "medium", "low", "medium", "high", "low"],
+ )
+
+ # Don't close the client if it was passed in.
+ bqstorage_client._transport.grpc_channel.close.assert_not_called()
+
+ def test_to_dataframe_geography_as_object(self):
+ pandas = pytest.importorskip("pandas")
+ pytest.importorskip("geopandas")
+ row_iterator = self._make_one_from_data(
+ (("name", "STRING"), ("geog", "GEOGRAPHY")),
+ (
+ ("foo", "Point(0 0)"),
+ ("bar", None),
+ ("baz", "Polygon((0 0, 0 1, 1 0, 0 0))"),
+ ),
+ )
+ df = row_iterator.to_dataframe(
+ create_bqstorage_client=False,
+ geography_as_object=True,
+ )
+ self.assertIsInstance(df, pandas.DataFrame)
+ self.assertEqual(len(df), 3) # verify the number of rows
+ self.assertEqual(list(df), ["name", "geog"]) # verify the column names
+ self.assertEqual(df.name.dtype.name, "object")
+ self.assertEqual(df.geog.dtype.name, "object")
+ self.assertIsInstance(df.geog, pandas.Series)
+ self.assertEqual(
+ [v.__class__.__name__ for v in df.geog], ["Point", "float", "Polygon"]
+ )
+
+ @mock.patch("google.cloud.bigquery.table.geopandas", new=None)
+ def test_to_geodataframe_error_if_geopandas_is_none(self):
+ with self.assertRaisesRegex(
+ ValueError,
+ re.escape(
+ "The geopandas library is not installed, please install "
+ "geopandas to use the to_geodataframe() function."
+ ),
+ ):
+ self._make_one_from_data().to_geodataframe()
+
+ def test_to_geodataframe(self):
+ geopandas = pytest.importorskip("geopandas")
+ row_iterator = self._make_one_from_data(
+ (("name", "STRING"), ("geog", "GEOGRAPHY")),
+ (
+ ("foo", "Point(0 0)"),
+ ("bar", None),
+ ("baz", "Polygon((0 0, 0 1, 1 0, 0 0))"),
+ ),
+ )
+ df = row_iterator.to_geodataframe(create_bqstorage_client=False)
+ self.assertIsInstance(df, geopandas.GeoDataFrame)
+ self.assertEqual(len(df), 3) # verify the number of rows
+ self.assertEqual(list(df), ["name", "geog"]) # verify the column names
+ self.assertEqual(df.name.dtype.name, "object")
+ self.assertEqual(df.geog.dtype.name, "geometry")
+ self.assertIsInstance(df.geog, geopandas.GeoSeries)
+
+ with warnings.catch_warnings():
+ # Computing the area on a GeoDataFrame that uses a geographic Coordinate
+ # Reference System (CRS) produces a warning that we are not interested in.
+ warnings.filterwarnings("ignore", category=UserWarning)
+ self.assertEqual(list(map(str, df.area)), ["0.0", "nan", "0.5"])
+ self.assertEqual(list(map(str, df.geog.area)), ["0.0", "nan", "0.5"])
+
+ self.assertEqual(df.crs.srs, "EPSG:4326")
+ self.assertEqual(df.crs.name, "WGS 84")
+ self.assertEqual(df.geog.crs.srs, "EPSG:4326")
+ self.assertEqual(df.geog.crs.name, "WGS 84")
+
+ def test_to_geodataframe_ambiguous_geog(self):
+ pytest.importorskip("geopandas")
+ row_iterator = self._make_one_from_data(
+ (("name", "STRING"), ("geog", "GEOGRAPHY"), ("geog2", "GEOGRAPHY")), ()
+ )
+ with self.assertRaisesRegex(
+ ValueError,
+ re.escape(
+ "There is more than one GEOGRAPHY column in the result. "
+ "The geography_column argument must be used to specify which "
+ "one to use to create a GeoDataFrame"
+ ),
+ ):
+ row_iterator.to_geodataframe(create_bqstorage_client=False)
+
+ def test_to_geodataframe_bad_geography_column(self):
+ pytest.importorskip("geopandas")
+ row_iterator = self._make_one_from_data(
+ (("name", "STRING"), ("geog", "GEOGRAPHY"), ("geog2", "GEOGRAPHY")), ()
+ )
+ with self.assertRaisesRegex(
+ ValueError,
+ re.escape(
+ "The given geography column, xxx, doesn't name"
+ " a GEOGRAPHY column in the result."
+ ),
+ ):
+ row_iterator.to_geodataframe(
+ create_bqstorage_client=False, geography_column="xxx"
+ )
+
+ def test_to_geodataframe_no_geog(self):
+ pytest.importorskip("geopandas")
+ row_iterator = self._make_one_from_data(
+ (("name", "STRING"), ("geog", "STRING")), ()
+ )
+ with self.assertRaisesRegex(
+ TypeError,
+ re.escape(
+ "There must be at least one GEOGRAPHY column"
+ " to create a GeoDataFrame"
+ ),
+ ):
+ row_iterator.to_geodataframe(create_bqstorage_client=False)
+
+ def test_to_geodataframe_w_geography_column(self):
+ geopandas = pytest.importorskip("geopandas")
+ pandas = pytest.importorskip("pandas")
+ row_iterator = self._make_one_from_data(
+ (("name", "STRING"), ("geog", "GEOGRAPHY"), ("geog2", "GEOGRAPHY")),
+ (
+ ("foo", "Point(0 0)", "Point(1 1)"),
+ ("bar", None, "Point(2 2)"),
+ ("baz", "Polygon((0 0, 0 1, 1 0, 0 0))", "Point(3 3)"),
+ ),
+ )
+ df = row_iterator.to_geodataframe(
+ create_bqstorage_client=False, geography_column="geog"
+ )
+ self.assertIsInstance(df, geopandas.GeoDataFrame)
+ self.assertEqual(len(df), 3) # verify the number of rows
+ self.assertEqual(list(df), ["name", "geog", "geog2"]) # verify the column names
+ self.assertEqual(df.name.dtype.name, "object")
+ self.assertEqual(df.geog.dtype.name, "geometry")
+ self.assertEqual(df.geog2.dtype.name, "object")
+ self.assertIsInstance(df.geog, geopandas.GeoSeries)
+
+ with warnings.catch_warnings():
+ # Computing the area on a GeoDataFrame that uses a geographic Coordinate
+ # Reference System (CRS) produces a warning that we are not interested in.
+ warnings.filterwarnings("ignore", category=UserWarning)
+ self.assertEqual(list(map(str, df.area)), ["0.0", "nan", "0.5"])
+ self.assertEqual(list(map(str, df.geog.area)), ["0.0", "nan", "0.5"])
+
+ self.assertEqual(
+ [v.__class__.__name__ for v in df.geog], ["Point", "NoneType", "Polygon"]
+ )
+
+ # Geog2 isn't a GeoSeries, but it contains geomentries:
+ self.assertIsInstance(df.geog2, pandas.Series)
+ self.assertEqual(
+ [v.__class__.__name__ for v in df.geog2], ["Point", "Point", "Point"]
+ )
+
+ # and can easily be converted to a GeoSeries
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", category=UserWarning)
+ self.assertEqual(
+ list(map(str, geopandas.GeoSeries(df.geog2).area)),
+ ["0.0", "0.0", "0.0"],
+ )
+
+ @mock.patch("google.cloud.bigquery.table.RowIterator.to_dataframe")
+ def test_rowiterator_to_geodataframe_delegation(self, to_dataframe):
+ """
+ RowIterator.to_geodataframe just delegates to RowIterator.to_dataframe.
+
+ This test just demonstrates that. We don't need to test all the
+ variations, which are tested for to_dataframe.
+ """
+ pandas = pytest.importorskip("pandas")
+ geopandas = pytest.importorskip("geopandas")
+ import numpy
+ from shapely import wkt
+
+ row_iterator = self._make_one_from_data(
+ (("name", "STRING"), ("g", "GEOGRAPHY"))
+ )
+ bqstorage_client = object()
+ dtypes = dict(xxx=numpy.dtype("int64"))
+ progress_bar_type = "normal"
+ create_bqstorage_client = False
+ geography_column = "g"
+
+ to_dataframe.return_value = pandas.DataFrame(
+ dict(
+ name=["foo"],
+ g=[wkt.loads("point(0 0)")],
+ )
+ )
+
+ df = row_iterator.to_geodataframe(
+ bqstorage_client=bqstorage_client,
+ dtypes=dtypes,
+ progress_bar_type=progress_bar_type,
+ create_bqstorage_client=create_bqstorage_client,
+ geography_column=geography_column,
+ )
+
+ to_dataframe.assert_called_once_with(
+ bqstorage_client,
+ dtypes,
+ progress_bar_type,
+ create_bqstorage_client,
+ geography_as_object=True,
+ )
+
+ self.assertIsInstance(df, geopandas.GeoDataFrame)
+ self.assertEqual(len(df), 1) # verify the number of rows
+ self.assertEqual(list(df), ["name", "g"]) # verify the column names
+ self.assertEqual(df.name.dtype.name, "object")
+ self.assertEqual(df.g.dtype.name, "geometry")
+ self.assertIsInstance(df.g, geopandas.GeoSeries)
+
+ with warnings.catch_warnings():
+ # Computing the area on a GeoDataFrame that uses a geographic Coordinate
+ # Reference System (CRS) produces a warning that we are not interested in.
+ warnings.filterwarnings("ignore", category=UserWarning)
+ self.assertEqual(list(map(str, df.area)), ["0.0"])
+ self.assertEqual(list(map(str, df.g.area)), ["0.0"])
+
+ self.assertEqual([v.__class__.__name__ for v in df.g], ["Point"])
+
+
+class TestPartitionRange(unittest.TestCase):
+ def _get_target_class(self):
+ from google.cloud.bigquery.table import PartitionRange
+
+ return PartitionRange
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_constructor_defaults(self):
+ object_under_test = self._make_one()
+ assert object_under_test.start is None
+ assert object_under_test.end is None
+ assert object_under_test.interval is None
+
+ def test_constructor_w_properties(self):
+ object_under_test = self._make_one(start=1, end=10, interval=2)
+ assert object_under_test.start == 1
+ assert object_under_test.end == 10
+ assert object_under_test.interval == 2
+
+ def test_constructor_w_resource(self):
+ object_under_test = self._make_one(
+ _properties={"start": -1234567890, "end": 1234567890, "interval": 1000000}
+ )
+ assert object_under_test.start == -1234567890
+ assert object_under_test.end == 1234567890
+ assert object_under_test.interval == 1000000
+
+ def test___eq___start_mismatch(self):
+ object_under_test = self._make_one(start=1, end=10, interval=2)
+ other = self._make_one(start=2, end=10, interval=2)
+ self.assertNotEqual(object_under_test, other)
+
+ def test___eq___end__mismatch(self):
+ object_under_test = self._make_one(start=1, end=10, interval=2)
+ other = self._make_one(start=1, end=11, interval=2)
+ self.assertNotEqual(object_under_test, other)
+
+ def test___eq___interval__mismatch(self):
+ object_under_test = self._make_one(start=1, end=10, interval=2)
+ other = self._make_one(start=1, end=11, interval=3)
+ self.assertNotEqual(object_under_test, other)
+
+ def test___eq___hit(self):
+ object_under_test = self._make_one(start=1, end=10, interval=2)
+ other = self._make_one(start=1, end=10, interval=2)
+ self.assertEqual(object_under_test, other)
+
+ def test__eq___type_mismatch(self):
+ object_under_test = self._make_one(start=1, end=10, interval=2)
+ self.assertNotEqual(object_under_test, object())
+ self.assertEqual(object_under_test, mock.ANY)
+
+ def test_unhashable_object(self):
+ object_under_test1 = self._make_one(start=1, end=10, interval=2)
+
+ with self.assertRaisesRegex(TypeError, r".*unhashable type.*"):
+ hash(object_under_test1)
+
+ def test_repr(self):
+ object_under_test = self._make_one(start=1, end=10, interval=2)
+ assert repr(object_under_test) == "PartitionRange(end=10, interval=2, start=1)"
+
+
+class TestRangePartitioning(unittest.TestCase):
+ def _get_target_class(self):
+ from google.cloud.bigquery.table import RangePartitioning
+
+ return RangePartitioning
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_constructor_defaults(self):
+ object_under_test = self._make_one()
+ assert object_under_test.field is None
+ assert object_under_test.range_.start is None
+ assert object_under_test.range_.end is None
+ assert object_under_test.range_.interval is None
+
+ def test_constructor_w_properties(self):
+ from google.cloud.bigquery.table import PartitionRange
+
+ object_under_test = self._make_one(
+ range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
+ )
+ assert object_under_test.field == "integer_col"
+ assert object_under_test.range_.start == 1
+ assert object_under_test.range_.end == 10
+ assert object_under_test.range_.interval == 2
+
+ def test_constructor_w_resource(self):
+ object_under_test = self._make_one(
+ _properties={
+ "field": "some_column",
+ "range": {"start": -1234567890, "end": 1234567890, "interval": 1000000},
+ }
+ )
+ assert object_under_test.field == "some_column"
+ assert object_under_test.range_.start == -1234567890
+ assert object_under_test.range_.end == 1234567890
+ assert object_under_test.range_.interval == 1000000
+
+ def test_range_w_wrong_type(self):
+ object_under_test = self._make_one()
+ with pytest.raises(ValueError, match="PartitionRange"):
+ object_under_test.range_ = object()
+
+ def test___eq___field_mismatch(self):
+ from google.cloud.bigquery.table import PartitionRange
+
+ object_under_test = self._make_one(
+ range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
+ )
+ other = self._make_one(
+ range_=PartitionRange(start=1, end=10, interval=2), field="float_col"
+ )
+ self.assertNotEqual(object_under_test, other)
+
+ def test___eq___range__mismatch(self):
+ from google.cloud.bigquery.table import PartitionRange
+
+ object_under_test = self._make_one(
+ range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
+ )
+ other = self._make_one(
+ range_=PartitionRange(start=2, end=20, interval=2), field="float_col"
+ )
+ self.assertNotEqual(object_under_test, other)
+
+ def test___eq___hit(self):
+ from google.cloud.bigquery.table import PartitionRange
+
+ object_under_test = self._make_one(
+ range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
+ )
+ other = self._make_one(
+ range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
+ )
+ self.assertEqual(object_under_test, other)
+
+ def test__eq___type_mismatch(self):
+ from google.cloud.bigquery.table import PartitionRange
+
+ object_under_test = self._make_one(
+ range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
+ )
+ self.assertNotEqual(object_under_test, object())
+ self.assertEqual(object_under_test, mock.ANY)
+
+ def test_unhashable_object(self):
+ from google.cloud.bigquery.table import PartitionRange
+
+ object_under_test1 = self._make_one(
+ range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
+ )
+ with self.assertRaisesRegex(TypeError, r".*unhashable type.*"):
+ hash(object_under_test1)
+
+ def test_repr(self):
+ from google.cloud.bigquery.table import PartitionRange
+
+ object_under_test = self._make_one(
+ range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
+ )
+ assert (
+ repr(object_under_test)
+ == "RangePartitioning(field='integer_col', range_=PartitionRange(end=10, interval=2, start=1))"
+ )
+
+
+class TestTimePartitioning(unittest.TestCase):
+ def _get_target_class(self):
+ from google.cloud.bigquery.table import TimePartitioning
+
+ return TimePartitioning
+
+ def _make_one(self, *args, **kw):
+ return self._get_target_class()(*args, **kw)
+
+ def test_constructor_defaults(self):
+ time_partitioning = self._make_one()
+ self.assertEqual(time_partitioning.type_, "DAY")
+ self.assertIsNone(time_partitioning.field)
+ self.assertIsNone(time_partitioning.expiration_ms)
+
+ def test_constructor_explicit(self):
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ time_partitioning = self._make_one(
+ type_=TimePartitioningType.DAY, field="name", expiration_ms=10000
+ )
+
+ self.assertEqual(time_partitioning.type_, "DAY")
+ self.assertEqual(time_partitioning.field, "name")
+ self.assertEqual(time_partitioning.expiration_ms, 10000)
+
+ def test_require_partition_filter_warns_deprecation(self):
+ object_under_test = self._make_one()
+
+ with warnings.catch_warnings(record=True) as warned:
+ assert object_under_test.require_partition_filter is None
+ object_under_test.require_partition_filter = True
+ assert object_under_test.require_partition_filter
+
+ assert len(warned) == 3
+ for warning in warned:
+ self.assertIs(warning.category, PendingDeprecationWarning)
+
+ def test_from_api_repr_empty(self):
+ klass = self._get_target_class()
+
+ # Even though there are required properties according to the API
+ # specification, sometimes time partitioning is populated as an empty
+ # object. See internal bug 131167013.
+ api_repr = {}
+ time_partitioning = klass.from_api_repr(api_repr)
+
+ self.assertIsNone(time_partitioning.type_)
+ self.assertIsNone(time_partitioning.field)
+ self.assertIsNone(time_partitioning.expiration_ms)
+
+ def test_from_api_repr_minimal(self):
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ klass = self._get_target_class()
+ api_repr = {"type": "DAY"}
+ time_partitioning = klass.from_api_repr(api_repr)
+
+ self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)
+ self.assertIsNone(time_partitioning.field)
+ self.assertIsNone(time_partitioning.expiration_ms)
+
+ def test_from_api_repr_doesnt_override_type(self):
+ klass = self._get_target_class()
+ api_repr = {"type": "HOUR"}
+ time_partitioning = klass.from_api_repr(api_repr)
+ self.assertEqual(time_partitioning.type_, "HOUR")
+
+ def test_from_api_repr_explicit(self):
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ klass = self._get_target_class()
+ api_repr = {
+ "type": "DAY",
+ "field": "name",
+ "expirationMs": "10000",
+ "requirePartitionFilter": True,
+ }
+ time_partitioning = klass.from_api_repr(api_repr)
+
+ self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)
+ self.assertEqual(time_partitioning.field, "name")
+ self.assertEqual(time_partitioning.expiration_ms, 10000)
+
+ with warnings.catch_warnings(record=True) as warned:
+ self.assertTrue(time_partitioning.require_partition_filter)
+
+ self.assertIs(warned[0].category, PendingDeprecationWarning)
+
+ def test_to_api_repr_defaults(self):
+ time_partitioning = self._make_one()
+ expected = {"type": "DAY"}
+ self.assertEqual(time_partitioning.to_api_repr(), expected)
+
+ def test_to_api_repr_explicit(self):
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ time_partitioning = self._make_one(
+ type_=TimePartitioningType.DAY, field="name", expiration_ms=10000
+ )
+
+ with warnings.catch_warnings(record=True) as warned:
+ time_partitioning.require_partition_filter = True
+
+ self.assertIs(warned[0].category, PendingDeprecationWarning)
+
+ expected = {
+ "type": "DAY",
+ "field": "name",
+ "expirationMs": "10000",
+ "requirePartitionFilter": True,
+ }
+ self.assertEqual(time_partitioning.to_api_repr(), expected)
+
+ def test___eq___wrong_type(self):
+ time_partitioning = self._make_one()
+ other = object()
+ self.assertNotEqual(time_partitioning, other)
+ self.assertEqual(time_partitioning, mock.ANY)
+
+ def test___eq___type__mismatch(self):
+ time_partitioning = self._make_one()
+ other = self._make_one(type_="HOUR")
+ self.assertNotEqual(time_partitioning, other)
+
+ def test___eq___field_mismatch(self):
+ time_partitioning = self._make_one(field="foo")
+ other = self._make_one(field="bar")
+ self.assertNotEqual(time_partitioning, other)
+
+ def test___eq___expiration_ms_mismatch(self):
+ time_partitioning = self._make_one(field="foo", expiration_ms=100000)
+ other = self._make_one(field="foo", expiration_ms=200000)
+ self.assertNotEqual(time_partitioning, other)
+
+ def test___eq___require_partition_filter_mismatch(self):
+ time_partitioning = self._make_one(field="foo", expiration_ms=100000)
+ other = self._make_one(field="foo", expiration_ms=100000)
+ with warnings.catch_warnings(record=True) as warned:
+ time_partitioning.require_partition_filter = True
+ other.require_partition_filter = False
+
+ assert len(warned) == 2
+ for warning in warned:
+ self.assertIs(warning.category, PendingDeprecationWarning)
+
+ self.assertNotEqual(time_partitioning, other)
+
+ def test___eq___hit(self):
+ time_partitioning = self._make_one(field="foo", expiration_ms=100000)
+ other = self._make_one(field="foo", expiration_ms=100000)
+ self.assertEqual(time_partitioning, other)
+
+ def test___ne___wrong_type(self):
+ time_partitioning = self._make_one()
+ other = object()
+ self.assertNotEqual(time_partitioning, other)
+ self.assertEqual(time_partitioning, mock.ANY)
+
+ def test___ne___same_value(self):
+ time_partitioning1 = self._make_one()
+ time_partitioning2 = self._make_one()
+ # unittest ``assertEqual`` uses ``==`` not ``!=``.
+ comparison_val = time_partitioning1 != time_partitioning2
+ self.assertFalse(comparison_val)
+
+ def test___ne___different_values(self):
+ time_partitioning1 = self._make_one()
+ time_partitioning2 = self._make_one(type_="HOUR")
+ self.assertNotEqual(time_partitioning1, time_partitioning2)
+
+ def test___hash__set_equality(self):
+ time_partitioning1 = self._make_one(field="foo")
+ time_partitioning2 = self._make_one(field="foo")
+ set_one = {time_partitioning1, time_partitioning2}
+ set_two = {time_partitioning1, time_partitioning2}
+ self.assertEqual(set_one, set_two)
+
+ def test___hash__not_equals(self):
+ time_partitioning1 = self._make_one(field="foo")
+ time_partitioning2 = self._make_one(field="bar")
+ set_one = {time_partitioning1}
+ set_two = {time_partitioning2}
+ self.assertNotEqual(set_one, set_two)
+
+ def test___repr___minimal(self):
+ time_partitioning = self._make_one()
+ expected = "TimePartitioning(type_='DAY')"
+ self.assertEqual(repr(time_partitioning), expected)
+
+ def test___repr___explicit(self):
+ from google.cloud.bigquery.table import TimePartitioningType
+
+ time_partitioning = self._make_one(
+ type_=TimePartitioningType.DAY, field="name", expiration_ms=10000
+ )
+ expected = "TimePartitioning(expiration_ms=10000,field='name',type_='DAY')"
+ self.assertEqual(repr(time_partitioning), expected)
+
+ def test_set_expiration_w_none(self):
+ time_partitioning = self._make_one()
+ time_partitioning.expiration_ms = None
+ assert time_partitioning._properties["expirationMs"] is None
+
+
+class TestPrimaryKey(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import PrimaryKey
+
+ return PrimaryKey
+
+ @classmethod
+ def _make_one(cls, *args, **kwargs):
+ return cls._get_target_class()(*args, **kwargs)
+
+ def test_constructor_explicit(self):
+ columns = ["id", "product_id"]
+ primary_key = self._make_one(columns)
+
+ self.assertEqual(primary_key.columns, columns)
+
+ def test__eq__columns_mismatch(self):
+ primary_key = self._make_one(columns=["id", "product_id"])
+ other_primary_key = self._make_one(columns=["id"])
+
+ self.assertNotEqual(primary_key, other_primary_key)
+
+ def test__eq__other_type(self):
+ primary_key = self._make_one(columns=["id", "product_id"])
+ with self.assertRaises(TypeError):
+ primary_key == "This is not a Primary Key"
+
+
+class TestColumnReference(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import ColumnReference
+
+ return ColumnReference
+
+ @classmethod
+ def _make_one(cls, *args, **kwargs):
+ return cls._get_target_class()(*args, **kwargs)
+
+ def test_constructor_explicit(self):
+ referencing_column = "product_id"
+ referenced_column = "id"
+ column_reference = self._make_one(referencing_column, referenced_column)
+
+ self.assertEqual(column_reference.referencing_column, referencing_column)
+ self.assertEqual(column_reference.referenced_column, referenced_column)
+
+ def test__eq__referencing_column_mismatch(self):
+ column_reference = self._make_one(
+ referencing_column="product_id",
+ referenced_column="id",
+ )
+ other_column_reference = self._make_one(
+ referencing_column="item_id",
+ referenced_column="id",
+ )
+
+ self.assertNotEqual(column_reference, other_column_reference)
+
+ def test__eq__referenced_column_mismatch(self):
+ column_reference = self._make_one(
+ referencing_column="product_id",
+ referenced_column="id",
+ )
+ other_column_reference = self._make_one(
+ referencing_column="product_id",
+ referenced_column="id_1",
+ )
+
+ self.assertNotEqual(column_reference, other_column_reference)
+
+ def test__eq__other_type(self):
+ column_reference = self._make_one(
+ referencing_column="product_id",
+ referenced_column="id",
+ )
+ with self.assertRaises(TypeError):
+ column_reference == "This is not a Column Reference"
+
+
+class TestForeignKey(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import ForeignKey
+
+ return ForeignKey
+
+ @classmethod
+ def _make_one(cls, *args, **kwargs):
+ return cls._get_target_class()(*args, **kwargs)
+
+ def test_constructor_explicit(self):
+ name = "my_fk"
+ referenced_table = TableReference.from_string("my-project.mydataset.mytable")
+ column_references = []
+ foreign_key = self._make_one(name, referenced_table, column_references)
+
+ self.assertEqual(foreign_key.name, name)
+ self.assertEqual(foreign_key.referenced_table, referenced_table)
+ self.assertEqual(foreign_key.column_references, column_references)
+
+ def test__eq__name_mismatch(self):
+ referenced_table = TableReference.from_string("my-project.mydataset.mytable")
+ column_references = []
+ foreign_key = self._make_one(
+ name="my_fk",
+ referenced_table=referenced_table,
+ column_references=column_references,
+ )
+ other_foreign_key = self._make_one(
+ name="my_other_fk",
+ referenced_table=referenced_table,
+ column_references=column_references,
+ )
+
+ self.assertNotEqual(foreign_key, other_foreign_key)
+
+ def test__eq__referenced_table_mismatch(self):
+ name = "my_fk"
+ column_references = []
+ foreign_key = self._make_one(
+ name=name,
+ referenced_table=TableReference.from_string("my-project.mydataset.mytable"),
+ column_references=column_references,
+ )
+ other_foreign_key = self._make_one(
+ name=name,
+ referenced_table=TableReference.from_string(
+ "my-project.mydataset.my-other-table"
+ ),
+ column_references=column_references,
+ )
+
+ self.assertNotEqual(foreign_key, other_foreign_key)
+
+ def test__eq__column_references_mismatch(self):
+ from google.cloud.bigquery.table import ColumnReference
+
+ name = "my_fk"
+ referenced_table = TableReference.from_string("my-project.mydataset.mytable")
+ foreign_key = self._make_one(
+ name=name,
+ referenced_table=referenced_table,
+ column_references=[],
+ )
+ other_foreign_key = self._make_one(
+ name=name,
+ referenced_table=referenced_table,
+ column_references=[
+ ColumnReference(
+ referencing_column="product_id", referenced_column="id"
+ ),
+ ],
+ )
+
+ self.assertNotEqual(foreign_key, other_foreign_key)
+
+ def test__eq__other_type(self):
+ foreign_key = self._make_one(
+ name="my_fk",
+ referenced_table=TableReference.from_string("my-project.mydataset.mytable"),
+ column_references=[],
+ )
+ with self.assertRaises(TypeError):
+ foreign_key == "This is not a Foreign Key"
+
+
+class TestTableConstraint(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigquery.table import TableConstraints
+
+ return TableConstraints
+
+ @classmethod
+ def _make_one(cls, *args, **kwargs):
+ return cls._get_target_class()(*args, **kwargs)
+
+ def test_constructor_defaults(self):
+ instance = self._make_one(primary_key=None, foreign_keys=None)
+ self.assertIsNone(instance.primary_key)
+ self.assertIsNone(instance.foreign_keys)
+
+ def test_from_api_repr_full_resource(self):
+ from google.cloud.bigquery.table import (
+ ColumnReference,
+ ForeignKey,
+ TableReference,
+ )
+
+ resource = {
+ "primaryKey": {
+ "columns": ["id", "product_id"],
+ },
+ "foreignKeys": [
+ {
+ "name": "my_fk_name",
+ "referencedTable": {
+ "projectId": "my-project",
+ "datasetId": "your-dataset",
+ "tableId": "products",
+ },
+ "columnReferences": [
+ {"referencingColumn": "product_id", "referencedColumn": "id"},
+ ],
+ }
+ ],
+ }
+ instance = self._get_target_class().from_api_repr(resource)
+
+ self.assertIsNotNone(instance.primary_key)
+ self.assertEqual(instance.primary_key.columns, ["id", "product_id"])
+ self.assertEqual(
+ instance.foreign_keys,
+ [
+ ForeignKey(
+ name="my_fk_name",
+ referenced_table=TableReference.from_string(
+ "my-project.your-dataset.products"
+ ),
+ column_references=[
+ ColumnReference(
+ referencing_column="product_id", referenced_column="id"
+ ),
+ ],
+ ),
+ ],
+ )
+
+ def test_from_api_repr_only_primary_key_resource(self):
+ resource = {
+ "primaryKey": {
+ "columns": ["id"],
+ },
+ }
+ instance = self._get_target_class().from_api_repr(resource)
+
+ self.assertIsNotNone(instance.primary_key)
+ self.assertEqual(instance.primary_key.columns, ["id"])
+ self.assertIsNone(instance.foreign_keys)
+
+ def test_from_api_repr_only_foreign_keys_resource(self):
+ resource = {
+ "foreignKeys": [
+ {
+ "name": "my_fk_name",
+ "referencedTable": {
+ "projectId": "my-project",
+ "datasetId": "your-dataset",
+ "tableId": "products",
+ },
+ "columnReferences": [
+ {"referencingColumn": "product_id", "referencedColumn": "id"},
+ ],
+ }
+ ]
+ }
+ instance = self._get_target_class().from_api_repr(resource)
+
+ self.assertIsNone(instance.primary_key)
+ self.assertIsNotNone(instance.foreign_keys)
+
+
+@pytest.mark.parametrize(
+ "table_path",
+ (
+ "my-project.my_dataset.my_table",
+ "my-project.my_dataset.my_table$20181225",
+ "my-project.my_dataset.my_table@1234567890",
+ "my-project.my_dataset.my_table$20181225@1234567890",
+ ),
+)
+def test_table_reference_to_bqstorage_v1_stable(table_path):
+ pytest.importorskip("google.cloud.bigquery_storage")
+ from google.cloud.bigquery import table as mut
+
+ expected = "projects/my-project/datasets/my_dataset/tables/my_table"
+
+ for klass in (mut.TableReference, mut.Table, mut.TableListItem):
+ got = klass.from_string(table_path).to_bqstorage()
+ assert got == expected
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_table_arrow.py b/testbed/googleapis__python-bigquery/tests/unit/test_table_arrow.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f1e6f76a39b75c8d8a0fbb302868cd4adb608ef
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_table_arrow.py
@@ -0,0 +1,134 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from google.cloud import bigquery
+import google.cloud.bigquery.table
+
+
+pyarrow = pytest.importorskip("pyarrow", minversion="3.0.0")
+
+
+def test_to_arrow_with_jobs_query_response():
+ resource = {
+ "kind": "bigquery#queryResponse",
+ "schema": {
+ "fields": [
+ {"name": "name", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "number", "type": "INTEGER", "mode": "NULLABLE"},
+ ]
+ },
+ "jobReference": {
+ "projectId": "test-project",
+ "jobId": "job_ocd3cb-N62QIslU7R5qKKa2_427J",
+ "location": "US",
+ },
+ "totalRows": "9",
+ "rows": [
+ {"f": [{"v": "Tiarra"}, {"v": "6"}]},
+ {"f": [{"v": "Timothy"}, {"v": "325"}]},
+ {"f": [{"v": "Tina"}, {"v": "26"}]},
+ {"f": [{"v": "Tierra"}, {"v": "10"}]},
+ {"f": [{"v": "Tia"}, {"v": "17"}]},
+ {"f": [{"v": "Tiara"}, {"v": "22"}]},
+ {"f": [{"v": "Tiana"}, {"v": "6"}]},
+ {"f": [{"v": "Tiffany"}, {"v": "229"}]},
+ {"f": [{"v": "Tiffani"}, {"v": "8"}]},
+ ],
+ "totalBytesProcessed": "154775150",
+ "jobComplete": True,
+ "cacheHit": False,
+ "queryId": "job_ocd3cb-N62QIslU7R5qKKa2_427J",
+ }
+
+ rows = google.cloud.bigquery.table.RowIterator(
+ client=None,
+ api_request=None,
+ path=None,
+ schema=[
+ bigquery.SchemaField.from_api_repr(field)
+ for field in resource["schema"]["fields"]
+ ],
+ first_page_response=resource,
+ )
+ records = rows.to_arrow()
+
+ assert records.column_names == ["name", "number"]
+ assert records["name"].to_pylist() == [
+ "Tiarra",
+ "Timothy",
+ "Tina",
+ "Tierra",
+ "Tia",
+ "Tiara",
+ "Tiana",
+ "Tiffany",
+ "Tiffani",
+ ]
+ assert records["number"].to_pylist() == [6, 325, 26, 10, 17, 22, 6, 229, 8]
+
+
+def test_to_arrow_with_jobs_query_response_and_max_results():
+ resource = {
+ "kind": "bigquery#queryResponse",
+ "schema": {
+ "fields": [
+ {"name": "name", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "number", "type": "INTEGER", "mode": "NULLABLE"},
+ ]
+ },
+ "jobReference": {
+ "projectId": "test-project",
+ "jobId": "job_ocd3cb-N62QIslU7R5qKKa2_427J",
+ "location": "US",
+ },
+ "totalRows": "9",
+ "rows": [
+ {"f": [{"v": "Tiarra"}, {"v": "6"}]},
+ {"f": [{"v": "Timothy"}, {"v": "325"}]},
+ {"f": [{"v": "Tina"}, {"v": "26"}]},
+ {"f": [{"v": "Tierra"}, {"v": "10"}]},
+ {"f": [{"v": "Tia"}, {"v": "17"}]},
+ {"f": [{"v": "Tiara"}, {"v": "22"}]},
+ {"f": [{"v": "Tiana"}, {"v": "6"}]},
+ {"f": [{"v": "Tiffany"}, {"v": "229"}]},
+ {"f": [{"v": "Tiffani"}, {"v": "8"}]},
+ ],
+ "totalBytesProcessed": "154775150",
+ "jobComplete": True,
+ "cacheHit": False,
+ "queryId": "job_ocd3cb-N62QIslU7R5qKKa2_427J",
+ }
+
+ rows = google.cloud.bigquery.table.RowIterator(
+ client=None,
+ api_request=None,
+ path=None,
+ schema=[
+ bigquery.SchemaField.from_api_repr(field)
+ for field in resource["schema"]["fields"]
+ ],
+ first_page_response=resource,
+ max_results=3,
+ )
+ records = rows.to_arrow()
+
+ assert records.column_names == ["name", "number"]
+ assert records["name"].to_pylist() == [
+ "Tiarra",
+ "Timothy",
+ "Tina",
+ ]
+ assert records["number"].to_pylist() == [6, 325, 26]
diff --git a/testbed/googleapis__python-bigquery/tests/unit/test_table_pandas.py b/testbed/googleapis__python-bigquery/tests/unit/test_table_pandas.py
new file mode 100644
index 0000000000000000000000000000000000000000..02a7a6a797957053519b905b603602c763feaa14
--- /dev/null
+++ b/testbed/googleapis__python-bigquery/tests/unit/test_table_pandas.py
@@ -0,0 +1,257 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import decimal
+from unittest import mock
+
+import pytest
+
+from google.cloud import bigquery
+
+pandas = pytest.importorskip("pandas")
+pyarrow = pytest.importorskip("pyarrow", minversion="3.0.0")
+
+
+TEST_PATH = "/v1/project/test-proj/dataset/test-dset/table/test-tbl/data"
+
+
+@pytest.fixture
+def class_under_test():
+ from google.cloud.bigquery.table import RowIterator
+
+ return RowIterator
+
+
+@pytest.mark.skipif(
+ pandas.__version__.startswith("2."),
+ reason="pandas 2.0 changes some default dtypes and we haven't update the test to account for those",
+)
+def test_to_dataframe_nullable_scalars(monkeypatch, class_under_test):
+ # See tests/system/test_arrow.py for the actual types we get from the API.
+ arrow_schema = pyarrow.schema(
+ [
+ pyarrow.field("bignumeric_col", pyarrow.decimal256(76, scale=38)),
+ pyarrow.field("bool_col", pyarrow.bool_()),
+ pyarrow.field("bytes_col", pyarrow.binary()),
+ pyarrow.field("date_col", pyarrow.date32()),
+ pyarrow.field("datetime_col", pyarrow.timestamp("us", tz=None)),
+ pyarrow.field("float64_col", pyarrow.float64()),
+ pyarrow.field("int64_col", pyarrow.int64()),
+ pyarrow.field("numeric_col", pyarrow.decimal128(38, scale=9)),
+ pyarrow.field("string_col", pyarrow.string()),
+ pyarrow.field("time_col", pyarrow.time64("us")),
+ pyarrow.field(
+ "timestamp_col", pyarrow.timestamp("us", tz=datetime.timezone.utc)
+ ),
+ ]
+ )
+ arrow_table = pyarrow.Table.from_pydict(
+ {
+ "bignumeric_col": [decimal.Decimal("123.456789101112131415")],
+ "bool_col": [True],
+ "bytes_col": [b"Hello,\x00World!"],
+ "date_col": [datetime.date(2021, 8, 9)],
+ "datetime_col": [datetime.datetime(2021, 8, 9, 13, 30, 44, 123456)],
+ "float64_col": [1.25],
+ "int64_col": [-7],
+ "numeric_col": [decimal.Decimal("-123.456789")],
+ "string_col": ["abcdefg"],
+ "time_col": [datetime.time(14, 21, 17, 123456)],
+ "timestamp_col": [
+ datetime.datetime(
+ 2021, 8, 9, 13, 30, 44, 123456, tzinfo=datetime.timezone.utc
+ )
+ ],
+ },
+ schema=arrow_schema,
+ )
+
+ nullable_schema = [
+ bigquery.SchemaField("bignumeric_col", "BIGNUMERIC"),
+ bigquery.SchemaField("bool_col", "BOOLEAN"),
+ bigquery.SchemaField("bytes_col", "BYTES"),
+ bigquery.SchemaField("date_col", "DATE"),
+ bigquery.SchemaField("datetime_col", "DATETIME"),
+ bigquery.SchemaField("float64_col", "FLOAT"),
+ bigquery.SchemaField("int64_col", "INT64"),
+ bigquery.SchemaField("numeric_col", "NUMERIC"),
+ bigquery.SchemaField("string_col", "STRING"),
+ bigquery.SchemaField("time_col", "TIME"),
+ bigquery.SchemaField("timestamp_col", "TIMESTAMP"),
+ ]
+ mock_client = mock.create_autospec(bigquery.Client)
+ mock_client.project = "test-proj"
+ mock_api_request = mock.Mock()
+ mock_to_arrow = mock.Mock()
+ mock_to_arrow.return_value = arrow_table
+ rows = class_under_test(mock_client, mock_api_request, TEST_PATH, nullable_schema)
+ monkeypatch.setattr(rows, "to_arrow", mock_to_arrow)
+ df = rows.to_dataframe()
+
+ # Check for expected dtypes.
+ # Keep these in sync with tests/system/test_pandas.py
+ assert df.dtypes["bignumeric_col"].name == "object"
+ assert df.dtypes["bool_col"].name == "boolean"
+ assert df.dtypes["bytes_col"].name == "object"
+ assert df.dtypes["date_col"].name == "dbdate"
+ assert df.dtypes["datetime_col"].name == "datetime64[ns]"
+ assert df.dtypes["float64_col"].name == "float64"
+ assert df.dtypes["int64_col"].name == "Int64"
+ assert df.dtypes["numeric_col"].name == "object"
+ assert df.dtypes["string_col"].name == "object"
+ assert df.dtypes["time_col"].name == "dbtime"
+ assert df.dtypes["timestamp_col"].name == "datetime64[ns, UTC]"
+
+ # Check for expected values.
+ assert df["bignumeric_col"][0] == decimal.Decimal("123.456789101112131415")
+ assert df["bool_col"][0] # True
+ assert df["bytes_col"][0] == b"Hello,\x00World!"
+
+ # object is used by default, but we can use "datetime64[ns]" automatically
+ # when data is within the supported range.
+ # https://github.com/googleapis/python-bigquery/issues/861
+ assert df["date_col"][0] == datetime.date(2021, 8, 9)
+
+ assert df["datetime_col"][0] == pandas.to_datetime("2021-08-09 13:30:44.123456")
+ assert df["float64_col"][0] == 1.25
+ assert df["int64_col"][0] == -7
+ assert df["numeric_col"][0] == decimal.Decimal("-123.456789")
+ assert df["string_col"][0] == "abcdefg"
+
+ # Pandas timedelta64 might be a better choice for pandas time columns. Then
+ # they can more easily be combined with date columns to form datetimes.
+ # https://github.com/googleapis/python-bigquery/issues/862
+ assert df["time_col"][0] == datetime.time(14, 21, 17, 123456)
+
+ assert df["timestamp_col"][0] == pandas.to_datetime("2021-08-09 13:30:44.123456Z")
+
+
+def test_to_dataframe_nullable_scalars_with_custom_dtypes(
+ monkeypatch, class_under_test
+):
+ """Passing in explicit dtypes is merged with default behavior."""
+ arrow_schema = pyarrow.schema(
+ [
+ pyarrow.field("int64_col", pyarrow.int64()),
+ pyarrow.field("other_int_col", pyarrow.int64()),
+ ]
+ )
+ arrow_table = pyarrow.Table.from_pydict(
+ {"int64_col": [1000], "other_int_col": [-7]},
+ schema=arrow_schema,
+ )
+
+ nullable_schema = [
+ bigquery.SchemaField("int64_col", "INT64"),
+ bigquery.SchemaField("other_int_col", "INT64"),
+ ]
+ mock_client = mock.create_autospec(bigquery.Client)
+ mock_client.project = "test-proj"
+ mock_api_request = mock.Mock()
+ mock_to_arrow = mock.Mock()
+ mock_to_arrow.return_value = arrow_table
+ rows = class_under_test(mock_client, mock_api_request, TEST_PATH, nullable_schema)
+ monkeypatch.setattr(rows, "to_arrow", mock_to_arrow)
+ df = rows.to_dataframe(dtypes={"other_int_col": "int8"})
+
+ assert df.dtypes["int64_col"].name == "Int64"
+ assert df["int64_col"][0] == 1000
+
+ assert df.dtypes["other_int_col"].name == "int8"
+ assert df["other_int_col"][0] == -7
+
+
+def test_to_dataframe_arrays(monkeypatch, class_under_test):
+ arrow_schema = pyarrow.schema(
+ [pyarrow.field("int64_repeated", pyarrow.list_(pyarrow.int64()))]
+ )
+ arrow_table = pyarrow.Table.from_pydict(
+ {"int64_repeated": [[-1, 0, 2]]},
+ schema=arrow_schema,
+ )
+
+ nullable_schema = [
+ bigquery.SchemaField("int64_repeated", "INT64", mode="REPEATED"),
+ ]
+ mock_client = mock.create_autospec(bigquery.Client)
+ mock_client.project = "test-proj"
+ mock_api_request = mock.Mock()
+ mock_to_arrow = mock.Mock()
+ mock_to_arrow.return_value = arrow_table
+ rows = class_under_test(mock_client, mock_api_request, TEST_PATH, nullable_schema)
+ monkeypatch.setattr(rows, "to_arrow", mock_to_arrow)
+ df = rows.to_dataframe()
+
+ assert df.dtypes["int64_repeated"].name == "object"
+ assert tuple(df["int64_repeated"][0]) == (-1, 0, 2)
+
+
+def test_to_dataframe_with_jobs_query_response(class_under_test):
+ resource = {
+ "kind": "bigquery#queryResponse",
+ "schema": {
+ "fields": [
+ {"name": "name", "type": "STRING", "mode": "NULLABLE"},
+ {"name": "number", "type": "INTEGER", "mode": "NULLABLE"},
+ ]
+ },
+ "jobReference": {
+ "projectId": "test-project",
+ "jobId": "job_ocd3cb-N62QIslU7R5qKKa2_427J",
+ "location": "US",
+ },
+ "totalRows": "9",
+ "rows": [
+ {"f": [{"v": "Tiarra"}, {"v": "6"}]},
+ {"f": [{"v": "Timothy"}, {"v": "325"}]},
+ {"f": [{"v": "Tina"}, {"v": "26"}]},
+ {"f": [{"v": "Tierra"}, {"v": "10"}]},
+ {"f": [{"v": "Tia"}, {"v": "17"}]},
+ {"f": [{"v": "Tiara"}, {"v": "22"}]},
+ {"f": [{"v": "Tiana"}, {"v": "6"}]},
+ {"f": [{"v": "Tiffany"}, {"v": "229"}]},
+ {"f": [{"v": "Tiffani"}, {"v": "8"}]},
+ ],
+ "totalBytesProcessed": "154775150",
+ "jobComplete": True,
+ "cacheHit": False,
+ "queryId": "job_ocd3cb-N62QIslU7R5qKKa2_427J",
+ }
+
+ rows = class_under_test(
+ client=None,
+ api_request=None,
+ path=None,
+ schema=[
+ bigquery.SchemaField.from_api_repr(field)
+ for field in resource["schema"]["fields"]
+ ],
+ first_page_response=resource,
+ )
+ df = rows.to_dataframe()
+
+ assert list(df.columns) == ["name", "number"]
+ assert list(df["name"]) == [
+ "Tiarra",
+ "Timothy",
+ "Tina",
+ "Tierra",
+ "Tia",
+ "Tiara",
+ "Tiana",
+ "Tiffany",
+ "Tiffani",
+ ]
+ assert list(df["number"]) == [6, 325, 26, 10, 17, 22, 6, 229, 8]
diff --git a/testbed/gradio-app__gradio/.github/ISSUE_TEMPLATE/bug_report_template.yml b/testbed/gradio-app__gradio/.github/ISSUE_TEMPLATE/bug_report_template.yml
new file mode 100644
index 0000000000000000000000000000000000000000..852bf06940f7f682fd503c2c720a5fd05705fa6c
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/ISSUE_TEMPLATE/bug_report_template.yml
@@ -0,0 +1,64 @@
+name: "\U0001F41E Bug report"
+description: Report a bug on Gradio
+labels: ["bug"]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for taking the time to fill out this bug report! Before you get started, please [search to see](https://github.com/gradio-app/gradio/issues) if an issue already exists for the bug you encountered
+ - type: textarea
+ id: bug-description
+ attributes:
+ label: Describe the bug
+ description: Please provide a concise description of what the bug is, in clear English. If you intend to submit a PR for this issue, tell us in the description.
+ placeholder: Bug description
+ validations:
+ required: true
+ - type: checkboxes
+ attributes:
+ label: Have you searched existing issues? 🔎
+ description: Please search to see if an issue already exists for the issue you encountered.
+ options:
+ - label: I have searched and found no existing issues
+ required: true
+ - type: textarea
+ id: reproduction
+ attributes:
+ label: Reproduction
+ description: Please provide a minimal example, with code, that can be run to reproduce the issue. Do NOT provide screenshots of code, or link to external repos or applications. Use ``` to format code blocks.
+ placeholder: Reproduction
+ validations:
+ required: true
+ - type: textarea
+ id: screenshot
+ attributes:
+ label: Screenshot
+ description: If relevant, please include screenshot(s) of your Gradio app so that we can understand what the issue is.
+ - type: textarea
+ id: logs
+ attributes:
+ label: Logs
+ description: "Please include the full stacktrace of the errors you get from Python or Javascript. If you are running in a colab notebooks, you can get the logs with by setting `debug=True`, i.e: `gradio.Interface.launch(debug=True)`"
+ render: shell
+ - type: textarea
+ id: system-info
+ attributes:
+ label: System Info
+ description: Please ensure you are running the latest version of Gradio. You can get the Gradio version and all its dependencies by running `gradio environment`
+ render: shell
+ validations:
+ required: true
+ - type: dropdown
+ id: severity
+ attributes:
+ label: Severity
+ description: Select the severity of this issue
+ options:
+ - I can work around it
+ - Blocking usage of gradio
+ validations:
+ required: true
+ - type: markdown
+ attributes:
+ value: |
+ 📌 Please ensure that you have filled all of the required sections above, and that the reproduction you have provided is [minimal, complete, and reproducible](https://stackoverflow.com/help/minimal-reproducible-example). Incomplete issues will be closed.
diff --git a/testbed/gradio-app__gradio/.github/ISSUE_TEMPLATE/config.yml b/testbed/gradio-app__gradio/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f7f293887d8b1e8e499fbd7da0943f0bcd389750
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+ - name: 💡 General questions
+ url: https://discord.com/invite/feTf9x3ZSB
+ about: Have general questions about how to use Gradio? Please ask in our community Discord for quicker responses
diff --git a/testbed/gradio-app__gradio/.github/ISSUE_TEMPLATE/feature_request.md b/testbed/gradio-app__gradio/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000000000000000000000000000000000..c51010af86e60376e2eddbfd091a92785aa1683f
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,19 @@
+---
+name: ⚡ Feature request
+about: Suggest an improvement or new feature or a new Guide for Gradio
+title: ''
+labels: ''
+assignees: ''
+
+---
+- [ ] I have searched to see if a similar issue already exists.
+
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/testbed/gradio-app__gradio/.github/PULL_REQUEST_TEMPLATE.md b/testbed/gradio-app__gradio/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000000000000000000000000000000000000..593027bdc8b84046bad647f14b18e3c7c385f9d8
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,18 @@
+## Description
+
+Please include a concise summary, in clear English, of the changes in this pull request. If it closes an issue, please mention it here.
+
+Closes: #(issue)
+
+## 🎯 PRs Should Target Issues
+
+Before your create a PR, please check to see if there is [an existing issue](https://github.com/gradio-app/gradio/issues) for this change. If not, please create an issue before you create this PR, unless the fix is very small.
+
+Not adhering to this guideline will result in the PR being closed.
+
+## Tests
+
+1. PRs will only be merged if tests pass on CI. To run the tests locally, please set up [your Gradio environment locally](https://github.com/gradio-app/gradio/blob/main/CONTRIBUTING.md) and run the tests: `bash scripts/run_all_tests.sh`
+
+2. You may need to run the linters: `bash scripts/format_backend.sh` and `bash scripts/format_frontend.sh`
+
diff --git a/testbed/gradio-app__gradio/.github/actions/install-all-deps/action.yml b/testbed/gradio-app__gradio/.github/actions/install-all-deps/action.yml
new file mode 100644
index 0000000000000000000000000000000000000000..87755a5066d5836dcc3e177358f73e161b0e84a5
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/actions/install-all-deps/action.yml
@@ -0,0 +1,60 @@
+name: 'install all deps'
+description: 'Install all deps'
+
+inputs:
+ always-install-pnpm:
+ description: 'Dictates whether or not we should install pnpm & dependencies, regardless of the cache'
+ default: 'false'
+ node_auth_token:
+ description: 'Node auth token'
+ default: ""
+ npm_token:
+ description: 'npm token'
+ default: ""
+ skip_build:
+ description: 'Skip build'
+ default: 'false'
+
+runs:
+ using: "composite"
+ steps:
+ - name: Install Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.8
+ cache: pip
+ cache-dependency-path: |
+ client/python/requirements.txt
+ requirements.txt
+ test/requirements.txt
+ - name: Create env
+ shell: bash
+ run: |
+ python -m pip install --upgrade virtualenv
+ python -m virtualenv venv
+ - uses: actions/cache@v3
+ id: cache
+ with:
+ path: |
+ venv/*
+ key: gradio-lib-ubuntu-latest-pip-${{ hashFiles('client/python/requirements.txt') }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('test/requirements.txt') }}
+ - name: Install Gradio and Client Libraries Locally (Linux)
+ shell: bash
+ run: |
+ . venv/bin/activate
+ python -m pip install -e client/python
+ python -m pip install -e .
+ - name: Install ffmpeg
+ uses: FedericoCarboni/setup-ffmpeg@v2
+ - name: install-frontend
+ uses: "./.github/actions/install-frontend-deps"
+ with:
+ always-install-pnpm: ${{ inputs.always-install-pnpm }}
+ node_auth_token: ${{ inputs.node_auth_token }}
+ npm_token: ${{ inputs.npm_token }}
+ skip_build: ${{ inputs.skip_build }}
+ - name: generate json
+ shell: bash
+ run: |
+ . venv/bin/activate
+ python js/_website/generate_jsons/generate.py
diff --git a/testbed/gradio-app__gradio/.github/actions/install-frontend-deps/action.yml b/testbed/gradio-app__gradio/.github/actions/install-frontend-deps/action.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e2d3b6441639f4b026267456d5c0c64c78f16939
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/actions/install-frontend-deps/action.yml
@@ -0,0 +1,51 @@
+name: 'install frontend'
+description: 'Install frontend deps'
+
+inputs:
+ always-install-pnpm:
+ description: 'Dictates whether or not we should install pnpm & dependencies, regardless of the cache'
+ default: 'false'
+ node_auth_token:
+ description: 'Node auth token'
+ default: ""
+ npm_token:
+ description: 'npm token'
+ default: ""
+ skip_build:
+ description: 'Skip build'
+ default: 'false'
+
+runs:
+ using: "composite"
+ steps:
+ - uses: actions/cache@v3
+ id: frontend-cache
+ with:
+ path: |
+ gradio/templates/*
+ key: gradio-lib-front-end-${{ hashFiles('js/**', 'client/js/**')}}
+ - name: Install pnpm
+ if: steps.frontend-cache.outputs.cache-hit != 'true' || inputs.always-install-pnpm == 'true'
+ uses: pnpm/action-setup@v2
+ with:
+ version: 8
+ - uses: actions/setup-node@v3
+ with:
+ node-version: 18
+ cache: pnpm
+ cache-dependency-path: pnpm-lock.yaml
+ env:
+ NODE_AUTH_TOKEN: ${{ inputs.always-install-pnpm }}
+ NPM_TOKEN: ${{ inputs.always-install-pnpm }}
+ - name: Install deps
+ if: steps.frontend-cache.outputs.cache-hit != 'true' || inputs.always-install-pnpm == 'true'
+ shell: bash
+ run: pnpm i --frozen-lockfile --ignore-scripts
+ - name: Build Css
+ if: inputs.always-install-pnpm == 'true'
+ shell: bash
+ run: pnpm css
+ - name: Build frontend
+ if: inputs.skip_build == 'false' && steps.frontend-cache.outputs.cache-hit != 'true'
+ shell: bash
+ run: pnpm build
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/.github/stale b/testbed/gradio-app__gradio/.github/stale
new file mode 100644
index 0000000000000000000000000000000000000000..9d23fb5a9d8ba617e7d77a55f8ae5bfc1e46f8fc
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/stale
@@ -0,0 +1,17 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 30
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 7
+# Issues with these labels will never be considered stale
+exemptLabels:
+ - pinned
+ - security
+# Label to use when marking an issue as stale
+staleLabel: wontfix
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: false
diff --git a/testbed/gradio-app__gradio/.github/workflows/backend.yml b/testbed/gradio-app__gradio/.github/workflows/backend.yml
new file mode 100644
index 0000000000000000000000000000000000000000..90b0f796e2cec3d9e4230686bd24bb57083dc15f
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/backend.yml
@@ -0,0 +1,233 @@
+name: gradio-backend
+
+on:
+ push:
+ branches:
+ - "main"
+ pull_request:
+
+concurrency:
+ group: backend-${{ github.ref }}-${{ github.event_name == 'push' || github.event.inputs.fire != null }}
+ cancel-in-progress: true
+
+env:
+ NODE_OPTIONS: "--max-old-space-size=4096"
+
+jobs:
+ changes:
+ runs-on: ubuntu-latest
+ outputs:
+ python-client: ${{ steps.changes.outputs.python-client }}
+ gradio: ${{ steps.changes.outputs.gradio }}
+ test: ${{ steps.changes.outputs.test }}
+ workflows: ${{ steps.changes.outputs.workflows }}
+ scripts: ${{ steps.changes.outputs.scripts }}
+ client-scripts: ${{ steps.changes.outputs.client-scripts }}
+ steps:
+ - uses: actions/checkout@v3
+ - uses: dorny/paths-filter@v2
+ id: changes
+ with:
+ filters: |
+ python-client:
+ - 'client/python/**'
+ gradio:
+ - 'gradio/**'
+ - 'requirements.txt'
+ test:
+ - 'test/**'
+ workflows:
+ - '.github/**'
+ scripts:
+ - 'scripts/**'
+ client-test:
+ needs: [changes]
+ if: needs.changes.outputs.python-client == 'true' || needs.changes.outputs.workflows == 'true'
+ strategy:
+ matrix:
+ os: ["ubuntu-latest", "windows-latest"]
+ test-type: ["not flaky", "flaky"]
+ python-version: ["3.8"]
+ runs-on: ${{ matrix.os }}
+ continue-on-error: ${{ matrix.test-type == 'flaky' }}
+ steps:
+ - uses: actions/checkout@v3
+ - name: Install Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: pip
+ cache-dependency-path: |
+ client/python/requirements.txt
+ requirements.txt
+ test/requirements.txt
+ - name: Create env
+ run: |
+ python -m pip install --upgrade virtualenv
+ python -m virtualenv venv
+ - uses: actions/cache@master
+ id: cache
+ with:
+ path: |
+ client/python/venv/*
+ key: python-client-${{ runner.os }}-pip-${{ hashFiles('client/python/requirements.txt') }}-${{ hashFiles('client/python/test/requirements.txt') }}
+ - uses: actions/cache@v3
+ id: frontend-cache
+ with:
+ path: |
+ gradio/templates/*
+ key: gradio-lib-front-end-${{ hashFiles('js/**', 'client/js/**')}}
+ - name: Install pnpm
+ if: steps.frontend-cache.outputs.cache-hit != 'true'
+ uses: pnpm/action-setup@v2
+ with:
+ version: 8
+ - uses: actions/setup-node@v3
+ if: steps.frontend-cache.outputs.cache-hit != 'true'
+ with:
+ node-version: 18
+ cache: pnpm
+ cache-dependency-path: pnpm-lock.yaml
+ - name: Build frontend
+ if: steps.frontend-cache.outputs.cache-hit != 'true'
+ run: |
+ pnpm i --frozen-lockfile --ignore-scripts
+ pnpm build
+ - name: Install Test Requirements (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ . venv/bin/activate
+ python -m pip install -r client/python/test/requirements.txt
+ - name: Install ffmpeg
+ uses: FedericoCarboni/setup-ffmpeg@v2
+ - name: Install Gradio and Client Libraries Locally (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ . venv/bin/activate
+ python -m pip install -e client/python
+ python -m pip install -e .
+ - name: Lint (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ . venv/bin/activate
+ bash client/python/scripts/lint.sh
+ - name: Tests (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ . venv/bin/activate
+ python -m pytest -m "${{ matrix.test-type }}" client/python/
+ - name: Install Test Requirements (Windows)
+ if: runner.os == 'Windows'
+ run: |
+ venv\Scripts\activate
+ pip install -r client/python/test/requirements.txt
+ - name: Install Gradio and Client Libraries Locally (Windows)
+ if: runner.os == 'Windows'
+ run: |
+ venv\Scripts\activate
+ python -m pip install -e client/python
+ python -m pip install -e .
+ - name: Tests (Windows)
+ if: runner.os == 'Windows'
+ run: |
+ venv\Scripts\activate
+ python -m pytest -m "${{ matrix.test-type }}" client/python/
+ test:
+ needs: [changes]
+ if: needs.changes.outputs.gradio == 'true' || needs.changes.outputs.workflows == 'true' || needs.changes.outputs.scripts == 'true' || needs.changes.outputs.test == 'true'
+ strategy:
+ matrix:
+ os: ["ubuntu-latest", "windows-latest"]
+ test-type: ["not flaky", "flaky"]
+ python-version: ["3.8"]
+ runs-on: ${{ matrix.os }}
+ continue-on-error: ${{ matrix.test-type == 'flaky' }}
+ steps:
+ - uses: actions/checkout@v3
+ - name: Install Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: pip
+ cache-dependency-path: |
+ client/python/requirements.txt
+ requirements.txt
+ test/requirements.txt
+ - name: Create env
+ run: |
+ python -m pip install --upgrade virtualenv
+ python -m virtualenv venv
+ - uses: actions/cache@v3
+ id: cache
+ with:
+ path: |
+ venv/*
+ key: gradio-lib-${{ runner.os }}-pip-${{ hashFiles('client/python/requirements.txt') }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('test/requirements.txt') }}
+ - uses: actions/cache@v3
+ id: frontend-cache
+ with:
+ path: |
+ gradio/templates/*
+ key: gradio-lib-front-end-${{ hashFiles('js/**', 'client/js/**')}}
+ - name: Install pnpm
+ if: steps.frontend-cache.outputs.cache-hit != 'true'
+ uses: pnpm/action-setup@v2
+ with:
+ version: 8
+ - uses: actions/setup-node@v3
+ if: steps.frontend-cache.outputs.cache-hit != 'true'
+ with:
+ node-version: 18
+ cache: pnpm
+ cache-dependency-path: pnpm-lock.yaml
+ - name: Build frontend
+ if: steps.frontend-cache.outputs.cache-hit != 'true'
+ run: |
+ pnpm i --frozen-lockfile --ignore-scripts
+ pnpm build
+ - name: Install Gradio and Client Libraries Locally (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ . venv/bin/activate
+ python -m pip install -e client/python
+ python -m pip install .
+ - name: Install Test Dependencies (Linux)
+ if: steps.cache.outputs.cache-hit != 'true' && runner.os == 'Linux'
+ run: |
+ . venv/bin/activate
+ bash scripts/install_test_requirements.sh
+ - name: Install ffmpeg
+ uses: FedericoCarboni/setup-ffmpeg@v2
+ - name: Lint (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ . venv/bin/activate
+ bash scripts/lint_backend.sh
+ - name: Typecheck (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ . venv/bin/activate
+ bash scripts/type_check_backend.sh
+ - name: Run tests (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ . venv/bin/activate
+ python -m coverage run -m pytest -m "${{ matrix.test-type }}" --ignore=client
+ python -m coverage xml
+ - name: Install Gradio and Client Libraries Locally (Windows)
+ if: runner.os == 'Windows'
+ run: |
+ venv\Scripts\activate
+ python -m pip install -e client/python
+ python -m pip install .
+ - name: Install Test Dependencies (Windows)
+ if: steps.cache.outputs.cache-hit != 'true' && runner.os == 'Windows'
+ run: |
+ venv\Scripts\activate
+ python -m pip install -e . -r test/requirements.txt
+ - name: Run tests (Windows)
+ if: runner.os == 'Windows'
+ run: |
+ venv\Scripts\activate
+ python -m coverage run -m pytest -m "${{ matrix.test-type }}" --ignore=client
+ python -m coverage xml
diff --git a/testbed/gradio-app__gradio/.github/workflows/build-pr.yml b/testbed/gradio-app__gradio/.github/workflows/build-pr.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d048dd52989c0e8d5393b69270dc27da570cab03
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/build-pr.yml
@@ -0,0 +1,76 @@
+name: Build PR Artifacts
+
+on:
+ workflow_dispatch:
+ pull_request:
+ branches:
+ - main
+
+jobs:
+ comment-spaces-start:
+ uses: "./.github/workflows/comment-queue.yml"
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ github.event.pull_request.number }}
+ message: spaces~pending~null
+ build_pr:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Install Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.9'
+ - name: Install pnpm
+ uses: pnpm/action-setup@v2
+ with:
+ version: 8
+ - uses: actions/setup-node@v3
+ with:
+ node-version: 18
+ cache: pnpm
+ cache-dependency-path: pnpm-lock.yaml
+ - name: Install pip
+ run: python -m pip install build requests
+ - name: Get PR Number
+ id: get_pr_number
+ run: |
+ if ${{ github.event_name == 'pull_request' }}; then
+ echo "GRADIO_VERSION=$(python -c 'import requests;print(requests.get("https://pypi.org/pypi/gradio/json").json()["info"]["version"])')" >> $GITHUB_OUTPUT
+ python -c "import os;print(os.environ['GITHUB_REF'].split('/')[2])" > pr_number.txt
+ echo "PR_NUMBER=$(cat pr_number.txt)" >> $GITHUB_OUTPUT
+ else
+ echo "GRADIO_VERSION=$(python -c 'import json; print(json.load(open("gradio/package.json"))["version"])')" >> $GITHUB_OUTPUT
+ echo "PR_NUMBER='main'" >> $GITHUB_OUTPUT
+ fi
+ - name: Build pr package
+ run: |
+ python -c 'import json; j = json.load(open("gradio/package.json")); j["version"] = "${{ steps.get_pr_number.outputs.GRADIO_VERSION }}"; json.dump(j, open("gradio/package.json", "w"))'
+ pnpm i --frozen-lockfile --ignore-scripts
+ pnpm build
+ python3 -m build -w
+ env:
+ NODE_OPTIONS: --max_old_space_size=8192
+ - name: Upload wheel
+ uses: actions/upload-artifact@v3
+ with:
+ name: gradio-${{ steps.get_pr_number.outputs.GRADIO_VERSION }}-py3-none-any.whl
+ path: dist/gradio-${{ steps.get_pr_number.outputs.GRADIO_VERSION }}-py3-none-any.whl
+ - name: Set up Demos
+ run: |
+ python scripts/copy_demos.py https://gradio-builds.s3.amazonaws.com/${{ github.sha }}/gradio-${{ steps.get_pr_number.outputs.GRADIO_VERSION }}-py3-none-any.whl \
+ "gradio-client @ git+https://github.com/gradio-app/gradio@${{ github.sha }}#subdirectory=client/python"
+ - name: Upload all_demos
+ uses: actions/upload-artifact@v3
+ with:
+ name: all_demos
+ path: demo/all_demos
+ - name: Create metadata artifact
+ run: |
+ python -c "import json; json.dump({'gh_sha': '${{ github.sha }}', 'pr_number': ${{ steps.get_pr_number.outputs.pr_number }}, 'version': '${{ steps.get_pr_number.outputs.GRADIO_VERSION }}', 'wheel': 'gradio-${{ steps.get_pr_number.outputs.GRADIO_VERSION }}-py3-none-any.whl'}, open('metadata.json', 'w'))"
+ - name: Upload metadata
+ uses: actions/upload-artifact@v3
+ with:
+ name: metadata.json
+ path: metadata.json
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/.github/workflows/check-demo-notebooks.yml b/testbed/gradio-app__gradio/.github/workflows/check-demo-notebooks.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2a4642646e53e0133c2afe020e1b78a04e98b0b3
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/check-demo-notebooks.yml
@@ -0,0 +1,50 @@
+# This workflow will check if the run.py files in every demo match the run.ipynb notebooks.
+
+name: Check Demos Match Notebooks
+
+on:
+ pull_request:
+ types: [opened, synchronize, reopened]
+ paths:
+ - 'demo/**'
+
+jobs:
+ comment-notebook-start:
+ uses: "./.github/workflows/comment-queue.yml"
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ github.event.pull_request.number }}
+ message: notebooks~pending~null
+ check-notebooks:
+ name: Generate Notebooks and Check
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ ref: ${{ github.event.pull_request.head.ref }}
+ repository: ${{ github.event.pull_request.head.repo.full_name }}
+ - name: Generate Notebooks
+ run: |
+ pip install nbformat && cd demo && python generate_notebooks.py
+ - name: Print Git Status
+ run: echo $(git status) && echo $(git diff)
+ - name: Assert Notebooks Match
+ id: assertNotebooksMatch
+ run: git status | grep "nothing to commit, working tree clean"
+ - name: Get PR Number
+ if: always()
+ run: |
+ python -c "import os;print(os.environ['GITHUB_REF'].split('/')[2])" > pr_number.txt
+ echo "PR_NUMBER=$(cat pr_number.txt)" >> $GITHUB_ENV
+ - name: Upload PR Number
+ if: always()
+ run: |
+ python -c "import json; json.dump({'pr_number': ${{ env.PR_NUMBER }}}, open('metadata.json', 'w'))"
+ - name: Upload metadata
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: metadata.json
+ path: metadata.json
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/.github/workflows/comment-queue.yml b/testbed/gradio-app__gradio/.github/workflows/comment-queue.yml
new file mode 100644
index 0000000000000000000000000000000000000000..54d8f788224a0bf9419981bf70f4cf73695aee4c
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/comment-queue.yml
@@ -0,0 +1,36 @@
+name: Comment on pull request without race conditions
+
+on:
+ workflow_call:
+ inputs:
+ pr_number:
+ type: string
+ message:
+ required: true
+ type: string
+ tag:
+ required: false
+ type: string
+ default: "previews"
+ additional_text:
+ required: false
+ type: string
+ default: ""
+ secrets:
+ gh_token:
+ required: true
+
+jobs:
+ comment:
+ concurrency:
+ group: ${{inputs.pr_number || inputs.tag}}
+ runs-on: ubuntu-latest
+ steps:
+ - name: comment on pr
+ uses: "gradio-app/github/actions/comment-pr@main"
+ with:
+ gh_token: ${{ secrets.gh_token }}
+ tag: ${{ inputs.tag }}
+ pr_number: ${{ inputs.pr_number}}
+ message: ${{ inputs.message }}
+ additional_text: ${{ inputs.additional_text }}
diff --git a/testbed/gradio-app__gradio/.github/workflows/delete-stale-spaces.yml b/testbed/gradio-app__gradio/.github/workflows/delete-stale-spaces.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f767ef96b81ebbf1e8270ed8c5919bf2df0f8094
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/delete-stale-spaces.yml
@@ -0,0 +1,35 @@
+name: Delete Stale Spaces
+
+on:
+ schedule:
+ - cron: '0 0 * * *'
+ workflow_dispatch:
+ inputs:
+ daysStale:
+ description: 'How stale a space needs to be to be deleted (days)'
+ required: true
+ default: '7'
+
+
+jobs:
+ delete-old-spaces:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Install Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.9'
+ - name: Install pip
+ run: python -m pip install pip wheel requests
+ - name: Install Hub Client Library
+ run: pip install huggingface-hub==0.9.1
+ - name: Set daysStale
+ env:
+ DEFAULT_DAYS_STALE: '7'
+ run: echo "DAYS_STALE=${{ github.event.inputs.daysStale || env.DEFAULT_DAYS_STALE }}" >> $GITHUB_ENV
+ - name: Find and delete stale spaces
+ run: |
+ python scripts/delete_old_spaces.py $DAYS_STALE \
+ gradio-pr-deploys \
+ ${{ secrets.SPACES_DEPLOY_TOKEN }}
diff --git a/testbed/gradio-app__gradio/.github/workflows/deploy-chromatic.yml b/testbed/gradio-app__gradio/.github/workflows/deploy-chromatic.yml
new file mode 100644
index 0000000000000000000000000000000000000000..53d9d61ca38323963ddd32611b92f63fcbf5b724
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/deploy-chromatic.yml
@@ -0,0 +1,83 @@
+name: 'deploy to chromatic'
+
+on:
+ push:
+ paths:
+ - 'js/**'
+ - 'gradio/themes/**'
+ - '.github/workflows/deploy-chromatic.yml'
+ - '!js/_website/**'
+
+
+jobs:
+ get-current-pr:
+ runs-on: ubuntu-latest
+ outputs:
+ pr_found: ${{ steps.get-pr.outputs.pr_found }}
+ pr_number: ${{ steps.get-pr.outputs.number }}
+ pr_labels: ${{ steps.get-pr.outputs.pr_labels }}
+ steps:
+ - uses: 8BitJonny/gh-get-current-pr@2.2.0
+ id: get-pr
+ comment-chromatic-start:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: get-current-pr
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.get-current-pr.outputs.pr_number }}
+ message: |
+ storybook~pending~null
+ visual~pending~0~0~null
+ chromatic-deployment:
+ needs: get-current-pr
+ runs-on: ubuntu-latest
+ outputs:
+ changes: ${{ steps.publish-chromatic.outputs.changeCount }}
+ errors: ${{ steps.publish-chromatic.outputs.errorCount }}
+ storybook_url: ${{ steps.publish-chromatic.outputs.storybookUrl }}
+ build_url: ${{ steps.publish-chromatic.outputs.buildUrl }}
+ if: ${{ github.repository == 'gradio-app/gradio' && !contains(needs.get-current-pr.outputs.pr_labels, 'no-visual-update') }}
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ - name: install dependencies
+ uses: "./.github/actions/install-all-deps"
+ with:
+ always-install-pnpm: true
+ skip_build: 'true'
+ - name: generate theme.css
+ run: |
+ . venv/bin/activate
+ python scripts/generate_theme.py --outfile js/storybook/theme.css
+ - name: build storybook
+ run: pnpm build-storybook --quiet
+ - name: publish to chromatic
+ id: publish-chromatic
+ uses: chromaui/action@v1
+ with:
+ projectToken: ${{ secrets.CHROMATIC_PROJECT_TOKEN }}
+ token: ${{ secrets.GITHUB_TOKEN }}
+ exitOnceUploaded: true
+ comment-chromatic-end:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: [chromatic-deployment, get-current-pr]
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.get-current-pr.outputs.pr_number }}
+ message: |
+ storybook~success~${{ needs.chromatic-deployment.outputs.storybook_url }}
+ visual~success~${{ needs.chromatic-deployment.outputs.changes }}~${{ needs.chromatic-deployment.outputs.errors }}~${{ needs.chromatic-deployment.outputs.build_url }}
+ comment-chromatic-fail:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: [chromatic-deployment, get-current-pr]
+ if: always() && needs.chromatic-deployment.result == 'failure'
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.get-current-pr.outputs.pr_number }}
+ message: |
+ storybook~failure~https://github.com/gradio-app/gradio/actions/runs/${{github.run_id}}/
+ visual~failure~0~0~https://github.com/gradio-app/gradio/actions/runs/${{github.run_id}}/
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/.github/workflows/deploy-pr-to-spaces.yml b/testbed/gradio-app__gradio/.github/workflows/deploy-pr-to-spaces.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ccea12f12d09cfc0fd94a65454e6a158801f0a61
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/deploy-pr-to-spaces.yml
@@ -0,0 +1,101 @@
+name: Deploy PR to Spaces
+
+on:
+ workflow_run:
+ workflows: [Build PR Artifacts]
+ types:
+ - completed
+
+jobs:
+ deploy-current-pr:
+ outputs:
+ pr_number: ${{ steps.set-outputs.outputs.pr_number }}
+ space_url: ${{ steps.upload-demo.outputs.SPACE_URL }}
+ sha: ${{ steps.set-outputs.outputs.gh_sha }}
+ gradio_version: ${{ steps.set-outputs.outputs.gradio_version }}
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Install Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.9'
+ - name: Install pip
+ run: python -m pip install build requests
+ - name: Download metadata
+ run: python scripts/download_artifacts.py ${{github.event.workflow_run.id }} metadata.json ${{ secrets.COMMENT_TOKEN }} --owner ${{ github.repository_owner }}
+ - run: unzip metadata.json.zip
+ - name: set outputs
+ id: set-outputs
+ run: |
+ echo "wheel_name=$(python -c 'import json; print(json.load(open("metadata.json"))["wheel"])')" >> $GITHUB_OUTPUT
+ echo "gh_sha=$(python -c 'import json; print(json.load(open("metadata.json"))["gh_sha"])')" >> $GITHUB_OUTPUT
+ echo "gradio_version=$(python -c 'import json; print(json.load(open("metadata.json"))["version"])')" >> $GITHUB_OUTPUT
+ echo "pr_number=$(python -c 'import json; print(json.load(open("metadata.json"))["pr_number"])')" >> $GITHUB_OUTPUT
+ - name: 'Download wheel'
+ run: python scripts/download_artifacts.py ${{ github.event.workflow_run.id }} ${{ steps.set-outputs.outputs.wheel_name }} ${{ secrets.COMMENT_TOKEN }} --owner ${{ github.repository_owner }}
+ - run: unzip ${{ steps.set-outputs.outputs.wheel_name }}.zip
+ - name: Upload wheel
+ run: |
+ export AWS_ACCESS_KEY_ID=${{ secrets.PR_DEPLOY_KEY }}
+ export AWS_SECRET_ACCESS_KEY=${{ secrets.PR_DEPLOY_SECRET }}
+ export AWS_DEFAULT_REGION=us-east-1
+ aws s3 cp ${{ steps.set-outputs.outputs.wheel_name }} s3://gradio-builds/${{ steps.set-outputs.outputs.gh_sha }}/
+ - name: Install Hub Client Library
+ run: pip install huggingface-hub
+ - name: 'Download all_demos'
+ run: python scripts/download_artifacts.py ${{ github.event.workflow_run.id }} all_demos ${{ secrets.COMMENT_TOKEN }} --owner ${{ github.repository_owner }}
+ - run: unzip all_demos.zip -d all_demos
+ - run: cp -R all_demos/* demo/all_demos
+ - name: Upload demo to spaces
+ if: >
+ github.event.workflow_run.event == 'pull_request' &&
+ github.event.workflow_run.conclusion == 'success'
+ id: upload-demo
+ run: |
+ python scripts/upload_demo_to_space.py all_demos \
+ gradio-pr-deploys/pr-${{ steps.set-outputs.outputs.pr_number }}-all-demos \
+ ${{ secrets.SPACES_DEPLOY_TOKEN }} \
+ --gradio-version ${{ steps.set-outputs.outputs.gradio_version }} > url.txt
+ echo "SPACE_URL=$(cat url.txt)" >> $GITHUB_OUTPUT
+ - name: Upload Website Demos
+ if: >
+ github.event.workflow_run.event == 'workflow_dispatch' &&
+ github.event.workflow_run.conclusion == 'success'
+ id: upload-website-demos
+ run: |
+ python scripts/upload_website_demos.py --AUTH_TOKEN ${{ secrets.SPACES_DEPLOY_TOKEN }} \
+ --WHEEL_URL https://gradio-builds.s3.amazonaws.com/${{ steps.set-outputs.outputs.gh_sha }}/ \
+ --GRADIO_VERSION ${{ steps.set-outputs.outputs.gradio_version }}
+
+ comment-spaces-success:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: [deploy-current-pr]
+ if: >
+ github.event.workflow_run.event == 'pull_request' &&
+ github.event.workflow_run.conclusion == 'success' &&
+ needs.deploy-current-pr.result == 'success'
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.deploy-current-pr.outputs.pr_number }}
+ message: spaces~success~${{ needs.deploy-current-pr.outputs.space_url }}
+ additional_text: |
+ **Install Gradio from this PR**
+ ```bash
+ pip install https://gradio-builds.s3.amazonaws.com/${{ needs.deploy-current-pr.outputs.sha }}/gradio-${{ needs.deploy-current-pr.outputs.gradio_version }}-py3-none-any.whl
+ ```
+
+ **Install Gradio Python Client from this PR**
+ ```bash
+ pip install "gradio-client @ git+https://github.com/gradio-app/gradio@${{ needs.deploy-current-pr.outputs.sha }}#subdirectory=client/python"
+ ```
+ comment-spaces-failure:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: [deploy-current-pr]
+ if: always() && needs.deploy-current-pr.result == 'failure'
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.deploy-current-pr.outputs.pr_number }}
+ message: spaces~failure~https://github.com/gradio-app/gradio/actions/runs/${{github.run_id}}/
diff --git a/testbed/gradio-app__gradio/.github/workflows/deploy-website.yml b/testbed/gradio-app__gradio/.github/workflows/deploy-website.yml
new file mode 100644
index 0000000000000000000000000000000000000000..687e134e7cefac2a5bff9ef4181f782294bc55bc
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/deploy-website.yml
@@ -0,0 +1,108 @@
+name: "deploy website"
+
+on:
+ workflow_call:
+ inputs:
+ branch_name:
+ description: "The branch name"
+ type: string
+ pr_number:
+ description: "The PR number"
+ type: string
+ secrets:
+ vercel_token:
+ description: "Vercel API token"
+ gh_token:
+ description: "Github token"
+ required: true
+ vercel_org_id:
+ description: "Vercel organization ID"
+ required: true
+ vercel_project_id:
+ description: "Vercel project ID"
+ required: true
+
+env:
+ VERCEL_ORG_ID: ${{ secrets.vercel_org_id }}
+ VERCEL_PROJECT_ID: ${{ secrets.vercel_project_id }}
+
+jobs:
+ comment-deploy-start:
+ uses: "./.github/workflows/comment-queue.yml"
+ secrets:
+ gh_token: ${{ secrets.gh_token }}
+ with:
+ pr_number: ${{ inputs.pr_number }}
+ message: website~pending~null
+ deploy:
+ name: "Deploy website"
+ runs-on: ubuntu-latest
+ outputs:
+ vercel_url: ${{ steps.output_url.outputs.vercel_url }}
+ steps:
+ - uses: actions/checkout@v3
+ - name: install dependencies
+ uses: "./.github/actions/install-frontend-deps"
+ with:
+ always-install-pnpm: true
+ skip_build: true
+ - name: download artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: website-json-${{ inputs.pr_number }}
+ path: |
+ ./js/_website/src/lib/json
+ - name: echo artifact path
+ shell: bash
+ run: ls ./js/_website/src/lib/json
+ - name: Install Vercel CLI
+ shell: bash
+ run: pnpm install --global vercel@latest
+ # preview
+ - name: Pull Vercel Environment Information
+ shell: bash
+ if: github.event_name == 'pull_request'
+ run: vercel pull --yes --environment=preview --token=${{ secrets.vercel_token }}
+ - name: Build Project Artifacts
+ if: github.event_name == 'pull_request'
+ shell: bash
+ run: vercel build --token=${{ secrets.vercel_token }}
+ - name: Deploy Project Artifacts to Vercel
+ if: github.event_name == 'pull_request'
+ id: output_url
+ shell: bash
+ run: echo "vercel_url=$(vercel deploy --prebuilt --token=${{ secrets.vercel_token }})" >> $GITHUB_OUTPUT
+ # production
+ - name: Pull Vercel Environment Information
+ if: github.event_name == 'push' && inputs.branch_name == 'main'
+ shell: bash
+ run: vercel pull --yes --environment=production --token=${{ secrets.vercel_token }}
+ - name: Build Project Artifacts
+ if: github.event_name == 'push' && inputs.branch_name == 'main'
+ shell: bash
+ run: vercel build --prod --token=${{ secrets.vercel_token }}
+ - name: Deploy Project Artifacts to Vercel
+ if: github.event_name == 'push' && inputs.branch_name == 'main'
+ shell: bash
+ run: echo "VERCEL_URL=$(vercel deploy --prebuilt --prod --token=${{ secrets.vercel_token }})" >> $GITHUB_ENV
+ - name: echo vercel url
+ shell: bash
+ run: echo $VERCEL_URL #add to comment
+ comment-deploy-success:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: deploy
+ if: needs.deploy.result == 'success'
+ secrets:
+ gh_token: ${{ secrets.gh_token }}
+ with:
+ pr_number: ${{ inputs.pr_number }}
+ message: website~success~${{needs.deploy.outputs.vercel_url}}
+ comment-deploy-failure:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: deploy
+ if: always() && needs.deploy.result == 'failure'
+ secrets:
+ gh_token: ${{ secrets.gh_token }}
+ with:
+ pr_number: ${{ inputs.pr_number }}
+ message: website~failure~https://github.com/gradio-app/gradio/actions/runs/${{github.run_id}}/
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/.github/workflows/generate-changeset.yml b/testbed/gradio-app__gradio/.github/workflows/generate-changeset.yml
new file mode 100644
index 0000000000000000000000000000000000000000..617c5655824d185be03eb5201e818c8d16e3886d
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/generate-changeset.yml
@@ -0,0 +1,90 @@
+name: Generate changeset
+on:
+ workflow_run:
+ workflows: ["trigger changeset generation"]
+ types:
+ - completed
+
+env:
+ CI: true
+ NODE_OPTIONS: "--max-old-space-size=4096"
+
+concurrency:
+ group: ${{ github.event.workflow_run.head_repository.full_name }}::${{ github.event.workflow_run.head_branch }}
+
+jobs:
+ get-pr:
+ runs-on: ubuntu-latest
+ if: github.event.workflow_run.conclusion == 'success'
+ outputs:
+ found_pr: ${{ steps.pr_details.outputs.found_pr }}
+ pr_number: ${{ steps.pr_details.outputs.pr_number }}
+ source_repo: ${{ steps.pr_details.outputs.source_repo }}
+ source_branch: ${{ steps.pr_details.outputs.source_branch }}
+ steps:
+ - name: echo concurrency group
+ run: echo ${{ github.event.workflow_run.head_repository.full_name }}::${{ github.event.workflow_run.head_branch }}
+ - name: get pr details
+ id: pr_details
+ uses: gradio-app/github/actions/find-pr@main
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ comment-changes-start:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: get-pr
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.get-pr.outputs.pr_number }}
+ message: changes~pending~null
+ version:
+ permissions: write-all
+ name: static checks
+ needs: get-pr
+ runs-on: ubuntu-22.04
+ if: needs.get-pr.outputs.found_pr == 'true'
+ outputs:
+ skipped: ${{ steps.version.outputs.skipped }}
+ comment_url: ${{ steps.version.outputs.comment_url }}
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ repository: ${{ needs.get-pr.outputs.source_repo }}
+ ref: ${{ needs.get-pr.outputs.source_branch }}
+ fetch-depth: 0
+ token: ${{ secrets.COMMENT_TOKEN }}
+ - name: generate changeset
+ id: version
+ uses: "gradio-app/github/actions/generate-changeset@main"
+ with:
+ github_token: ${{ secrets.COMMENT_TOKEN }}
+ main_pkg: gradio
+ pr_number: ${{ needs.get-pr.outputs.pr_number }}
+ branch_name: ${{ needs.get-pr.outputs.source_branch }}
+ comment-changes-skipped:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: [get-pr, version]
+ if: needs.version.result == 'success' && needs.version.outputs.skipped == 'true'
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.get-pr.outputs.pr_number }}
+ message: changes~warning~https://github.com/gradio-app/gradio/actions/runs/${{github.run_id}}/
+ comment-changes-success:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: [get-pr, version]
+ if: needs.version.result == 'success' && needs.version.outputs.skipped == 'false'
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.get-pr.outputs.pr_number }}
+ message: changes~success~${{ needs.version.outputs.comment_url }}
+ comment-changes-failure:
+ uses: "./.github/workflows/comment-queue.yml"
+ needs: [get-pr, version]
+ if: always() && needs.version.result == 'failure'
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.get-pr.outputs.pr_number }}
+ message: changes~failure~https://github.com/gradio-app/gradio/actions/runs/${{github.run_id}}/
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/.github/workflows/large-files.yml b/testbed/gradio-app__gradio/.github/workflows/large-files.yml
new file mode 100644
index 0000000000000000000000000000000000000000..50c6fb74b6d7d7638c41ba8da6c38070f2d68594
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/large-files.yml
@@ -0,0 +1,21 @@
+name: Check for large files
+
+on:
+ pull_request:
+
+jobs:
+ check-files:
+ runs-on: ubuntu-latest
+ if: github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ ref: ${{ github.event.pull_request.head.ref }}
+ repository: ${{ github.event.pull_request.head.repo.full_name }}
+ - name: Check for large files
+ uses: actionsdesk/lfs-warning@v3.2
+ with:
+ filesizelimit: 5MB
+
diff --git a/testbed/gradio-app__gradio/.github/workflows/publish-npm.yml b/testbed/gradio-app__gradio/.github/workflows/publish-npm.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ae8c75a86ad140d784a2eb82421f7d4e2de3df0c
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/publish-npm.yml
@@ -0,0 +1,61 @@
+name: Changesets
+on:
+ push:
+ branches:
+ - main
+ - v4
+
+env:
+ CI: true
+ PNPM_CACHE_FOLDER: .pnpm-store
+ NODE_OPTIONS: "--max-old-space-size=4096"
+jobs:
+ version_or_publish:
+ runs-on: ubuntu-22.04
+ steps:
+ - name: checkout repo
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ persist-credentials: false
+ - name: install dependencies
+ uses: "./.github/actions/install-all-deps"
+ with:
+ always-install-pnpm: true
+ node_auth_token: ${{ secrets.NPM_TOKEN }}
+ npm_token: ${{ secrets.NPM_TOKEN }}
+ skip_build: 'true'
+ - name: Build packages
+ run: |
+ . venv/bin/activate
+ pip install build
+ pnpm css
+ pnpm --filter @gradio/client --filter @gradio/lite build
+ - name: create and publish versions
+ id: changesets
+ uses: changesets/action@v1
+ with:
+ version: pnpm ci:version
+ commit: "chore: update versions"
+ title: "chore: update versions"
+ publish: pnpm ci:publish
+ env:
+ NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
+ NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
+ GITHUB_TOKEN: ${{ secrets.GRADIO_PAT }}
+ - name: publish to pypi
+ if: steps.changesets.outputs.hasChangesets != 'true'
+ uses: "gradio-app/github/actions/publish-pypi@main"
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWSACCESSKEYID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWSSECRETKEY }}
+ AWS_DEFAULT_REGION: us-west-2
+ with:
+ user: __token__
+ passwords: |
+ gradio:${{ secrets.PYPI_API_TOKEN }}
+ gradio_client:${{ secrets.PYPI_GRADIO_CLIENT_TOKEN }}
+ - name: trigger spaces deploy workflow
+ env:
+ GITHUB_TOKEN: ${{ secrets.COMMENT_TOKEN }}
+ run: gh workflow run build-pr.yml
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/.github/workflows/report-notebook-status-pr.yml b/testbed/gradio-app__gradio/.github/workflows/report-notebook-status-pr.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0dfbd8b76dceb0ec9f58f36b880c05a595be7a7a
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/report-notebook-status-pr.yml
@@ -0,0 +1,47 @@
+on:
+ workflow_run:
+ workflows: [Check Demos Match Notebooks]
+ types: [completed]
+
+jobs:
+ get-pr-number:
+ runs-on: ubuntu-latest
+ outputs:
+ pr_number: ${{ steps.pr_number.outputs.pr_number }}
+ steps:
+ - uses: actions/checkout@v3
+ - name: Install Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.9'
+ - name: Install pip
+ run: python -m pip install requests
+ - name: Download metadata
+ run: python scripts/download_artifacts.py ${{github.event.workflow_run.id }} metadata.json ${{ secrets.COMMENT_TOKEN }} --owner ${{ github.repository_owner }}
+ - run: unzip metadata.json.zip
+ - name: Pipe metadata to env
+ id: pr_number
+ run: echo "pr_number=$(python -c 'import json; print(json.load(open("metadata.json"))["pr_number"])')" >> $GITHUB_OUTPUT
+ comment-success:
+ uses: "./.github/workflows/comment-queue.yml"
+ if: ${{ github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.name == 'Check Demos Match Notebooks'}}
+ needs: get-pr-number
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.get-pr-number.outputs.pr_number }}
+ message: notebooks~success~null
+ comment-failure:
+ uses: "./.github/workflows/comment-queue.yml"
+ if: ${{ github.event.workflow_run.conclusion == 'failure' && github.event.workflow_run.name == 'Check Demos Match Notebooks'}}
+ needs: get-pr-number
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ with:
+ pr_number: ${{ needs.get-pr-number.outputs.pr_number }}
+ message: notebooks~failure~https://github.com/gradio-app/gradio/actions/runs/${{github.run_id}}/
+ additional_text: |
+ The demo notebooks don't match the run.py files. Please run this command from the root of the repo and then commit the changes:
+ ```bash
+ pip install nbformat && cd demo && python generate_notebooks.py
+ ```
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/.github/workflows/trigger-changeset.yml b/testbed/gradio-app__gradio/.github/workflows/trigger-changeset.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3084be25a4ed044033059dbfcfd73320111b41a7
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/trigger-changeset.yml
@@ -0,0 +1,20 @@
+name: trigger changeset generation
+on:
+ pull_request:
+ types: [opened, synchronize, reopened, edited, labeled, unlabeled]
+ branches:
+ - main
+ - v4
+ issue_comment:
+ types: [edited]
+
+jobs:
+ version:
+ permissions: write-all
+ name: static checks
+ runs-on: ubuntu-22.04
+ if: github.event.sender.login != 'gradio-pr-bot'
+ steps:
+ - run: echo ${{ github.event_name }}
+ - run: echo ${{ github.event.sender.login }}
+ - run: echo "Triggering changeset generation"
diff --git a/testbed/gradio-app__gradio/.github/workflows/ui.yml b/testbed/gradio-app__gradio/.github/workflows/ui.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cdd3c8ed8d08912f3e022b149e1e62b54fa63709
--- /dev/null
+++ b/testbed/gradio-app__gradio/.github/workflows/ui.yml
@@ -0,0 +1,101 @@
+name: gradio-ui
+
+on:
+ push:
+ branches:
+ - "main"
+ pull_request:
+
+env:
+ CI: true
+ PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "1"
+ NODE_OPTIONS: "--max-old-space-size=4096"
+ VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }}
+ VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }}
+concurrency:
+ group: deploy-${{ github.ref }}-${{ github.event_name == 'push' || github.event.inputs.fire != null }}
+ cancel-in-progress: true
+
+jobs:
+ quick-checks:
+ name: static checks
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v3
+ - name: install dependencies
+ uses: "./.github/actions/install-frontend-deps"
+ with:
+ always-install-pnpm: true
+ - name: build client
+ run: pnpm --filter @gradio/client build
+ - name: build the wasm module
+ run: pnpm --filter @gradio/wasm build
+ - name: lint
+ run: pnpm lint
+ - name: typecheck
+ run: pnpm ts:check
+ - name: unit tests
+ run: pnpm test:run
+ functional-test:
+ runs-on: ubuntu-latest
+ outputs:
+ source_branch: ${{ steps.pr_details.outputs.source_branch }}
+ pr_number: ${{ steps.pr_details.outputs.pr_number }}
+ steps:
+ - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4
+ - name: install dependencies
+ id: install_deps
+ uses: "./.github/actions/install-all-deps"
+ with:
+ always-install-pnpm: true
+ - name: get pr details
+ id: pr_details
+ uses: gradio-app/github/actions/find-pr@main
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ - name: deploy json to aws
+ if: steps.pr_details.outputs.source_branch == 'changeset-release/main' || steps.pr_details.outputs.source_branch == 'changeset-release/v4'
+ run: |
+ export AWS_ACCESS_KEY_ID=${{ secrets.AWSACCESSKEYID }}
+ export AWS_SECRET_ACCESS_KEY=${{ secrets.AWSSECRETKEY }}
+ export AWS_DEFAULT_REGION=us-west-2
+ version=$(jq -r .version js/_website/src/lib/json/version.json)
+ aws s3 cp ./js/_website/src/lib/json/ s3://gradio-docs-json/$version/ --recursive
+ - name: install outbreak_forecast dependencies
+ run: |
+ . venv/bin/activate
+ python -m pip install -r demo/outbreak_forecast/requirements.txt
+ - run: pnpm exec playwright install chromium
+ - name: run browser tests
+ run: |
+ . venv/bin/activate
+ pnpm test:browser
+ - name: upload screenshots
+ uses: actions/upload-artifact@v3
+ if: always()
+ with:
+ name: playwright-screenshots
+ path: |
+ ./test-results
+ - name: run browser component tests
+ run: |
+ . venv/bin/activate
+ pnpm run test:ct
+ - name: save artifacts
+ uses: actions/upload-artifact@v3
+ with:
+ name: website-json-${{ steps.pr_details.outputs.pr_number }}
+ path: |
+ ./js/_website/src/lib/json
+ deploy_to_vercel:
+ uses: "./.github/workflows/deploy-website.yml"
+ needs: functional-test
+ if: always()
+ secrets:
+ gh_token: ${{ secrets.COMMENT_TOKEN }}
+ vercel_token: ${{ secrets.VERCEL_TOKEN }}
+ vercel_org_id: ${{ secrets.VERCEL_ORG_ID }}
+ vercel_project_id: ${{ secrets.VERCEL_PROJECT_ID }}
+ with:
+ branch_name: ${{ needs.functional-test.outputs.source_branch }}
+ pr_number: ${{ needs.functional-test.outputs.pr_number }}
diff --git a/testbed/gradio-app__gradio/.vscode/extensions.json b/testbed/gradio-app__gradio/.vscode/extensions.json
new file mode 100644
index 0000000000000000000000000000000000000000..f94e184c9c9c254edd18b91f54903b6c97e544d4
--- /dev/null
+++ b/testbed/gradio-app__gradio/.vscode/extensions.json
@@ -0,0 +1,9 @@
+{
+ "recommendations": [
+ "dbaeumer.vscode-eslint",
+ "phoenisx.cssvar",
+ "esbenp.prettier-vscode",
+ "svelte.svelte-vscode",
+ "charliermarsh.ruff"
+ ]
+}
diff --git a/testbed/gradio-app__gradio/.vscode/settings.json b/testbed/gradio-app__gradio/.vscode/settings.json
new file mode 100644
index 0000000000000000000000000000000000000000..3ab10f2c5c247356945b7e7d6d109157c3070d49
--- /dev/null
+++ b/testbed/gradio-app__gradio/.vscode/settings.json
@@ -0,0 +1,21 @@
+{
+ "python.formatting.provider": "none",
+ "cssvar.files": ["./js/node_modules/pollen-css/pollen.css"],
+ "cssvar.ignore": [],
+ "cssvar.disableSort": true,
+ "cssvar.extensions": ["js", "css", "html", "jsx", "tsx", "svelte"],
+ "python.analysis.extraPaths": ["./gradio/themes/utils"],
+ "svelte.plugin.svelte.format.enable": true,
+ "svelte.plugin.svelte.diagnostics.enable": false,
+ "prettier.configPath": ".config/.prettierrc.json",
+ "prettier.ignorePath": ".config/.prettierignore",
+ "python.analysis.typeCheckingMode": "basic",
+ "python.testing.pytestArgs": ["."],
+ "python.testing.unittestEnabled": false,
+ "python.testing.pytestEnabled": true,
+ "eslint.validate": ["javascript", "typescript", "html", "markdown", "svelte"],
+ "eslint.experimental.useFlatConfig": true,
+ "eslint.options": {
+ "overrideConfigFile": "./.config/eslint.config.js"
+ }
+}
diff --git a/testbed/gradio-app__gradio/demo/all_demos/run.py b/testbed/gradio-app__gradio/demo/all_demos/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bf641bd6aec3c587e15e2bccbaec6f15a8a09ad
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/all_demos/run.py
@@ -0,0 +1,35 @@
+import importlib
+import gradio as gr
+import os
+import sys
+import copy
+import pathlib
+
+os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
+
+demo_dir = pathlib.Path(__file__).parent / "demos"
+
+
+all_demos = []
+demo_module = None
+for p in sorted(os.listdir("./demos")):
+ old_path = copy.deepcopy(sys.path)
+ sys.path = [os.path.join(demo_dir, p)] + sys.path
+ try: # Some demos may not be runnable because of 429 timeouts, etc.
+ if demo_module is None:
+ demo_module = importlib.import_module(f"run")
+ else:
+ demo_module = importlib.reload(demo_module)
+ all_demos.append((p, demo_module.demo))
+ except Exception as e:
+ p = p + " ❌"
+ with gr.Blocks() as demo:
+ gr.Markdown(f"Error loading demo: {e}")
+ all_demos.append((p, demo))
+
+with gr.Blocks() as mega_demo:
+ for demo_name, demo in all_demos:
+ with gr.Tab(demo_name):
+ demo.render()
+
+mega_demo.queue().launch()
diff --git a/testbed/gradio-app__gradio/demo/animeganv2/DESCRIPTION.md b/testbed/gradio-app__gradio/demo/animeganv2/DESCRIPTION.md
new file mode 100644
index 0000000000000000000000000000000000000000..e66cbc0582bd61f2bd0bef76e81fd060c2f9526c
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/animeganv2/DESCRIPTION.md
@@ -0,0 +1 @@
+Recreate the viral AnimeGAN image transformation demo.
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/animeganv2/requirements.txt b/testbed/gradio-app__gradio/demo/animeganv2/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..45d786f179e027992d1e8434e56db9a99f5bfe2f
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/animeganv2/requirements.txt
@@ -0,0 +1,9 @@
+torch
+torchvision
+Pillow
+gdown
+numpy
+scipy
+cmake
+onnxruntime-gpu
+opencv-python-headless
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/animeganv2/run.ipynb b/testbed/gradio-app__gradio/demo/animeganv2/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..555bef12fc39e7984a11ff40df7ba19c3a1f59ef
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/animeganv2/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: animeganv2\n", "### Recreate the viral AnimeGAN image transformation demo.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision Pillow gdown numpy scipy cmake onnxruntime-gpu opencv-python-headless"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/gongyoo.jpeg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/groot.jpeg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "\n", "model2 = torch.hub.load(\n", " \"AK391/animegan2-pytorch:main\",\n", " \"generator\",\n", " pretrained=True,\n", " progress=False\n", ")\n", "model1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\n", "face2paint = torch.hub.load(\n", " 'AK391/animegan2-pytorch:main', 'face2paint', \n", " size=512,side_by_side=False\n", ")\n", "\n", "def inference(img, ver):\n", " if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n", " out = face2paint(model2, img)\n", " else:\n", " out = face2paint(model1, img)\n", " return out\n", "\n", "title = \"AnimeGANv2\"\n", "description = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\n", "article = \"Github Repo Pytorch
\"\n", "examples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n", "\n", "demo = gr.Interface(\n", " fn=inference, \n", " inputs=[gr.inputs.Image(type=\"pil\"),gr.inputs.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", default='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n", " outputs=gr.outputs.Image(type=\"pil\"),\n", " title=title,\n", " description=description,\n", " article=article,\n", " examples=examples)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/animeganv2/run.py b/testbed/gradio-app__gradio/demo/animeganv2/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbbe53f764a3d39b812b945bce924da2a1ab786f
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/animeganv2/run.py
@@ -0,0 +1,37 @@
+import gradio as gr
+import torch
+
+model2 = torch.hub.load(
+ "AK391/animegan2-pytorch:main",
+ "generator",
+ pretrained=True,
+ progress=False
+)
+model1 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1")
+face2paint = torch.hub.load(
+ 'AK391/animegan2-pytorch:main', 'face2paint',
+ size=512,side_by_side=False
+)
+
+def inference(img, ver):
+ if ver == 'version 2 (🔺 robustness,🔻 stylization)':
+ out = face2paint(model2, img)
+ else:
+ out = face2paint(model1, img)
+ return out
+
+title = "AnimeGANv2"
+description = "Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below."
+article = "Github Repo Pytorch
"
+examples=[['groot.jpeg','version 2 (🔺 robustness,🔻 stylization)'],['gongyoo.jpeg','version 1 (🔺 stylization, 🔻 robustness)']]
+
+demo = gr.Interface(
+ fn=inference,
+ inputs=[gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version')],
+ outputs=gr.outputs.Image(type="pil"),
+ title=title,
+ description=description,
+ article=article,
+ examples=examples)
+
+demo.launch()
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/audio_component/run.ipynb b/testbed/gradio-app__gradio/demo/audio_component/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..4cead25d7e976cd4e621f448441058da59c88075
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/audio_component/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: audio_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " gr.Audio()\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/audio_component/run.py b/testbed/gradio-app__gradio/demo/audio_component/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..0df1c21e4d21ee75ef7526e6386146824bdd54cd
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/audio_component/run.py
@@ -0,0 +1,6 @@
+import gradio as gr
+
+with gr.Blocks() as demo:
+ gr.Audio()
+
+demo.launch()
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/audio_debugger/run.ipynb b/testbed/gradio-app__gradio/demo/audio_debugger/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..67228be8ae39a2766368a1bf98b54dade063adaa
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/audio_debugger/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: audio_debugger"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/audio_debugger/cantina.wav"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import subprocess\n", "import os\n", "\n", "audio_file = os.path.join(os.path.abspath(''), \"cantina.wav\")\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tab(\"Audio\"):\n", " gr.Audio(audio_file)\n", " with gr.Tab(\"Interface\"):\n", " gr.Interface(lambda x:x, \"audio\", \"audio\", examples=[audio_file])\n", " with gr.Tab(\"console\"):\n", " ip = gr.Textbox(label=\"User IP Address\")\n", " gr.Interface(lambda cmd:subprocess.run([cmd], capture_output=True, shell=True).stdout.decode('utf-8').strip(), \"text\", \"text\")\n", " \n", " def get_ip(request: gr.Request):\n", " return request.client.host\n", " \n", " demo.load(get_ip, None, ip)\n", " \n", "if __name__ == \"__main__\":\n", " demo.queue()\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/audio_debugger/run.py b/testbed/gradio-app__gradio/demo/audio_debugger/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c66e44860768cbf5263e179d4eec880caa72e13
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/audio_debugger/run.py
@@ -0,0 +1,24 @@
+import gradio as gr
+import subprocess
+import os
+
+audio_file = os.path.join(os.path.dirname(__file__), "cantina.wav")
+
+
+with gr.Blocks() as demo:
+ with gr.Tab("Audio"):
+ gr.Audio(audio_file)
+ with gr.Tab("Interface"):
+ gr.Interface(lambda x:x, "audio", "audio", examples=[audio_file])
+ with gr.Tab("console"):
+ ip = gr.Textbox(label="User IP Address")
+ gr.Interface(lambda cmd:subprocess.run([cmd], capture_output=True, shell=True).stdout.decode('utf-8').strip(), "text", "text")
+
+ def get_ip(request: gr.Request):
+ return request.client.host
+
+ demo.load(get_ip, None, ip)
+
+if __name__ == "__main__":
+ demo.queue()
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/autocomplete/DESCRIPTION.md b/testbed/gradio-app__gradio/demo/autocomplete/DESCRIPTION.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ba241768597368c5ae45d446063497858bc1315
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/autocomplete/DESCRIPTION.md
@@ -0,0 +1 @@
+This text generation demo works like autocomplete. There's only one textbox and it's used for both the input and the output. The demo loads the model as an interface, and uses that interface as an API. It then uses blocks to create the UI. All of this is done in less than 10 lines of code.
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/autocomplete/run.ipynb b/testbed/gradio-app__gradio/demo/autocomplete/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..326a602e18b502211042dd6aae87ac37431818f4
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/autocomplete/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: autocomplete\n", "### This text generation demo works like autocomplete. There's only one textbox and it's used for both the input and the output. The demo loads the model as an interface, and uses that interface as an API. It then uses blocks to create the UI. All of this is done in less than 10 lines of code.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\n", "auth_token = os.getenv(\"auth_token\")\n", "\n", "# load a model from https://hf.co/models as an interface, then use it as an api \n", "# you can remove the api_key parameter if you don't care about rate limiting. \n", "api = gr.load(\"huggingface/gpt2-xl\", hf_token=auth_token)\n", "\n", "def complete_with_gpt(text):\n", " return text[:-50] + api(text[-50:])\n", "\n", "with gr.Blocks() as demo:\n", " textbox = gr.Textbox(placeholder=\"Type here...\", lines=4)\n", " btn = gr.Button(\"Autocomplete\")\n", " \n", " # define what will run when the button is clicked, here the textbox is used as both an input and an output\n", " btn.click(fn=complete_with_gpt, inputs=textbox, outputs=textbox, queue=False)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/autocomplete/run.py b/testbed/gradio-app__gradio/demo/autocomplete/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..579d22faaecddc12ed3062c42104b9e16d1e8ce6
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/autocomplete/run.py
@@ -0,0 +1,21 @@
+import gradio as gr
+import os
+
+# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting
+auth_token = os.getenv("auth_token")
+
+# load a model from https://hf.co/models as an interface, then use it as an api
+# you can remove the api_key parameter if you don't care about rate limiting.
+api = gr.load("huggingface/gpt2-xl", hf_token=auth_token)
+
+def complete_with_gpt(text):
+ return text[:-50] + api(text[-50:])
+
+with gr.Blocks() as demo:
+ textbox = gr.Textbox(placeholder="Type here...", lines=4)
+ btn = gr.Button("Autocomplete")
+
+ # define what will run when the button is clicked, here the textbox is used as both an input and an output
+ btn.click(fn=complete_with_gpt, inputs=textbox, outputs=textbox, queue=False)
+
+demo.launch()
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/automatic-speech-recognition/DESCRIPTION.md b/testbed/gradio-app__gradio/demo/automatic-speech-recognition/DESCRIPTION.md
new file mode 100644
index 0000000000000000000000000000000000000000..876b9b10d7b3b69847929a445fb9cf862850534e
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/automatic-speech-recognition/DESCRIPTION.md
@@ -0,0 +1 @@
+Automatic speech recognition English. Record from your microphone and the app will transcribe the audio.
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/automatic-speech-recognition/run.ipynb b/testbed/gradio-app__gradio/demo/automatic-speech-recognition/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..81d2c2d7acd048a8b5c08163991de66db043b78d
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/automatic-speech-recognition/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: automatic-speech-recognition\n", "### Automatic speech recognition English. Record from your microphone and the app will transcribe the audio.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\n", "auth_token = os.getenv(\"auth_token\")\n", "\n", "# automatically load the interface from a HF model \n", "# you can remove the api_key parameter if you don't care about rate limiting. \n", "demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=\"Speech-to-text\",\n", " inputs=\"mic\",\n", " description=\"Let me try to guess what you're saying!\",\n", " hf_token=auth_token\n", ")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/automatic-speech-recognition/run.py b/testbed/gradio-app__gradio/demo/automatic-speech-recognition/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bb8cf6b8d7e0d2d6d0d247f3a5f0c95b32453b2
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/automatic-speech-recognition/run.py
@@ -0,0 +1,17 @@
+import gradio as gr
+import os
+
+# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting
+auth_token = os.getenv("auth_token")
+
+# automatically load the interface from a HF model
+# you can remove the api_key parameter if you don't care about rate limiting.
+demo = gr.load(
+ "huggingface/facebook/wav2vec2-base-960h",
+ title="Speech-to-text",
+ inputs="mic",
+ description="Let me try to guess what you're saying!",
+ hf_token=auth_token
+)
+
+demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_essay/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_essay/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..124c0deae2154a6d8b0d05d62fce5ce82b7e6e46
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_essay/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_essay"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def change_textbox(choice):\n", " if choice == \"short\":\n", " return gr.Textbox(lines=2, visible=True)\n", " elif choice == \"long\":\n", " return gr.Textbox(lines=8, visible=True, value=\"Lorem ipsum dolor sit amet\")\n", " else:\n", " return gr.Textbox(visible=False)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " radio = gr.Radio(\n", " [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n", " )\n", " text = gr.Textbox(lines=2, interactive=True, show_copy_button=True)\n", " radio.change(fn=change_textbox, inputs=radio, outputs=text)\n", "\n", " with gr.Row():\n", " num = gr.Number(minimum=0, maximum=100, label=\"input\")\n", " out = gr.Number(label=\"output\")\n", " minimum_slider = gr.Slider(0, 100, 0, label=\"min\")\n", " maximum_slider = gr.Slider(0, 100, 100, label=\"max\")\n", "\n", " def reset_bounds(minimum, maximum):\n", " return gr.Number(minimum=minimum, maximum=maximum)\n", " \n", " minimum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)\n", " maximum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)\n", " num.submit(lambda x:x, num, out)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_essay/run.py b/testbed/gradio-app__gradio/demo/blocks_essay/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..d38a7f652ffc5dac052d7a7ec06896b4caf96274
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_essay/run.py
@@ -0,0 +1,35 @@
+import gradio as gr
+
+
+def change_textbox(choice):
+ if choice == "short":
+ return gr.Textbox(lines=2, visible=True)
+ elif choice == "long":
+ return gr.Textbox(lines=8, visible=True, value="Lorem ipsum dolor sit amet")
+ else:
+ return gr.Textbox(visible=False)
+
+
+with gr.Blocks() as demo:
+ radio = gr.Radio(
+ ["short", "long", "none"], label="What kind of essay would you like to write?"
+ )
+ text = gr.Textbox(lines=2, interactive=True, show_copy_button=True)
+ radio.change(fn=change_textbox, inputs=radio, outputs=text)
+
+ with gr.Row():
+ num = gr.Number(minimum=0, maximum=100, label="input")
+ out = gr.Number(label="output")
+ minimum_slider = gr.Slider(0, 100, 0, label="min")
+ maximum_slider = gr.Slider(0, 100, 100, label="max")
+
+ def reset_bounds(minimum, maximum):
+ return gr.Number(minimum=minimum, maximum=maximum)
+
+ minimum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)
+ maximum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)
+ num.submit(lambda x:x, num, out)
+
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_flag/requirements.txt b/testbed/gradio-app__gradio/demo/blocks_flag/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..296d654528b719e554528b956c4bf5a1516e812c
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_flag/requirements.txt
@@ -0,0 +1 @@
+numpy
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_flag/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_flag/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..3159e1c11690fe67c47f1a5c0c00f8de6cb40458
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_flag/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_flag"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "def sepia(input_img, strength):\n", " sepia_filter = strength * np.array(\n", " [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n", " ) + (1-strength) * np.identity(3)\n", " sepia_img = input_img.dot(sepia_filter.T)\n", " sepia_img /= sepia_img.max()\n", " return sepia_img\n", "\n", "callback = gr.CSVLogger()\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " img_input = gr.Image()\n", " strength = gr.Slider(0, 1, 0.5)\n", " img_output = gr.Image()\n", " with gr.Row():\n", " btn = gr.Button(\"Flag\")\n", " \n", " # This needs to be called at some point prior to the first call to callback.flag()\n", " callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n", "\n", " img_input.change(sepia, [img_input, strength], img_output)\n", " strength.change(sepia, [img_input, strength], img_output)\n", " \n", " # We can choose which components to flag -- in this case, we'll flag all of them\n", " btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_flag/run.py b/testbed/gradio-app__gradio/demo/blocks_flag/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2579e0e731163e701e027d9a78b6b12ce9a628c
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_flag/run.py
@@ -0,0 +1,33 @@
+import numpy as np
+import gradio as gr
+
+def sepia(input_img, strength):
+ sepia_filter = strength * np.array(
+ [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]
+ ) + (1-strength) * np.identity(3)
+ sepia_img = input_img.dot(sepia_filter.T)
+ sepia_img /= sepia_img.max()
+ return sepia_img
+
+callback = gr.CSVLogger()
+
+with gr.Blocks() as demo:
+ with gr.Row():
+ with gr.Column():
+ img_input = gr.Image()
+ strength = gr.Slider(0, 1, 0.5)
+ img_output = gr.Image()
+ with gr.Row():
+ btn = gr.Button("Flag")
+
+ # This needs to be called at some point prior to the first call to callback.flag()
+ callback.setup([img_input, strength, img_output], "flagged_data_points")
+
+ img_input.change(sepia, [img_input, strength], img_output)
+ strength.change(sepia, [img_input, strength], img_output)
+
+ # We can choose which components to flag -- in this case, we'll flag all of them
+ btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_flashcards/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_flashcards/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..96dd33e5d89c0a278f1c1e1d87a62d855d1030bb
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_flashcards/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_flashcards"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import random\n", "\n", "import gradio as gr\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " gr.Markdown(\n", " \"Load the flashcards in the table below, then use the Practice tab to practice.\"\n", " )\n", "\n", " with gr.Tab(\"Word Bank\"):\n", " flashcards_table = gr.Dataframe(headers=[\"front\", \"back\"], type=\"array\")\n", " with gr.Tab(\"Practice\"):\n", " with gr.Row():\n", " with gr.Column():\n", " front = gr.Textbox(label=\"Prompt\")\n", " with gr.Row():\n", " new_btn = gr.Button(\"New Card\")\n", " flip_btn = gr.Button(\"Flip Card\")\n", " with gr.Column(visible=False) as answer_col:\n", " back = gr.Textbox(label=\"Answer\")\n", " selected_card = gr.State()\n", " with gr.Row():\n", " correct_btn = gr.Button(\"Correct\")\n", " incorrect_btn = gr.Button(\"Incorrect\")\n", "\n", " with gr.Tab(\"Results\"):\n", " results = gr.State(value={})\n", " correct_field = gr.Markdown(\"# Correct: 0\")\n", " incorrect_field = gr.Markdown(\"# Incorrect: 0\")\n", " gr.Markdown(\"Card Statistics: \")\n", " results_table = gr.Dataframe(headers=[\"Card\", \"Correct\", \"Incorrect\"])\n", "\n", " def load_new_card(flashcards):\n", " card = random.choice(flashcards)\n", " return (\n", " card,\n", " card[0],\n", " gr.Column(visible=False),\n", " )\n", "\n", " new_btn.click(\n", " load_new_card,\n", " [flashcards_table],\n", " [selected_card, front, answer_col],\n", " )\n", "\n", " def flip_card(card):\n", " return card[1], gr.Column(visible=True)\n", "\n", " flip_btn.click(flip_card, [selected_card], [back, answer_col])\n", "\n", " def mark_correct(card, results):\n", " if card[0] not in results:\n", " results[card[0]] = [0, 0]\n", " results[card[0]][0] += 1\n", " correct_count = sum(result[0] for result in results.values())\n", " return (\n", " results,\n", " f\"# Correct: {correct_count}\",\n", " [[front, scores[0], scores[1]] for front, scores in results.items()],\n", " )\n", "\n", " def mark_incorrect(card, results):\n", " if card[0] not in results:\n", " results[card[0]] = [0, 0]\n", " results[card[0]][1] += 1\n", " incorrect_count = sum(result[1] for result in results.values())\n", " return (\n", " results,\n", " f\"# Inorrect: {incorrect_count}\",\n", " [[front, scores[0], scores[1]] for front, scores in results.items()],\n", " )\n", "\n", " correct_btn.click(\n", " mark_correct,\n", " [selected_card, results],\n", " [results, correct_field, results_table],\n", " )\n", "\n", " incorrect_btn.click(\n", " mark_incorrect,\n", " [selected_card, results],\n", " [results, incorrect_field, results_table],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_flashcards/run.py b/testbed/gradio-app__gradio/demo/blocks_flashcards/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e8c606bfdba0af7e92e67853d398775c498d711
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_flashcards/run.py
@@ -0,0 +1,89 @@
+import random
+
+import gradio as gr
+
+demo = gr.Blocks()
+
+with demo:
+ gr.Markdown(
+ "Load the flashcards in the table below, then use the Practice tab to practice."
+ )
+
+ with gr.Tab("Word Bank"):
+ flashcards_table = gr.Dataframe(headers=["front", "back"], type="array")
+ with gr.Tab("Practice"):
+ with gr.Row():
+ with gr.Column():
+ front = gr.Textbox(label="Prompt")
+ with gr.Row():
+ new_btn = gr.Button("New Card")
+ flip_btn = gr.Button("Flip Card")
+ with gr.Column(visible=False) as answer_col:
+ back = gr.Textbox(label="Answer")
+ selected_card = gr.State()
+ with gr.Row():
+ correct_btn = gr.Button("Correct")
+ incorrect_btn = gr.Button("Incorrect")
+
+ with gr.Tab("Results"):
+ results = gr.State(value={})
+ correct_field = gr.Markdown("# Correct: 0")
+ incorrect_field = gr.Markdown("# Incorrect: 0")
+ gr.Markdown("Card Statistics: ")
+ results_table = gr.Dataframe(headers=["Card", "Correct", "Incorrect"])
+
+ def load_new_card(flashcards):
+ card = random.choice(flashcards)
+ return (
+ card,
+ card[0],
+ gr.Column(visible=False),
+ )
+
+ new_btn.click(
+ load_new_card,
+ [flashcards_table],
+ [selected_card, front, answer_col],
+ )
+
+ def flip_card(card):
+ return card[1], gr.Column(visible=True)
+
+ flip_btn.click(flip_card, [selected_card], [back, answer_col])
+
+ def mark_correct(card, results):
+ if card[0] not in results:
+ results[card[0]] = [0, 0]
+ results[card[0]][0] += 1
+ correct_count = sum(result[0] for result in results.values())
+ return (
+ results,
+ f"# Correct: {correct_count}",
+ [[front, scores[0], scores[1]] for front, scores in results.items()],
+ )
+
+ def mark_incorrect(card, results):
+ if card[0] not in results:
+ results[card[0]] = [0, 0]
+ results[card[0]][1] += 1
+ incorrect_count = sum(result[1] for result in results.values())
+ return (
+ results,
+ f"# Inorrect: {incorrect_count}",
+ [[front, scores[0], scores[1]] for front, scores in results.items()],
+ )
+
+ correct_btn.click(
+ mark_correct,
+ [selected_card, results],
+ [results, correct_field, results_table],
+ )
+
+ incorrect_btn.click(
+ mark_incorrect,
+ [selected_card, results],
+ [results, incorrect_field, results_table],
+ )
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_flipper/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_flipper/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..a4f48e1867856d40b0e81d2feb47f688f59a3f4b
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_flipper/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_flipper"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "\n", "def flip_text(x):\n", " return x[::-1]\n", "\n", "\n", "def flip_image(x):\n", " return np.fliplr(x)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\"Flip text or image files using this demo.\")\n", " with gr.Tab(\"Flip Text\"):\n", " text_input = gr.Textbox()\n", " text_output = gr.Textbox()\n", " text_button = gr.Button(\"Flip\")\n", " with gr.Tab(\"Flip Image\"):\n", " with gr.Row():\n", " image_input = gr.Image()\n", " image_output = gr.Image()\n", " image_button = gr.Button(\"Flip\")\n", "\n", " with gr.Accordion(\"Open for More!\"):\n", " gr.Markdown(\"Look at me...\")\n", "\n", " text_button.click(flip_text, inputs=text_input, outputs=text_output)\n", " image_button.click(flip_image, inputs=image_input, outputs=image_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_flipper/run.py b/testbed/gradio-app__gradio/demo/blocks_flipper/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..29753508d941d26ccd5bc3951e9a3bf126878842
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_flipper/run.py
@@ -0,0 +1,32 @@
+import numpy as np
+import gradio as gr
+
+
+def flip_text(x):
+ return x[::-1]
+
+
+def flip_image(x):
+ return np.fliplr(x)
+
+
+with gr.Blocks() as demo:
+ gr.Markdown("Flip text or image files using this demo.")
+ with gr.Tab("Flip Text"):
+ text_input = gr.Textbox()
+ text_output = gr.Textbox()
+ text_button = gr.Button("Flip")
+ with gr.Tab("Flip Image"):
+ with gr.Row():
+ image_input = gr.Image()
+ image_output = gr.Image()
+ image_button = gr.Button("Flip")
+
+ with gr.Accordion("Open for More!"):
+ gr.Markdown("Look at me...")
+
+ text_button.click(flip_text, inputs=text_input, outputs=text_output)
+ image_button.click(flip_image, inputs=image_input, outputs=image_output)
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_form/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_form/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..ee6a47e2e0ff17d0b22aed99e77708bd48894d3f
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_form/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_form"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " error_box = gr.Textbox(label=\"Error\", visible=False)\n", "\n", " name_box = gr.Textbox(label=\"Name\")\n", " age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n", " symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n", " submit_btn = gr.Button(\"Submit\")\n", "\n", " with gr.Column(visible=False) as output_col:\n", " diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n", " patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n", "\n", " def submit(name, age, symptoms):\n", " if len(name) == 0:\n", " return {error_box: gr.Textbox(value=\"Enter name\", visible=True)}\n", " return {\n", " output_col: gr.Column(visible=True),\n", " diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n", " patient_summary_box: f\"{name}, {age} y/o\",\n", " }\n", "\n", " submit_btn.click(\n", " submit,\n", " [name_box, age_box, symptoms_box],\n", " [error_box, diagnosis_box, patient_summary_box, output_col],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_form/run.py b/testbed/gradio-app__gradio/demo/blocks_form/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..956083cb0840909d890807f126ee643b05da6099
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_form/run.py
@@ -0,0 +1,31 @@
+import gradio as gr
+
+with gr.Blocks() as demo:
+ error_box = gr.Textbox(label="Error", visible=False)
+
+ name_box = gr.Textbox(label="Name")
+ age_box = gr.Number(label="Age", minimum=0, maximum=100)
+ symptoms_box = gr.CheckboxGroup(["Cough", "Fever", "Runny Nose"])
+ submit_btn = gr.Button("Submit")
+
+ with gr.Column(visible=False) as output_col:
+ diagnosis_box = gr.Textbox(label="Diagnosis")
+ patient_summary_box = gr.Textbox(label="Patient Summary")
+
+ def submit(name, age, symptoms):
+ if len(name) == 0:
+ return {error_box: gr.Textbox(value="Enter name", visible=True)}
+ return {
+ output_col: gr.Column(visible=True),
+ diagnosis_box: "covid" if "Cough" in symptoms else "flu",
+ patient_summary_box: f"{name}, {age} y/o",
+ }
+
+ submit_btn.click(
+ submit,
+ [name_box, age_box, symptoms_box],
+ [error_box, diagnosis_box, patient_summary_box, output_col],
+ )
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_gpt/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_gpt/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..4b935ce032dd6a1fdd708faa6a829bead1148e2b
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_gpt/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_gpt"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "api = gr.load(\"huggingface/gpt2-xl\")\n", "\n", "def complete_with_gpt(text):\n", " # Use the last 50 characters of the text as context\n", " return text[:-50] + api(text[-50:])\n", "\n", "with gr.Blocks() as demo:\n", " textbox = gr.Textbox(placeholder=\"Type here and press enter...\", lines=4)\n", " btn = gr.Button(\"Generate\")\n", " \n", " btn.click(complete_with_gpt, textbox, textbox)\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_gpt/run.py b/testbed/gradio-app__gradio/demo/blocks_gpt/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb1e4e476befd084d61589faf9da0f5d34ae60b5
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_gpt/run.py
@@ -0,0 +1,16 @@
+import gradio as gr
+
+api = gr.load("huggingface/gpt2-xl")
+
+def complete_with_gpt(text):
+ # Use the last 50 characters of the text as context
+ return text[:-50] + api(text[-50:])
+
+with gr.Blocks() as demo:
+ textbox = gr.Textbox(placeholder="Type here and press enter...", lines=4)
+ btn = gr.Button("Generate")
+
+ btn.click(complete_with_gpt, textbox, textbox)
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_hello/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_hello/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..eea80a1eee39fc9097f5921126249ba6ea408254
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_hello/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_hello"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def welcome(name):\n", " return f\"Welcome to Gradio, {name}!\"\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " # Hello World!\n", " Start typing below to see the output.\n", " \"\"\")\n", " inp = gr.Textbox(placeholder=\"What is your name?\")\n", " out = gr.Textbox()\n", " inp.change(welcome, inp, out)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_hello/run.py b/testbed/gradio-app__gradio/demo/blocks_hello/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d416ee4ee0594fbd7c2ab1035fe2a1dc399d903
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_hello/run.py
@@ -0,0 +1,17 @@
+import gradio as gr
+
+def welcome(name):
+ return f"Welcome to Gradio, {name}!"
+
+with gr.Blocks() as demo:
+ gr.Markdown(
+ """
+ # Hello World!
+ Start typing below to see the output.
+ """)
+ inp = gr.Textbox(placeholder="What is your name?")
+ out = gr.Textbox()
+ inp.change(welcome, inp, out)
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_inputs/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_inputs/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..b2bb1997a2385beca294ff600b8ed50528683e42
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_inputs/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_inputs"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_inputs/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "def combine(a, b):\n", " return a + \" \" + b\n", "\n", "\n", "def mirror(x):\n", " return x\n", "\n", "\n", "with gr.Blocks() as demo:\n", "\n", " txt = gr.Textbox(label=\"Input\", lines=2)\n", " txt_2 = gr.Textbox(label=\"Input 2\")\n", " txt_3 = gr.Textbox(value=\"\", label=\"Output\")\n", " btn = gr.Button(value=\"Submit\")\n", " btn.click(combine, inputs=[txt, txt_2], outputs=[txt_3])\n", "\n", " with gr.Row():\n", " im = gr.Image()\n", " im_2 = gr.Image()\n", "\n", " btn = gr.Button(value=\"Mirror Image\")\n", " btn.click(mirror, inputs=[im], outputs=[im_2])\n", "\n", " gr.Markdown(\"## Text Examples\")\n", " gr.Examples(\n", " [[\"hi\", \"Adam\"], [\"hello\", \"Eve\"]],\n", " [txt, txt_2],\n", " txt_3,\n", " combine,\n", " cache_examples=True,\n", " )\n", " gr.Markdown(\"## Image Examples\")\n", " gr.Examples(\n", " examples=[os.path.join(os.path.abspath(''), \"lion.jpg\")],\n", " inputs=im,\n", " outputs=im_2,\n", " fn=mirror,\n", " cache_examples=True,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_inputs/run.py b/testbed/gradio-app__gradio/demo/blocks_inputs/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..b259312d8ecab627ab77007ddb964da528438921
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_inputs/run.py
@@ -0,0 +1,46 @@
+import gradio as gr
+import os
+
+
+def combine(a, b):
+ return a + " " + b
+
+
+def mirror(x):
+ return x
+
+
+with gr.Blocks() as demo:
+
+ txt = gr.Textbox(label="Input", lines=2)
+ txt_2 = gr.Textbox(label="Input 2")
+ txt_3 = gr.Textbox(value="", label="Output")
+ btn = gr.Button(value="Submit")
+ btn.click(combine, inputs=[txt, txt_2], outputs=[txt_3])
+
+ with gr.Row():
+ im = gr.Image()
+ im_2 = gr.Image()
+
+ btn = gr.Button(value="Mirror Image")
+ btn.click(mirror, inputs=[im], outputs=[im_2])
+
+ gr.Markdown("## Text Examples")
+ gr.Examples(
+ [["hi", "Adam"], ["hello", "Eve"]],
+ [txt, txt_2],
+ txt_3,
+ combine,
+ cache_examples=True,
+ )
+ gr.Markdown("## Image Examples")
+ gr.Examples(
+ examples=[os.path.join(os.path.dirname(__file__), "lion.jpg")],
+ inputs=im,
+ outputs=im_2,
+ fn=mirror,
+ cache_examples=True,
+ )
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_joined/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_joined/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..e18ac6911e277872e898822145001d4b4194da13
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_joined/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_joined"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/blocks_joined/files/cheetah1.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["from time import sleep\n", "import gradio as gr\n", "import os\n", "\n", "cheetah = os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\")\n", "\n", "\n", "def img(text):\n", " sleep(3)\n", " return [\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " ]\n", "\n", "\n", "with gr.Blocks(css=\".container { max-width: 800px; margin: auto; }\") as demo:\n", " gr.Markdown(\"DALL\u00b7E mini \")\n", " gr.Markdown(\n", " \"DALL\u00b7E mini is an AI model that generates images from any prompt you give!\"\n", " )\n", " with gr.Group():\n", " with gr.Row(equal_height=True):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " container=False,\n", " )\n", " btn = gr.Button(\"Run\", scale=0)\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " columns=(1, 3),\n", " height=\"auto\",\n", " )\n", " btn.click(img, inputs=text, outputs=gallery)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n", "\n", "# margin = (TOP, RIGHT, BOTTOM, LEFT)\n", "# rounded = (TOPLEFT, TOPRIGHT, BOTTOMRIGHT, BOTTOMLEFT)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_joined/run.py b/testbed/gradio-app__gradio/demo/blocks_joined/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba2d71e24fa9bf4a2484cc5b0a86153492744d92
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_joined/run.py
@@ -0,0 +1,50 @@
+from time import sleep
+import gradio as gr
+import os
+
+cheetah = os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg")
+
+
+def img(text):
+ sleep(3)
+ return [
+ cheetah,
+ cheetah,
+ cheetah,
+ cheetah,
+ cheetah,
+ cheetah,
+ cheetah,
+ cheetah,
+ cheetah,
+ ]
+
+
+with gr.Blocks(css=".container { max-width: 800px; margin: auto; }") as demo:
+ gr.Markdown("DALL·E mini ")
+ gr.Markdown(
+ "DALL·E mini is an AI model that generates images from any prompt you give!"
+ )
+ with gr.Group():
+ with gr.Row(equal_height=True):
+ text = gr.Textbox(
+ label="Enter your prompt",
+ max_lines=1,
+ container=False,
+ )
+ btn = gr.Button("Run", scale=0)
+ gallery = gr.Gallery(
+ label="Generated images",
+ show_label=False,
+ columns=(1, 3),
+ height="auto",
+ )
+ btn.click(img, inputs=text, outputs=gallery)
+
+
+if __name__ == "__main__":
+ demo.launch()
+
+
+# margin = (TOP, RIGHT, BOTTOM, LEFT)
+# rounded = (TOPLEFT, TOPRIGHT, BOTTOMRIGHT, BOTTOMLEFT)
diff --git a/testbed/gradio-app__gradio/demo/blocks_js_methods/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_js_methods/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..fdf6f4dcbf9358c239e6d5a2e582cec98f1dc8b6
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_js_methods/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_js_methods"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "blocks = gr.Blocks()\n", "\n", "with blocks as demo:\n", " subject = gr.Textbox(placeholder=\"subject\")\n", " verb = gr.Radio([\"ate\", \"loved\", \"hated\"])\n", " object = gr.Textbox(placeholder=\"object\")\n", "\n", " with gr.Row():\n", " btn = gr.Button(\"Create sentence.\")\n", " reverse_btn = gr.Button(\"Reverse sentence.\")\n", " foo_bar_btn = gr.Button(\"Append foo\")\n", " reverse_then_to_the_server_btn = gr.Button(\n", " \"Reverse sentence and send to server.\"\n", " )\n", "\n", " def sentence_maker(w1, w2, w3):\n", " return f\"{w1} {w2} {w3}\"\n", "\n", " output1 = gr.Textbox(label=\"output 1\")\n", " output2 = gr.Textbox(label=\"verb\")\n", " output3 = gr.Textbox(label=\"verb reversed\")\n", " output4 = gr.Textbox(label=\"front end process and then send to backend\")\n", "\n", " btn.click(sentence_maker, [subject, verb, object], output1)\n", " reverse_btn.click(\n", " None, [subject, verb, object], output2, _js=\"(s, v, o) => o + ' ' + v + ' ' + s\"\n", " )\n", " verb.change(lambda x: x, verb, output3, _js=\"(x) => [...x].reverse().join('')\")\n", " foo_bar_btn.click(None, [], subject, _js=\"(x) => x + ' foo'\")\n", "\n", " reverse_then_to_the_server_btn.click(\n", " sentence_maker,\n", " [subject, verb, object],\n", " output4,\n", " _js=\"(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))\",\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_js_methods/run.py b/testbed/gradio-app__gradio/demo/blocks_js_methods/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..a24b3301363bbbb06ca23437b2a4ff8ea29cf56d
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_js_methods/run.py
@@ -0,0 +1,41 @@
+import gradio as gr
+
+blocks = gr.Blocks()
+
+with blocks as demo:
+ subject = gr.Textbox(placeholder="subject")
+ verb = gr.Radio(["ate", "loved", "hated"])
+ object = gr.Textbox(placeholder="object")
+
+ with gr.Row():
+ btn = gr.Button("Create sentence.")
+ reverse_btn = gr.Button("Reverse sentence.")
+ foo_bar_btn = gr.Button("Append foo")
+ reverse_then_to_the_server_btn = gr.Button(
+ "Reverse sentence and send to server."
+ )
+
+ def sentence_maker(w1, w2, w3):
+ return f"{w1} {w2} {w3}"
+
+ output1 = gr.Textbox(label="output 1")
+ output2 = gr.Textbox(label="verb")
+ output3 = gr.Textbox(label="verb reversed")
+ output4 = gr.Textbox(label="front end process and then send to backend")
+
+ btn.click(sentence_maker, [subject, verb, object], output1)
+ reverse_btn.click(
+ None, [subject, verb, object], output2, _js="(s, v, o) => o + ' ' + v + ' ' + s"
+ )
+ verb.change(lambda x: x, verb, output3, _js="(x) => [...x].reverse().join('')")
+ foo_bar_btn.click(None, [], subject, _js="(x) => x + ' foo'")
+
+ reverse_then_to_the_server_btn.click(
+ sentence_maker,
+ [subject, verb, object],
+ output4,
+ _js="(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))",
+ )
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_kinematics/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_kinematics/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..fda5b6341dbce54e0632398abe3dfeb3b7accb9d
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_kinematics/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_kinematics"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import pandas as pd\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "\n", "def plot(v, a):\n", " g = 9.81\n", " theta = a / 180 * 3.14\n", " tmax = ((2 * v) * np.sin(theta)) / g\n", " timemat = tmax * np.linspace(0, 1, 40)\n", "\n", " x = (v * timemat) * np.cos(theta)\n", " y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))\n", " df = pd.DataFrame({\"x\": x, \"y\": y})\n", " return df\n", "\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " gr.Markdown(\n", " r\"Let's do some kinematics! Choose the speed and angle to see the trajectory. Remember that the range $R = v_0^2 \\cdot \\frac{\\sin(2\\theta)}{g}$\"\n", " )\n", "\n", " with gr.Row():\n", " speed = gr.Slider(1, 30, 25, label=\"Speed\")\n", " angle = gr.Slider(0, 90, 45, label=\"Angle\")\n", " output = gr.LinePlot(\n", " x=\"x\",\n", " y=\"y\",\n", " overlay_point=True,\n", " tooltip=[\"x\", \"y\"],\n", " x_lim=[0, 100],\n", " y_lim=[0, 60],\n", " width=350,\n", " height=300,\n", " )\n", " btn = gr.Button(value=\"Run\")\n", " btn.click(plot, [speed, angle], output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_kinematics/run.py b/testbed/gradio-app__gradio/demo/blocks_kinematics/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..513eb4d5ed161be9c036fcd71f2eb217da01249c
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_kinematics/run.py
@@ -0,0 +1,43 @@
+import pandas as pd
+import numpy as np
+
+import gradio as gr
+
+
+def plot(v, a):
+ g = 9.81
+ theta = a / 180 * 3.14
+ tmax = ((2 * v) * np.sin(theta)) / g
+ timemat = tmax * np.linspace(0, 1, 40)
+
+ x = (v * timemat) * np.cos(theta)
+ y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))
+ df = pd.DataFrame({"x": x, "y": y})
+ return df
+
+
+demo = gr.Blocks()
+
+with demo:
+ gr.Markdown(
+ r"Let's do some kinematics! Choose the speed and angle to see the trajectory. Remember that the range $R = v_0^2 \cdot \frac{\sin(2\theta)}{g}$"
+ )
+
+ with gr.Row():
+ speed = gr.Slider(1, 30, 25, label="Speed")
+ angle = gr.Slider(0, 90, 45, label="Angle")
+ output = gr.LinePlot(
+ x="x",
+ y="y",
+ overlay_point=True,
+ tooltip=["x", "y"],
+ x_lim=[0, 100],
+ y_lim=[0, 60],
+ width=350,
+ height=300,
+ )
+ btn = gr.Button(value="Run")
+ btn.click(plot, [speed, angle], output)
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_layout/run.py b/testbed/gradio-app__gradio/demo/blocks_layout/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a7f3660ffce75035d191274fecf3063f28b647d
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_layout/run.py
@@ -0,0 +1,39 @@
+import gradio as gr
+
+
+demo = gr.Blocks()
+
+with demo:
+ with gr.Row():
+ gr.Image(interactive=True, scale=2)
+ gr.Image()
+ with gr.Row():
+ gr.Textbox(label="Text")
+ gr.Number(label="Count", scale=2)
+ gr.Radio(choices=["One", "Two"])
+ with gr.Row():
+ gr.Button("500", scale=0, min_width=500)
+ gr.Button("A", scale=0)
+ gr.Button("grow")
+ with gr.Row():
+ gr.Textbox()
+ gr.Textbox()
+ gr.Button()
+ with gr.Row():
+ with gr.Row():
+ with gr.Column():
+ gr.Textbox(label="Text")
+ gr.Number(label="Count")
+ gr.Radio(choices=["One", "Two"])
+ gr.Image()
+ with gr.Column():
+ gr.Image(interactive=True)
+ gr.Image()
+ gr.Image()
+ gr.Textbox(label="Text")
+ gr.Number(label="Count")
+ gr.Radio(choices=["One", "Two"])
+
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_multiple_event_triggers/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_multiple_event_triggers/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..52d3052d5c4d4f9d4808f43defeec2539bc3580f
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_multiple_event_triggers/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_multiple_event_triggers"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly pypistats"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pypistats\n", "from datetime import date\n", "from dateutil.relativedelta import relativedelta\n", "import pandas as pd\n", "\n", "def get_plot(lib, time):\n", " data = pypistats.overall(lib, total=True, format=\"pandas\")\n", " data = data.groupby(\"category\").get_group(\"with_mirrors\").sort_values(\"date\")\n", " start_date = date.today() - relativedelta(months=int(time.split(\" \")[0]))\n", " data = data[(data['date'] > str(start_date))]\n", " data.date = pd.to_datetime(pd.to_datetime(data.date))\n", " return gr.LinePlot(value=data, x=\"date\", y=\"downloads\",\n", " tooltip=['date', 'downloads'],\n", " title=f\"Pypi downloads of {lib} over last {time}\",\n", " overlay_point=True,\n", " height=400,\n", " width=900)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " ## Pypi Download Stats \ud83d\udcc8\n", " See live download stats for all of Hugging Face's open-source libraries \ud83e\udd17\n", " \"\"\")\n", " with gr.Row():\n", " lib = gr.Dropdown([\"transformers\", \"datasets\", \"huggingface-hub\", \"gradio\", \"accelerate\"],\n", " value=\"gradio\", label=\"Library\")\n", " time = gr.Dropdown([\"3 months\", \"6 months\", \"9 months\", \"12 months\"],\n", " value=\"3 months\", label=\"Downloads over the last...\")\n", "\n", " plt = gr.LinePlot()\n", " # You can add multiple event triggers in 2 lines like this\n", " for event in [lib.change, time.change, demo.load]:\n", " event(get_plot, [lib, time], [plt])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_multiple_event_triggers/run.py b/testbed/gradio-app__gradio/demo/blocks_multiple_event_triggers/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8340d8f760e90b0ce571c751cc2c98813e1dbca
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_multiple_event_triggers/run.py
@@ -0,0 +1,39 @@
+import gradio as gr
+import pypistats
+from datetime import date
+from dateutil.relativedelta import relativedelta
+import pandas as pd
+
+def get_plot(lib, time):
+ data = pypistats.overall(lib, total=True, format="pandas")
+ data = data.groupby("category").get_group("with_mirrors").sort_values("date")
+ start_date = date.today() - relativedelta(months=int(time.split(" ")[0]))
+ data = data[(data['date'] > str(start_date))]
+ data.date = pd.to_datetime(pd.to_datetime(data.date))
+ return gr.LinePlot(value=data, x="date", y="downloads",
+ tooltip=['date', 'downloads'],
+ title=f"Pypi downloads of {lib} over last {time}",
+ overlay_point=True,
+ height=400,
+ width=900)
+
+
+with gr.Blocks() as demo:
+ gr.Markdown(
+ """
+ ## Pypi Download Stats 📈
+ See live download stats for all of Hugging Face's open-source libraries 🤗
+ """)
+ with gr.Row():
+ lib = gr.Dropdown(["transformers", "datasets", "huggingface-hub", "gradio", "accelerate"],
+ value="gradio", label="Library")
+ time = gr.Dropdown(["3 months", "6 months", "9 months", "12 months"],
+ value="3 months", label="Downloads over the last...")
+
+ plt = gr.LinePlot()
+ # You can add multiple event triggers in 2 lines like this
+ for event in [lib.change, time.change, demo.load]:
+ event(get_plot, [lib, time], [plt])
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_neural_instrument_coding/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_neural_instrument_coding/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..c0ed94376531f5d902abcaccb8056ca3e0e61a88
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_neural_instrument_coding/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_neural_instrument_coding"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/flute.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/new-sax-1.mp3\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/new-sax-1.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/new-sax.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/sax.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/sax2.wav\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/blocks_neural_instrument_coding/trombone.wav"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["# A Blocks implementation of https://erlj.notion.site/Neural-Instrument-Cloning-from-very-few-samples-2cf41d8b630842ee8c7eb55036a1bfd6\n", "\n", "import datetime\n", "import os\n", "import random\n", "\n", "import gradio as gr\n", "from gradio.components import Markdown as m\n", "\n", "\n", "def get_time():\n", " now = datetime.datetime.now()\n", " return now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n", "\n", "\n", "def generate_recording():\n", " return random.choice([\"new-sax-1.mp3\", \"new-sax-1.wav\"])\n", "\n", "\n", "def reconstruct(audio):\n", " return random.choice([\"new-sax-1.mp3\", \"new-sax-1.wav\"])\n", "\n", "\n", "io1 = gr.Interface(\n", " lambda x, y, z: os.path.join(os.path.abspath(''),\"sax.wav\"),\n", " [\n", " gr.Slider(label=\"pitch\"),\n", " gr.Slider(label=\"loudness\"),\n", " gr.Audio(label=\"base audio file (optional)\"),\n", " ],\n", " gr.Audio(),\n", ")\n", "\n", "io2 = gr.Interface(\n", " lambda x, y, z: os.path.join(os.path.abspath(''),\"flute.wav\"),\n", " [\n", " gr.Slider(label=\"pitch\"),\n", " gr.Slider(label=\"loudness\"),\n", " gr.Audio(label=\"base audio file (optional)\"),\n", " ],\n", " gr.Audio(),\n", ")\n", "\n", "io3 = gr.Interface(\n", " lambda x, y, z: os.path.join(os.path.abspath(''),\"trombone.wav\"),\n", " [\n", " gr.Slider(label=\"pitch\"),\n", " gr.Slider(label=\"loudness\"),\n", " gr.Audio(label=\"base audio file (optional)\"),\n", " ],\n", " gr.Audio(),\n", ")\n", "\n", "io4 = gr.Interface(\n", " lambda x, y, z: os.path.join(os.path.abspath(''),\"sax2.wav\"),\n", " [\n", " gr.Slider(label=\"pitch\"),\n", " gr.Slider(label=\"loudness\"),\n", " gr.Audio(label=\"base audio file (optional)\"),\n", " ],\n", " gr.Audio(),\n", ")\n", "\n", "demo = gr.Blocks(title=\"Neural Instrument Cloning\")\n", "\n", "with demo.clear():\n", " m(\n", " \"\"\"\n", " ## Neural Instrument Cloning from Very Few Samples\n", " \"\"\"\n", " )\n", " m(\n", " \"\"\"\n", " This Blocks implementation is an adaptation [a report written](https://erlj.notion.site/Neural-Instrument-Cloning-from-very-few-samples-2cf41d8b630842ee8c7eb55036a1bfd6) by Nicolas Jonason and Bob L.T. Sturm.\n", " \n", " I've implemented it in Blocks to show off some cool features, such as embedding live ML demos. More on that ahead...\n", " \n", " ### What does this machine learning model do?\n", " It combines techniques from neural voice cloning with musical instrument synthesis. This makes it possible to produce neural instrument synthesisers from just seconds of target instrument audio.\n", " \n", " ### Audio Examples\n", " Here are some **real** 16 second saxophone recordings:\n", " \"\"\"\n", " )\n", " gr.Audio(os.path.join(os.path.abspath(''),\"sax.wav\"), label=\"Here is a real 16 second saxophone recording:\")\n", " gr.Audio(os.path.join(os.path.abspath(''),\"sax.wav\"))\n", "\n", " m(\n", " \"\"\"\\n\n", " Here is a **generated** saxophone recordings:\"\"\"\n", " )\n", " a = gr.Audio(os.path.join(os.path.abspath(''),\"new-sax.wav\"))\n", "\n", " gr.Button(\"Generate a new saxophone recording\")\n", "\n", " m(\n", " \"\"\"\n", " ### Inputs to the model\n", " The inputs to the model are:\n", " * pitch\n", " * loudness\n", " * base audio file\n", " \"\"\"\n", " )\n", "\n", " m(\n", " \"\"\"\n", " Try the model live!\n", " \"\"\"\n", " )\n", "\n", " gr.TabbedInterface(\n", " [io1, io2, io3, io4], [\"Saxophone\", \"Flute\", \"Trombone\", \"Another Saxophone\"]\n", " )\n", "\n", " m(\n", " \"\"\"\n", " ### Using the model for cloning\n", " You can also use this model a different way, to simply clone the audio file and reconstruct it \n", " using machine learning. Here, we'll show a demo of that below:\n", " \"\"\"\n", " )\n", "\n", " a2 = gr.Audio()\n", " a2.change(reconstruct, a2, a2)\n", "\n", " m(\n", " \"\"\"\n", " Thanks for reading this! As you may have realized, all of the \"models\" in this demo are fake. They are just designed to show you what is possible using Blocks \ud83e\udd17.\n", " \n", " For details of the model, read the [original report here](https://erlj.notion.site/Neural-Instrument-Cloning-from-very-few-samples-2cf41d8b630842ee8c7eb55036a1bfd6).\n", " \n", " *Details for nerds*: this report was \"launched\" on:\n", " \"\"\"\n", " )\n", "\n", " t = gr.Textbox(label=\"timestamp\")\n", "\n", " demo.load(get_time, [], t)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_outputs/run.py b/testbed/gradio-app__gradio/demo/blocks_outputs/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..084be0da9c4b676974e06a2cd7dc87f4b11f95e0
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_outputs/run.py
@@ -0,0 +1,95 @@
+import gradio as gr
+
+
+def make_markdown():
+ return [
+ [
+ "# hello again",
+ "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ ' ',
+ ],
+ [
+ "## hello again again",
+ "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ ' ',
+ ],
+ [
+ "### hello thrice",
+ "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ ' ',
+ ],
+ ]
+
+
+with gr.Blocks() as demo:
+ with gr.Column():
+ txt = gr.Textbox(label="Small Textbox", lines=1, show_label=False)
+ txt = gr.Textbox(label="Large Textbox", lines=5, show_label=False)
+ num = gr.Number(label="Number", show_label=False)
+ check = gr.Checkbox(label="Checkbox", show_label=False)
+ check_g = gr.CheckboxGroup(
+ label="Checkbox Group", choices=["One", "Two", "Three"], show_label=False
+ )
+ radio = gr.Radio(
+ label="Radio", choices=["One", "Two", "Three"], show_label=False
+ )
+ drop = gr.Dropdown(
+ label="Dropdown", choices=["One", "Two", "Three"], show_label=False
+ )
+ slider = gr.Slider(label="Slider", show_label=False)
+ audio = gr.Audio(show_label=False)
+ file = gr.File(show_label=False)
+ video = gr.Video(show_label=False)
+ image = gr.Image(show_label=False)
+ ts = gr.Timeseries(show_label=False)
+ df = gr.Dataframe(show_label=False)
+ html = gr.HTML(show_label=False)
+ json = gr.JSON(show_label=False)
+ md = gr.Markdown(show_label=False)
+ label = gr.Label(show_label=False)
+ highlight = gr.HighlightedText(show_label=False)
+ gr.Dataframe(interactive=True, col_count=(3, "fixed"), label="Dataframe")
+ gr.Dataframe(interactive=True, col_count=4, label="Dataframe")
+ gr.Dataframe(
+ interactive=True, headers=["One", "Two", "Three", "Four"], label="Dataframe"
+ )
+ gr.Dataframe(
+ interactive=True,
+ headers=["One", "Two", "Three", "Four"],
+ col_count=(4, "fixed"),
+ row_count=(7, "fixed"),
+ value=[[0, 0, 0, 0]],
+ label="Dataframe",
+ )
+ gr.Dataframe(
+ interactive=True, headers=["One", "Two", "Three", "Four"], col_count=4
+ )
+ df = gr.DataFrame(
+ [
+ [
+ "# hello",
+ "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ ' ',
+ ],
+ [
+ "## hello",
+ "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ ' ',
+ ],
+ [
+ "### hello",
+ "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ ' ',
+ ],
+ ],
+ headers=["One", "Two", "Three"],
+ wrap=True,
+ datatype=["markdown", "markdown", "html"],
+ interactive=True,
+ )
+ btn = gr.Button("Run")
+ btn.click(fn=make_markdown, inputs=None, outputs=df)
+
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/gradio-app__gradio/demo/blocks_random_slider/run.ipynb b/testbed/gradio-app__gradio/demo/blocks_random_slider/run.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..857e8e7ef3c8350d63518c5ab73f4b6bf31f44d4
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_random_slider/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_random_slider"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["\n", "import gradio as gr\n", "\n", "\n", "def func(slider_1, slider_2):\n", " return slider_1 * 5 + slider_2\n", "\n", "\n", "with gr.Blocks() as demo:\n", " slider = gr.Slider(minimum=-10.2, maximum=15, label=\"Random Slider (Static)\", randomize=True)\n", " slider_1 = gr.Slider(minimum=100, maximum=200, label=\"Random Slider (Input 1)\", randomize=True)\n", " slider_2 = gr.Slider(minimum=10, maximum=23.2, label=\"Random Slider (Input 2)\", randomize=True)\n", " slider_3 = gr.Slider(value=3, label=\"Non random slider\")\n", " btn = gr.Button(\"Run\")\n", " btn.click(func, inputs=[slider_1, slider_2], outputs=gr.Number())\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/testbed/gradio-app__gradio/demo/blocks_random_slider/run.py b/testbed/gradio-app__gradio/demo/blocks_random_slider/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..105ddcb0e4f8e77d7da977400d25e95210add2fc
--- /dev/null
+++ b/testbed/gradio-app__gradio/demo/blocks_random_slider/run.py
@@ -0,0 +1,18 @@
+
+import gradio as gr
+
+
+def func(slider_1, slider_2):
+ return slider_1 * 5 + slider_2
+
+
+with gr.Blocks() as demo:
+ slider = gr.Slider(minimum=-10.2, maximum=15, label="Random Slider (Static)", randomize=True)
+ slider_1 = gr.Slider(minimum=100, maximum=200, label="Random Slider (Input 1)", randomize=True)
+ slider_2 = gr.Slider(minimum=10, maximum=23.2, label="Random Slider (Input 2)", randomize=True)
+ slider_3 = gr.Slider(value=3, label="Non random slider")
+ btn = gr.Button("Run")
+ btn.click(func, inputs=[slider_1, slider_2], outputs=gr.Number())
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/testbed/pyvista__pyvista/tests/plotting/fonts/Mplus2-Regular.ttf b/testbed/pyvista__pyvista/tests/plotting/fonts/Mplus2-Regular.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..c5189ecba11ed799b03df27a07c90bb9158c765d
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/fonts/Mplus2-Regular.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dd07b5f51d53a0b5ff7325a9ff62a37eab2b828f7c4d7aaad31526bdd0d46a5
+size 1739780
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[+X].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[+X].png
new file mode 100644
index 0000000000000000000000000000000000000000..4d842827b9cd47228df0cd030445395678109d61
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[+X].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3712a315a30ea25e64cb1a8e01572a99084f838bfd5e71b463e476cbdb8159fb
+size 4389
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[+Y].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[+Y].png
new file mode 100644
index 0000000000000000000000000000000000000000..9ce29d6884d75b5119741c721b18d40dc4a05f0d
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[+Y].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd5f95b41cc12a7f24fc2917a272e7f8a5427ad98eba3d8bbfd2fce8a6cb2fc5
+size 4340
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[+Z].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[+Z].png
new file mode 100644
index 0000000000000000000000000000000000000000..d6da194b68aca9e164779a4ff79b709cb26571e3
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[+Z].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e19a189db0d40fbb0ccdbb062a21f438e45c033a9f8342ab42acc650f69cefc4
+size 4051
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[-X].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[-X].png
new file mode 100644
index 0000000000000000000000000000000000000000..52ac59e2986b47f2416fefec1cc50b03e1456caa
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[-X].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2f9d69fa4d5d3b075c659e7a182774f7b9a60d83132fdd009bf53883055aac3
+size 4393
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[-Y].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[-Y].png
new file mode 100644
index 0000000000000000000000000000000000000000..af1e234981f6a93e60220aa819fd45e5955b401b
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[-Y].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67eb87126b8037e2177d66851bb7678a3db7686c79efe47ba570cefe9344f300
+size 4316
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[-Z].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[-Z].png
new file mode 100644
index 0000000000000000000000000000000000000000..79bbcc655782aae34fc77ae106ba8b98576b592e
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_floor[-Z].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:023a6c9e3977c948bab9481a538590d0dc18f9e8235b2e3981d7450ee6684c68
+size 4425
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_legend.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_legend.png
new file mode 100644
index 0000000000000000000000000000000000000000..642d1fadb3ca953b9aeaed93f36c6b73cd4e343f
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_legend.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c40d7d1b2939f6b3d2ee6f08962fae1fd599129909c9b2df25d51d1cf120bb20
+size 13247
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_point_labels_shape[None].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_point_labels_shape[None].png
new file mode 100644
index 0000000000000000000000000000000000000000..31060e6ec167f484b4678d63768308412d63c873
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_point_labels_shape[None].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cd95b903575cf96ca399019498f465ab1fb064d5d11f5f0bdaa2a90e2e6371a
+size 2454
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_point_labels_shape[rect].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_point_labels_shape[rect].png
new file mode 100644
index 0000000000000000000000000000000000000000..c20825bca01246da614a1031d46bb800dee2a7d8
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_point_labels_shape[rect].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50f102627a9de3b0652019fd95ea37eef81fc8e04464a7ef647c14ae40c1b977
+size 2414
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/add_point_labels_shape[rounded_rect].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_point_labels_shape[rounded_rect].png
new file mode 100644
index 0000000000000000000000000000000000000000..c02d10e6e7b1c3d0d06d0a14ad9e262df9d7487a
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/add_point_labels_shape[rounded_rect].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19e6f804c1430bf88aaee165bca43aa6974d92608a43965641ef09f969bad48a
+size 2437
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/algorithm_add_mesh_methods.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/algorithm_add_mesh_methods.png
new file mode 100644
index 0000000000000000000000000000000000000000..1111cffa5e020df35e38d1ff2af0bb49bc1b5293
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/algorithm_add_mesh_methods.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a306cff79972b438dc8a141dd70a88b4938472124e9cd1dbc2cbff3b6d501ed
+size 31103
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/algorithm_add_mesh_methods_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/algorithm_add_mesh_methods_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..bdd0fc85649669d2d4625e232655cc54985dac29
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/algorithm_add_mesh_methods_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:821846ec97383486cc0e78ff08a504f6349cdeb8d389549d720dce6ec24433c2
+size 6328
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/array_volume_rendering.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/array_volume_rendering.png
new file mode 100644
index 0000000000000000000000000000000000000000..3014084d96afa95682148b375f3996951892d9ea
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/array_volume_rendering.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab1345e9ddf453613065d358be4cd8049f3018586673b07ec2d8f5a97aee336a
+size 57759
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/axes_actor_properties.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/axes_actor_properties.png
new file mode 100644
index 0000000000000000000000000000000000000000..398dab8c1ccede0fe604ce202e18e33d46d97c0d
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/axes_actor_properties.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38a0cad7038920f3ca160a62a8de2278bf42261c73d683a05093aeb1b05d0799
+size 6585
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/backface_params.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/backface_params.png
new file mode 100644
index 0000000000000000000000000000000000000000..902fa1df0aa21257977ca588983aaaec4d225706
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/backface_params.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c23c2456ad6cfec65c6a183305e36c95da11da06c5bfbe65fa6ec06dce9c233b
+size 13779
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/box_axes.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/box_axes.png
new file mode 100644
index 0000000000000000000000000000000000000000..fac71cb1191a4ac4ee4a66556940a1d716722270
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/box_axes.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00e9ec3c29248560a90a2c9a5600eaaed2f8ea546c520e34f05dc0e328552a5b
+size 12861
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/fly_to_right_click_multi_render.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/fly_to_right_click_multi_render.png
new file mode 100644
index 0000000000000000000000000000000000000000..db6f41b198d10486be5bbf1ef3f199ac17ee9883
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/fly_to_right_click_multi_render.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:951f208e3464469b671a7ca81535937d753d0145e4b054d5c521555a05b45835
+size 7357
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/legend_circle_face.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/legend_circle_face.png
new file mode 100644
index 0000000000000000000000000000000000000000..4bea6df8b2c1df7196dfd26ca9a33be6389900ff
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/legend_circle_face.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51b026c35b57cd2009a26e32099325b8fa47798bf1b173cbf511c8d9daa1e7aa
+size 12966
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/legend_rectangle_face.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/legend_rectangle_face.png
new file mode 100644
index 0000000000000000000000000000000000000000..36177620ed58033cebc1065a8fa9183fd5f49dbf
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/legend_rectangle_face.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54be37e279646be6f150ed2bb9c3ae12e1e5db393f743ccba7c02bff6b7e216c
+size 12955
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/legend_subplots.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/legend_subplots.png
new file mode 100644
index 0000000000000000000000000000000000000000..b676f903a4e154e4e9eb613a8115a7e19bcd7cda
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/legend_subplots.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca7c0034baa64138839b90e4e3030daa04a8de4a1857aaf28e68fd99001f1893
+size 8184
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget.png
new file mode 100644
index 0000000000000000000000000000000000000000..0780b4de874f9295ada6e182ac70033c5227106e
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd5138be0271797c25aa8a14e8129723039f77cb425099599a908395af2a080d
+size 18590
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..3da1dec44a1c308221ae72f2b3f3447e2ac2518a
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5c2aa2a69dc2d0ee70dbd2d78a728d6ad76f53900f95569fc33b73303b5d6d0
+size 33332
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget_2.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..a2a80980ec32fe45c7b77257dd337175accddd38
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget_2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ab824f17f3b1d764d33548089c8e753fe92d7586509d72b84d59217d00bcb3f
+size 80127
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget_3.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget_3.png
new file mode 100644
index 0000000000000000000000000000000000000000..242a393fc3cb2a96242f6764f850e6ed161b2054
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/logo_widget_3.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2643f1a496c517f9254fd3a845eb34fa120f2f3d7a9ee7b7454d500cc1058d4e
+size 41244
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/multi_renderers.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/multi_renderers.png
new file mode 100644
index 0000000000000000000000000000000000000000..544df5737e05731993c0bd42d02716b781ed3c27
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/multi_renderers.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9482944c15cb538e445c6a04ba636b354d066163694a974ddb4f8dbbabcb2589
+size 50201
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/multiblock_volume_rendering.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/multiblock_volume_rendering.png
new file mode 100644
index 0000000000000000000000000000000000000000..f742913ed608fdd9e9c48c721e04525bf5027303
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/multiblock_volume_rendering.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8d2d368da714ebe1b2aeb0bd3fc7a7d96e50e9e85aa086f5e2c8068414a02d8
+size 74066
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot[False].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot[False].png
new file mode 100644
index 0000000000000000000000000000000000000000..69bbe581c44ee23f24f51eb11a552fc8c58b11c0
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot[False].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a7fffa5fcf9fddd89d79051fa69aeda39fea4348e2dce456767064ab5df8026
+size 75893
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot[True].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot[True].png
new file mode 100644
index 0000000000000000000000000000000000000000..69bbe581c44ee23f24f51eb11a552fc8c58b11c0
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot[True].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a7fffa5fcf9fddd89d79051fa69aeda39fea4348e2dce456767064ab5df8026
+size 75893
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot[msaa].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot[msaa].png
new file mode 100644
index 0000000000000000000000000000000000000000..69bbe581c44ee23f24f51eb11a552fc8c58b11c0
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot[msaa].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a7fffa5fcf9fddd89d79051fa69aeda39fea4348e2dce456767064ab5df8026
+size 75893
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_algorithm_simple.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_algorithm_simple.png
new file mode 100644
index 0000000000000000000000000000000000000000..3db5065ad0400e9d839818752e361690f829c104
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_algorithm_simple.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1158b6f967b29cd1246d70195e2c1488cc4e09a111f2d75b22854ad97e8fee2f
+size 2309
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_bounds_axes_with_no_data.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_bounds_axes_with_no_data.png
new file mode 100644
index 0000000000000000000000000000000000000000..6500f2bbdf45249cb0685ab81ac09022899cf3ce
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_bounds_axes_with_no_data.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34c5645276acbaa3d1a7bb51b3dbe069d7c8d639c8cb147221f464550703f48a
+size 9797
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_cell.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_cell.png
new file mode 100644
index 0000000000000000000000000000000000000000..e2742f14585ec1ab39e3637666fe16d2cebf04f4
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_cell.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19e8d85bfb10e9bd9b776f4b740b9ce889a5b01d053f77ebf683e7f62dfe3380
+size 24173
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_categories.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_categories.png
new file mode 100644
index 0000000000000000000000000000000000000000..0b72c52a876c468cc778c17220c32d32d81f3713
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_categories.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:099a2ebad31b03dddcbbdf9d6d687629c5a898ba3b56a40382aa413d17689373
+size 20074
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_many_options.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_many_options.png
new file mode 100644
index 0000000000000000000000000000000000000000..28317b4c82050fd0b6adaa9812832ce8263e860a
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_many_options.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbe9e51dc09082762da35d0b9e080f922023d71862fb14ff9a8e4f0ffb5bb433
+size 38227
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_poly_scalars_cell.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_poly_scalars_cell.png
new file mode 100644
index 0000000000000000000000000000000000000000..9820e4214a63c545ca77587d9e1038c993207909
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_poly_scalars_cell.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:941e174b52edaf92446bf5fd84b1de35e88655b17b6af4feb63fc322e701cdd1
+size 21171
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_poly_scalars_opacity.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_poly_scalars_opacity.png
new file mode 100644
index 0000000000000000000000000000000000000000..231af18bcf8235bafaa1e822633ed27949a0101d
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_composite_poly_scalars_opacity.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87e923e36ddcbfe4bfb243dc02a32e8044590be4fec8bfb907cb41bdf2ec05f6
+size 69575
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_cubemap_alone.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_cubemap_alone.png
new file mode 100644
index 0000000000000000000000000000000000000000..20488dfe1be122a2989f660979a8aa99d35139b0
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_cubemap_alone.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e56f797b0aa23e6464e1640553ec1db5786b322abe55de751ef490f4b97799d
+size 20779
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_helper_two_volumes.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_helper_two_volumes.png
new file mode 100644
index 0000000000000000000000000000000000000000..8dd6fb9f30d22b6e53a5cd943efa57f6760ed7bc
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_helper_two_volumes.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a32b0e9e2c4d596abe2af510f87ad8e6ae85b56f953cb40bae216e3a77cd00a
+size 54705
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_helper_volume.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_helper_volume.png
new file mode 100644
index 0000000000000000000000000000000000000000..b0af784cd88eefcfe5785aa14c66286dd24dd688
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_helper_volume.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f61d16126805d1011ad549d9c5f1dd79ce64486717567f84c84452157e86ced3
+size 72708
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_individual_cell.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_individual_cell.png
new file mode 100644
index 0000000000000000000000000000000000000000..13bbbc925e0902ebbe3ef358c5e1ef23bc0bb9bb
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_individual_cell.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e25bfdec872000d044fd22043a9dd7c1b0a93ddee7ab7f0e93e50ded691fd907
+size 3413
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_label_fmt.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_label_fmt.png
new file mode 100644
index 0000000000000000000000000000000000000000..e07a0e0248e87a87ac81514c52d8d14673b7109a
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_label_fmt.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0679946047ec07d8c2e1c8d2976fe97d09385510e949d42a07bfb257f09283c4
+size 28844
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_list.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_list.png
new file mode 100644
index 0000000000000000000000000000000000000000..9ba0409401391cc83a9c83c5b03c69a01d553c5d
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_list.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0047264b9403192a5c7dd91ff54563ef9f996e979a629e9022c8d8aaba21d584
+size 67088
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_remove_scalar_bar.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_remove_scalar_bar.png
new file mode 100644
index 0000000000000000000000000000000000000000..371c7f6c640dd8eb19441cf0b2dcbd424c0d0fb8
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_remove_scalar_bar.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4deebdda31c8cfcb82cff1bddc10b88ab17577d77667127fc2f0cd5625664314
+size 27782
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_rgb.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_rgb.png
new file mode 100644
index 0000000000000000000000000000000000000000..8183066234a99ce31ee220c8b7b4241bdd299f07
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_rgb.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12945e3a7597aa060e007415a172736be9690dd5de97c67047f2ee52cfd7f61c
+size 3409
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-True].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-True].png
new file mode 100644
index 0000000000000000000000000000000000000000..7c9338f7edf3fc4e42be360c538b042c9a5f25c5
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-True].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e88b21746b080121f9b7f9c96f9012e371454510da7c7271dc67b02a2b63c2c
+size 31654
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-back].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-back].png
new file mode 100644
index 0000000000000000000000000000000000000000..7c9338f7edf3fc4e42be360c538b042c9a5f25c5
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-back].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e88b21746b080121f9b7f9c96f9012e371454510da7c7271dc67b02a2b63c2c
+size 31654
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-both].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-both].png
new file mode 100644
index 0000000000000000000000000000000000000000..13af24c073310d8d3b45f550d096c65059fc7d9f
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-both].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d34e34276a18875888d4973b65c3d82c67c4770258f4074e2962f06fe46f082
+size 34873
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-front].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-front].png
new file mode 100644
index 0000000000000000000000000000000000000000..4d04042daf28003c11bbf0f6ebf495e8f631f3aa
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[all-front].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2f6376411159972761af18051cb329a76e4f7a57302defe769d8822c47c33ce
+size 33433
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-True].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-True].png
new file mode 100644
index 0000000000000000000000000000000000000000..57d19cd791736852c611f25a7c9672377f7c7bf4
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-True].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2be9ed50a79a3e6623e527f9b7b0a30e0077e68e4da0eee1eafa0b4c06dadf8e
+size 10354
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-back].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-back].png
new file mode 100644
index 0000000000000000000000000000000000000000..57d19cd791736852c611f25a7c9672377f7c7bf4
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-back].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2be9ed50a79a3e6623e527f9b7b0a30e0077e68e4da0eee1eafa0b4c06dadf8e
+size 10354
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-both].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-both].png
new file mode 100644
index 0000000000000000000000000000000000000000..acbe55b072bf999644fcac9558d0d1aa47eae18c
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-both].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d420464683679ec645d16c2820ce102f5f4bd1f7154e90172fafc5445af6fda
+size 14502
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-front].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-front].png
new file mode 100644
index 0000000000000000000000000000000000000000..9e66900457d2fca0a25adba9330741463f217593
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[back-front].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f594d6a97cf93509869b1f2f9fa828405a45f279ed9c28c7f53cfd2b934dae0
+size 12947
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-True].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-True].png
new file mode 100644
index 0000000000000000000000000000000000000000..a2be8c483ec1ea5a5a6d2df35fcec479d196804a
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-True].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4027dd3fa905387c1d7eda3b7f81f8675aede606989f2f30cd83a260e945ce12
+size 16082
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-back].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-back].png
new file mode 100644
index 0000000000000000000000000000000000000000..a2be8c483ec1ea5a5a6d2df35fcec479d196804a
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-back].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4027dd3fa905387c1d7eda3b7f81f8675aede606989f2f30cd83a260e945ce12
+size 16082
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-both].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-both].png
new file mode 100644
index 0000000000000000000000000000000000000000..01945e2c3674003ba10710e7eec516c486a0276f
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-both].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dde8c4bced5d786507a16cc8fe780d22947cab13bae57a31f3c506af38fa3c0
+size 19263
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-front].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-front].png
new file mode 100644
index 0000000000000000000000000000000000000000..617c84ee0e472325bb9ae545a1d313efae6667ec
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[front-front].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71f974f00272bc724acced90608db5cf64f26d6f0d9eba957362a42d001c381a
+size 17570
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-True].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-True].png
new file mode 100644
index 0000000000000000000000000000000000000000..57d19cd791736852c611f25a7c9672377f7c7bf4
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-True].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2be9ed50a79a3e6623e527f9b7b0a30e0077e68e4da0eee1eafa0b4c06dadf8e
+size 10354
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-back].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-back].png
new file mode 100644
index 0000000000000000000000000000000000000000..57d19cd791736852c611f25a7c9672377f7c7bf4
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-back].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2be9ed50a79a3e6623e527f9b7b0a30e0077e68e4da0eee1eafa0b4c06dadf8e
+size 10354
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-both].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-both].png
new file mode 100644
index 0000000000000000000000000000000000000000..acbe55b072bf999644fcac9558d0d1aa47eae18c
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-both].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d420464683679ec645d16c2820ce102f5f4bd1f7154e90172fafc5445af6fda
+size 14502
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-front].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-front].png
new file mode 100644
index 0000000000000000000000000000000000000000..9e66900457d2fca0a25adba9330741463f217593
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[origin-front].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f594d6a97cf93509869b1f2f9fa828405a45f279ed9c28c7f53cfd2b934dae0
+size 12947
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-True].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-True].png
new file mode 100644
index 0000000000000000000000000000000000000000..26ffd3a616c5e494f1ca08c250dbcc5a5f400380
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-True].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:514d304954239518b3ede06afa2f864ab305f9c3f4be2bf3435d7ca70891eb79
+size 13033
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-back].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-back].png
new file mode 100644
index 0000000000000000000000000000000000000000..26ffd3a616c5e494f1ca08c250dbcc5a5f400380
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-back].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:514d304954239518b3ede06afa2f864ab305f9c3f4be2bf3435d7ca70891eb79
+size 13033
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-both].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-both].png
new file mode 100644
index 0000000000000000000000000000000000000000..9356dff0c9cb3099b17a3550518beb7363ac5aa0
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-both].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07763aee830f4dd4da541b31c1dd13d81ca4157799106f45accb79b1aa02cc34
+size 17327
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-front].png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-front].png
new file mode 100644
index 0000000000000000000000000000000000000000..94bc2d10be0703b7c5a9069f0c1ad9565e458820
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_bounds_params[outer-front].png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8514a89fae9b3cfd88639ee6893e58359b7ce664b20631f93451d56a970b872e
+size 15625
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_grid.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_grid.png
new file mode 100644
index 0000000000000000000000000000000000000000..9a1686f78c8dfd066ee153c3cd24bf50baa85688
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_grid.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bce34b020d5e002ba9412a56632d78477ecdf76bd7e9a05ca04e461f0497b24
+size 25826
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_grid_with_mesh.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_grid_with_mesh.png
new file mode 100644
index 0000000000000000000000000000000000000000..fc9ae385fa8826a7701515ce7ea27797344ab207
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_grid_with_mesh.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09fd6e47b3f1983aea3098bd716560414d340fddc7c6fd841f3f7baf509fe562
+size 12727
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_vertices.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_vertices.png
new file mode 100644
index 0000000000000000000000000000000000000000..8643da9794ff7cd509c5c0850f2947750d15dcc0
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_vertices.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34b93873e34e52d1ef2d25a5a1fdb2f86b0f3033c392648d07999aa4bbaecdae
+size 14648
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_vertices_2.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_vertices_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..27e07bd761cc5a6acb749fea3bb70d920cb75477
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_show_vertices_2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1df7765320448b00860e465cdd8cba440e67e99925df239da020db302d672313
+size 16547
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_silhouette_non_poly.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_silhouette_non_poly.png
new file mode 100644
index 0000000000000000000000000000000000000000..9afe4769e0c60b6037de6cd4149000bba2bdb815
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_silhouette_non_poly.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db1e28b9fa49627d19e52e305d34228f9ffe5b2aa178a56654a2e7994e276246
+size 5623
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_texture_alone.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_texture_alone.png
new file mode 100644
index 0000000000000000000000000000000000000000..8da567a8d1f35a8cc6a55eb9e4392f815d5609cc
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_texture_alone.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5fc44a8c35152bd80a8d515c940bd70a099725afc26527a67bfcd09ded20d86
+size 10089
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_texture_associated.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_texture_associated.png
new file mode 100644
index 0000000000000000000000000000000000000000..e304085a2e3ffa9dc5b2c5c1f6212b111f800560
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_texture_associated.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7f5d917e35aeff2b7940a8e74e1e22c395166178c4709c6d72d0f00df28d12d
+size 68129
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_texture_flip_x.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_texture_flip_x.png
new file mode 100644
index 0000000000000000000000000000000000000000..84b46c63805cfca0a897cbc5e5fb9c94d45d0ec1
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_texture_flip_x.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb887aea9af4cc0a5acd7b4d0eff13f91a37b49b94e0f6e5e704324effaf5af8
+size 10380
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_rgba.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_rgba.png
new file mode 100644
index 0000000000000000000000000000000000000000..5e6bd20d06affcf146b612f80bb8c111c4aa7299
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_rgba.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1526719f3b0e8f9a00a587ade13be68a57e373cfaed79b55ef35ca67f26fe463
+size 64651
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_rgba_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_rgba_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..5e6bd20d06affcf146b612f80bb8c111c4aa7299
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_rgba_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1526719f3b0e8f9a00a587ade13be68a57e373cfaed79b55ef35ca67f26fe463
+size 64651
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_ugrid.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_ugrid.png
new file mode 100644
index 0000000000000000000000000000000000000000..860fc6580c05f2896df44eee0a59cb3d64ee2a3c
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_ugrid.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab9f027552152b51382688f2402dcff05c5d97ef55f76aa1c5e72a69e973db7c
+size 30084
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_ugrid_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_ugrid_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..82faba97914124e79972f161f4707c98395a77e3
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plot_volume_ugrid_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91d0e409f9ac54b09af5194cdb7f6642efbfb43aba0fbb5aa00fbb8a655e6a67
+size 62548
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_image_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_image_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..0e47ed58a89aa49c80cc40026994522fb129ee8b
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_image_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcbf0854fbd1fe65da3f1f05c7bd73241dacb2ca9dfe3f68f0a593c3296f5df2
+size 1388
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_line_point_smoothing.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_line_point_smoothing.png
new file mode 100644
index 0000000000000000000000000000000000000000..3cc070852b32812d8890698ca32ac852175acc50
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_line_point_smoothing.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e361820018dda30195ade0506e717877af7f46a395bab624c213251a0ddb963
+size 40775
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_render_callback.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_render_callback.png
new file mode 100644
index 0000000000000000000000000000000000000000..0e47ed58a89aa49c80cc40026994522fb129ee8b
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_render_callback.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcbf0854fbd1fe65da3f1f05c7bd73241dacb2ca9dfe3f68f0a593c3296f5df2
+size 1388
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_scale.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_scale.png
new file mode 100644
index 0000000000000000000000000000000000000000..ec9d5821b2d140eeaddb3b3a3cf3ab544256028b
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_scale.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af496e5e8aacc332057aca1a737d00437c49fea63277cfc608f0ce5e69e79103
+size 17233
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_scale_2.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_scale_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..d37500276c6d348528a2c20ee8386bbf0279e5c8
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_scale_2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5c99e9e48a1dd8a74da46e89ccffb076c38ba95f19aae9c175e769885f29e0c
+size 16447
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_volume_add_scalars.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_volume_add_scalars.png
new file mode 100644
index 0000000000000000000000000000000000000000..4f9eb55a2abc563c2e9193d02e46ba6495f39bd7
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_volume_add_scalars.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10de348b4f2552180ce54dfdaa4ed09c67644d33e842bab8ef1430f8ac020ada
+size 49328
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_volume_add_scalars_log_scale.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_volume_add_scalars_log_scale.png
new file mode 100644
index 0000000000000000000000000000000000000000..7b3cf4e6cf2100852007e0b5ab6054752e9f1fa7
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_volume_add_scalars_log_scale.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09e13eafbf5b8f1a586284fdf275d38ce041ccb297b11f2e3add3aa990a92237
+size 13626
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_volume_lookup_table.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_volume_lookup_table.png
new file mode 100644
index 0000000000000000000000000000000000000000..78b64200e9e7323fbb82c82c07d56bf3bd2e062c
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/plotter_volume_lookup_table.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34f539b9bcea4fe3fed28907845e6ed3ac7726a568dda45f1b0704edb5f34e28
+size 67134
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/pointset_to_polydata_algorithm.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/pointset_to_polydata_algorithm.png
new file mode 100644
index 0000000000000000000000000000000000000000..97bcd706d9d3149e238754d307c2f6da7f02d26c
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/pointset_to_polydata_algorithm.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13815d9c8e2d35dd459d8843197b8c43736041915660b3ce0de1ffd56d7f9b12
+size 3814
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/poked_subplot_context.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/poked_subplot_context.png
new file mode 100644
index 0000000000000000000000000000000000000000..f76b0cc0448cd527d1a264d11cd3eaed822abedb
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/poked_subplot_context.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05215e65c171b8d3e410052395f8a9773c19c423312b8c1723e3cac3158a8954
+size 31646
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/property_pbr.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/property_pbr.png
new file mode 100644
index 0000000000000000000000000000000000000000..9da3f46d810047062e24b4fef01dd6fa8b4cbbd9
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/property_pbr.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:342eecd49838976b5eaf85b192019542b4c37566014ea909880efe29dbde8795
+size 40120
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/radial_gradient_background.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/radial_gradient_background.png
new file mode 100644
index 0000000000000000000000000000000000000000..e094fb4e1d5df6eb4a2974e3dd618b7d0bc75a91
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/radial_gradient_background.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc9561236421b75a3fb61a5ee2dfb01e5b3f743a8a77142ee0ba825254393767
+size 1407
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/radial_gradient_background_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/radial_gradient_background_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..f6b6c8a8c87adbffe4981027000b3708bbb69f48
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/radial_gradient_background_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8568778d9f0f5d3198b2a8341ae1bc5e30d492f3660c752e785f6f4cb74e794d
+size 20813
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/radial_gradient_background_2.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/radial_gradient_background_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..10f5132142dec8e431249a0f1dce5568edfc21f5
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/radial_gradient_background_2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ed531de5eae847dc4411eb93d82a1ef09d300fd4511b8e142d6f4c3010feb05
+size 22451
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/remove_vertices_actor.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/remove_vertices_actor.png
new file mode 100644
index 0000000000000000000000000000000000000000..0e47ed58a89aa49c80cc40026994522fb129ee8b
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/remove_vertices_actor.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcbf0854fbd1fe65da3f1f05c7bd73241dacb2ca9dfe3f68f0a593c3296f5df2
+size 1388
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/remove_vertices_actor_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/remove_vertices_actor_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..0e47ed58a89aa49c80cc40026994522fb129ee8b
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/remove_vertices_actor_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcbf0854fbd1fe65da3f1f05c7bd73241dacb2ca9dfe3f68f0a593c3296f5df2
+size 1388
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/scalar_bar_actor_removal.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/scalar_bar_actor_removal.png
new file mode 100644
index 0000000000000000000000000000000000000000..0e47ed58a89aa49c80cc40026994522fb129ee8b
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/scalar_bar_actor_removal.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcbf0854fbd1fe65da3f1f05c7bd73241dacb2ca9dfe3f68f0a593c3296f5df2
+size 1388
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/screenshot_notebook.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/screenshot_notebook.png
new file mode 100644
index 0000000000000000000000000000000000000000..a096b58d99fc85b8e0887c5cc902ae38d4ad2fff
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/screenshot_notebook.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4826cf1bcee0d5a6322434ff27e80eedaba583f3114af4ae1714ae198f889df
+size 4344
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/set_environment_texture_cubemap.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/set_environment_texture_cubemap.png
new file mode 100644
index 0000000000000000000000000000000000000000..2478afa89eee27e45d196869a646b20ada32a381
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/set_environment_texture_cubemap.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e18ef2ea0a2bc899902624d7930a287b6621d61178b14b89dc016e8d2058a233
+size 54707
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/set_focus.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/set_focus.png
new file mode 100644
index 0000000000000000000000000000000000000000..a723a6a60816f875d97b950d8c89f5de0d6e5cf2
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/set_focus.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70ee9063e8330eb214591ebfe11c7cc862cfdac2dc93da54896988e085de90f0
+size 8575
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/set_viewup.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/set_viewup.png
new file mode 100644
index 0000000000000000000000000000000000000000..68383d1e699d665bbf1a7819040b6c653b253f67
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/set_viewup.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f934bdbb4b56005be2eb0f775f6ba9042bdf5d8d2aa2d0f78e7fe11ca1719d0
+size 4030
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/show_bounds_n_labels.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/show_bounds_n_labels.png
new file mode 100644
index 0000000000000000000000000000000000000000..cdaa6d22f44f1a25208af869ee03e904f26fb17e
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/show_bounds_n_labels.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a892b0e1488736bb7c812fd33f73c1c69289cfbeb5d95ecd34dbe342018d2b4d
+size 8242
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/show_bounds_no_labels.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/show_bounds_no_labels.png
new file mode 100644
index 0000000000000000000000000000000000000000..361bb519501fa9ded1ad7354c9852b877e86df16
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/show_bounds_no_labels.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9d21dd95c6a2232ea5cfc8b98e75910d6773f8aa4c8d7b1d3986a3869d875d1
+size 7165
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/ssao_pass.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/ssao_pass.png
new file mode 100644
index 0000000000000000000000000000000000000000..b5cedf9069a532d5e8ec5aa80d1dc877f68bafb1
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/ssao_pass.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:239ce931a8c9c1376a84275696b6254ee348ab6c109c2c73e0624fe659c51028
+size 66977
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/ssao_pass_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/ssao_pass_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..fbb26764b444a9186087579e6b0b73b83408767f
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/ssao_pass_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:595b9570b3f59456cef4995adcfc8e303dbe90c63929aaba79c4614efe6258ca
+size 4740
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/update_scalar_bar_range.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/update_scalar_bar_range.png
new file mode 100644
index 0000000000000000000000000000000000000000..b224415427b65b2bbab45e67e153b1d322c8a084
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/update_scalar_bar_range.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acc7aa46dadb969cd2bf2912d8b9363ad7e42f81d472f41e22d5b15f0865aef8
+size 29240
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/user_annotations_scalar_bar_volume.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/user_annotations_scalar_bar_volume.png
new file mode 100644
index 0000000000000000000000000000000000000000..1747626877be64c0950673dd72de1c9e3ac54d86
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/user_annotations_scalar_bar_volume.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d1c960977859c6a4cf11d638990093941de82081036cafc2a44b1f99348acbb
+size 58610
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/user_logo.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/user_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..33f69ef4760a8a0a8bbde188ec1c0a8d0e5d82b4
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/user_logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4327d2f5972dc4f09b760dd8b5fb062b5a23d57c08a48d35fec7d3c1baa4e4b7
+size 3689
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array.png
new file mode 100644
index 0000000000000000000000000000000000000000..ba0ae7b3c92d7c60afacc8e69c5645dc30c81428
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d329eb5a63c73cd0aa4e3d17f808b159c03a024ebcfc14606e221d299c92195
+size 14597
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_cells.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_cells.png
new file mode 100644
index 0000000000000000000000000000000000000000..8a6df5655931e2963931bc33fb9b87bcd48f7aa4
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_cells.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2db4b8c0624361b368be1d8ea82a2271f540f4768a8d9c67b2304ca5662c4ee9
+size 5413
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_cells_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_cells_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..ad94a823fcbc3523bb40c6094a64a57de1e9bf0b
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_cells_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:150c5070b84dfe726653728f93119e1e3f98fc640047a7dd501c8ac5186a86cf
+size 5410
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_points.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_points.png
new file mode 100644
index 0000000000000000000000000000000000000000..14351de08f44ec91005b5a7c058f6cdcd451a000
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_points.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d2032cf8adddfe4fc19128ad74e346bfb5adb5f7d8e5e732a2a76db5c081fbb
+size 22146
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_points_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_points_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..4cd1499380250f7750204242c8f2b1ad719519cb
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/vector_array_with_points_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1be8097a5ff97d04128726c6cd69f6a187acb20ebea2e1b5ef57ddbf592c5d7
+size 11492
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_from_helper.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_from_helper.png
new file mode 100644
index 0000000000000000000000000000000000000000..d9dbed76a0e4369015892aa676976a0d817c3b24
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_from_helper.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dc555ca9072bc5b8d899c352994592b7bb9b9fec68246bad2cc53fd6a3f410a
+size 58278
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_from_plotter.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_from_plotter.png
new file mode 100644
index 0000000000000000000000000000000000000000..dd7561893418feae4f4b43ecccfc880f6911bfeb
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_from_plotter.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:324aa17856766cb9b421ee6545e57c4597f31d0d2c941499f5a0f7e1cf70a00e
+size 22174
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_rectilinear.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_rectilinear.png
new file mode 100644
index 0000000000000000000000000000000000000000..647e09393454ea36c54b4fdd8a1c71dafad2d3fc
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_rectilinear.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f40fb9840d973f2032dbf3195387efc5ed4b8922ef8466f9d5dd96f76abcc36
+size 25793
diff --git a/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_rectilinear_1.png b/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_rectilinear_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..cf55af96938eba46deb8861f48fc66818289af55
--- /dev/null
+++ b/testbed/pyvista__pyvista/tests/plotting/image_cache/volume_rendering_rectilinear_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c5f035e527bf54b8370334d9b4266061c6f8a8ec762e6e2a80f888728630b59
+size 32270
diff --git a/testbed/roboflow__supervision/docs/assets/supervision-lenny.png b/testbed/roboflow__supervision/docs/assets/supervision-lenny.png
new file mode 100644
index 0000000000000000000000000000000000000000..da2ac5411817193f50eb8bc46ae957701e62eff5
--- /dev/null
+++ b/testbed/roboflow__supervision/docs/assets/supervision-lenny.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bad9588cc15b69bb96918f88ff8d0919634397c64c1c4094d4a271261586a5d
+size 19080
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/anaconda.png b/testbed/scikit-learn__scikit-learn/doc/images/anaconda.png
new file mode 100644
index 0000000000000000000000000000000000000000..3d599e04e17e968d13ce2f37098d55674c40b029
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/anaconda.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6694ab3144956514796b37b04b32d415ff924ed6405a6fb180d0ceae980812ed
+size 39373
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/axa-small.png b/testbed/scikit-learn__scikit-learn/doc/images/axa-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..454417f94c19b6f7266bbddce80b2980a9d84fe0
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/axa-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d23a735c70b9f074bac14709349e0eda6a4adcf7c825dc14748d03da533f4a2
+size 11616
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/axa.png b/testbed/scikit-learn__scikit-learn/doc/images/axa.png
new file mode 100644
index 0000000000000000000000000000000000000000..0c8daa94bb0e6552562fee07e6a2a009ff35b8c1
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/axa.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d75d8cdbf40fad5a20e6f4ee744c77cb580571f6d52cf4fa9598c45364c4376a
+size 17847
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/bcg-small.png b/testbed/scikit-learn__scikit-learn/doc/images/bcg-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..58814f6d4ca6259a4451833752617b78024032f9
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/bcg-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb5ddf5df1733987d0fb0c1e964a9a3d96afb6245b1c1892f5875087bd019aa8
+size 17039
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/bcg.png b/testbed/scikit-learn__scikit-learn/doc/images/bcg.png
new file mode 100644
index 0000000000000000000000000000000000000000..fa664e6d5c59a08c7ebd7939ff5ae877072cc5aa
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/bcg.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d94922a756c7daa24071652f4a28ce7127665c317adf7671fca5b9778ee9c68
+size 31049
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/bnp-small.png b/testbed/scikit-learn__scikit-learn/doc/images/bnp-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..4473d42853053162b8a2a8099186f5589092b942
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/bnp-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d97d77296f5ff99f280b5212156302095f29464dffa11a1e80d69d048c050b7a
+size 12497
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/bnp.png b/testbed/scikit-learn__scikit-learn/doc/images/bnp.png
new file mode 100644
index 0000000000000000000000000000000000000000..fd4d432606824122455067c748cb2c36b176ce4c
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/bnp.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c56dfec1031c0be8b2a922ea9a95a5c0340730d79a63ab5130b4ef388e9a6cb
+size 21156
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/cds-logo.png b/testbed/scikit-learn__scikit-learn/doc/images/cds-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..f8feca70c7e9afa62b88bebde079ad456a958c91
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/cds-logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01e173362e1df894e433c05367563295e2bffa6b09c2535506ddb8920bc599b0
+size 13205
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/columbia-small.png b/testbed/scikit-learn__scikit-learn/doc/images/columbia-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..1cac85d9a48c0f65d7c2cf954d159cfb3b4ae435
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/columbia-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:767db712162ec6d2f17583e4204bb82f5b6abedb60548d529eb62019f7f96646
+size 1170
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/dataiku-small.png b/testbed/scikit-learn__scikit-learn/doc/images/dataiku-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..44575617023a604bb21012a0ab84872e2dc1ff96
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/dataiku-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3da5f03b38ba755fa3305494bce58ce687eeb15184a2baf5d4a665c2c7eb523b
+size 6101
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/dataiku.png b/testbed/scikit-learn__scikit-learn/doc/images/dataiku.png
new file mode 100644
index 0000000000000000000000000000000000000000..a222c98d45f7dc95333874ae8179db818ff29c39
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/dataiku.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd7d113468a91529068841cdfbeee87b3c14f1a5acdb41901cee58c9e47d4366
+size 9040
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/dysco.png b/testbed/scikit-learn__scikit-learn/doc/images/dysco.png
new file mode 100644
index 0000000000000000000000000000000000000000..166ec8c77b4e8d3d0023d120adf0fd41ac9c6dd3
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/dysco.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ce1f0d64497bf008c4479d1b319d3d5e351a4a8eb0924ff294e1042c8e92da6
+size 17842
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/fnrs-logo-small.png b/testbed/scikit-learn__scikit-learn/doc/images/fnrs-logo-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..bbac89ec06218e4b218884c87f6f3bade5d6f6c0
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/fnrs-logo-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fd6607062ac69b2d47c96b52ebcb6622986b422c6af473ce890caab5f59de16
+size 1110
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/fujitsu-small.png b/testbed/scikit-learn__scikit-learn/doc/images/fujitsu-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..546d9dc29419c1f1b563664361f1c3f677580dd0
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/fujitsu-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1a9417fe20471ccabd516b54e8863cebc5cc877836ae46a369582de48815b87
+size 6618
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/fujitsu.png b/testbed/scikit-learn__scikit-learn/doc/images/fujitsu.png
new file mode 100644
index 0000000000000000000000000000000000000000..f4c819983ae6400205759603aa4a06680810cd1a
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/fujitsu.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96d82107e46b249dec6ef1e87eba4498016fe13b8d86b04288ddf011cfd97ab9
+size 18012
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/google-small.png b/testbed/scikit-learn__scikit-learn/doc/images/google-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..c541daaf818747a3642a0d127f84d75f08b70aa7
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/google-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bd313e0bc284ea1a53437493fe9be27178edeba071c98b06847ef680f9da10e
+size 4692
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/grid_search_cross_validation.png b/testbed/scikit-learn__scikit-learn/doc/images/grid_search_cross_validation.png
new file mode 100644
index 0000000000000000000000000000000000000000..0eb0c75bfbe5d816867f2f560b1fbc7700e48bb3
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/grid_search_cross_validation.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd970f8235a3d8cd88abadf56bd698876221aa45fc379339001868e1821b70dd
+size 45148
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/grid_search_workflow.png b/testbed/scikit-learn__scikit-learn/doc/images/grid_search_workflow.png
new file mode 100644
index 0000000000000000000000000000000000000000..2d7916324683d94717d0218baccb426bf30729f5
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/grid_search_workflow.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cc32da186e44c7c00297f32b62e370865050a8f173f7c6b8864bbf9adad75b9
+size 100447
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/inria-logo.jpg b/testbed/scikit-learn__scikit-learn/doc/images/inria-logo.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..50aa688d0f963ad724e8dc8c2232661f17a5fa8a
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/inria-logo.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e90ed06303f12a91d264dc064a4323e9b34060c2bddbd45cbba6042d72b222e
+size 26245
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/inria-small.png b/testbed/scikit-learn__scikit-learn/doc/images/inria-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..a90a085b403e1b592fe6e58f5081f8bd654eeeda
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/inria-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72ba5b196711d123d567b128791679ae3dcd5faea8cf16f83e951b384a887925
+size 7105
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/intel-small.png b/testbed/scikit-learn__scikit-learn/doc/images/intel-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..1fc1e1d5268bf335d2bd0791bf2f09b84c97db76
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/intel-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de380e9d73cc804445dfeff3dc49e643b3417c2c909631376aeb9e6553c4914b
+size 9623
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/intel.png b/testbed/scikit-learn__scikit-learn/doc/images/intel.png
new file mode 100644
index 0000000000000000000000000000000000000000..bc515c79aa40b937bed5f22c15db5cb3cf3a4e3e
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/intel.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d93a8f8758e8d01754c7a7e7dc537e72fec071d3682bec6149baafae263ef62
+size 15019
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/last_digit.png b/testbed/scikit-learn__scikit-learn/doc/images/last_digit.png
new file mode 100644
index 0000000000000000000000000000000000000000..e8dd3c782dafec01e2e21acc9b2440e380d52ece
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/last_digit.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a90eaf7f6fb842a22a02117235fded7d848b6b8051d3819d88a9870cc5574d55
+size 3037
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/lda_model_graph.png b/testbed/scikit-learn__scikit-learn/doc/images/lda_model_graph.png
new file mode 100644
index 0000000000000000000000000000000000000000..e08971174d1c2e6477fe9d50f38e5e9d617afac0
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/lda_model_graph.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:785d90359fcee9db45b27f7edf7adff5f9ad24e817201ea5c66172a58047840b
+size 13032
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/logo_APHP.png b/testbed/scikit-learn__scikit-learn/doc/images/logo_APHP.png
new file mode 100644
index 0000000000000000000000000000000000000000..cdbd1245242f0be9aacbbd1b4c2de8f3c98192a1
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/logo_APHP.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87bb8894c7888130703d843c7088f306b077db50c9bd1408e38d344a51540757
+size 16452
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/logo_APHP_text.png b/testbed/scikit-learn__scikit-learn/doc/images/logo_APHP_text.png
new file mode 100644
index 0000000000000000000000000000000000000000..714038ad045eb372f046bb0924aee1a8bc69cdb8
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/logo_APHP_text.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31896aa3c91d0c07af694299f28d9ffa4304dff9e9041f95871c95a3a6872fb9
+size 30396
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/microsoft-small.png b/testbed/scikit-learn__scikit-learn/doc/images/microsoft-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..cbb4f1bc156217c2e31eaa210a92341b7d7ff5c1
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/microsoft-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f520db137f9e0cb307e6c75b248bf51fc49afae3b1bf12978c9d3877bdb9f2c
+size 8047
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/microsoft.png b/testbed/scikit-learn__scikit-learn/doc/images/microsoft.png
new file mode 100644
index 0000000000000000000000000000000000000000..c093a34c252fe9f8d9a816ab86ab6b4ae36c6546
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/microsoft.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5e48dd6f718e064b1b29c00cdf408842df6c1f254401a9438964c896035b65c
+size 10320
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/ml_map.png b/testbed/scikit-learn__scikit-learn/doc/images/ml_map.png
new file mode 100644
index 0000000000000000000000000000000000000000..b14b7cc3e1d5e1e2cf298e298071fe1647b670d9
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/ml_map.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:138688c3a6c61a07bfbbc3b8fcf9d7b0de4bfe0bfb50c6b2bad8410d8cce86d6
+size 761071
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/multi_org_chart.png b/testbed/scikit-learn__scikit-learn/doc/images/multi_org_chart.png
new file mode 100644
index 0000000000000000000000000000000000000000..ef213188c5af61490fd07a4d8fe6c4bdfc752931
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/multi_org_chart.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a12a1091bc41f5114ca2b565a1b24f3e0cf361c0a28ec2c3e92ea549e728d5c9
+size 26546
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/multilayerperceptron_network.png b/testbed/scikit-learn__scikit-learn/doc/images/multilayerperceptron_network.png
new file mode 100644
index 0000000000000000000000000000000000000000..3e249de21cd59bdcc3df25f572634859c7774757
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/multilayerperceptron_network.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b42a0a2d78d694228bcd5c239c5f2f4cee2dc6a7284cded90117153810bc668
+size 89381
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/no_image.png b/testbed/scikit-learn__scikit-learn/doc/images/no_image.png
new file mode 100644
index 0000000000000000000000000000000000000000..01f027e51d8fc07236eb4b6bc9485c6c33b5db4c
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/no_image.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27b8fc9492dd62400179aa80668486731484df204fb87e045eb85601dfb0963b
+size 4315
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/nvidia-small.png b/testbed/scikit-learn__scikit-learn/doc/images/nvidia-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..24204325a58eb3944ea8f998c55cca0b821a9b7c
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/nvidia-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29a6449fb9d679f309b875aa5efd245c65cec211b6530e207b5cda8f69faa0eb
+size 8070
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/nvidia.png b/testbed/scikit-learn__scikit-learn/doc/images/nvidia.png
new file mode 100644
index 0000000000000000000000000000000000000000..009d6f12d772bf64da211478eb70f74bb017f983
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/nvidia.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adc3b4b2c59d9efa5d00d33eb1192ed4448bf37d641711c51fe7034f620a48ec
+size 10764
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/nyu_short_color.png b/testbed/scikit-learn__scikit-learn/doc/images/nyu_short_color.png
new file mode 100644
index 0000000000000000000000000000000000000000..8fedf5ec04aa0f98f4592b1160b9308f93a7fb23
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/nyu_short_color.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c86d683b902e55d764f78de0b14116e83f2e6dbe0e0c066e0e58a7e42cd915c1
+size 5485
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/plot_digits_classification.png b/testbed/scikit-learn__scikit-learn/doc/images/plot_digits_classification.png
new file mode 100644
index 0000000000000000000000000000000000000000..acfd77eda4afb4fa22621fc99a613f8b5f3afa77
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/plot_digits_classification.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6c350afc70e94b4c08fe0db642ebd1970d913676e9ac1ee030506817931dd8d
+size 31108
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/plot_face_recognition_1.png b/testbed/scikit-learn__scikit-learn/doc/images/plot_face_recognition_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..60b4e12d52e1d1ea76514c375a4d6e3ea809b52c
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/plot_face_recognition_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1f2842e60d1791d9466882e4110cd0073370ca8300e35426aa21950f851cb5f
+size 124459
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/plot_face_recognition_2.png b/testbed/scikit-learn__scikit-learn/doc/images/plot_face_recognition_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..2f4ed44693accb33314ca9f84a9c2c26268b3f0c
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/plot_face_recognition_2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1026f0719a1b38262e09466bc5521fe5bae871bf0a37db7be82d03f428baba1e
+size 86623
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/png-logo-inria-la-fondation.png b/testbed/scikit-learn__scikit-learn/doc/images/png-logo-inria-la-fondation.png
new file mode 100644
index 0000000000000000000000000000000000000000..9c820408ce6cf9fe27bc51a6de31ce64ee128fb4
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/png-logo-inria-la-fondation.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcedec122cbc0252c3d7ad59ee97c40864cd1d95afb2638710b68603b0e3bade
+size 6152
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/rbm_graph.png b/testbed/scikit-learn__scikit-learn/doc/images/rbm_graph.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0546a9140fcb0468983aa867133a1d99bf82227
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/rbm_graph.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec1c0e4f83fe7882e35b9f5dee617dd4cb04347ee119db0cc040f2400d1db845
+size 15495
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/scikit-learn-logo-notext.png b/testbed/scikit-learn__scikit-learn/doc/images/scikit-learn-logo-notext.png
new file mode 100644
index 0000000000000000000000000000000000000000..65a66d00eba86a60edc1b46f9193c05294cafef4
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/scikit-learn-logo-notext.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fd136c52a3f6f01aba169333d3c2c9fa4185f4bf1f5e8adb04ce71a387cab63
+size 8053
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/scikit-learn-logo-small.png b/testbed/scikit-learn__scikit-learn/doc/images/scikit-learn-logo-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..67cdd0ed9172f3ef16f9934cccdf8bed49f6a38c
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/scikit-learn-logo-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6910d3b64c34fc604219bf44ec92c1cc35de129da959ae77a410b40082ed6cfc
+size 5468
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/sloan_banner.png b/testbed/scikit-learn__scikit-learn/doc/images/sloan_banner.png
new file mode 100644
index 0000000000000000000000000000000000000000..34df3033137871d4ad2dec075e48841fcb19a7df
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/sloan_banner.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70330cd5bb361d17169a18cc7b31ed2ba0b4770aef8ffc0aeffce0782825cd5f
+size 29042
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/sloan_logo-small.png b/testbed/scikit-learn__scikit-learn/doc/images/sloan_logo-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..16529aaad3541603247089f2ce67c46e4646fd63
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/sloan_logo-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57f6e8efd53c389e24095eb5ecbf952135cba10241007bfa48583fd95acce0a7
+size 2236
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/sydney-stacked-small.png b/testbed/scikit-learn__scikit-learn/doc/images/sydney-stacked-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..b6041d38753e6aecd127a7ea7cbcb7dc7e5b869f
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/sydney-stacked-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5634d343c3165fd8875ebecdf7eb05422866c26cfbded80b24bff336a8a9af90
+size 1728
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/telecom-small.png b/testbed/scikit-learn__scikit-learn/doc/images/telecom-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..f8903e604135752b2a853b4e425386f5568a5bf9
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/telecom-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e2bc37c81f0e97d967d330533ec488e1627e698cea57e06db5f2a6e9583e098
+size 3779
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/zalando_logo-small.png b/testbed/scikit-learn__scikit-learn/doc/images/zalando_logo-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..c693183bfaca2403263f8332e773db01c9754f0d
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/zalando_logo-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d478730631a88c33d905092537bca30993eaf9cd6978c7c9fd1fedb534fed87
+size 12753
diff --git a/testbed/scikit-learn__scikit-learn/doc/images/zalando_logo.png b/testbed/scikit-learn__scikit-learn/doc/images/zalando_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..ca4f05980c94c1d1763d49f535a70096fb785649
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/images/zalando_logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25f4c8042c0a3ec55909294354ec54a8eb0f623bbbf4a132543b3374eb555000
+size 11171
diff --git a/testbed/scikit-learn__scikit-learn/doc/logos/identity.pdf b/testbed/scikit-learn__scikit-learn/doc/logos/identity.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..7850e12fe17f97e5eef525c508a8685124ea2648
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/logos/identity.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43e3a5bf59d73be0350e21f7a1a4da0caa5a6e41c5456cce9e2f66d677e9104e
+size 120865
diff --git a/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo-notext.png b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo-notext.png
new file mode 100644
index 0000000000000000000000000000000000000000..65a66d00eba86a60edc1b46f9193c05294cafef4
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo-notext.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fd136c52a3f6f01aba169333d3c2c9fa4185f4bf1f5e8adb04ce71a387cab63
+size 8053
diff --git a/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo-small.png b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..67cdd0ed9172f3ef16f9934cccdf8bed49f6a38c
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6910d3b64c34fc604219bf44ec92c1cc35de129da959ae77a410b40082ed6cfc
+size 5468
diff --git a/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo-thumb.png b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo-thumb.png
new file mode 100644
index 0000000000000000000000000000000000000000..036ea474d4e8193c6488c48055c93e6e74669846
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo-thumb.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:768040dbb4abbea32d8e1d192b2d83eb6da6ab1aff28aca4c81b7af321b0f219
+size 7069
diff --git a/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo.bmp b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..0d58043bd23321778ec893ed94cfe7bd9448f972
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo.bmp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df76db4d06401d98a276e6b99488fd2c3fc28b25e6628bd1698804834aad0072
+size 37902
diff --git a/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo.png b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..8800bcc7d5f3e89dee5ac2cb1abbe566d9685e82
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/logos/scikit-learn-logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adbc9a763bb131c04a949f3427ef201ead9e0e50d4cbf37409b441537c9d32e0
+size 10879
diff --git a/testbed/scikit-learn__scikit-learn/doc/modules/glm_data/lasso_enet_coordinate_descent.png b/testbed/scikit-learn__scikit-learn/doc/modules/glm_data/lasso_enet_coordinate_descent.png
new file mode 100644
index 0000000000000000000000000000000000000000..5d2e38bbb311383001e7d0ccad4c7f29f2c62a53
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/modules/glm_data/lasso_enet_coordinate_descent.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65eb95e0f2b1bdc8f75a1ca5ddcbb2f70dc9bca1cfa35801a5906ebf2782677c
+size 28954
diff --git a/testbed/scikit-learn__scikit-learn/doc/modules/glm_data/poisson_gamma_tweedie_distributions.png b/testbed/scikit-learn__scikit-learn/doc/modules/glm_data/poisson_gamma_tweedie_distributions.png
new file mode 100644
index 0000000000000000000000000000000000000000..213d2f2b469c7d9ad1d6b47001307d9ebeee8f72
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/modules/glm_data/poisson_gamma_tweedie_distributions.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3156cef1496d9abeb575ff31d41af903650c511431f8fbb8612c7ec4e235af09
+size 63830
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/aweber.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/aweber.png
new file mode 100644
index 0000000000000000000000000000000000000000..5a166109d66b649889e3345d5ca5f26ef6e544fe
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/aweber.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd501194112605bc2d893039cdcef46a347133ac43312ed314f6cccbae70b072
+size 41412
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/bestofmedia-logo.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/bestofmedia-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..6c7689b18227875b1e3d23e4c48ca9d1c04110ab
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/bestofmedia-logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74af3ed56a11a063ce21397255f06c9deda113d94992af5afdceea22d9161987
+size 3321
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/betaworks.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/betaworks.png
new file mode 100644
index 0000000000000000000000000000000000000000..f120507d33f6a00f1a188c576e9993fc81703694
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/betaworks.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:340e3b22ad4834ebc34c017df73bfda1d378ce342a92865446722c73b33cd4dc
+size 4891
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/birchbox.jpg b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/birchbox.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ac9761697ad3b51053e88d199baadbea7fedb6a7
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/birchbox.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d07afe3365043b1b437f7a1d8386f3b054971fe80c3f85d1240b1ec8e6608fba
+size 14595
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/bnp_paribas_cardif.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/bnp_paribas_cardif.png
new file mode 100644
index 0000000000000000000000000000000000000000..09d5227df07ff353948f1b337d1cb3ac5277126d
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/bnp_paribas_cardif.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56cd6687b8fefcab6a65d1da5cd6e21a4bb5dd9ef2474978f26c845db823495b
+size 65058
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/booking.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/booking.png
new file mode 100644
index 0000000000000000000000000000000000000000..d6cf0b89cf540224df27597d10a2b832a6ebc1d2
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/booking.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f871a39d887dad24dc257112693abff70860bb3b9064a76714e371322a2213db
+size 5937
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/change-logo.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/change-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..756ab2e6bedb19f1b7db6a2af41dcb35a190f9ee
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/change-logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87d6d29529a049012b2d4b335253ce87f03fd0636f348d53252982952ee18192
+size 3294
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/dataiku_logo.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/dataiku_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..081443286e566145959afc8e8f86fed133fb303e
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/dataiku_logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01d75458da0f012560186fcb1e7da110796cfc391fabc7de470687d136a2bd5a
+size 10684
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/datapublica.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/datapublica.png
new file mode 100644
index 0000000000000000000000000000000000000000..ceb18d2c20ddfdf7be0c723ce315beeaf42cec8b
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/datapublica.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ad9a168ac1313a08c7a1062e749ffb35c7b4d103f0f5f8493794f6bd8d47bc8
+size 5177
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/datarobot.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/datarobot.png
new file mode 100644
index 0000000000000000000000000000000000000000..a24b56281594f9f4961a51147ad4e08fb76469a2
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/datarobot.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:780cd16362b15af9f73f2c3221fa19cf7359c2bb9e2a852c543efa3e6b30859e
+size 19895
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/evernote.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/evernote.png
new file mode 100644
index 0000000000000000000000000000000000000000..c8c745426919074e41ec702dad423f4698c1a795
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/evernote.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4ca3759101e206bc73ac0bb44bace10d6611298c3660bd7b7b41e4fca763ddc
+size 2629
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/howaboutwe.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/howaboutwe.png
new file mode 100644
index 0000000000000000000000000000000000000000..22f8361f3ea9d74638a5098ad8d66fa9036cfb61
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/howaboutwe.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af52326aa1aee42cb32179d350ce2f221e06165ebca701ea5cc914eaf4689134
+size 24772
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/huggingface.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/huggingface.png
new file mode 100644
index 0000000000000000000000000000000000000000..616788b8a1714a4365aca9e88ebb5a5b33c7fda9
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/huggingface.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8199c1e613508cff822a02e551a2763b3a67e55cce4ff09bbf0b81666c2bcfcb
+size 31051
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/infonea.jpg b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/infonea.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7aa81d3d53fa76319e93037ad796851031467ee3
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/infonea.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fe5466c7ac58464061200368466f3ae9d0c77e1e79056962fea42f2e4c9e846
+size 85087
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/inria.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/inria.png
new file mode 100644
index 0000000000000000000000000000000000000000..c9d00fecbbd27453cfce8d8d6b13fa6cbc79e781
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/inria.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8de673e9ac5219ac521c6e8ce31a1cc1bd59523d68d69fa1ee3eae8e7305c97f
+size 23903
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/jpmorgan.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/jpmorgan.png
new file mode 100644
index 0000000000000000000000000000000000000000..7913a825c7acd78cbf1a4cf13c5cb38049ee4404
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/jpmorgan.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be5c0da72fd3d173198252ed62e63d7d167122da30b3ec88d894c115a244f64a
+size 8359
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/lovely.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/lovely.png
new file mode 100644
index 0000000000000000000000000000000000000000..e6d0457b1d6454a0f7c1cb6ed0fc4fd0b51ec389
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/lovely.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7610d0cb2f2158ae78b6f25534d42387bde250354c0870e6d4bbf6086c6f1055
+size 3307
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/machinalis.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/machinalis.png
new file mode 100644
index 0000000000000000000000000000000000000000..17379683c3fbe6be9167b76bf2ffe0d97ab43095
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/machinalis.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3297d5528598b7af5ce7038ef1e2709e4d1bf24967229a81ac131575523e157e
+size 12363
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/mars.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/mars.png
new file mode 100644
index 0000000000000000000000000000000000000000..ec9081d3469209c6ccbe5096bb87033bea71d578
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/mars.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a50c25250698fd0253447c9008a6004094c780bd5cb694cc4182ed7da40c7dc3
+size 47018
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/okcupid.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/okcupid.png
new file mode 100644
index 0000000000000000000000000000000000000000..e0a815425dc5a267d7232661ef989d4acb372e3e
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/okcupid.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e9393faa80be34e01a0ffb4dad6d314839ecae750702b3b386f8d5e56b14c16
+size 10246
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/ottogroup_logo.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/ottogroup_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..93134d0bbcb0c497f963fc84eb16eafe359263f2
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/ottogroup_logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6610f6b02337e76694a7fee27200b2f0c46688cb8e9faef676d5c3d949de7d0
+size 8603
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/peerindex.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/peerindex.png
new file mode 100644
index 0000000000000000000000000000000000000000..6732676f1a2d2a7b0ca722ee84daad522588f0ed
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/peerindex.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:232030bc3e60d039852279cb6975794e44331864bcc6e3e1e35cfb22400bf384
+size 4689
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/phimeca.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/phimeca.png
new file mode 100644
index 0000000000000000000000000000000000000000..2f837acef9308d7380f71c4c12cc806eb62c7595
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/phimeca.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d4b245caca3feacb88b4d4576660092ba7dae27652fa90ddbc990b4a58ceee4
+size 2571
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/rangespan.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/rangespan.png
new file mode 100644
index 0000000000000000000000000000000000000000..43cfe12e6a9a7df864aadf33d7724d72259e5618
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/rangespan.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ba53f90ec01ee1577d7852bfad3c4fa7bc44e670117c205f27fe18a5b4dbc60
+size 11944
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/solido_logo.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/solido_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..3b11a3e123718c1e4aab8521235350795fba6409
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/solido_logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57b00859489bf0f05bdf38498981572fdbca6a291eb46250a7993ad48c488dde
+size 6569
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/spotify.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/spotify.png
new file mode 100644
index 0000000000000000000000000000000000000000..b47b5b476f0516caca0dcdc446254ccad7fc7d13
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/spotify.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a58f24bcbb3d1097768401ae4aeacf35af18f7ac7de5b30cdab40938d805113
+size 12293
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/telecomparistech.jpg b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/telecomparistech.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..75d32295e626e3e7d8f40d4c6b8c44359c3da90b
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/telecomparistech.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6df999718a3556938f127e382fbb3e4fac5692c34fe2a8a8bb01523805e62604
+size 11473
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/yhat.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/yhat.png
new file mode 100644
index 0000000000000000000000000000000000000000..2c2f1695ee7a96e468563288d970979ad7301d62
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/yhat.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3490b4787512f51eba52f35a07362565cce4a8f5b5caf500ed8e0e2d125ded2c
+size 6350
diff --git a/testbed/scikit-learn__scikit-learn/doc/testimonials/images/zopa.png b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/zopa.png
new file mode 100644
index 0000000000000000000000000000000000000000..62c029e239b9df0d6021f973bcfbe55f04819226
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/testimonials/images/zopa.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6bda1b2869b74908de7bd596fa07b3bf7646a472c221decd66cdc43dd25d509c
+size 22810
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/FNRS-logo.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/FNRS-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..a1a1a524df15774a24de9799988b850c72031343
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/FNRS-logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e6acaaa691b424747733475c2cb2b2647ca9fcef75b053398db6d3eeb2af47e
+size 7835
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/columbia.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/columbia.png
new file mode 100644
index 0000000000000000000000000000000000000000..16ccb0011c1db500bb61ef55bea9a31b030b62eb
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/columbia.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cd7a7c06da68a186d8af4dbfe8564c70d84264dff7ae9a6e2ac50c3e40c447a
+size 1769
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/digicosme.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/digicosme.png
new file mode 100644
index 0000000000000000000000000000000000000000..09550d1eae4ea5a837af0860def31df1677bfb4b
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/digicosme.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fc63a4d0a74e5a8613fcbcf480aa74a8686408f326f811076372fdb721a12cb
+size 18585
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/forkme.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/forkme.png
new file mode 100644
index 0000000000000000000000000000000000000000..b9eeda548db6b80f259e426338148189a8ccb044
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/forkme.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64bcd7ef699e6304cf1f4a4b938dce28ec45db1cbb2834bc8331b9d251e9c8c4
+size 8676
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/glyphicons-halflings-white.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/glyphicons-halflings-white.png
new file mode 100644
index 0000000000000000000000000000000000000000..e34a03afea9f7719e5a8b5375a9c235d22324a18
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/glyphicons-halflings-white.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0e0d95a9c8abcdfabf46348e2d4285829bb0491f5f6af0e05af52bffb6324c4
+size 8777
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/glyphicons-halflings.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/glyphicons-halflings.png
new file mode 100644
index 0000000000000000000000000000000000000000..5b97d91d4d5505cf355c944c92c18c9851da3780
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/glyphicons-halflings.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4daa9a381d5812df69336b62daf38f676ce78e47ea1a78c0a621bad7120e9d8
+size 12764
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/google.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/google.png
new file mode 100644
index 0000000000000000000000000000000000000000..d668432596578be4e282a98f8c2c7fc108127282
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/google.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22326d88aa79c49f212b530d853d4adcbfb7f125fffacbf3bdb66c4fc2b9c95a
+size 6982
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/inria-small.jpg b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/inria-small.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..86d395874e7111112ba46394a45ca0a5c8085c84
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/inria-small.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:237c1a042ba24e2fe776820006bfc10e2d53891532a2243199e564101cb8073c
+size 11762
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/inria-small.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/inria-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..e6e61d80970ac904a4423640f7454ce4ca381caa
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/inria-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f205a7f8eef42eb7f38d07873daa31276ba0f308638cdd5ccfdcc541cd1de84a
+size 10055
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/nyu_short_color.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/nyu_short_color.png
new file mode 100644
index 0000000000000000000000000000000000000000..8fedf5ec04aa0f98f4592b1160b9308f93a7fb23
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/nyu_short_color.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c86d683b902e55d764f78de0b14116e83f2e6dbe0e0c066e0e58a7e42cd915c1
+size 5485
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/plot_classifier_comparison_1.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/plot_classifier_comparison_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..cb40fba7b935c16979eaff90ad7b86a0aa2da34c
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/plot_classifier_comparison_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef5c2f3273ab9c372038cc38eab8c2ee40b5a67923b82084a0c7f82ce704e9fb
+size 402605
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/plot_manifold_sphere_1.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/plot_manifold_sphere_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..36aff3bdbd27f9f03c7e4fe6496e958e3d72f3bc
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/plot_manifold_sphere_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2697b0eabb5fc04a520a71e504e7790bfadf607fea8aa93658008a30b84fb1aa
+size 551524
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/scikit-learn-logo-notext.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/scikit-learn-logo-notext.png
new file mode 100644
index 0000000000000000000000000000000000000000..65a66d00eba86a60edc1b46f9193c05294cafef4
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/scikit-learn-logo-notext.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fd136c52a3f6f01aba169333d3c2c9fa4185f4bf1f5e8adb04ce71a387cab63
+size 8053
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/scikit-learn-logo-small.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/scikit-learn-logo-small.png
new file mode 100644
index 0000000000000000000000000000000000000000..96aa93b6786ba6168cce087260fda45e7e85d9f2
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/scikit-learn-logo-small.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:969b27d713ec639e6ad327c1032ce6d0c9f3b828d9db84a606b1c3785f173a6d
+size 3538
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/scikit-learn-logo.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/scikit-learn-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..79fa7fa56aabb6ce7fc495bb77cb2d37ee380175
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/scikit-learn-logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da664f9be8adc229bb1cc767d6fe500ee184264190f7ab097f6a833914c3313e
+size 11986
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/sloan_logo.jpg b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/sloan_logo.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..555b9a34381907fc3214e47c6537cd2df0dbdafc
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/sloan_logo.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b57fa79cc2fc0e3bc2c46ffa3b1618d71b098a59c25cdbd1a842b7595c8fcdc
+size 96721
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/sydney-primary.jpeg b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/sydney-primary.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..5a07bc07c9f8b4833333d08a9a9f60ebfa2d5603
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/sydney-primary.jpeg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0313d1c5cf26fab712817fb53dad1912f52df7b61f1808ae881d77cf4b8ed0b8
+size 38356
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/sydney-stacked.jpeg b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/sydney-stacked.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..8bc8b835534092267b41bf3fb80092329a7a8776
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/sydney-stacked.jpeg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28305e3fece29051154c702b948cf97c742990b542822547a3d6ead650b0ec69
+size 3356
diff --git a/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/telecom.png b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/telecom.png
new file mode 100644
index 0000000000000000000000000000000000000000..911d0bf881ff11aa5391102bb1e34d35cb4e9604
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/doc/themes/scikit-learn/static/img/telecom.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb3bff72893adabc9f21887258b3e2a937d65989c84547103ac30f1dd5a29c0c
+size 35103
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/data/diabetes_data.csv.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/data/diabetes_data.csv.gz
new file mode 100644
index 0000000000000000000000000000000000000000..0e666add3b88ff0b17e7543c5adcfe18628b13cf
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/data/diabetes_data.csv.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a21b8e4ecef9a4fd738f72b09f9ddfb786cbbf4e401f78b43050b6b92eedea8
+size 23803
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/data/diabetes_target.csv.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/data/diabetes_target.csv.gz
new file mode 100644
index 0000000000000000000000000000000000000000..b11a1924f6085214fbedb70b19e689b05750cd11
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/data/diabetes_target.csv.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e53f65eb811df43c206f3534bb3af0e5fed213bc37ed6ba36310157d6023803
+size 1050
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/data/digits.csv.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/data/digits.csv.gz
new file mode 100644
index 0000000000000000000000000000000000000000..b655e3ffa0818ef8048d461352aaa58599baa4e0
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/data/digits.csv.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09f66e6debdee2cd2b5ae59e0d6abbb73fc2b0e0185d2e1957e9ebb51e23aa22
+size 57523
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/images/china.jpg b/testbed/scikit-learn__scikit-learn/sklearn/datasets/images/china.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9e885acfcf3f5a562290d081e204046372238233
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/images/china.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8378025ad2519d649d02e32bd98990db4ab572357d9f09841c2fbfbb4fefad29
+size 196653
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/images/flower.jpg b/testbed/scikit-learn__scikit-learn/sklearn/datasets/images/flower.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..56350635174c5d062428d0128910faa0476b66ee
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/images/flower.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a77f6ec41e353afdf8bdff2ea981b2955535d8d83294f8cfa49cf4e423dd5638
+size 142987
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/api-v1-json-data-1.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/api-v1-json-data-1.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..951ceb7f7f17c2f89280aac5d5c2da81afd69d43
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/api-v1-json-data-1.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:862e08520a2433a495a3bd3ae9fd9e6c7c540a9c632db29bb8252784cbdad779
+size 1786
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/api-v1-json-data-features-1.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/api-v1-json-data-features-1.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..2f757032db273b37ef22dc6d4468e675e7bd0915
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/api-v1-json-data-features-1.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a966dad58cf5fbc914a374ad5556c0414f5ed962237ed55a379fe96e308d00de
+size 889
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/api-v1-json-data-qualities-1.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/api-v1-json-data-qualities-1.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..c9c6d8fb40f9db23fb31349fa8a087c288f5dae9
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/api-v1-json-data-qualities-1.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84a8726d2c3f8bbca79d54d8b191158744b1993146f8f083b111a8ea78536057
+size 145
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/data-v1-download-1.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/data-v1-download-1.arff.gz
new file mode 100644
index 0000000000000000000000000000000000000000..ee6e378589d722771363d186944ed1f0f78c9836
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1/data-v1-download-1.arff.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfe8945b949770b0da42daf58ce67d1c5fee25cf7b4fd145161837c2abc09429
+size 1841
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-1119.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-1119.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..8e23c4a4051b50c2a5dbe0b93f4619bbed92b9f3
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-1119.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c41e5fbb3e59cd4de881ed7c8f88f9b03a750d537ba63581cafde6aafd77adc1
+size 711
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-features-1119.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-features-1119.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..cfe21c720a6a6f97d6857de1d0cf268ab20dda53
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-features-1119.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82f899edc59cb41fdd671b256a228e5e06dfc5e24c92712e75005b251b000865
+size 1108
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-list-data_name-adult-census-limit-2-data_version-1.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-list-data_name-adult-census-limit-2-data_version-1.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..c18f2eec9107a3e1455512f8d92e0289bb6d714d
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-list-data_name-adult-census-limit-2-data_version-1.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a5dc36ca9758313978b2a9d79cce763c6f84d5d95f15ac557b3d7482f22ee21
+size 364
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-list-data_name-adult-census-limit-2-status-active-.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-list-data_name-adult-census-limit-2-status-active-.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..7b7718d29ecb2075088f54c5f2c5fc0d01d9404b
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-list-data_name-adult-census-limit-2-status-active-.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ec0955788914fa81f698e97a4d1aff773d7a125ed6e769c6271a0b48fc4011d
+size 363
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-qualities-1119.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-qualities-1119.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..3265a7d933efe836193228b86e84c6c7a8b45afd
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/api-v1-json-data-qualities-1119.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef7cbcb58c2edcfea45c058b751faf7783e710462a924e9aacad8d47a7e9f94b
+size 1549
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/data-v1-download-54002.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/data-v1-download-54002.arff.gz
new file mode 100644
index 0000000000000000000000000000000000000000..8f610044b5cc550df4d4ef18cd2131306dba05be
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/1119/data-v1-download-54002.arff.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6931af256195fcdd2e47dd8b0f9edf16fbf03b198e77b70e3dfd9877cdf09515
+size 1190
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-2.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-2.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..24caf1bf71f829c85f13b7d2b8d0a94e4d27f1b3
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-2.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a672d435b97a6033dfd1d2a5c823d237ad1865101bd5e403cd99b5be0ba4e03b
+size 1363
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-features-2.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-features-2.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..be96cc72487b20a47142fb8c999ce032d73fba2e
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-features-2.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1b8387a7d08014a1c09807ae458ca7666ab8a3c579cbfb189e09c6d7de892a6
+size 866
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-list-data_name-anneal-limit-2-data_version-1.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-list-data_name-anneal-limit-2-data_version-1.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..e1f109fd6086eb97a3be2e7533dc658dac0970d5
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-list-data_name-anneal-limit-2-data_version-1.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e900b190795224ff48e46a1c02b10020d4c986ba142880c02c86f0b472ded3c9
+size 309
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-list-data_name-anneal-limit-2-status-active-.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-list-data_name-anneal-limit-2-status-active-.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..d5feb2e1a57bf4ba4d811dbff391977f38122fed
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-list-data_name-anneal-limit-2-status-active-.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff6225cb98260ca4ebec015a1a2754f2a7b0dbfb4d0f17dcf6727542154e2a10
+size 346
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-qualities-2.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-qualities-2.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..08e36a9fb7d7eb1d95b74eebf7c1b870d4a052c1
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/api-v1-json-data-qualities-2.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c46f6c5f221d877de604b906403b20cbdf674f1225bcdbb3e15bd1882a69a471
+size 1501
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/data-v1-download-1666876.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/data-v1-download-1666876.arff.gz
new file mode 100644
index 0000000000000000000000000000000000000000..ee6e378589d722771363d186944ed1f0f78c9836
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/2/data-v1-download-1666876.arff.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfe8945b949770b0da42daf58ce67d1c5fee25cf7b4fd145161837c2abc09429
+size 1841
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-292.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-292.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..83ac698458c7adac8bcda219b26f50cb0b2a2100
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-292.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e6a38d79d8f9e53a2ce11b68b4153062d4e96ec0b368d02b2e64f1b33c51693
+size 551
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-40981.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-40981.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..8015288dcd2399e2c86a4050ce81ec49902d6baf
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-40981.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c26dcbe30cfb39161f305b2b3d43a9b50adc8b368d0749568c47106cbdb20897
+size 553
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-features-292.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-features-292.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..b3c915315eff5a266c715e3f99584b16ec06ea8f
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-features-292.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190
+size 306
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-features-40981.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-features-40981.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..b3c915315eff5a266c715e3f99584b16ec06ea8f
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-features-40981.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190
+size 306
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-list-data_name-australian-limit-2-data_version-1-status-deactivated.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-list-data_name-australian-limit-2-data_version-1-status-deactivated.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..9c2f6f263750517c2b3ca25942cc4a426bc72de0
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-list-data_name-australian-limit-2-data_version-1-status-deactivated.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ef6025425fdfc5f736555ea385252af5bcbf62383615db82489366d4f96a0a7
+size 327
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-list-data_name-australian-limit-2-data_version-1.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-list-data_name-australian-limit-2-data_version-1.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..9cd17f124ef74920b925490ecc8e415dcd59d225
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-list-data_name-australian-limit-2-data_version-1.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9da09e9a6031d060ec416f639a6bf34989e6c88ce641d10621eb906ba1d8c293
+size 99
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-list-data_name-australian-limit-2-status-active-.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-list-data_name-australian-limit-2-status-active-.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..29b93d4214dac84a592173457e4eac04c15bb926
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/api-v1-json-data-list-data_name-australian-limit-2-status-active-.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35890d08165c804526b48aad462d7ccc09e808bd7975ba604bd612b9608797ac
+size 319
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/data-v1-download-49822.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/data-v1-download-49822.arff.gz
new file mode 100644
index 0000000000000000000000000000000000000000..7bdb62f1628f096b9f91eb2e94ffc413bab4696c
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/292/data-v1-download-49822.arff.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7ee24adabd4aaed6419b43fe9d3f86d55fcf4bee0f1698ae21d86c2701314e3
+size 2532
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/api-v1-json-data-3.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/api-v1-json-data-3.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..65982d59860e015a25a42d8bb57f72bf327c9e0b
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/api-v1-json-data-3.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:066a216679b197cc51946e17ee9a2e28215425991b0ceb7f10988c14f7f3f869
+size 2473
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/api-v1-json-data-features-3.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/api-v1-json-data-features-3.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..faf70da9cea25d998883721d679ca9f0030d9575
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/api-v1-json-data-features-3.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec4f2d6bc4df3882b08bba01571e0792a56f79e0a922d984897773acd284b426
+size 535
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/data-v1-download-3.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/data-v1-download-3.arff.gz
new file mode 100644
index 0000000000000000000000000000000000000000..32bdf94f0f4eac4f936d82476fc75917e92317fe
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/data-v1-download-3.arff.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63fdf8861761f1ca70509f7d2d169a7cc053988c7b7c09c09a6db6124e208be
+size 19485
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-40589.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-40589.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..c3454ff8e5a399b14e2033d6122315c4e4b2dbfc
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-40589.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59d1aa6b02d2358c16fa9e4fbeff523a3bd10ebd38c7c371911fa8335e7bdcbf
+size 598
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-features-40589.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-features-40589.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..d9ac42c2bbe778d928f3da1e09e3099962e412ad
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-features-40589.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:826bab057a3929f41189bc51afa0a1752695e63ccf20e128ca6129e9e3321fc2
+size 856
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-list-data_name-emotions-limit-2-status-active-.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-list-data_name-emotions-limit-2-status-active-.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..f8f940438f61ac6fbeaa00c46741c80579af46eb
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-list-data_name-emotions-limit-2-status-active-.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4da63a60163340b6e18922abfe7f1f2a7a7da23da63c269324985d61ffaa6075
+size 318
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/api-v1-json-data-features-40945.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/api-v1-json-data-features-40945.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..24e0e87d484661242d46a4cf18e2e6695736fa26
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/api-v1-json-data-features-40945.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95f0938dfdf1b87d0ffc4d526f2c91e097ef7689480b693970126d908f291030
+size 320
diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-list-data_name-miceprotein-limit-2-status-active-.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-list-data_name-miceprotein-limit-2-status-active-.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..ecd8d1b12a547833c2d00ed29be640a12167d082
--- /dev/null
+++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-list-data_name-miceprotein-limit-2-status-active-.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:301396b4a42c814b1a15038ddfcbcf5c8590501231747d0dc2a500b84b2fd0df
+size 328